Bug Summary

File: llvm/include/llvm/IR/Instructions.h
Warning: line 2678, column 5
Called C++ object pointer is null

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name IndirectBrExpandPass.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220126111400+9b6c2ea30219/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/CodeGen -I /build/llvm-toolchain-snapshot-14~++20220126111400+9b6c2ea30219/llvm/lib/CodeGen -I include -I /build/llvm-toolchain-snapshot-14~++20220126111400+9b6c2ea30219/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220126111400+9b6c2ea30219/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-14~++20220126111400+9b6c2ea30219/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220126111400+9b6c2ea30219/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-14~++20220126111400+9b6c2ea30219/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20220126111400+9b6c2ea30219/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220126111400+9b6c2ea30219/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-14~++20220126111400+9b6c2ea30219/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-01-26-130535-15419-1 -x c++ /build/llvm-toolchain-snapshot-14~++20220126111400+9b6c2ea30219/llvm/lib/CodeGen/IndirectBrExpandPass.cpp

/build/llvm-toolchain-snapshot-14~++20220126111400+9b6c2ea30219/llvm/lib/CodeGen/IndirectBrExpandPass.cpp

1//===- IndirectBrExpandPass.cpp - Expand indirectbr to switch -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// Implements an expansion pass to turn `indirectbr` instructions in the IR
11/// into `switch` instructions. This works by enumerating the basic blocks in
12/// a dense range of integers, replacing each `blockaddr` constant with the
13/// corresponding integer constant, and then building a switch that maps from
14/// the integers to the actual blocks. All of the indirectbr instructions in the
15/// function are redirected to this common switch.
16///
17/// While this is generically useful if a target is unable to codegen
18/// `indirectbr` natively, it is primarily useful when there is some desire to
19/// get the builtin non-jump-table lowering of a switch even when the input
20/// source contained an explicit indirect branch construct.
21///
22/// Note that it doesn't make any sense to enable this pass unless a target also
23/// disables jump-table lowering of switches. Doing that is likely to pessimize
24/// the code.
25///
26//===----------------------------------------------------------------------===//
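To make the rewrite concrete, here is an editorial sketch (not part of the pass) of the kind of source that produces `indirectbr`: the GNU computed-goto extension. The function, array, and label names below are hypothetical. After this pass runs, the indirect jump would instead be dispatched through an integer `switch` over block indices.

// Illustrative only: computed goto is a GNU extension that Clang lowers to
// blockaddress constants plus an `indirectbr`. All names here are made up.
int dispatch(int i) {
  static void *targets[] = {&&even, &&odd}; // lowered to blockaddress constants
  goto *targets[i & 1];                     // lowered to `indirectbr` in IR
even:
  return 0;
odd:
  return 1;
}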
27
28#include "llvm/ADT/STLExtras.h"
29#include "llvm/ADT/Sequence.h"
30#include "llvm/ADT/SmallVector.h"
31#include "llvm/Analysis/DomTreeUpdater.h"
32#include "llvm/CodeGen/TargetPassConfig.h"
33#include "llvm/CodeGen/TargetSubtargetInfo.h"
34#include "llvm/IR/BasicBlock.h"
35#include "llvm/IR/Dominators.h"
36#include "llvm/IR/Function.h"
37#include "llvm/IR/IRBuilder.h"
38#include "llvm/IR/InstIterator.h"
39#include "llvm/IR/Instruction.h"
40#include "llvm/IR/Instructions.h"
41#include "llvm/InitializePasses.h"
42#include "llvm/Pass.h"
43#include "llvm/Support/Debug.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/raw_ostream.h"
46#include "llvm/Target/TargetMachine.h"
47
48using namespace llvm;
49
50#define DEBUG_TYPE "indirectbr-expand"
51
52namespace {
53
54class IndirectBrExpandPass : public FunctionPass {
55 const TargetLowering *TLI = nullptr;
56
57public:
58 static char ID; // Pass identification, replacement for typeid
59
60 IndirectBrExpandPass() : FunctionPass(ID) {
61 initializeIndirectBrExpandPassPass(*PassRegistry::getPassRegistry());
62 }
63
64 void getAnalysisUsage(AnalysisUsage &AU) const override {
65 AU.addPreserved<DominatorTreeWrapperPass>();
66 }
67
68 bool runOnFunction(Function &F) override;
69};
70
71} // end anonymous namespace
72
73char IndirectBrExpandPass::ID = 0;
74
75INITIALIZE_PASS_BEGIN(IndirectBrExpandPass, DEBUG_TYPE,
76                      "Expand indirectbr instructions", false, false)
77INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
78INITIALIZE_PASS_END(IndirectBrExpandPass, DEBUG_TYPE,
79                    "Expand indirectbr instructions", false, false)
80
81FunctionPass *llvm::createIndirectBrExpandPass() {
82 return new IndirectBrExpandPass();
83}
84
85bool IndirectBrExpandPass::runOnFunction(Function &F) {
86 auto &DL = F.getParent()->getDataLayout();
87 auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
Step 1: Calling 'Pass::getAnalysisIfAvailable'
Step 7: Returning from 'Pass::getAnalysisIfAvailable'
88 if (!TPC)
Step 8: Assuming 'TPC' is non-null
Step 9: Taking false branch
89 return false;
90
91 auto &TM = TPC->getTM<TargetMachine>();
92 auto &STI = *TM.getSubtargetImpl(F);
93 if (!STI.enableIndirectBrExpand())
Step 10: Assuming the condition is false
Step 11: Taking false branch
94 return false;
95 TLI = STI.getTargetLowering();
96
97 Optional<DomTreeUpdater> DTU;
98  if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
Step 11.1: 'DTWP' is null
Step 12: Taking false branch
99 DTU.emplace(DTWP->getDomTree(), DomTreeUpdater::UpdateStrategy::Lazy);
100
101 SmallVector<IndirectBrInst *, 1> IndirectBrs;
102
103 // Set of all potential successors for indirectbr instructions.
104 SmallPtrSet<BasicBlock *, 4> IndirectBrSuccs;
105
106 // Build a list of indirectbrs that we want to rewrite.
107 for (BasicBlock &BB : F)
108 if (auto *IBr = dyn_cast<IndirectBrInst>(BB.getTerminator())) {
109 // Handle the degenerate case of no successors by replacing the indirectbr
110 // with unreachable as there is no successor available.
111 if (IBr->getNumSuccessors() == 0) {
112 (void)new UnreachableInst(F.getContext(), IBr);
113 IBr->eraseFromParent();
114 continue;
115 }
116
117 IndirectBrs.push_back(IBr);
118 for (BasicBlock *SuccBB : IBr->successors())
119 IndirectBrSuccs.insert(SuccBB);
120 }
121
122 if (IndirectBrs.empty())
Step 13: Calling 'SmallVectorBase::empty'
Step 16: Returning from 'SmallVectorBase::empty'
Step 17: Taking false branch
123 return false;
124
125 // If we need to replace any indirectbrs we need to establish integer
126 // constants that will correspond to each of the basic blocks in the function
127 // whose address escapes. We do that here and rewrite all the blockaddress
128 // constants to just be those integer constants cast to a pointer type.
129 SmallVector<BasicBlock *, 4> BBs;
130
131 for (BasicBlock &BB : F) {
132 // Skip blocks that aren't successors to an indirectbr we're going to
133 // rewrite.
134 if (!IndirectBrSuccs.count(&BB))
135 continue;
136
137 auto IsBlockAddressUse = [&](const Use &U) {
138 return isa<BlockAddress>(U.getUser());
139 };
140 auto BlockAddressUseIt = llvm::find_if(BB.uses(), IsBlockAddressUse);
141 if (BlockAddressUseIt == BB.use_end())
142 continue;
143
144    assert(std::find_if(std::next(BlockAddressUseIt), BB.use_end(),
145                        IsBlockAddressUse) == BB.use_end() &&
146           "There should only ever be a single blockaddress use because it is "
147           "a constant and should be uniqued.");
148
149 auto *BA = cast<BlockAddress>(BlockAddressUseIt->getUser());
150
151 // Skip if the constant was formed but ended up not being used (due to DCE
152 // or whatever).
153 if (!BA->isConstantUsed())
154 continue;
155
156 // Compute the index we want to use for this basic block. We can't use zero
157 // because null can be compared with block addresses.
158 int BBIndex = BBs.size() + 1;
159 BBs.push_back(&BB);
160
161 auto *ITy = cast<IntegerType>(DL.getIntPtrType(BA->getType()));
162 ConstantInt *BBIndexC = ConstantInt::get(ITy, BBIndex);
163
164 // Now rewrite the blockaddress to an integer constant based on the index.
165 // FIXME: This part doesn't properly recognize other uses of blockaddress
166 // expressions, for instance, where they are used to pass labels to
167 // asm-goto. This part of the pass needs a rework.
168 BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(BBIndexC, BA->getType()));
169 }
170
171 if (BBs.empty()) {
Step 18: Calling 'SmallVectorBase::empty'
Step 21: Returning from 'SmallVectorBase::empty'
Step 22: Taking false branch
172 // There are no blocks whose address is taken, so any indirectbr instruction
173 // cannot get a valid input and we can replace all of them with unreachable.
174 SmallVector<DominatorTree::UpdateType, 8> Updates;
175 if (DTU)
176 Updates.reserve(IndirectBrSuccs.size());
177 for (auto *IBr : IndirectBrs) {
178 if (DTU) {
179 for (BasicBlock *SuccBB : IBr->successors())
180 Updates.push_back({DominatorTree::Delete, IBr->getParent(), SuccBB});
181 }
182 (void)new UnreachableInst(F.getContext(), IBr);
183 IBr->eraseFromParent();
184 }
185 if (DTU) {
186      assert(Updates.size() == IndirectBrSuccs.size() &&
187             "Got unexpected update count.");
188 DTU->applyUpdates(Updates);
189 }
190 return true;
191 }
192
193 BasicBlock *SwitchBB;
194 Value *SwitchValue;
195
196 // Compute a common integer type across all the indirectbr instructions.
197 IntegerType *CommonITy = nullptr;
Step 23: 'CommonITy' initialized to a null pointer value
198 for (auto *IBr : IndirectBrs) {
Step 24: Assuming '__begin1' is equal to '__end1'
199 auto *ITy =
200 cast<IntegerType>(DL.getIntPtrType(IBr->getAddress()->getType()));
201 if (!CommonITy || ITy->getBitWidth() > CommonITy->getBitWidth())
202 CommonITy = ITy;
203 }
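The analyzer's path hinges on this loop: at Step 24 it assumes the loop over IndirectBrs runs zero times, so CommonITy keeps the null value assigned at line 197 and later flows into PHINode::Create at line 232 (Steps 27 and 28). Because IndirectBrs was already checked to be non-empty at line 122, that path looks infeasible in practice. A hedged hardening sketch (not the upstream code) would make the invariant explicit for both readers and the analyzer:

  // Hypothetical hardening, not the shipped source: spell out the invariant
  // that the non-empty loop above has always selected a common type.
  assert(!IndirectBrs.empty() && "checked non-empty earlier in runOnFunction");
  assert(CommonITy && "loop over IndirectBrs must have set CommonITy");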
204
205 auto GetSwitchValue = [DL, CommonITy](IndirectBrInst *IBr) {
206 return CastInst::CreatePointerCast(
207 IBr->getAddress(), CommonITy,
208 Twine(IBr->getAddress()->getName()) + ".switch_cast", IBr);
209 };
210
211 SmallVector<DominatorTree::UpdateType, 8> Updates;
212
213 if (IndirectBrs.size() == 1) {
Step 25: Assuming the condition is false
Step 26: Taking false branch
214 // If we only have one indirectbr, we can just directly replace it within
215 // its block.
216 IndirectBrInst *IBr = IndirectBrs[0];
217 SwitchBB = IBr->getParent();
218 SwitchValue = GetSwitchValue(IBr);
219 if (DTU) {
220 Updates.reserve(IndirectBrSuccs.size());
221 for (BasicBlock *SuccBB : IBr->successors())
222 Updates.push_back({DominatorTree::Delete, IBr->getParent(), SuccBB});
223      assert(Updates.size() == IndirectBrSuccs.size() &&
224             "Got unexpected update count.");
225 }
226 IBr->eraseFromParent();
227 } else {
228 // Otherwise we need to create a new block to hold the switch across BBs,
229 // jump to that block instead of each indirectbr, and phi together the
230 // values for the switch.
231 SwitchBB = BasicBlock::Create(F.getContext(), "switch_bb", &F);
232 auto *SwitchPN = PHINode::Create(CommonITy, IndirectBrs.size(),
Step 27: Passing null pointer value via 1st parameter 'Ty'
Step 28: Calling 'PHINode::Create'
233 "switch_value_phi", SwitchBB);
234 SwitchValue = SwitchPN;
235
236 // Now replace the indirectbr instructions with direct branches to the
237 // switch block and fill out the PHI operands.
238 if (DTU)
239 Updates.reserve(IndirectBrs.size() + 2 * IndirectBrSuccs.size());
240 for (auto *IBr : IndirectBrs) {
241 SwitchPN->addIncoming(GetSwitchValue(IBr), IBr->getParent());
242 BranchInst::Create(SwitchBB, IBr);
243 if (DTU) {
244 Updates.push_back({DominatorTree::Insert, IBr->getParent(), SwitchBB});
245 for (BasicBlock *SuccBB : IBr->successors())
246 Updates.push_back({DominatorTree::Delete, IBr->getParent(), SuccBB});
247 }
248 IBr->eraseFromParent();
249 }
250 }
251
252 // Now build the switch in the block. The block will have no terminator
253 // already.
254 auto *SI = SwitchInst::Create(SwitchValue, BBs[0], BBs.size(), SwitchBB);
255
256 // Add a case for each block.
257 for (int i : llvm::seq<int>(1, BBs.size()))
258 SI->addCase(ConstantInt::get(CommonITy, i + 1), BBs[i]);
259
260 if (DTU) {
261 // If there were multiple indirectbr's, they may have common successors,
262 // but in the dominator tree, we only track unique edges.
263 SmallPtrSet<BasicBlock *, 8> UniqueSuccessors;
264 Updates.reserve(Updates.size() + BBs.size());
265 for (BasicBlock *BB : BBs) {
266 if (UniqueSuccessors.insert(BB).second)
267 Updates.push_back({DominatorTree::Insert, SwitchBB, BB});
268 }
269 DTU->applyUpdates(Updates);
270 }
271
272 return true;
273}
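For context, a minimal usage sketch under the legacy pass manager, assuming a hypothetical llvm::Module &M; in-tree, targets normally schedule this pass from their TargetPassConfig rather than adding it by hand.

#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/LegacyPassManager.h"

void expandIndirectBrs(llvm::Module &M) {
  llvm::legacy::PassManager PM;
  PM.add(llvm::createIndirectBrExpandPass()); // factory defined above
  PM.run(M); // runs the function pass over every function in M
}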

/build/llvm-toolchain-snapshot-14~++20220126111400+9b6c2ea30219/llvm/include/llvm/PassAnalysisSupport.h

1//===- llvm/PassAnalysisSupport.h - Analysis Pass Support code --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines stuff that is used to define and "use" Analysis Passes.
10// This file is automatically #included by Pass.h, so:
11//
12// NO .CPP FILES SHOULD INCLUDE THIS FILE DIRECTLY
13//
14// Instead, #include Pass.h
15//
16//===----------------------------------------------------------------------===//
17
18#if !defined(LLVM_PASS_H) || defined(LLVM_PASSANALYSISSUPPORT_H)
19#error "Do not include <PassAnalysisSupport.h>; include <Pass.h> instead"
20#endif
21
22#ifndef LLVM_PASSANALYSISSUPPORT_H
23#define LLVM_PASSANALYSISSUPPORT_H
24
25#include "llvm/ADT/STLExtras.h"
26#include "llvm/ADT/SmallVector.h"
27#include <cassert>
28#include <tuple>
29#include <utility>
30#include <vector>
31
32namespace llvm {
33
34class Function;
35class Pass;
36class PMDataManager;
37class StringRef;
38
39//===----------------------------------------------------------------------===//
40/// Represent the analysis usage information of a pass. This tracks analyses
41/// that the pass REQUIRES (must be available when the pass runs), REQUIRES
42/// TRANSITIVE (must be available throughout the lifetime of the pass), and
43/// analyses that the pass PRESERVES (the pass does not invalidate the results
44/// of these analyses). This information is provided by a pass to the Pass
45/// infrastructure through the getAnalysisUsage virtual function.
46///
47class AnalysisUsage {
48public:
49 using VectorType = SmallVectorImpl<AnalysisID>;
50
51private:
52 /// Sets of analyses required and preserved by a pass
53 // TODO: It's not clear that SmallVector is an appropriate data structure for
54 // this usecase. The sizes were picked to minimize wasted space, but are
55 // otherwise fairly meaningless.
56 SmallVector<AnalysisID, 8> Required;
57 SmallVector<AnalysisID, 2> RequiredTransitive;
58 SmallVector<AnalysisID, 2> Preserved;
59 SmallVector<AnalysisID, 0> Used;
60 bool PreservesAll = false;
61
62 void pushUnique(VectorType &Set, AnalysisID ID) {
63 if (!llvm::is_contained(Set, ID))
64 Set.push_back(ID);
65 }
66
67public:
68 AnalysisUsage() = default;
69
70 ///@{
71 /// Add the specified ID to the required set of the usage info for a pass.
72 AnalysisUsage &addRequiredID(const void *ID);
73 AnalysisUsage &addRequiredID(char &ID);
74 template<class PassClass>
75 AnalysisUsage &addRequired() {
76 return addRequiredID(PassClass::ID);
77 }
78
79 AnalysisUsage &addRequiredTransitiveID(char &ID);
80 template<class PassClass>
81 AnalysisUsage &addRequiredTransitive() {
82 return addRequiredTransitiveID(PassClass::ID);
83 }
84 ///@}
85
86 ///@{
87 /// Add the specified ID to the set of analyses preserved by this pass.
88 AnalysisUsage &addPreservedID(const void *ID) {
89 pushUnique(Preserved, ID);
90 return *this;
91 }
92 AnalysisUsage &addPreservedID(char &ID) {
93 pushUnique(Preserved, &ID);
94 return *this;
95 }
96 /// Add the specified Pass class to the set of analyses preserved by this pass.
97 template<class PassClass>
98 AnalysisUsage &addPreserved() {
99 pushUnique(Preserved, &PassClass::ID);
100 return *this;
101 }
102 ///@}
103
104 ///@{
105 /// Add the specified ID to the set of analyses used by this pass if they are
106 /// available.
107 AnalysisUsage &addUsedIfAvailableID(const void *ID) {
108 pushUnique(Used, ID);
109 return *this;
110 }
111 AnalysisUsage &addUsedIfAvailableID(char &ID) {
112 pushUnique(Used, &ID);
113 return *this;
114 }
115 /// Add the specified Pass class to the set of analyses used by this pass.
116 template<class PassClass>
117 AnalysisUsage &addUsedIfAvailable() {
118 pushUnique(Used, &PassClass::ID);
119 return *this;
120 }
121 ///@}
122
123 /// Add the Pass with the specified argument string to the set of analyses
124 /// preserved by this pass. If no such Pass exists, do nothing. This can be
125 /// useful when a pass is trivially preserved, but may not be linked in. Be
126 /// careful about spelling!
127 AnalysisUsage &addPreserved(StringRef Arg);
128
129 /// Set by analyses that do not transform their input at all
130 void setPreservesAll() { PreservesAll = true; }
131
132 /// Determine whether a pass said it does not transform its input at all
133 bool getPreservesAll() const { return PreservesAll; }
134
135 /// This function should be called by the pass, iff it does not:
136 ///
137 /// 1. Add or remove basic blocks from the function
138 /// 2. Modify terminator instructions in any way.
139 ///
140 /// This function annotates the AnalysisUsage info object to say that analyses
141 /// that only depend on the CFG are preserved by this pass.
142 void setPreservesCFG();
143
144 const VectorType &getRequiredSet() const { return Required; }
145 const VectorType &getRequiredTransitiveSet() const {
146 return RequiredTransitive;
147 }
148 const VectorType &getPreservedSet() const { return Preserved; }
149 const VectorType &getUsedSet() const { return Used; }
150};
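A typical override, shown as a sketch with a hypothetical MyPass; it mirrors IndirectBrExpandPass::getAnalysisUsage earlier in this report.

void MyPass::getAnalysisUsage(llvm::AnalysisUsage &AU) const {
  AU.addRequired<llvm::DominatorTreeWrapperPass>();  // must run before MyPass
  AU.addPreserved<llvm::DominatorTreeWrapperPass>(); // MyPass keeps it valid
}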
151
152//===----------------------------------------------------------------------===//
153/// AnalysisResolver - Simple interface used by Pass objects to pull all
154/// analysis information out of pass manager that is responsible to manage
155/// the pass.
156///
157class AnalysisResolver {
158public:
159 AnalysisResolver() = delete;
160 explicit AnalysisResolver(PMDataManager &P) : PM(P) {}
161
162 PMDataManager &getPMDataManager() { return PM; }
163
164 /// Find pass that is implementing PI.
165 Pass *findImplPass(AnalysisID PI) {
166 Pass *ResultPass = nullptr;
167 for (const auto &AnalysisImpl : AnalysisImpls) {
168 if (AnalysisImpl.first == PI) {
169 ResultPass = AnalysisImpl.second;
170 break;
171 }
172 }
173 return ResultPass;
174 }
175
176 /// Find pass that is implementing PI. Initialize pass for Function F.
177 std::tuple<Pass *, bool> findImplPass(Pass *P, AnalysisID PI, Function &F);
178
179 void addAnalysisImplsPair(AnalysisID PI, Pass *P) {
180 if (findImplPass(PI) == P)
181 return;
182 std::pair<AnalysisID, Pass*> pir = std::make_pair(PI,P);
183 AnalysisImpls.push_back(pir);
184 }
185
186 /// Clear cache that is used to connect a pass to the analysis (PassInfo).
187 void clearAnalysisImpls() {
188 AnalysisImpls.clear();
189 }
190
191 /// Return analysis result or null if it doesn't exist.
192 Pass *getAnalysisIfAvailable(AnalysisID ID) const;
193
194private:
195 /// This keeps track of which passes implements the interfaces that are
196 /// required by the current pass (to implement getAnalysis()).
197 std::vector<std::pair<AnalysisID, Pass *>> AnalysisImpls;
198
199 /// PassManager that is used to resolve analysis info
200 PMDataManager &PM;
201};
202
203/// getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to
204/// get analysis information that might be around, for example to update it.
205/// This is different than getAnalysis in that it can fail (if the analysis
206/// results haven't been computed), so should only be used if you can handle
207/// the case when the analysis is not available. This method is often used by
208/// transformation APIs to update analysis results for a pass automatically as
209/// the transform is performed.
210template<typename AnalysisType>
211AnalysisType *Pass::getAnalysisIfAvailable() const {
212  assert(Resolver && "Pass not resident in a PassManager object!");
Step 2: Assuming field 'Resolver' is non-null
Step 3: '?' condition is true
213
214 const void *PI = &AnalysisType::ID;
215
216 Pass *ResultPass = Resolver->getAnalysisIfAvailable(PI);
217 if (!ResultPass) return nullptr;
Step 4: Assuming 'ResultPass' is non-null
Step 5: Taking false branch
218
219 // Because the AnalysisType may not be a subclass of pass (for
220 // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
221 // adjust the return pointer (because the class may multiply inherit, once
222 // from pass, once from AnalysisType).
223 return (AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
Step 6: Returning pointer, which participates in a condition later
224}
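The usage pattern from IndirectBrExpandPass::runOnFunction above shows why the result must be tested before use:

if (auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
  DTU.emplace(DTWP->getDomTree(), DomTreeUpdater::UpdateStrategy::Lazy);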
225
226/// getAnalysis<AnalysisType>() - This function is used by subclasses to get
227/// to the analysis information that they claim to use by overriding the
228/// getAnalysisUsage function.
229template<typename AnalysisType>
230AnalysisType &Pass::getAnalysis() const {
231  assert(Resolver && "Pass has not been inserted into a PassManager object!");
232 return getAnalysisID<AnalysisType>(&AnalysisType::ID);
233}
234
235template<typename AnalysisType>
236AnalysisType &Pass::getAnalysisID(AnalysisID PI) const {
237  assert(PI && "getAnalysis for unregistered pass!");
238  assert(Resolver&&"Pass has not been inserted into a PassManager object!");
239 // PI *must* appear in AnalysisImpls. Because the number of passes used
240 // should be a small number, we just do a linear search over a (dense)
241 // vector.
242 Pass *ResultPass = Resolver->findImplPass(PI);
243  assert(ResultPass &&
244         "getAnalysis*() called on an analysis that was not "
245         "'required' by pass!");
246
247 // Because the AnalysisType may not be a subclass of pass (for
248 // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
249 // adjust the return pointer (because the class may multiply inherit, once
250 // from pass, once from AnalysisType).
251 return *(AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
252}
253
254/// getAnalysis<AnalysisType>() - This function is used by subclasses to get
255/// to the analysis information that they claim to use by overriding the
256/// getAnalysisUsage function. If as part of the dependencies, an IR
257/// transformation is triggered (e.g. because the analysis requires
258/// BreakCriticalEdges), and Changed is non null, *Changed is updated.
259template <typename AnalysisType>
260AnalysisType &Pass::getAnalysis(Function &F, bool *Changed) {
261  assert(Resolver &&"Pass has not been inserted into a PassManager object!");
262
263 return getAnalysisID<AnalysisType>(&AnalysisType::ID, F, Changed);
264}
265
266template <typename AnalysisType>
267AnalysisType &Pass::getAnalysisID(AnalysisID PI, Function &F, bool *Changed) {
268  assert(PI && "getAnalysis for unregistered pass!");
269  assert(Resolver && "Pass has not been inserted into a PassManager object!");
270 // PI *must* appear in AnalysisImpls. Because the number of passes used
271 // should be a small number, we just do a linear search over a (dense)
272 // vector.
273 Pass *ResultPass;
274 bool LocalChanged;
275 std::tie(ResultPass, LocalChanged) = Resolver->findImplPass(this, PI, F);
276
277  assert(ResultPass && "Unable to find requested analysis info");
278 if (Changed)
279 *Changed |= LocalChanged;
280 else
281    assert(!LocalChanged &&
282           "A pass trigged a code update but the update status is lost");
283
284 // Because the AnalysisType may not be a subclass of pass (for
285 // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
286 // adjust the return pointer (because the class may multiply inherit, once
287 // from pass, once from AnalysisType).
288 return *(AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
289}
290
291} // end namespace llvm
292
293#endif // LLVM_PASSANALYSISSUPPORT_H

/build/llvm-toolchain-snapshot-14~++20220126111400+9b6c2ea30219/llvm/include/llvm/ADT/SmallVector.h

1//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the SmallVector class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_ADT_SMALLVECTOR_H
14#define LLVM_ADT_SMALLVECTOR_H
15
16#include "llvm/Support/Compiler.h"
17#include "llvm/Support/type_traits.h"
18#include <algorithm>
19#include <cassert>
20#include <cstddef>
21#include <cstdlib>
22#include <cstring>
23#include <functional>
24#include <initializer_list>
25#include <iterator>
26#include <limits>
27#include <memory>
28#include <new>
29#include <type_traits>
30#include <utility>
31
32namespace llvm {
33
34template <typename IteratorT> class iterator_range;
35
36/// This is all the stuff common to all SmallVectors.
37///
38/// The template parameter specifies the type which should be used to hold the
39/// Size and Capacity of the SmallVector, so it can be adjusted.
40/// Using 32 bit size is desirable to shrink the size of the SmallVector.
41/// Using 64 bit size is desirable for cases like SmallVector<char>, where a
42/// 32 bit size would limit the vector to ~4GB. SmallVectors are used for
43/// buffering bitcode output - which can exceed 4GB.
44template <class Size_T> class SmallVectorBase {
45protected:
46 void *BeginX;
47 Size_T Size = 0, Capacity;
48
49 /// The maximum value of the Size_T used.
50 static constexpr size_t SizeTypeMax() {
51 return std::numeric_limits<Size_T>::max();
52 }
53
54 SmallVectorBase() = delete;
55 SmallVectorBase(void *FirstEl, size_t TotalCapacity)
56 : BeginX(FirstEl), Capacity(TotalCapacity) {}
57
58 /// This is a helper for \a grow() that's out of line to reduce code
59 /// duplication. This function will report a fatal error if it can't grow at
60 /// least to \p MinSize.
61 void *mallocForGrow(size_t MinSize, size_t TSize, size_t &NewCapacity);
62
63 /// This is an implementation of the grow() method which only works
64 /// on POD-like data types and is out of line to reduce code duplication.
65 /// This function will report a fatal error if it cannot increase capacity.
66 void grow_pod(void *FirstEl, size_t MinSize, size_t TSize);
67
68public:
69 size_t size() const { return Size; }
70 size_t capacity() const { return Capacity; }
71
72  LLVM_NODISCARD bool empty() const { return !Size; }
Step 14: Assuming field 'Size' is not equal to 0
Step 15: Returning zero, which participates in a condition later
Step 19: Assuming field 'Size' is not equal to 0
Step 20: Returning zero, which participates in a condition later
73
74protected:
75 /// Set the array size to \p N, which the current array must have enough
76 /// capacity for.
77 ///
78 /// This does not construct or destroy any elements in the vector.
79 void set_size(size_t N) {
80    assert(N <= capacity());
81 Size = N;
82 }
83};
84
85template <class T>
86using SmallVectorSizeType =
87 typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t,
88 uint32_t>::type;
89
90/// Figure out the offset of the first element.
91template <class T, typename = void> struct SmallVectorAlignmentAndSize {
92 alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof(
93 SmallVectorBase<SmallVectorSizeType<T>>)];
94 alignas(T) char FirstEl[sizeof(T)];
95};
96
97/// This is the part of SmallVectorTemplateBase which does not depend on whether
98/// the type T is a POD. The extra dummy template argument is used by ArrayRef
99/// to avoid unnecessarily requiring T to be complete.
100template <typename T, typename = void>
101class SmallVectorTemplateCommon
102 : public SmallVectorBase<SmallVectorSizeType<T>> {
103 using Base = SmallVectorBase<SmallVectorSizeType<T>>;
104
105 /// Find the address of the first element. For this pointer math to be valid
106 /// with small-size of 0 for T with lots of alignment, it's important that
107 /// SmallVectorStorage is properly-aligned even for small-size of 0.
108 void *getFirstEl() const {
109 return const_cast<void *>(reinterpret_cast<const void *>(
110 reinterpret_cast<const char *>(this) +
111        offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)));
112 }
113 // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
114
115protected:
116 SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {}
117
118 void grow_pod(size_t MinSize, size_t TSize) {
119 Base::grow_pod(getFirstEl(), MinSize, TSize);
120 }
121
122 /// Return true if this is a smallvector which has not had dynamic
123 /// memory allocated for it.
124 bool isSmall() const { return this->BeginX == getFirstEl(); }
125
126 /// Put this vector in a state of being small.
127 void resetToSmall() {
128 this->BeginX = getFirstEl();
129 this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
130 }
131
132 /// Return true if V is an internal reference to the given range.
133 bool isReferenceToRange(const void *V, const void *First, const void *Last) const {
134 // Use std::less to avoid UB.
135 std::less<> LessThan;
136 return !LessThan(V, First) && LessThan(V, Last);
137 }
138
139 /// Return true if V is an internal reference to this vector.
140 bool isReferenceToStorage(const void *V) const {
141 return isReferenceToRange(V, this->begin(), this->end());
142 }
143
144 /// Return true if First and Last form a valid (possibly empty) range in this
145 /// vector's storage.
146 bool isRangeInStorage(const void *First, const void *Last) const {
147 // Use std::less to avoid UB.
148 std::less<> LessThan;
149 return !LessThan(First, this->begin()) && !LessThan(Last, First) &&
150 !LessThan(this->end(), Last);
151 }
152
153 /// Return true unless Elt will be invalidated by resizing the vector to
154 /// NewSize.
155 bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
156 // Past the end.
157    if (LLVM_LIKELY(!isReferenceToStorage(Elt)))
158 return true;
159
160 // Return false if Elt will be destroyed by shrinking.
161 if (NewSize <= this->size())
162 return Elt < this->begin() + NewSize;
163
164 // Return false if we need to grow.
165 return NewSize <= this->capacity();
166 }
167
168 /// Check whether Elt will be invalidated by resizing the vector to NewSize.
169 void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
170    assert(isSafeToReferenceAfterResize(Elt, NewSize) &&
171           "Attempting to reference an element of the vector in an operation "
172           "that invalidates it");
173 }
174
175 /// Check whether Elt will be invalidated by increasing the size of the
176 /// vector by N.
177 void assertSafeToAdd(const void *Elt, size_t N = 1) {
178 this->assertSafeToReferenceAfterResize(Elt, this->size() + N);
179 }
180
181 /// Check whether any part of the range will be invalidated by clearing.
182 void assertSafeToReferenceAfterClear(const T *From, const T *To) {
183 if (From == To)
184 return;
185 this->assertSafeToReferenceAfterResize(From, 0);
186 this->assertSafeToReferenceAfterResize(To - 1, 0);
187 }
188 template <
189 class ItTy,
190 std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
191 bool> = false>
192 void assertSafeToReferenceAfterClear(ItTy, ItTy) {}
193
194 /// Check whether any part of the range will be invalidated by growing.
195 void assertSafeToAddRange(const T *From, const T *To) {
196 if (From == To)
197 return;
198 this->assertSafeToAdd(From, To - From);
199 this->assertSafeToAdd(To - 1, To - From);
200 }
201 template <
202 class ItTy,
203 std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
204 bool> = false>
205 void assertSafeToAddRange(ItTy, ItTy) {}
206
207 /// Reserve enough space to add one element, and return the updated element
208 /// pointer in case it was a reference to the storage.
209 template <class U>
210 static const T *reserveForParamAndGetAddressImpl(U *This, const T &Elt,
211 size_t N) {
212 size_t NewSize = This->size() + N;
213    if (LLVM_LIKELY(NewSize <= This->capacity()))
214 return &Elt;
215
216 bool ReferencesStorage = false;
217 int64_t Index = -1;
218 if (!U::TakesParamByValue) {
219      if (LLVM_UNLIKELY(This->isReferenceToStorage(&Elt))) {
220 ReferencesStorage = true;
221 Index = &Elt - This->begin();
222 }
223 }
224 This->grow(NewSize);
225 return ReferencesStorage ? This->begin() + Index : &Elt;
226 }
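An illustration of why the Index round-trip above matters, assuming llvm/ADT/SmallVector.h and <string> are available: pushing one of a vector's own elements must stay valid even when push_back has to reallocate.

llvm::SmallVector<std::string, 2> V = {"a", "b"};
V.push_back(V[0]); // the address of V[0] is re-derived after grow(), so no dangling read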
227
228public:
229 using size_type = size_t;
230 using difference_type = ptrdiff_t;
231 using value_type = T;
232 using iterator = T *;
233 using const_iterator = const T *;
234
235 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
236 using reverse_iterator = std::reverse_iterator<iterator>;
237
238 using reference = T &;
239 using const_reference = const T &;
240 using pointer = T *;
241 using const_pointer = const T *;
242
243 using Base::capacity;
244 using Base::empty;
245 using Base::size;
246
247 // forward iterator creation methods.
248 iterator begin() { return (iterator)this->BeginX; }
249 const_iterator begin() const { return (const_iterator)this->BeginX; }
250 iterator end() { return begin() + size(); }
251 const_iterator end() const { return begin() + size(); }
252
253 // reverse iterator creation methods.
254 reverse_iterator rbegin() { return reverse_iterator(end()); }
255 const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
256 reverse_iterator rend() { return reverse_iterator(begin()); }
257 const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
258
259 size_type size_in_bytes() const { return size() * sizeof(T); }
260 size_type max_size() const {
261 return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T));
262 }
263
264 size_t capacity_in_bytes() const { return capacity() * sizeof(T); }
265
266 /// Return a pointer to the vector's buffer, even if empty().
267 pointer data() { return pointer(begin()); }
268 /// Return a pointer to the vector's buffer, even if empty().
269 const_pointer data() const { return const_pointer(begin()); }
270
271 reference operator[](size_type idx) {
272    assert(idx < size());
273 return begin()[idx];
274 }
275 const_reference operator[](size_type idx) const {
276    assert(idx < size());
277 return begin()[idx];
278 }
279
280 reference front() {
281    assert(!empty());
282 return begin()[0];
283 }
284 const_reference front() const {
285    assert(!empty());
286 return begin()[0];
287 }
288
289 reference back() {
290    assert(!empty());
291 return end()[-1];
292 }
293 const_reference back() const {
294    assert(!empty());
295 return end()[-1];
296 }
297};
298
299/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put
300/// method implementations that are designed to work with non-trivial T's.
301///
302/// We approximate is_trivially_copyable with trivial move/copy construction and
303/// trivial destruction. While the standard doesn't specify that you're allowed
304/// copy these types with memcpy, there is no way for the type to observe this.
305/// This catches the important case of std::pair<POD, POD>, which is not
306/// trivially assignable.
307template <typename T, bool = (is_trivially_copy_constructible<T>::value) &&
308 (is_trivially_move_constructible<T>::value) &&
309 std::is_trivially_destructible<T>::value>
310class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
311 friend class SmallVectorTemplateCommon<T>;
312
313protected:
314 static constexpr bool TakesParamByValue = false;
315 using ValueParamT = const T &;
316
317 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
318
319 static void destroy_range(T *S, T *E) {
320 while (S != E) {
321 --E;
322 E->~T();
323 }
324 }
325
326 /// Move the range [I, E) into the uninitialized memory starting with "Dest",
327 /// constructing elements as needed.
328 template<typename It1, typename It2>
329 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
330 std::uninitialized_copy(std::make_move_iterator(I),
331 std::make_move_iterator(E), Dest);
332 }
333
334 /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
335 /// constructing elements as needed.
336 template<typename It1, typename It2>
337 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
338 std::uninitialized_copy(I, E, Dest);
339 }
340
341 /// Grow the allocated memory (without initializing new elements), doubling
342 /// the size of the allocated memory. Guarantees space for at least one more
343 /// element, or MinSize more elements if specified.
344 void grow(size_t MinSize = 0);
345
346 /// Create a new allocation big enough for \p MinSize and pass back its size
347 /// in \p NewCapacity. This is the first section of \a grow().
348 T *mallocForGrow(size_t MinSize, size_t &NewCapacity) {
349 return static_cast<T *>(
350 SmallVectorBase<SmallVectorSizeType<T>>::mallocForGrow(
351 MinSize, sizeof(T), NewCapacity));
352 }
353
354 /// Move existing elements over to the new allocation \p NewElts, the middle
355 /// section of \a grow().
356 void moveElementsForGrow(T *NewElts);
357
358 /// Transfer ownership of the allocation, finishing up \a grow().
359 void takeAllocationForGrow(T *NewElts, size_t NewCapacity);
360
361 /// Reserve enough space to add one element, and return the updated element
362 /// pointer in case it was a reference to the storage.
363 const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
364 return this->reserveForParamAndGetAddressImpl(this, Elt, N);
365 }
366
367 /// Reserve enough space to add one element, and return the updated element
368 /// pointer in case it was a reference to the storage.
369 T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
370 return const_cast<T *>(
371 this->reserveForParamAndGetAddressImpl(this, Elt, N));
372 }
373
374 static T &&forward_value_param(T &&V) { return std::move(V); }
375 static const T &forward_value_param(const T &V) { return V; }
376
377 void growAndAssign(size_t NumElts, const T &Elt) {
378 // Grow manually in case Elt is an internal reference.
379 size_t NewCapacity;
380 T *NewElts = mallocForGrow(NumElts, NewCapacity);
381 std::uninitialized_fill_n(NewElts, NumElts, Elt);
382 this->destroy_range(this->begin(), this->end());
383 takeAllocationForGrow(NewElts, NewCapacity);
384 this->set_size(NumElts);
385 }
386
387 template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) {
388 // Grow manually in case one of Args is an internal reference.
389 size_t NewCapacity;
390 T *NewElts = mallocForGrow(0, NewCapacity);
391 ::new ((void *)(NewElts + this->size())) T(std::forward<ArgTypes>(Args)...);
392 moveElementsForGrow(NewElts);
393 takeAllocationForGrow(NewElts, NewCapacity);
394 this->set_size(this->size() + 1);
395 return this->back();
396 }
397
398public:
399 void push_back(const T &Elt) {
400 const T *EltPtr = reserveForParamAndGetAddress(Elt);
401 ::new ((void *)this->end()) T(*EltPtr);
402 this->set_size(this->size() + 1);
403 }
404
405 void push_back(T &&Elt) {
406 T *EltPtr = reserveForParamAndGetAddress(Elt);
407 ::new ((void *)this->end()) T(::std::move(*EltPtr));
408 this->set_size(this->size() + 1);
409 }
410
411 void pop_back() {
412 this->set_size(this->size() - 1);
413 this->end()->~T();
414 }
415};
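A short sketch of the small/heap transition these methods implement, assuming llvm/ADT/SmallVector.h: with inline capacity 4, the first four push_back calls use the in-object buffer, and the fifth calls grow() and moves the elements to the heap.

llvm::SmallVector<int, 4> V;
for (int I = 0; I < 5; ++I)
  V.push_back(I); // fifth iteration triggers grow(); isSmall() becomes false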
416
417// Define this out-of-line to dissuade the C++ compiler from inlining it.
418template <typename T, bool TriviallyCopyable>
419void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
420 size_t NewCapacity;
421 T *NewElts = mallocForGrow(MinSize, NewCapacity);
422 moveElementsForGrow(NewElts);
423 takeAllocationForGrow(NewElts, NewCapacity);
424}
425
426// Define this out-of-line to dissuade the C++ compiler from inlining it.
427template <typename T, bool TriviallyCopyable>
428void SmallVectorTemplateBase<T, TriviallyCopyable>::moveElementsForGrow(
429 T *NewElts) {
430 // Move the elements over.
431 this->uninitialized_move(this->begin(), this->end(), NewElts);
432
433 // Destroy the original elements.
434 destroy_range(this->begin(), this->end());
435}
436
437// Define this out-of-line to dissuade the C++ compiler from inlining it.
438template <typename T, bool TriviallyCopyable>
439void SmallVectorTemplateBase<T, TriviallyCopyable>::takeAllocationForGrow(
440 T *NewElts, size_t NewCapacity) {
441 // If this wasn't grown from the inline copy, deallocate the old space.
442 if (!this->isSmall())
443 free(this->begin());
444
445 this->BeginX = NewElts;
446 this->Capacity = NewCapacity;
447}
448
449/// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put
450/// method implementations that are designed to work with trivially copyable
451/// T's. This allows using memcpy in place of copy/move construction and
452/// skipping destruction.
453template <typename T>
454class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
455 friend class SmallVectorTemplateCommon<T>;
456
457protected:
458 /// True if it's cheap enough to take parameters by value. Doing so avoids
459 /// overhead related to mitigations for reference invalidation.
460 static constexpr bool TakesParamByValue = sizeof(T) <= 2 * sizeof(void *);
461
462 /// Either const T& or T, depending on whether it's cheap enough to take
463 /// parameters by value.
464 using ValueParamT =
465 typename std::conditional<TakesParamByValue, T, const T &>::type;
466
467 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
468
469 // No need to do a destroy loop for POD's.
470 static void destroy_range(T *, T *) {}
471
472 /// Move the range [I, E) onto the uninitialized memory
473 /// starting with "Dest", constructing elements into it as needed.
474 template<typename It1, typename It2>
475 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
476 // Just do a copy.
477 uninitialized_copy(I, E, Dest);
478 }
479
480 /// Copy the range [I, E) onto the uninitialized memory
481 /// starting with "Dest", constructing elements into it as needed.
482 template<typename It1, typename It2>
483 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
484 // Arbitrary iterator types; just use the basic implementation.
485 std::uninitialized_copy(I, E, Dest);
486 }
487
488 /// Copy the range [I, E) onto the uninitialized memory
489 /// starting with "Dest", constructing elements into it as needed.
490 template <typename T1, typename T2>
491 static void uninitialized_copy(
492 T1 *I, T1 *E, T2 *Dest,
493 std::enable_if_t<std::is_same<typename std::remove_const<T1>::type,
494 T2>::value> * = nullptr) {
495 // Use memcpy for PODs iterated by pointers (which includes SmallVector
496 // iterators): std::uninitialized_copy optimizes to memmove, but we can
497 // use memcpy here. Note that I and E are iterators and thus might be
498 // invalid for memcpy if they are equal.
499 if (I != E)
500 memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
501 }
502
503 /// Double the size of the allocated memory, guaranteeing space for at
504 /// least one more element or MinSize if specified.
505 void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); }
506
507 /// Reserve enough space to add one element, and return the updated element
508 /// pointer in case it was a reference to the storage.
509 const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
510 return this->reserveForParamAndGetAddressImpl(this, Elt, N);
511 }
512
513 /// Reserve enough space to add one element, and return the updated element
514 /// pointer in case it was a reference to the storage.
515 T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
516 return const_cast<T *>(
517 this->reserveForParamAndGetAddressImpl(this, Elt, N));
518 }
519
520 /// Copy \p V or return a reference, depending on \a ValueParamT.
521 static ValueParamT forward_value_param(ValueParamT V) { return V; }
522
523 void growAndAssign(size_t NumElts, T Elt) {
524 // Elt has been copied in case it's an internal reference, side-stepping
525 // reference invalidation problems without losing the realloc optimization.
526 this->set_size(0);
527 this->grow(NumElts);
528 std::uninitialized_fill_n(this->begin(), NumElts, Elt);
529 this->set_size(NumElts);
530 }
531
532 template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) {
533 // Use push_back with a copy in case Args has an internal reference,
534 // side-stepping reference invalidation problems without losing the realloc
535 // optimization.
536 push_back(T(std::forward<ArgTypes>(Args)...));
537 return this->back();
538 }
539
540public:
541 void push_back(ValueParamT Elt) {
542 const T *EltPtr = reserveForParamAndGetAddress(Elt);
543 memcpy(reinterpret_cast<void *>(this->end()), EltPtr, sizeof(T));
544 this->set_size(this->size() + 1);
545 }
546
547 void pop_back() { this->set_size(this->size() - 1); }
548};
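
The TakesParamByValue machinery above is what lets push_back on small, trivially copyable element types accept its argument by value. A minimal sketch (not part of this header, assuming a typical 64-bit host) of why that matters:

#include "llvm/ADT/SmallVector.h"
#include <cstdint>

void pushOwnElement(llvm::SmallVector<uint64_t, 4> &V) {
  // Assumes !V.empty(). sizeof(uint64_t) <= 2 * sizeof(void *), so
  // ValueParamT is plain uint64_t: V[0] is copied into the parameter
  // before any growth can reallocate the buffer, making this
  // self-insertion safe.
  V.push_back(V[0]);
}
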
549
550/// This class consists of common code factored out of the SmallVector class to
551/// reduce code duplication based on the SmallVector 'N' template parameter.
552template <typename T>
553class SmallVectorImpl : public SmallVectorTemplateBase<T> {
554 using SuperClass = SmallVectorTemplateBase<T>;
555
556public:
557 using iterator = typename SuperClass::iterator;
558 using const_iterator = typename SuperClass::const_iterator;
559 using reference = typename SuperClass::reference;
560 using size_type = typename SuperClass::size_type;
561
562protected:
563 using SmallVectorTemplateBase<T>::TakesParamByValue;
564 using ValueParamT = typename SuperClass::ValueParamT;
565
566 // Default ctor - Initialize to empty.
567 explicit SmallVectorImpl(unsigned N)
568 : SmallVectorTemplateBase<T>(N) {}
569
570public:
571 SmallVectorImpl(const SmallVectorImpl &) = delete;
572
573 ~SmallVectorImpl() {
574 // Subclass has already destructed this vector's elements.
575 // If this wasn't grown from the inline copy, deallocate the old space.
576 if (!this->isSmall())
577 free(this->begin());
578 }
579
580 void clear() {
581 this->destroy_range(this->begin(), this->end());
582 this->Size = 0;
583 }
584
585private:
586 // Make set_size() private to avoid misuse in subclasses.
587 using SuperClass::set_size;
588
589 template <bool ForOverwrite> void resizeImpl(size_type N) {
590 if (N == this->size())
591 return;
592
593 if (N < this->size()) {
594 this->truncate(N);
595 return;
596 }
597
598 this->reserve(N);
599 for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
600 if (ForOverwrite)
601 new (&*I) T;
602 else
603 new (&*I) T();
604 this->set_size(N);
605 }
606
607public:
608 void resize(size_type N) { resizeImpl<false>(N); }
609
610 /// Like resize, but if \ref T is POD, the new values won't be initialized.
611 void resize_for_overwrite(size_type N) { resizeImpl<true>(N); }
612
613 /// Like resize, but requires that \p N is less than or equal to \a size().
614 void truncate(size_type N) {
615 assert(this->size() >= N && "Cannot increase size with truncate");
616 this->destroy_range(this->begin() + N, this->end());
617 this->set_size(N);
618 }
619
620 void resize(size_type N, ValueParamT NV) {
621 if (N == this->size())
622 return;
623
624 if (N < this->size()) {
625 this->truncate(N);
626 return;
627 }
628
629 // N > this->size(). Defer to append.
630 this->append(N - this->size(), NV);
631 }
632
633 void reserve(size_type N) {
634 if (this->capacity() < N)
635 this->grow(N);
636 }
637
638 void pop_back_n(size_type NumItems) {
639 assert(this->size() >= NumItems);
640 truncate(this->size() - NumItems);
641 }
642
643 LLVM_NODISCARD T pop_back_val() {
644 T Result = ::std::move(this->back());
645 this->pop_back();
646 return Result;
647 }
648
649 void swap(SmallVectorImpl &RHS);
650
651 /// Add the specified range to the end of the SmallVector.
652 template <typename in_iter,
653 typename = std::enable_if_t<std::is_convertible<
654 typename std::iterator_traits<in_iter>::iterator_category,
655 std::input_iterator_tag>::value>>
656 void append(in_iter in_start, in_iter in_end) {
657 this->assertSafeToAddRange(in_start, in_end);
658 size_type NumInputs = std::distance(in_start, in_end);
659 this->reserve(this->size() + NumInputs);
660 this->uninitialized_copy(in_start, in_end, this->end());
661 this->set_size(this->size() + NumInputs);
662 }
663
664 /// Append \p NumInputs copies of \p Elt to the end.
665 void append(size_type NumInputs, ValueParamT Elt) {
666 const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs);
667 std::uninitialized_fill_n(this->end(), NumInputs, *EltPtr);
668 this->set_size(this->size() + NumInputs);
669 }
670
671 void append(std::initializer_list<T> IL) {
672 append(IL.begin(), IL.end());
673 }
674
675 void append(const SmallVectorImpl &RHS) { append(RHS.begin(), RHS.end()); }
676
677 void assign(size_type NumElts, ValueParamT Elt) {
678 // Note that Elt could be an internal reference.
679 if (NumElts > this->capacity()) {
680 this->growAndAssign(NumElts, Elt);
681 return;
682 }
683
684 // Assign over existing elements.
685 std::fill_n(this->begin(), std::min(NumElts, this->size()), Elt);
686 if (NumElts > this->size())
687 std::uninitialized_fill_n(this->end(), NumElts - this->size(), Elt);
688 else if (NumElts < this->size())
689 this->destroy_range(this->begin() + NumElts, this->end());
690 this->set_size(NumElts);
691 }
692
693 // FIXME: Consider assigning over existing elements, rather than clearing &
694 // re-initializing them - for all assign(...) variants.
695
696 template <typename in_iter,
697 typename = std::enable_if_t<std::is_convertible<
698 typename std::iterator_traits<in_iter>::iterator_category,
699 std::input_iterator_tag>::value>>
700 void assign(in_iter in_start, in_iter in_end) {
701 this->assertSafeToReferenceAfterClear(in_start, in_end);
702 clear();
703 append(in_start, in_end);
704 }
705
706 void assign(std::initializer_list<T> IL) {
707 clear();
708 append(IL);
709 }
710
711 void assign(const SmallVectorImpl &RHS) { assign(RHS.begin(), RHS.end()); }
712
713 iterator erase(const_iterator CI) {
714 // Just cast away constness because this is a non-const member function.
715 iterator I = const_cast<iterator>(CI);
716
717 assert(this->isReferenceToStorage(CI) && "Iterator to erase is out of bounds.");
718
719 iterator N = I;
720 // Shift all elts down one.
721 std::move(I+1, this->end(), I);
722 // Drop the last elt.
723 this->pop_back();
724 return N;
725 }
726
727 iterator erase(const_iterator CS, const_iterator CE) {
728 // Just cast away constness because this is a non-const member function.
729 iterator S = const_cast<iterator>(CS);
730 iterator E = const_cast<iterator>(CE);
731
732 assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds.");
733
734 iterator N = S;
735 // Shift all elts down.
736 iterator I = std::move(E, this->end(), S);
737 // Drop the last elts.
738 this->destroy_range(I, this->end());
739 this->set_size(I - this->begin());
740 return N;
741 }
742
743private:
744 template <class ArgType> iterator insert_one_impl(iterator I, ArgType &&Elt) {
745 // Callers ensure that ArgType is derived from T.
746 static_assert(
747 std::is_same<std::remove_const_t<std::remove_reference_t<ArgType>>,
748 T>::value,
749 "ArgType must be derived from T!");
750
751 if (I == this->end()) { // Important special case for empty vector.
752 this->push_back(::std::forward<ArgType>(Elt));
753 return this->end()-1;
754 }
755
756 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
757
758 // Grow if necessary.
759 size_t Index = I - this->begin();
760 std::remove_reference_t<ArgType> *EltPtr =
761 this->reserveForParamAndGetAddress(Elt);
762 I = this->begin() + Index;
763
764 ::new ((void*) this->end()) T(::std::move(this->back()));
765 // Push everything else over.
766 std::move_backward(I, this->end()-1, this->end());
767 this->set_size(this->size() + 1);
768
769 // If we just moved the element we're inserting, be sure to update
770 // the reference (never happens if TakesParamByValue).
771 static_assert(!TakesParamByValue || std::is_same<ArgType, T>::value,
772 "ArgType must be 'T' when taking by value!");
773 if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end()))
774 ++EltPtr;
775
776 *I = ::std::forward<ArgType>(*EltPtr);
777 return I;
778 }
779
780public:
781 iterator insert(iterator I, T &&Elt) {
782 return insert_one_impl(I, this->forward_value_param(std::move(Elt)));
783 }
784
785 iterator insert(iterator I, const T &Elt) {
786 return insert_one_impl(I, this->forward_value_param(Elt));
787 }
788
789 iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt) {
790 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
791 size_t InsertElt = I - this->begin();
792
793 if (I == this->end()) { // Important special case for empty vector.
794 append(NumToInsert, Elt);
795 return this->begin()+InsertElt;
796 }
797
798 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
799
800 // Ensure there is enough space, and get the (maybe updated) address of
801 // Elt.
802 const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert);
803
804 // Uninvalidate the iterator.
805 I = this->begin()+InsertElt;
806
807 // If there are more elements between the insertion point and the end of the
808 // range than there are being inserted, we can use a simple approach to
809 // insertion. Since we already reserved space, we know that this won't
810 // reallocate the vector.
811 if (size_t(this->end()-I) >= NumToInsert) {
812 T *OldEnd = this->end();
813 append(std::move_iterator<iterator>(this->end() - NumToInsert),
814 std::move_iterator<iterator>(this->end()));
815
816 // Copy the existing elements that get replaced.
817 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
818
819 // If we just moved the element we're inserting, be sure to update
820 // the reference (never happens if TakesParamByValue).
821 if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
822 EltPtr += NumToInsert;
823
824 std::fill_n(I, NumToInsert, *EltPtr);
825 return I;
826 }
827
828 // Otherwise, we're inserting more elements than exist already, and we're
829 // not inserting at the end.
830
831 // Move over the elements that we're about to overwrite.
832 T *OldEnd = this->end();
833 this->set_size(this->size() + NumToInsert);
834 size_t NumOverwritten = OldEnd-I;
835 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
836
837 // If we just moved the element we're inserting, be sure to update
838 // the reference (never happens if TakesParamByValue).
839 if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
840 EltPtr += NumToInsert;
841
842 // Replace the overwritten part.
843 std::fill_n(I, NumOverwritten, *EltPtr);
844
845 // Insert the non-overwritten middle part.
846 std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr);
847 return I;
848 }
849
850 template <typename ItTy,
851 typename = std::enable_if_t<std::is_convertible<
852 typename std::iterator_traits<ItTy>::iterator_category,
853 std::input_iterator_tag>::value>>
854 iterator insert(iterator I, ItTy From, ItTy To) {
855 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
856 size_t InsertElt = I - this->begin();
857
858 if (I == this->end()) { // Important special case for empty vector.
859 append(From, To);
860 return this->begin()+InsertElt;
861 }
862
863 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.");
864
865 // Check that the reserve that follows doesn't invalidate the iterators.
866 this->assertSafeToAddRange(From, To);
867
868 size_t NumToInsert = std::distance(From, To);
869
870 // Ensure there is enough space.
871 reserve(this->size() + NumToInsert);
872
873 // Uninvalidate the iterator.
874 I = this->begin()+InsertElt;
875
876 // If there are more elements between the insertion point and the end of the
877 // range than there are being inserted, we can use a simple approach to
878 // insertion. Since we already reserved space, we know that this won't
879 // reallocate the vector.
880 if (size_t(this->end()-I) >= NumToInsert) {
881 T *OldEnd = this->end();
882 append(std::move_iterator<iterator>(this->end() - NumToInsert),
883 std::move_iterator<iterator>(this->end()));
884
885 // Copy the existing elements that get replaced.
886 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
887
888 std::copy(From, To, I);
889 return I;
890 }
891
892 // Otherwise, we're inserting more elements than exist already, and we're
893 // not inserting at the end.
894
895 // Move over the elements that we're about to overwrite.
896 T *OldEnd = this->end();
897 this->set_size(this->size() + NumToInsert);
898 size_t NumOverwritten = OldEnd-I;
899 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
900
901 // Replace the overwritten part.
902 for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
903 *J = *From;
904 ++J; ++From;
905 }
906
907 // Insert the non-overwritten middle part.
908 this->uninitialized_copy(From, To, OldEnd);
909 return I;
910 }
911
912 void insert(iterator I, std::initializer_list<T> IL) {
913 insert(I, IL.begin(), IL.end());
914 }
915
916 template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) {
917 if (LLVM_UNLIKELY(this->size() >= this->capacity()))
918 return this->growAndEmplaceBack(std::forward<ArgTypes>(Args)...);
919
920 ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
921 this->set_size(this->size() + 1);
922 return this->back();
923 }
924
925 SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
926
927 SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
928
929 bool operator==(const SmallVectorImpl &RHS) const {
930 if (this->size() != RHS.size()) return false;
931 return std::equal(this->begin(), this->end(), RHS.begin());
932 }
933 bool operator!=(const SmallVectorImpl &RHS) const {
934 return !(*this == RHS);
935 }
936
937 bool operator<(const SmallVectorImpl &RHS) const {
938 return std::lexicographical_compare(this->begin(), this->end(),
939 RHS.begin(), RHS.end());
940 }
941};
942
943template <typename T>
944void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
945 if (this == &RHS) return;
946
947 // We can only avoid copying elements if neither vector is small.
948 if (!this->isSmall() && !RHS.isSmall()) {
949 std::swap(this->BeginX, RHS.BeginX);
950 std::swap(this->Size, RHS.Size);
951 std::swap(this->Capacity, RHS.Capacity);
952 return;
953 }
954 this->reserve(RHS.size());
955 RHS.reserve(this->size());
956
957 // Swap the shared elements.
958 size_t NumShared = this->size();
959 if (NumShared > RHS.size()) NumShared = RHS.size();
960 for (size_type i = 0; i != NumShared; ++i)
961 std::swap((*this)[i], RHS[i]);
962
963 // Copy over the extra elts.
964 if (this->size() > RHS.size()) {
965 size_t EltDiff = this->size() - RHS.size();
966 this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
967 RHS.set_size(RHS.size() + EltDiff);
968 this->destroy_range(this->begin()+NumShared, this->end());
969 this->set_size(NumShared);
970 } else if (RHS.size() > this->size()) {
971 size_t EltDiff = RHS.size() - this->size();
972 this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
973 this->set_size(this->size() + EltDiff);
974 this->destroy_range(RHS.begin()+NumShared, RHS.end());
975 RHS.set_size(NumShared);
976 }
977}
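
As the implementation above shows, swap is O(1) only when both vectors have already spilled to the heap; if either side still uses its inline storage, elements are swapped and copied one at a time. A small usage sketch:

llvm::SmallVector<int, 4> A = {1, 2, 3};       // still in inline storage
llvm::SmallVector<int, 4> B = {4, 5, 6, 7, 8}; // spilled to the heap
A.swap(B); // A is small, so this takes the element-wise path,
           // not the pointer-swap fast path
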
978
979template <typename T>
980SmallVectorImpl<T> &SmallVectorImpl<T>::
981 operator=(const SmallVectorImpl<T> &RHS) {
982 // Avoid self-assignment.
983 if (this == &RHS) return *this;
984
985 // If we already have sufficient space, assign the common elements, then
986 // destroy any excess.
987 size_t RHSSize = RHS.size();
988 size_t CurSize = this->size();
989 if (CurSize >= RHSSize) {
990 // Assign common elements.
991 iterator NewEnd;
992 if (RHSSize)
993 NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
994 else
995 NewEnd = this->begin();
996
997 // Destroy excess elements.
998 this->destroy_range(NewEnd, this->end());
999
1000 // Trim.
1001 this->set_size(RHSSize);
1002 return *this;
1003 }
1004
1005 // If we have to grow to have enough elements, destroy the current elements.
1006 // This allows us to avoid copying them during the grow.
1007 // FIXME: don't do this if they're efficiently moveable.
1008 if (this->capacity() < RHSSize) {
1009 // Destroy current elements.
1010 this->clear();
1011 CurSize = 0;
1012 this->grow(RHSSize);
1013 } else if (CurSize) {
1014 // Otherwise, use assignment for the already-constructed elements.
1015 std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
1016 }
1017
1018 // Copy construct the new elements in place.
1019 this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
1020 this->begin()+CurSize);
1021
1022 // Set end.
1023 this->set_size(RHSSize);
1024 return *this;
1025}
1026
1027template <typename T>
1028SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
1029 // Avoid self-assignment.
1030 if (this == &RHS) return *this;
1031
1032 // If the RHS isn't small, clear this vector and then steal its buffer.
1033 if (!RHS.isSmall()) {
1034 this->destroy_range(this->begin(), this->end());
1035 if (!this->isSmall()) free(this->begin());
1036 this->BeginX = RHS.BeginX;
1037 this->Size = RHS.Size;
1038 this->Capacity = RHS.Capacity;
1039 RHS.resetToSmall();
1040 return *this;
1041 }
1042
1043 // If we already have sufficient space, assign the common elements, then
1044 // destroy any excess.
1045 size_t RHSSize = RHS.size();
1046 size_t CurSize = this->size();
1047 if (CurSize >= RHSSize) {
1048 // Assign common elements.
1049 iterator NewEnd = this->begin();
1050 if (RHSSize)
1051 NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
1052
1053 // Destroy excess elements and trim the bounds.
1054 this->destroy_range(NewEnd, this->end());
1055 this->set_size(RHSSize);
1056
1057 // Clear the RHS.
1058 RHS.clear();
1059
1060 return *this;
1061 }
1062
1063 // If we have to grow to have enough elements, destroy the current elements.
1064 // This allows us to avoid copying them during the grow.
1065 // FIXME: this may not actually make any sense if we can efficiently move
1066 // elements.
1067 if (this->capacity() < RHSSize) {
1068 // Destroy current elements.
1069 this->clear();
1070 CurSize = 0;
1071 this->grow(RHSSize);
1072 } else if (CurSize) {
1073 // Otherwise, use assignment for the already-constructed elements.
1074 std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
1075 }
1076
1077 // Move-construct the new elements in place.
1078 this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
1079 this->begin()+CurSize);
1080
1081 // Set end.
1082 this->set_size(RHSSize);
1083
1084 RHS.clear();
1085 return *this;
1086}
1087
1088/// Storage for the SmallVector elements. This is specialized for the N=0 case
1089/// to avoid allocating unnecessary storage.
1090template <typename T, unsigned N>
1091struct SmallVectorStorage {
1092 alignas(T) char InlineElts[N * sizeof(T)];
1093};
1094
1095/// We need the storage to be properly aligned even for small-size of 0 so that
1096/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
1097/// well-defined.
1098template <typename T> struct alignas(T) SmallVectorStorage<T, 0> {};
1099
1100/// Forward declaration of SmallVector so that
1101/// calculateSmallVectorDefaultInlinedElements can reference
1102/// `sizeof(SmallVector<T, 0>)`.
1103template <typename T, unsigned N> class LLVM_GSL_OWNER SmallVector;
1104
1105/// Helper class for calculating the default number of inline elements for
1106/// `SmallVector<T>`.
1107///
1108/// This should be migrated to a constexpr function when our minimum
1109/// compiler support is enough for multi-statement constexpr functions.
1110template <typename T> struct CalculateSmallVectorDefaultInlinedElements {
1111 // Parameter controlling the default number of inlined elements
1112 // for `SmallVector<T>`.
1113 //
1114 // The default number of inlined elements ensures that
1115 // 1. There is at least one inlined element.
1116 // 2. `sizeof(SmallVector<T>) <= kPreferredSmallVectorSizeof` unless
1117 // it contradicts 1.
1118 static constexpr size_t kPreferredSmallVectorSizeof = 64;
1119
1120 // static_assert that sizeof(T) is not "too big".
1121 //
1122 // Because our policy guarantees at least one inlined element, it is possible
1123 // for an arbitrarily large inlined element to allocate an arbitrarily large
1124 // amount of inline storage. We generally consider it an antipattern for a
1125 // SmallVector to allocate an excessive amount of inline storage, so we want
1126 // to call attention to these cases and make sure that users are making an
1127 // intentional decision if they request a lot of inline storage.
1128 //
1129 // We want this assertion to trigger in pathological cases, but otherwise
1130 // not be too easy to hit. To accomplish that, the cutoff is actually somewhat
1131 // larger than kPreferredSmallVectorSizeof (otherwise,
1132 // `SmallVector<SmallVector<T>>` would be one easy way to trip it, and that
1133 // pattern seems useful in practice).
1134 //
1135 // One wrinkle is that this assertion is in theory non-portable, since
1136 // sizeof(T) is in general platform-dependent. However, we don't expect this
1137 // to be much of an issue, because most LLVM development happens on 64-bit
1138 // hosts, and therefore sizeof(T) is expected to *decrease* when compiled for
1139 // 32-bit hosts, dodging the issue. The reverse situation, where development
1140 // happens on a 32-bit host and then fails due to sizeof(T) *increasing* on a
1141 // 64-bit host, is expected to be very rare.
1142 static_assert(
1143 sizeof(T) <= 256,
1144 "You are trying to use a default number of inlined elements for "
1145 "`SmallVector<T>` but `sizeof(T)` is really big! Please use an "
1146 "explicit number of inlined elements with `SmallVector<T, N>` to make "
1147 "sure you really want that much inline storage.");
1148
1149 // Discount the size of the header itself when calculating the maximum inline
1150 // bytes.
1151 static constexpr size_t PreferredInlineBytes =
1152 kPreferredSmallVectorSizeof - sizeof(SmallVector<T, 0>);
1153 static constexpr size_t NumElementsThatFit = PreferredInlineBytes / sizeof(T);
1154 static constexpr size_t value =
1155 NumElementsThatFit == 0 ? 1 : NumElementsThatFit;
1156};
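
A worked example of the computation above, assuming a typical 64-bit host where the SmallVector header (pointer plus size and capacity) is 16 bytes, so PreferredInlineBytes = 64 - 16 = 48:

struct Small { void *Ptr; };      // 8 bytes  -> 48 / 8 = 6 inline elements
struct Big   { char Data[96]; };  // 96 bytes -> fits 0, clamped to 1

// SmallVector<Small> stays within the preferred 64-byte footprint;
// SmallVector<Big> keeps the guaranteed single inline element and
// exceeds it.
static_assert(sizeof(llvm::SmallVector<Small>) <= 64, "fits the budget");
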
1157
1158/// This is a 'vector' (really, a variable-sized array), optimized
1159/// for the case when the array is small. It contains some number of elements
1160/// in-place, which allows it to avoid heap allocation when the actual number of
1161/// elements is below that threshold. This allows normal "small" cases to be
1162/// fast without losing generality for large inputs.
1163///
1164/// \note
1165/// In the absence of a well-motivated choice for the number of inlined
1166/// elements \p N, it is recommended to use \c SmallVector<T> (that is,
1167/// omitting the \p N). This will choose a default number of inlined elements
1168/// reasonable for allocation on the stack (for example, trying to keep \c
1169/// sizeof(SmallVector<T>) around 64 bytes).
1170///
1171/// \warning This does not attempt to be exception safe.
1172///
1173/// \see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h
1174template <typename T,
1175 unsigned N = CalculateSmallVectorDefaultInlinedElements<T>::value>
1176class LLVM_GSL_OWNER SmallVector : public SmallVectorImpl<T>,
1177 SmallVectorStorage<T, N> {
1178public:
1179 SmallVector() : SmallVectorImpl<T>(N) {}
1180
1181 ~SmallVector() {
1182 // Destroy the constructed elements in the vector.
1183 this->destroy_range(this->begin(), this->end());
1184 }
1185
1186 explicit SmallVector(size_t Size, const T &Value = T())
1187 : SmallVectorImpl<T>(N) {
1188 this->assign(Size, Value);
1189 }
1190
1191 template <typename ItTy,
1192 typename = std::enable_if_t<std::is_convertible<
1193 typename std::iterator_traits<ItTy>::iterator_category,
1194 std::input_iterator_tag>::value>>
1195 SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
1196 this->append(S, E);
1197 }
1198
1199 template <typename RangeTy>
1200 explicit SmallVector(const iterator_range<RangeTy> &R)
1201 : SmallVectorImpl<T>(N) {
1202 this->append(R.begin(), R.end());
1203 }
1204
1205 SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
1206 this->assign(IL);
1207 }
1208
1209 SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
1210 if (!RHS.empty())
1211 SmallVectorImpl<T>::operator=(RHS);
1212 }
1213
1214 SmallVector &operator=(const SmallVector &RHS) {
1215 SmallVectorImpl<T>::operator=(RHS);
1216 return *this;
1217 }
1218
1219 SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
1220 if (!RHS.empty())
1221 SmallVectorImpl<T>::operator=(::std::move(RHS));
1222 }
1223
1224 SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
1225 if (!RHS.empty())
1226 SmallVectorImpl<T>::operator=(::std::move(RHS));
1227 }
1228
1229 SmallVector &operator=(SmallVector &&RHS) {
1230 SmallVectorImpl<T>::operator=(::std::move(RHS));
1231 return *this;
1232 }
1233
1234 SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
1235 SmallVectorImpl<T>::operator=(::std::move(RHS));
1236 return *this;
1237 }
1238
1239 SmallVector &operator=(std::initializer_list<T> IL) {
1240 this->assign(IL);
1241 return *this;
1242 }
1243};
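
Typical usage, sketched here for illustration: the inline elements absorb small workloads, and growth beyond N transparently moves to the heap.

llvm::SmallVector<int, 3> V;
V.push_back(1);
V.push_back(2);
V.push_back(3); // still in the inline storage
V.push_back(4); // exceeds N = 3: grows into a heap allocation
for (int X : V) {
  // iterates just like std::vector
  (void)X;
}
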
1244
1245template <typename T, unsigned N>
1246inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
1247 return X.capacity_in_bytes();
1248}
1249
1250template <typename RangeType>
1251using ValueTypeFromRangeType =
1252 typename std::remove_const<typename std::remove_reference<
1253 decltype(*std::begin(std::declval<RangeType &>()))>::type>::type;
1254
1255/// Given a range of type R, iterate the entire range and return a
1256/// SmallVector with elements of the vector. This is useful, for example,
1257/// when you want to iterate a range and then sort the results.
1258template <unsigned Size, typename R>
1259SmallVector<ValueTypeFromRangeType<R>, Size> to_vector(R &&Range) {
1260 return {std::begin(Range), std::end(Range)};
1261}
1262template <typename R>
1263SmallVector<ValueTypeFromRangeType<R>,
1264 CalculateSmallVectorDefaultInlinedElements<
1265 ValueTypeFromRangeType<R>>::value>
1266to_vector(R &&Range) {
1267 return {std::begin(Range), std::end(Range)};
1268}
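
A usage sketch for to_vector (assuming llvm::seq from llvm/ADT/Sequence.h and llvm::sort from llvm/ADT/STLExtras.h), materializing a lazy range so it can be sorted:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Sequence.h"
#include <functional>

llvm::SmallVector<int, 8> descending() {
  auto Elts = llvm::to_vector<8>(llvm::seq(0, 10)); // SmallVector<int, 8>
  llvm::sort(Elts, std::greater<int>());            // highest first
  return Elts;
}
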
1269
1270} // end namespace llvm
1271
1272namespace std {
1273
1274 /// Implement std::swap in terms of SmallVector swap.
1275 template<typename T>
1276 inline void
1277 swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
1278 LHS.swap(RHS);
1279 }
1280
1281 /// Implement std::swap in terms of SmallVector swap.
1282 template<typename T, unsigned N>
1283 inline void
1284 swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
1285 LHS.swap(RHS);
1286 }
1287
1288} // end namespace std
1289
1290#endif // LLVM_ADT_SMALLVECTOR_H

/build/llvm-toolchain-snapshot-14~++20220126111400+9b6c2ea30219/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/StringRef.h"
25#include "llvm/ADT/Twine.h"
26#include "llvm/ADT/iterator.h"
27#include "llvm/ADT/iterator_range.h"
28#include "llvm/IR/Attributes.h"
29#include "llvm/IR/BasicBlock.h"
30#include "llvm/IR/CallingConv.h"
31#include "llvm/IR/CFG.h"
32#include "llvm/IR/Constant.h"
33#include "llvm/IR/DerivedTypes.h"
34#include "llvm/IR/Function.h"
35#include "llvm/IR/InstrTypes.h"
36#include "llvm/IR/Instruction.h"
37#include "llvm/IR/OperandTraits.h"
38#include "llvm/IR/Type.h"
39#include "llvm/IR/Use.h"
40#include "llvm/IR/User.h"
41#include "llvm/IR/Value.h"
42#include "llvm/Support/AtomicOrdering.h"
43#include "llvm/Support/Casting.h"
44#include "llvm/Support/ErrorHandling.h"
45#include <cassert>
46#include <cstddef>
47#include <cstdint>
48#include <iterator>
49
50namespace llvm {
51
52class APInt;
53class ConstantInt;
54class DataLayout;
55class LLVMContext;
56
57//===----------------------------------------------------------------------===//
58// AllocaInst Class
59//===----------------------------------------------------------------------===//
60
61/// An instruction to allocate memory on the stack.
62class AllocaInst : public UnaryInstruction {
63 Type *AllocatedType;
64
65 using AlignmentField = AlignmentBitfieldElementT<0>;
66 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
67 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
68 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
69 SwiftErrorField>(),
70 "Bitfields must be contiguous");
71
72protected:
73 // Note: Instruction needs to be a friend here to call cloneImpl.
74 friend class Instruction;
75
76 AllocaInst *cloneImpl() const;
77
78public:
79 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
80 const Twine &Name, Instruction *InsertBefore);
81 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
82 const Twine &Name, BasicBlock *InsertAtEnd);
83
84 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
85 Instruction *InsertBefore);
86 AllocaInst(Type *Ty, unsigned AddrSpace,
87 const Twine &Name, BasicBlock *InsertAtEnd);
88
89 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
90 const Twine &Name = "", Instruction *InsertBefore = nullptr);
91 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
92 const Twine &Name, BasicBlock *InsertAtEnd);
93
94 /// Return true if there is an allocation size parameter to the allocation
95 /// instruction that is not 1.
96 bool isArrayAllocation() const;
97
98 /// Get the number of elements allocated. For a simple allocation of a single
99 /// element, this will return a constant 1 value.
100 const Value *getArraySize() const { return getOperand(0); }
101 Value *getArraySize() { return getOperand(0); }
102
103 /// Overload to return most specific pointer type.
104 PointerType *getType() const {
105 return cast<PointerType>(Instruction::getType());
106 }
107
108 /// Return the address space for the allocation.
109 unsigned getAddressSpace() const {
110 return getType()->getAddressSpace();
111 }
112
113 /// Get allocation size in bits. Returns None if size can't be determined,
114 /// e.g. in case of a VLA.
115 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
116
117 /// Return the type that is being allocated by the instruction.
118 Type *getAllocatedType() const { return AllocatedType; }
119 /// For use only in special circumstances that need to generically
120 /// transform a whole instruction (e.g., IR linking and vectorization).
121 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
122
123 /// Return the alignment of the memory that is being allocated by the
124 /// instruction.
125 Align getAlign() const {
126 return Align(1ULL << getSubclassData<AlignmentField>());
127 }
128
129 void setAlignment(Align Align) {
130 setSubclassData<AlignmentField>(Log2(Align));
131 }
132
133 // FIXME: Remove this once the transition to Align is over.
134 uint64_t getAlignment() const { return getAlign().value(); }
135
136 /// Return true if this alloca is in the entry block of the function and is a
137 /// constant size. If so, the code generator will fold it into the
138 /// prolog/epilog code, so it is basically free.
139 bool isStaticAlloca() const;
140
141 /// Return true if this alloca is used as an inalloca argument to a call. Such
142 /// allocas are never considered static even if they are in the entry block.
143 bool isUsedWithInAlloca() const {
144 return getSubclassData<UsedWithInAllocaField>();
145 }
146
147 /// Specify whether this alloca is used to represent the arguments to a call.
148 void setUsedWithInAlloca(bool V) {
149 setSubclassData<UsedWithInAllocaField>(V);
150 }
151
152 /// Return true if this alloca is used as a swifterror argument to a call.
153 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
154 /// Specify whether this alloca is used to represent a swifterror.
155 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
156
157 // Methods for support type inquiry through isa, cast, and dyn_cast:
158 static bool classof(const Instruction *I) {
159 return (I->getOpcode() == Instruction::Alloca);
160 }
161 static bool classof(const Value *V) {
162 return isa<Instruction>(V) && classof(cast<Instruction>(V));
163 }
164
165private:
166 // Shadow Instruction::setInstructionSubclassData with a private forwarding
167 // method so that subclasses cannot accidentally use it.
168 template <typename Bitfield>
169 void setSubclassData(typename Bitfield::Type Value) {
170 Instruction::setSubclassData<Bitfield>(Value);
171 }
172};
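
A sketch (hypothetical helper, using only the accessors declared above) of how a pass might inspect an alloca:

void inspectAlloca(const llvm::AllocaInst &AI, const llvm::DataLayout &DL) {
  llvm::Type *Ty = AI.getAllocatedType();
  llvm::Align A = AI.getAlign();
  if (AI.isStaticAlloca()) {
    // Constant-sized alloca in the entry block: the code generator
    // folds it into the prologue/epilogue, so it is essentially free.
  }
  if (llvm::Optional<llvm::TypeSize> Size = AI.getAllocationSizeInBits(DL)) {
    // None would indicate a variable-length allocation (VLA).
    (void)*Size;
  }
  (void)Ty;
  (void)A;
}
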
173
174//===----------------------------------------------------------------------===//
175// LoadInst Class
176//===----------------------------------------------------------------------===//
177
178/// An instruction for reading from memory. This uses the SubclassData field in
179/// Value to store whether or not the load is volatile.
180class LoadInst : public UnaryInstruction {
181 using VolatileField = BoolBitfieldElementT<0>;
182 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
183 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
184 static_assert(
185 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
186 "Bitfields must be contiguous");
187
188 void AssertOK();
189
190protected:
191 // Note: Instruction needs to be a friend here to call cloneImpl.
192 friend class Instruction;
193
194 LoadInst *cloneImpl() const;
195
196public:
197 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
198 Instruction *InsertBefore);
199 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
200 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
201 Instruction *InsertBefore);
202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
203 BasicBlock *InsertAtEnd);
204 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
205 Align Align, Instruction *InsertBefore = nullptr);
206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
207 Align Align, BasicBlock *InsertAtEnd);
208 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
209 Align Align, AtomicOrdering Order,
210 SyncScope::ID SSID = SyncScope::System,
211 Instruction *InsertBefore = nullptr);
212 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
213 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
214 BasicBlock *InsertAtEnd);
215
216 /// Return true if this is a load from a volatile memory location.
217 bool isVolatile() const { return getSubclassData<VolatileField>(); }
218
219 /// Specify whether this is a volatile load or not.
220 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
221
222 /// Return the alignment of the access that is being performed.
223 /// FIXME: Remove this function once the transition to Align is over.
224 /// Use getAlign() instead.
225 uint64_t getAlignment() const { return getAlign().value(); }
226
227 /// Return the alignment of the access that is being performed.
228 Align getAlign() const {
229 return Align(1ULL << (getSubclassData<AlignmentField>()));
230 }
231
232 void setAlignment(Align Align) {
233 setSubclassData<AlignmentField>(Log2(Align));
234 }
235
236 /// Returns the ordering constraint of this load instruction.
237 AtomicOrdering getOrdering() const {
238 return getSubclassData<OrderingField>();
239 }
240 /// Sets the ordering constraint of this load instruction. May not be Release
241 /// or AcquireRelease.
242 void setOrdering(AtomicOrdering Ordering) {
243 setSubclassData<OrderingField>(Ordering);
244 }
245
246 /// Returns the synchronization scope ID of this load instruction.
247 SyncScope::ID getSyncScopeID() const {
248 return SSID;
249 }
250
251 /// Sets the synchronization scope ID of this load instruction.
252 void setSyncScopeID(SyncScope::ID SSID) {
253 this->SSID = SSID;
254 }
255
256 /// Sets the ordering constraint and the synchronization scope ID of this load
257 /// instruction.
258 void setAtomic(AtomicOrdering Ordering,
259 SyncScope::ID SSID = SyncScope::System) {
260 setOrdering(Ordering);
261 setSyncScopeID(SSID);
262 }
263
264 bool isSimple() const { return !isAtomic() && !isVolatile(); }
265
266 bool isUnordered() const {
267 return (getOrdering() == AtomicOrdering::NotAtomic ||
268 getOrdering() == AtomicOrdering::Unordered) &&
269 !isVolatile();
270 }
271
272 Value *getPointerOperand() { return getOperand(0); }
273 const Value *getPointerOperand() const { return getOperand(0); }
274 static unsigned getPointerOperandIndex() { return 0U; }
275 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
276
277 /// Returns the address space of the pointer operand.
278 unsigned getPointerAddressSpace() const {
279 return getPointerOperandType()->getPointerAddressSpace();
280 }
281
282 // Methods for support type inquiry through isa, cast, and dyn_cast:
283 static bool classof(const Instruction *I) {
284 return I->getOpcode() == Instruction::Load;
285 }
286 static bool classof(const Value *V) {
287 return isa<Instruction>(V) && classof(cast<Instruction>(V));
288 }
289
290private:
291 // Shadow Instruction::setInstructionSubclassData with a private forwarding
292 // method so that subclasses cannot accidentally use it.
293 template <typename Bitfield>
294 void setSubclassData(typename Bitfield::Type Value) {
295 Instruction::setSubclassData<Bitfield>(Value);
296 }
297
298 /// The synchronization scope ID of this load instruction. Not quite enough
299 /// room in SubClassData for everything, so synchronization scope ID gets its
300 /// own field.
301 SyncScope::ID SSID;
302};
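
A sketch of how the predicates above compose (hypothetical helper): a "simple" load is neither atomic nor volatile, while "unordered" additionally admits Unordered atomic loads.

bool mayReorderFreely(const llvm::LoadInst &LI) {
  // isSimple() == !isAtomic() && !isVolatile(); isUnordered() also
  // accepts AtomicOrdering::Unordered.
  return LI.isUnordered();
}
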
303
304//===----------------------------------------------------------------------===//
305// StoreInst Class
306//===----------------------------------------------------------------------===//
307
308/// An instruction for storing to memory.
309class StoreInst : public Instruction {
310 using VolatileField = BoolBitfieldElementT<0>;
311 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
312 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
313 static_assert(
314 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
315 "Bitfields must be contiguous");
316
317 void AssertOK();
318
319protected:
320 // Note: Instruction needs to be a friend here to call cloneImpl.
321 friend class Instruction;
322
323 StoreInst *cloneImpl() const;
324
325public:
326 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
327 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
328 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
329 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
330 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
331 Instruction *InsertBefore = nullptr);
332 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
333 BasicBlock *InsertAtEnd);
334 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
335 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
336 Instruction *InsertBefore = nullptr);
337 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
338 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
339
340 // allocate space for exactly two operands
341 void *operator new(size_t S) { return User::operator new(S, 2); }
342 void operator delete(void *Ptr) { User::operator delete(Ptr); }
343
344 /// Return true if this is a store to a volatile memory location.
345 bool isVolatile() const { return getSubclassData<VolatileField>(); }
346
347 /// Specify whether this is a volatile store or not.
348 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
349
350 /// Transparently provide more efficient getOperand methods.
351 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
352
353 /// Return the alignment of the access that is being performed.
354 /// FIXME: Remove this function once the transition to Align is over.
355 /// Use getAlign() instead.
356 uint64_t getAlignment() const { return getAlign().value(); }
357
358 Align getAlign() const {
359 return Align(1ULL << (getSubclassData<AlignmentField>()));
360 }
361
362 void setAlignment(Align Align) {
363 setSubclassData<AlignmentField>(Log2(Align));
364 }
365
366 /// Returns the ordering constraint of this store instruction.
367 AtomicOrdering getOrdering() const {
368 return getSubclassData<OrderingField>();
369 }
370
371 /// Sets the ordering constraint of this store instruction. May not be
372 /// Acquire or AcquireRelease.
373 void setOrdering(AtomicOrdering Ordering) {
374 setSubclassData<OrderingField>(Ordering);
375 }
376
377 /// Returns the synchronization scope ID of this store instruction.
378 SyncScope::ID getSyncScopeID() const {
379 return SSID;
380 }
381
382 /// Sets the synchronization scope ID of this store instruction.
383 void setSyncScopeID(SyncScope::ID SSID) {
384 this->SSID = SSID;
385 }
386
387 /// Sets the ordering constraint and the synchronization scope ID of this
388 /// store instruction.
389 void setAtomic(AtomicOrdering Ordering,
390 SyncScope::ID SSID = SyncScope::System) {
391 setOrdering(Ordering);
392 setSyncScopeID(SSID);
393 }
394
395 bool isSimple() const { return !isAtomic() && !isVolatile(); }
396
397 bool isUnordered() const {
398 return (getOrdering() == AtomicOrdering::NotAtomic ||
399 getOrdering() == AtomicOrdering::Unordered) &&
400 !isVolatile();
401 }
402
403 Value *getValueOperand() { return getOperand(0); }
404 const Value *getValueOperand() const { return getOperand(0); }
405
406 Value *getPointerOperand() { return getOperand(1); }
407 const Value *getPointerOperand() const { return getOperand(1); }
408 static unsigned getPointerOperandIndex() { return 1U; }
409 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
410
411 /// Returns the address space of the pointer operand.
412 unsigned getPointerAddressSpace() const {
413 return getPointerOperandType()->getPointerAddressSpace();
414 }
415
416 // Methods for support type inquiry through isa, cast, and dyn_cast:
417 static bool classof(const Instruction *I) {
418 return I->getOpcode() == Instruction::Store;
419 }
420 static bool classof(const Value *V) {
421 return isa<Instruction>(V) && classof(cast<Instruction>(V));
422 }
423
424private:
425 // Shadow Instruction::setInstructionSubclassData with a private forwarding
426 // method so that subclasses cannot accidentally use it.
427 template <typename Bitfield>
428 void setSubclassData(typename Bitfield::Type Value) {
429 Instruction::setSubclassData<Bitfield>(Value);
430 }
431
432 /// The synchronization scope ID of this store instruction. Not quite enough
433 /// room in SubClassData for everything, so synchronization scope ID gets its
434 /// own field.
435 SyncScope::ID SSID;
436};
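
A sketch of the operand layout (hypothetical helper): operand 0 holds the stored value and operand 1 the address, matching the accessors above.

void inspectStore(const llvm::StoreInst &SI) {
  const llvm::Value *V = SI.getValueOperand();   // getOperand(0)
  const llvm::Value *P = SI.getPointerOperand(); // getOperand(1)
  unsigned AS = SI.getPointerAddressSpace();
  (void)V;
  (void)P;
  (void)AS;
}
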
437
438template <>
439struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
440};
441
442DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
443
444//===----------------------------------------------------------------------===//
445// FenceInst Class
446//===----------------------------------------------------------------------===//
447
448/// An instruction for ordering other memory operations.
449class FenceInst : public Instruction {
450 using OrderingField = AtomicOrderingBitfieldElementT<0>;
451
452 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
453
454protected:
455 // Note: Instruction needs to be a friend here to call cloneImpl.
456 friend class Instruction;
457
458 FenceInst *cloneImpl() const;
459
460public:
461 // Ordering may only be Acquire, Release, AcquireRelease, or
462 // SequentiallyConsistent.
463 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
464 SyncScope::ID SSID = SyncScope::System,
465 Instruction *InsertBefore = nullptr);
466 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
467 BasicBlock *InsertAtEnd);
468
469 // allocate space for exactly zero operands
470 void *operator new(size_t S) { return User::operator new(S, 0); }
471 void operator delete(void *Ptr) { User::operator delete(Ptr); }
472
473 /// Returns the ordering constraint of this fence instruction.
474 AtomicOrdering getOrdering() const {
475 return getSubclassData<OrderingField>();
476 }
477
478 /// Sets the ordering constraint of this fence instruction. May only be
479 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
480 void setOrdering(AtomicOrdering Ordering) {
481 setSubclassData<OrderingField>(Ordering);
482 }
483
484 /// Returns the synchronization scope ID of this fence instruction.
485 SyncScope::ID getSyncScopeID() const {
486 return SSID;
487 }
488
489 /// Sets the synchronization scope ID of this fence instruction.
490 void setSyncScopeID(SyncScope::ID SSID) {
491 this->SSID = SSID;
492 }
493
494 // Methods for support type inquiry through isa, cast, and dyn_cast:
495 static bool classof(const Instruction *I) {
496 return I->getOpcode() == Instruction::Fence;
497 }
498 static bool classof(const Value *V) {
499 return isa<Instruction>(V) && classof(cast<Instruction>(V));
500 }
501
502private:
503 // Shadow Instruction::setInstructionSubclassData with a private forwarding
504 // method so that subclasses cannot accidentally use it.
505 template <typename Bitfield>
506 void setSubclassData(typename Bitfield::Type Value) {
507 Instruction::setSubclassData<Bitfield>(Value);
508 }
509
510 /// The synchronization scope ID of this fence instruction. Not quite enough
511 /// room in SubClassData for everything, so synchronization scope ID gets its
512 /// own field.
513 SyncScope::ID SSID;
514};
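
A sketch (hypothetical helper) of constructing a fence with the constructor declared above:

void addAcquireFence(llvm::Instruction &I) {
  // Inserts an acquire fence immediately before I; the ordering may only
  // be Acquire, Release, AcquireRelease, or SequentiallyConsistent.
  new llvm::FenceInst(I.getContext(), llvm::AtomicOrdering::Acquire,
                      llvm::SyncScope::System, /*InsertBefore=*/&I);
}
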
515
516//===----------------------------------------------------------------------===//
517// AtomicCmpXchgInst Class
518//===----------------------------------------------------------------------===//
519
520/// An instruction that atomically checks whether a
521/// specified value is in a memory location, and, if it is, stores a new value
522/// there. The value returned by this instruction is a pair containing the
523/// original value as first element, and an i1 indicating success (true) or
524/// failure (false) as second element.
525///
526class AtomicCmpXchgInst : public Instruction {
527 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
528 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
529 SyncScope::ID SSID);
530
531 template <unsigned Offset>
532 using AtomicOrderingBitfieldElement =
533 typename Bitfield::Element<AtomicOrdering, Offset, 3,
534 AtomicOrdering::LAST>;
535
536protected:
537 // Note: Instruction needs to be a friend here to call cloneImpl.
538 friend class Instruction;
539
540 AtomicCmpXchgInst *cloneImpl() const;
541
542public:
543 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
544 AtomicOrdering SuccessOrdering,
545 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
546 Instruction *InsertBefore = nullptr);
547 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
548 AtomicOrdering SuccessOrdering,
549 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
550 BasicBlock *InsertAtEnd);
551
552 // allocate space for exactly three operands
553 void *operator new(size_t S) { return User::operator new(S, 3); }
554 void operator delete(void *Ptr) { User::operator delete(Ptr); }
555
556 using VolatileField = BoolBitfieldElementT<0>;
557 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
558 using SuccessOrderingField =
559 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
560 using FailureOrderingField =
561 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
562 using AlignmentField =
563 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
564 static_assert(
565 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
566 FailureOrderingField, AlignmentField>(),
567 "Bitfields must be contiguous");
568
569 /// Return the alignment of the memory that is being allocated by the
570 /// instruction.
571 Align getAlign() const {
572 return Align(1ULL << getSubclassData<AlignmentField>());
573 }
574
575 void setAlignment(Align Align) {
576 setSubclassData<AlignmentField>(Log2(Align));
577 }
578
579 /// Return true if this is a cmpxchg from a volatile memory
580 /// location.
581 ///
582 bool isVolatile() const { return getSubclassData<VolatileField>(); }
583
584 /// Specify whether this is a volatile cmpxchg.
585 ///
586 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
587
588 /// Return true if this cmpxchg may spuriously fail.
589 bool isWeak() const { return getSubclassData<WeakField>(); }
590
591 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
592
593 /// Transparently provide more efficient getOperand methods.
594 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
595
596 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
597 return Ordering != AtomicOrdering::NotAtomic &&
598 Ordering != AtomicOrdering::Unordered;
599 }
600
601 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
602 return Ordering != AtomicOrdering::NotAtomic &&
603 Ordering != AtomicOrdering::Unordered &&
604 Ordering != AtomicOrdering::AcquireRelease &&
605 Ordering != AtomicOrdering::Release;
606 }
607
608 /// Returns the success ordering constraint of this cmpxchg instruction.
609 AtomicOrdering getSuccessOrdering() const {
610 return getSubclassData<SuccessOrderingField>();
611 }
612
613 /// Sets the success ordering constraint of this cmpxchg instruction.
614 void setSuccessOrdering(AtomicOrdering Ordering) {
615 assert(isValidSuccessOrdering(Ordering) &&
616 "invalid CmpXchg success ordering");
617 setSubclassData<SuccessOrderingField>(Ordering);
618 }
619
620 /// Returns the failure ordering constraint of this cmpxchg instruction.
621 AtomicOrdering getFailureOrdering() const {
622 return getSubclassData<FailureOrderingField>();
623 }
624
625 /// Sets the failure ordering constraint of this cmpxchg instruction.
626 void setFailureOrdering(AtomicOrdering Ordering) {
627 assert(isValidFailureOrdering(Ordering) &&
628 "invalid CmpXchg failure ordering");
629 setSubclassData<FailureOrderingField>(Ordering);
630 }
631
632 /// Returns a single ordering which is at least as strong as both the
633 /// success and failure orderings for this cmpxchg.
634 AtomicOrdering getMergedOrdering() const {
635 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
636 return AtomicOrdering::SequentiallyConsistent;
637 if (getFailureOrdering() == AtomicOrdering::Acquire) {
638 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
639 return AtomicOrdering::Acquire;
640 if (getSuccessOrdering() == AtomicOrdering::Release)
641 return AtomicOrdering::AcquireRelease;
642 }
643 return getSuccessOrdering();
644 }
645
646 /// Returns the synchronization scope ID of this cmpxchg instruction.
647 SyncScope::ID getSyncScopeID() const {
648 return SSID;
649 }
650
651 /// Sets the synchronization scope ID of this cmpxchg instruction.
652 void setSyncScopeID(SyncScope::ID SSID) {
653 this->SSID = SSID;
654 }
655
656 Value *getPointerOperand() { return getOperand(0); }
657 const Value *getPointerOperand() const { return getOperand(0); }
658 static unsigned getPointerOperandIndex() { return 0U; }
659
660 Value *getCompareOperand() { return getOperand(1); }
661 const Value *getCompareOperand() const { return getOperand(1); }
662
663 Value *getNewValOperand() { return getOperand(2); }
664 const Value *getNewValOperand() const { return getOperand(2); }
665
666 /// Returns the address space of the pointer operand.
667 unsigned getPointerAddressSpace() const {
668 return getPointerOperand()->getType()->getPointerAddressSpace();
669 }
670
671 /// Returns the strongest permitted ordering on failure, given the
672 /// desired ordering on success.
673 ///
674 /// If the comparison in a cmpxchg operation fails, there is no atomic store
675 /// so release semantics cannot be provided. So this function drops explicit
676 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
677 /// operation would remain SequentiallyConsistent.
678 static AtomicOrdering
679 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
680 switch (SuccessOrdering) {
681 default:
682 llvm_unreachable("invalid cmpxchg success ordering");
683 case AtomicOrdering::Release:
684 case AtomicOrdering::Monotonic:
685 return AtomicOrdering::Monotonic;
686 case AtomicOrdering::AcquireRelease:
687 case AtomicOrdering::Acquire:
688 return AtomicOrdering::Acquire;
689 case AtomicOrdering::SequentiallyConsistent:
690 return AtomicOrdering::SequentiallyConsistent;
691 }
692 }
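For example (a sketch; the helper is hypothetical), an acq_rel success ordering maps to an acquire failure ordering, the release half being dropped:

  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  AtomicOrdering strongestFailureForAcqRel() {
    // AcquireRelease -> Acquire per the switch above.
    return AtomicCmpXchgInst::getStrongestFailureOrdering(
        AtomicOrdering::AcquireRelease);
  }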
693
694 // Methods for support type inquiry through isa, cast, and dyn_cast:
695 static bool classof(const Instruction *I) {
696 return I->getOpcode() == Instruction::AtomicCmpXchg;
697 }
698 static bool classof(const Value *V) {
699 return isa<Instruction>(V) && classof(cast<Instruction>(V));
700 }
701
702private:
703 // Shadow Instruction::setInstructionSubclassData with a private forwarding
704 // method so that subclasses cannot accidentally use it.
705 template <typename Bitfield>
706 void setSubclassData(typename Bitfield::Type Value) {
707 Instruction::setSubclassData<Bitfield>(Value);
708 }
709
710 /// The synchronization scope ID of this cmpxchg instruction. Not quite
711 /// enough room in SubClassData for everything, so synchronization scope ID
712 /// gets its own field.
713 SyncScope::ID SSID;
714};
715
716template <>
717struct OperandTraits<AtomicCmpXchgInst> :
718 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
719};
720
721DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
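A hedged usage sketch of the interface above, built through IRBuilder (the helper function and its parameters are illustrative, not part of this header):

  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  AtomicCmpXchgInst *emitCmpXchg(IRBuilder<> &Builder, Value *Ptr,
                                 Value *Expected, Value *Desired) {
    // cmpxchg i32* %Ptr, i32 %Expected, i32 %Desired acq_rel acquire
    AtomicCmpXchgInst *CX = Builder.CreateAtomicCmpXchg(
        Ptr, Expected, Desired, MaybeAlign(4), AtomicOrdering::AcquireRelease,
        AtomicOrdering::Acquire);
    CX->setWeak(false); // strong cmpxchg: no spurious failure
    assert(CX->getMergedOrdering() == AtomicOrdering::AcquireRelease);
    return CX;
  }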
722
723//===----------------------------------------------------------------------===//
724// AtomicRMWInst Class
725//===----------------------------------------------------------------------===//
726
727/// An instruction that atomically reads a memory location,
728/// combines it with another value, and then stores the result back. Returns
729/// the old value.
730///
731class AtomicRMWInst : public Instruction {
732protected:
733 // Note: Instruction needs to be a friend here to call cloneImpl.
734 friend class Instruction;
735
736 AtomicRMWInst *cloneImpl() const;
737
738public:
739 /// This enumeration lists the possible modifications atomicrmw can make. In
740 /// the descriptions, 'p' is the pointer to the instruction's memory location,
741 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
742 /// instruction. These instructions always return 'old'.
743 enum BinOp : unsigned {
744 /// *p = v
745 Xchg,
746 /// *p = old + v
747 Add,
748 /// *p = old - v
749 Sub,
750 /// *p = old & v
751 And,
752 /// *p = ~(old & v)
753 Nand,
754 /// *p = old | v
755 Or,
756 /// *p = old ^ v
757 Xor,
758 /// *p = old >signed v ? old : v
759 Max,
760 /// *p = old <signed v ? old : v
761 Min,
762 /// *p = old >unsigned v ? old : v
763 UMax,
764 /// *p = old <unsigned v ? old : v
765 UMin,
766
767 /// *p = old + v, using floating-point addition
768 FAdd,
769
770 /// *p = old - v, using floating-point subtraction
771 FSub,
772
773 FIRST_BINOP = Xchg,
774 LAST_BINOP = FSub,
775 BAD_BINOP
776 };
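For illustration, an atomicrmw add emitted through IRBuilder (a sketch; the helper and its parameters are assumptions):

  #include "llvm/IR/IRBuilder.h"
  using namespace llvm;

  Value *emitAtomicAdd(IRBuilder<> &Builder, Value *Ptr, Value *V) {
    // atomicrmw add i32* %Ptr, i32 %V monotonic; yields the old value.
    return Builder.CreateAtomicRMW(AtomicRMWInst::Add, Ptr, V, MaybeAlign(4),
                                   AtomicOrdering::Monotonic);
  }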
777
778private:
779 template <unsigned Offset>
780 using AtomicOrderingBitfieldElement =
781 typename Bitfield::Element<AtomicOrdering, Offset, 3,
782 AtomicOrdering::LAST>;
783
784 template <unsigned Offset>
785 using BinOpBitfieldElement =
786 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
787
788public:
789 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
790 AtomicOrdering Ordering, SyncScope::ID SSID,
791 Instruction *InsertBefore = nullptr);
792 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
793 AtomicOrdering Ordering, SyncScope::ID SSID,
794 BasicBlock *InsertAtEnd);
795
796 // allocate space for exactly two operands
797 void *operator new(size_t S) { return User::operator new(S, 2); }
798 void operator delete(void *Ptr) { User::operator delete(Ptr); }
799
800 using VolatileField = BoolBitfieldElementT<0>;
801 using AtomicOrderingField =
802 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
803 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
804 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
805 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
806 OperationField, AlignmentField>(),
807 "Bitfields must be contiguous");
808
809 BinOp getOperation() const { return getSubclassData<OperationField>(); }
810
811 static StringRef getOperationName(BinOp Op);
812
813 static bool isFPOperation(BinOp Op) {
814 switch (Op) {
815 case AtomicRMWInst::FAdd:
816 case AtomicRMWInst::FSub:
817 return true;
818 default:
819 return false;
820 }
821 }
822
823 void setOperation(BinOp Operation) {
824 setSubclassData<OperationField>(Operation);
825 }
826
827 /// Return the alignment of the memory location operated on by this
828 /// rmw instruction.
829 Align getAlign() const {
830 return Align(1ULL << getSubclassData<AlignmentField>());
831 }
832
833 void setAlignment(Align Align) {
834 setSubclassData<AlignmentField>(Log2(Align));
835 }
836
837 /// Return true if this is a RMW on a volatile memory location.
838 ///
839 bool isVolatile() const { return getSubclassData<VolatileField>(); }
840
841 /// Specify whether this is a volatile RMW or not.
842 ///
843 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
844
845 /// Transparently provide more efficient getOperand methods.
846 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
847
848 /// Returns the ordering constraint of this rmw instruction.
849 AtomicOrdering getOrdering() const {
850 return getSubclassData<AtomicOrderingField>();
851 }
852
853 /// Sets the ordering constraint of this rmw instruction.
854 void setOrdering(AtomicOrdering Ordering) {
855 assert(Ordering != AtomicOrdering::NotAtomic &&
856 "atomicrmw instructions can only be atomic.");
857 setSubclassData<AtomicOrderingField>(Ordering);
858 }
859
860 /// Returns the synchronization scope ID of this rmw instruction.
861 SyncScope::ID getSyncScopeID() const {
862 return SSID;
863 }
864
865 /// Sets the synchronization scope ID of this rmw instruction.
866 void setSyncScopeID(SyncScope::ID SSID) {
867 this->SSID = SSID;
868 }
869
870 Value *getPointerOperand() { return getOperand(0); }
871 const Value *getPointerOperand() const { return getOperand(0); }
872 static unsigned getPointerOperandIndex() { return 0U; }
873
874 Value *getValOperand() { return getOperand(1); }
875 const Value *getValOperand() const { return getOperand(1); }
876
877 /// Returns the address space of the pointer operand.
878 unsigned getPointerAddressSpace() const {
879 return getPointerOperand()->getType()->getPointerAddressSpace();
880 }
881
882 bool isFloatingPointOperation() const {
883 return isFPOperation(getOperation());
884 }
885
886 // Methods for support type inquiry through isa, cast, and dyn_cast:
887 static bool classof(const Instruction *I) {
888 return I->getOpcode() == Instruction::AtomicRMW;
889 }
890 static bool classof(const Value *V) {
891 return isa<Instruction>(V) && classof(cast<Instruction>(V));
892 }
893
894private:
895 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
896 AtomicOrdering Ordering, SyncScope::ID SSID);
897
898 // Shadow Instruction::setInstructionSubclassData with a private forwarding
899 // method so that subclasses cannot accidentally use it.
900 template <typename Bitfield>
901 void setSubclassData(typename Bitfield::Type Value) {
902 Instruction::setSubclassData<Bitfield>(Value);
903 }
904
905 /// The synchronization scope ID of this rmw instruction. Not quite enough
906 /// room in SubClassData for everything, so synchronization scope ID gets its
907 /// own field.
908 SyncScope::ID SSID;
909};
910
911template <>
912struct OperandTraits<AtomicRMWInst>
913 : public FixedNumOperandTraits<AtomicRMWInst,2> {
914};
915
916DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
917
918//===----------------------------------------------------------------------===//
919// GetElementPtrInst Class
920//===----------------------------------------------------------------------===//
921
922// checkGEPType - Simple wrapper function to give a better assertion failure
923// message on bad indexes for a gep instruction.
924//
925inline Type *checkGEPType(Type *Ty) {
926 assert(Ty && "Invalid GetElementPtrInst indices for type!");
927 return Ty;
928}
929
930/// An instruction for type-safe pointer arithmetic to
931/// access elements of arrays and structs.
932///
933class GetElementPtrInst : public Instruction {
934 Type *SourceElementType;
935 Type *ResultElementType;
936
937 GetElementPtrInst(const GetElementPtrInst &GEPI);
938
939 /// Constructors - Create a getelementptr instruction with a base pointer and a
940 /// list of indices. The first ctor can optionally insert before an existing
941 /// instruction, the second appends the new instruction to the specified
942 /// BasicBlock.
943 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
944 ArrayRef<Value *> IdxList, unsigned Values,
945 const Twine &NameStr, Instruction *InsertBefore);
946 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
947 ArrayRef<Value *> IdxList, unsigned Values,
948 const Twine &NameStr, BasicBlock *InsertAtEnd);
949
950 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
951
952protected:
953 // Note: Instruction needs to be a friend here to call cloneImpl.
954 friend class Instruction;
955
956 GetElementPtrInst *cloneImpl() const;
957
958public:
959 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
960 ArrayRef<Value *> IdxList,
961 const Twine &NameStr = "",
962 Instruction *InsertBefore = nullptr) {
963 unsigned Values = 1 + unsigned(IdxList.size());
964 assert(PointeeType && "Must specify element type");
965 assert(cast<PointerType>(Ptr->getType()->getScalarType())
966 ->isOpaqueOrPointeeTypeMatches(PointeeType));
967 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
968 NameStr, InsertBefore);
969 }
970
971 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
972 ArrayRef<Value *> IdxList,
973 const Twine &NameStr,
974 BasicBlock *InsertAtEnd) {
975 unsigned Values = 1 + unsigned(IdxList.size());
976 assert(PointeeType && "Must specify element type");
977 assert(cast<PointerType>(Ptr->getType()->getScalarType())
978 ->isOpaqueOrPointeeTypeMatches(PointeeType));
979 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
980 NameStr, InsertAtEnd);
981 }
982
983 /// Create an "inbounds" getelementptr. See the documentation for the
984 /// "inbounds" flag in LangRef.html for details.
985 static GetElementPtrInst *
986 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
987 const Twine &NameStr = "",
988 Instruction *InsertBefore = nullptr) {
989 GetElementPtrInst *GEP =
990 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
991 GEP->setIsInBounds(true);
992 return GEP;
993 }
994
995 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
996 ArrayRef<Value *> IdxList,
997 const Twine &NameStr,
998 BasicBlock *InsertAtEnd) {
999 GetElementPtrInst *GEP =
1000 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1001 GEP->setIsInBounds(true);
1002 return GEP;
1003 }
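A usage sketch for the factories above (the helper and its parameters are illustrative):

  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  GetElementPtrInst *emitArrayGEP(Type *ArrayTy, Value *Base, Value *Zero,
                                  Value *Idx, Instruction *InsertBefore) {
    // getelementptr inbounds [N x i32], [N x i32]* %Base, i64 0, i64 %Idx
    return GetElementPtrInst::CreateInBounds(ArrayTy, Base, {Zero, Idx},
                                             "elt", InsertBefore);
  }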
1004
1005 /// Transparently provide more efficient getOperand methods.
1006 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1007
1008 Type *getSourceElementType() const { return SourceElementType; }
1009
1010 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1011 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1012
1013 Type *getResultElementType() const {
1014 assert(cast<PointerType>(getType()->getScalarType())
1015 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1016 return ResultElementType;
1017 }
1018
1019 /// Returns the address space of this instruction's pointer type.
1020 unsigned getAddressSpace() const {
1021 // Note that this is always the same as the pointer operand's address space
1022 // and that is cheaper to compute, so cheat here.
1023 return getPointerAddressSpace();
1024 }
1025
1026 /// Returns the result type of a getelementptr with the given source
1027 /// element type and indexes.
1028 ///
1029 /// Null is returned if the indices are invalid for the specified
1030 /// source element type.
1031 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1032 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1033 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1034
1035 /// Return the type of the element at the given index of an indexable
1036 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1037 ///
1038 /// Returns null if the type can't be indexed, or the given index is not
1039 /// legal for the given type.
1040 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1041 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1042
1043 inline op_iterator idx_begin() { return op_begin()+1; }
1044 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1045 inline op_iterator idx_end() { return op_end(); }
1046 inline const_op_iterator idx_end() const { return op_end(); }
1047
1048 inline iterator_range<op_iterator> indices() {
1049 return make_range(idx_begin(), idx_end());
1050 }
1051
1052 inline iterator_range<const_op_iterator> indices() const {
1053 return make_range(idx_begin(), idx_end());
1054 }
1055
1056 Value *getPointerOperand() {
1057 return getOperand(0);
1058 }
1059 const Value *getPointerOperand() const {
1060 return getOperand(0);
1061 }
1062 static unsigned getPointerOperandIndex() {
1063 return 0U; // get index for modifying correct operand.
1064 }
1065
1066 /// Method to return the pointer operand as a
1067 /// PointerType.
1068 Type *getPointerOperandType() const {
1069 return getPointerOperand()->getType();
1070 }
1071
1072 /// Returns the address space of the pointer operand.
1073 unsigned getPointerAddressSpace() const {
1074 return getPointerOperandType()->getPointerAddressSpace();
1075 }
1076
1077 /// Returns the pointer type returned by the GEP
1078 /// instruction, which may be a vector of pointers.
1079 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1080 ArrayRef<Value *> IdxList) {
1081 PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1082 unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1083 Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1084 Type *PtrTy = OrigPtrTy->isOpaque()
1085 ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1086 : PointerType::get(ResultElemTy, AddrSpace);
1087 // Vector GEP
1088 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1089 ElementCount EltCount = PtrVTy->getElementCount();
1090 return VectorType::get(PtrTy, EltCount);
1091 }
1092 for (Value *Index : IdxList)
1093 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1094 ElementCount EltCount = IndexVTy->getElementCount();
1095 return VectorType::get(PtrTy, EltCount);
1096 }
1097 // Scalar GEP
1098 return PtrTy;
1099 }
1100
1101 unsigned getNumIndices() const { // Note: always non-negative
1102 return getNumOperands() - 1;
1103 }
1104
1105 bool hasIndices() const {
1106 return getNumOperands() > 1;
1107 }
1108
1109 /// Return true if all of the indices of this GEP are
1110 /// zeros. If so, the result pointer and the first operand have the same
1111 /// value, just potentially different types.
1112 bool hasAllZeroIndices() const;
1113
1114 /// Return true if all of the indices of this GEP are
1115 /// constant integers. If so, the result pointer and the first operand have
1116 /// a constant offset between them.
1117 bool hasAllConstantIndices() const;
1118
1119 /// Set or clear the inbounds flag on this GEP instruction.
1120 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1121 void setIsInBounds(bool b = true);
1122
1123 /// Determine whether the GEP has the inbounds flag.
1124 bool isInBounds() const;
1125
1126 /// Accumulate the constant address offset of this GEP if possible.
1127 ///
1128 /// This routine accepts an APInt into which it will accumulate the constant
1129 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1130 /// all-constant, it returns false and the value of the offset APInt is
1131 /// undefined (it is *not* preserved!). The APInt passed into this routine
1132 /// must be at least as wide as the IntPtr type for the address space of
1133 /// the base GEP pointer.
1134 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1135 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1136 MapVector<Value *, APInt> &VariableOffsets,
1137 APInt &ConstantOffset) const;
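A sketch of the intended calling convention for accumulateConstantOffset (the wrapper is hypothetical): the APInt must be sized to the index width of the pointer's address space, and its value is meaningless when the call returns false.

  #include "llvm/IR/DataLayout.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  bool constantGEPOffset(const DataLayout &DL, const GetElementPtrInst *GEP,
                         APInt &Result) {
    unsigned Width = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
    APInt Offset(Width, 0);
    if (!GEP->accumulateConstantOffset(DL, Offset))
      return false; // not all-constant; Offset is not preserved
    Result = Offset;
    return true;
  }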
1138 // Methods for support type inquiry through isa, cast, and dyn_cast:
1139 static bool classof(const Instruction *I) {
1140 return (I->getOpcode() == Instruction::GetElementPtr);
1141 }
1142 static bool classof(const Value *V) {
1143 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1144 }
1145};
1146
1147template <>
1148struct OperandTraits<GetElementPtrInst> :
1149 public VariadicOperandTraits<GetElementPtrInst, 1> {
1150};
1151
1152GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1153 ArrayRef<Value *> IdxList, unsigned Values,
1154 const Twine &NameStr,
1155 Instruction *InsertBefore)
1156 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1157 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1158 Values, InsertBefore),
1159 SourceElementType(PointeeType),
1160 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1161 assert(cast<PointerType>(getType()->getScalarType())
1162 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1163 init(Ptr, IdxList, NameStr);
1164}
1165
1166GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1167 ArrayRef<Value *> IdxList, unsigned Values,
1168 const Twine &NameStr,
1169 BasicBlock *InsertAtEnd)
1170 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1171 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1172 Values, InsertAtEnd),
1173 SourceElementType(PointeeType),
1174 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1175 assert(cast<PointerType>(getType()->getScalarType())
1176 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1177 init(Ptr, IdxList, NameStr);
1178}
1179
1180DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1181
1182//===----------------------------------------------------------------------===//
1183// ICmpInst Class
1184//===----------------------------------------------------------------------===//
1185
1186/// This instruction compares its operands according to the predicate given
1187/// to the constructor. It only operates on integers or pointers. The operands
1188/// must have identical types.
1189/// Represents an integer comparison operator.
1190class ICmpInst: public CmpInst {
1191 void AssertOK() {
1192 assert(isIntPredicate() &&
1193 "Invalid ICmp predicate value");
1194 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1195 "Both operands to ICmp instruction are not of the same type!");
1196 // Check that the operands are the right type
1197 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1198 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1199 "Invalid operand types for ICmp instruction");
1200 }
1201
1202protected:
1203 // Note: Instruction needs to be a friend here to call cloneImpl.
1204 friend class Instruction;
1205
1206 /// Clone an identical ICmpInst
1207 ICmpInst *cloneImpl() const;
1208
1209public:
1210 /// Constructor with insert-before-instruction semantics.
1211 ICmpInst(
1212 Instruction *InsertBefore, ///< Where to insert
1213 Predicate pred, ///< The predicate to use for the comparison
1214 Value *LHS, ///< The left-hand-side of the expression
1215 Value *RHS, ///< The right-hand-side of the expression
1216 const Twine &NameStr = "" ///< Name of the instruction
1217 ) : CmpInst(makeCmpResultType(LHS->getType()),
1218 Instruction::ICmp, pred, LHS, RHS, NameStr,
1219 InsertBefore) {
1220#ifndef NDEBUG
1221 AssertOK();
1222#endif
1223 }
1224
1225 /// Constructor with insert-at-end semantics.
1226 ICmpInst(
1227 BasicBlock &InsertAtEnd, ///< Block to insert into.
1228 Predicate pred, ///< The predicate to use for the comparison
1229 Value *LHS, ///< The left-hand-side of the expression
1230 Value *RHS, ///< The right-hand-side of the expression
1231 const Twine &NameStr = "" ///< Name of the instruction
1232 ) : CmpInst(makeCmpResultType(LHS->getType()),
1233 Instruction::ICmp, pred, LHS, RHS, NameStr,
1234 &InsertAtEnd) {
1235#ifndef NDEBUG
1236 AssertOK();
1237#endif
1238 }
1239
1240 /// Constructor with no-insertion semantics
1241 ICmpInst(
1242 Predicate pred, ///< The predicate to use for the comparison
1243 Value *LHS, ///< The left-hand-side of the expression
1244 Value *RHS, ///< The right-hand-side of the expression
1245 const Twine &NameStr = "" ///< Name of the instruction
1246 ) : CmpInst(makeCmpResultType(LHS->getType()),
1247 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1248#ifndef NDEBUG
1249 AssertOK();
1250#endif
1251 }
1252
1253 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1254 /// @returns the predicate that would be the result if the operand were
1255 /// regarded as signed.
1256 /// Return the signed version of the predicate
1257 Predicate getSignedPredicate() const {
1258 return getSignedPredicate(getPredicate());
1259 }
1260
1261 /// This is a static version that you can use without an instruction.
1262 /// Return the signed version of the predicate.
1263 static Predicate getSignedPredicate(Predicate pred);
1264
1265 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1266 /// @returns the predicate that would be the result if the operand were
1267 /// regarded as unsigned.
1268 /// Return the unsigned version of the predicate
1269 Predicate getUnsignedPredicate() const {
1270 return getUnsignedPredicate(getPredicate());
1271 }
1272
1273 /// This is a static version that you can use without an instruction.
1274 /// Return the unsigned version of the predicate.
1275 static Predicate getUnsignedPredicate(Predicate pred);
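For example (a sketch; the checker function is hypothetical): relational predicates flip signedness, while equality predicates map to themselves.

  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  bool predicateMappings() {
    return ICmpInst::getSignedPredicate(ICmpInst::ICMP_ULT) ==
               ICmpInst::ICMP_SLT &&
           ICmpInst::getUnsignedPredicate(ICmpInst::ICMP_EQ) ==
               ICmpInst::ICMP_EQ;
  }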
1276
1277 /// Return true if this predicate is either EQ or NE. This also
1278 /// tests for commutativity.
1279 static bool isEquality(Predicate P) {
1280 return P == ICMP_EQ || P == ICMP_NE;
1281 }
1282
1283 /// Return true if this predicate is either EQ or NE. This also
1284 /// tests for commutativity.
1285 bool isEquality() const {
1286 return isEquality(getPredicate());
1287 }
1288
1289 /// @returns true if the predicate of this ICmpInst is commutative
1290 /// Determine if this relation is commutative.
1291 bool isCommutative() const { return isEquality(); }
1292
1293 /// Return true if the predicate is relational (not EQ or NE).
1294 ///
1295 bool isRelational() const {
1296 return !isEquality();
1297 }
1298
1299 /// Return true if the predicate is relational (not EQ or NE).
1300 ///
1301 static bool isRelational(Predicate P) {
1302 return !isEquality(P);
1303 }
1304
1305 /// Return true if the predicate is SGT or UGT.
1306 ///
1307 static bool isGT(Predicate P) {
1308 return P == ICMP_SGT || P == ICMP_UGT;
1309 }
1310
1311 /// Return true if the predicate is SLT or ULT.
1312 ///
1313 static bool isLT(Predicate P) {
1314 return P == ICMP_SLT || P == ICMP_ULT;
1315 }
1316
1317 /// Return true if the predicate is SGE or UGE.
1318 ///
1319 static bool isGE(Predicate P) {
1320 return P == ICMP_SGE || P == ICMP_UGE;
1321 }
1322
1323 /// Return true if the predicate is SLE or ULE.
1324 ///
1325 static bool isLE(Predicate P) {
1326 return P == ICMP_SLE || P == ICMP_ULE;
1327 }
1328
1329 /// Returns the sequence of all ICmp predicates.
1330 ///
1331 static auto predicates() { return ICmpPredicates(); }
1332
1333 /// Exchange the two operands to this instruction in such a way that it does
1334 /// not modify the semantics of the instruction. The predicate value may be
1335 /// changed to retain the same result if the predicate is order dependent
1336 /// (e.g. ult).
1337 /// Swap operands and adjust predicate.
1338 void swapOperands() {
1339 setPredicate(getSwappedPredicate());
1340 Op<0>().swap(Op<1>());
1341 }
1342
1343 /// Return result of `LHS Pred RHS` comparison.
1344 static bool compare(const APInt &LHS, const APInt &RHS,
1345 ICmpInst::Predicate Pred);
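A small worked example of compare() (a sketch; the helper is hypothetical): the same bit pattern orders differently under signed and unsigned predicates.

  #include "llvm/ADT/APInt.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  bool signedVsUnsigned() {
    APInt MinusOne(32, -1, /*isSigned=*/true); // 0xFFFFFFFF
    APInt One(32, 1);
    return ICmpInst::compare(MinusOne, One, ICmpInst::ICMP_SLT) && // -1 < 1
           ICmpInst::compare(MinusOne, One, ICmpInst::ICMP_UGT);   // max > 1
  }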
1346
1347 // Methods for support type inquiry through isa, cast, and dyn_cast:
1348 static bool classof(const Instruction *I) {
1349 return I->getOpcode() == Instruction::ICmp;
1350 }
1351 static bool classof(const Value *V) {
1352 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1353 }
1354};
1355
1356//===----------------------------------------------------------------------===//
1357// FCmpInst Class
1358//===----------------------------------------------------------------------===//
1359
1360/// This instruction compares its operands according to the predicate given
1361/// to the constructor. It only operates on floating point values or packed
1362/// vectors of floating point values. The operands must have identical types.
1363/// Represents a floating point comparison operator.
1364class FCmpInst: public CmpInst {
1365 void AssertOK() {
1366 assert(isFPPredicate() && "Invalid FCmp predicate value");
1367 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1368 "Both operands to FCmp instruction are not of the same type!");
1369 // Check that the operands are the right type
1370 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1371 "Invalid operand types for FCmp instruction");
1372 }
1373
1374protected:
1375 // Note: Instruction needs to be a friend here to call cloneImpl.
1376 friend class Instruction;
1377
1378 /// Clone an identical FCmpInst
1379 FCmpInst *cloneImpl() const;
1380
1381public:
1382 /// Constructor with insert-before-instruction semantics.
1383 FCmpInst(
1384 Instruction *InsertBefore, ///< Where to insert
1385 Predicate pred, ///< The predicate to use for the comparison
1386 Value *LHS, ///< The left-hand-side of the expression
1387 Value *RHS, ///< The right-hand-side of the expression
1388 const Twine &NameStr = "" ///< Name of the instruction
1389 ) : CmpInst(makeCmpResultType(LHS->getType()),
1390 Instruction::FCmp, pred, LHS, RHS, NameStr,
1391 InsertBefore) {
1392 AssertOK();
1393 }
1394
1395 /// Constructor with insert-at-end semantics.
1396 FCmpInst(
1397 BasicBlock &InsertAtEnd, ///< Block to insert into.
1398 Predicate pred, ///< The predicate to use for the comparison
1399 Value *LHS, ///< The left-hand-side of the expression
1400 Value *RHS, ///< The right-hand-side of the expression
1401 const Twine &NameStr = "" ///< Name of the instruction
1402 ) : CmpInst(makeCmpResultType(LHS->getType()),
1403 Instruction::FCmp, pred, LHS, RHS, NameStr,
1404 &InsertAtEnd) {
1405 AssertOK();
1406 }
1407
1408 /// Constructor with no-insertion semantics
1409 FCmpInst(
1410 Predicate Pred, ///< The predicate to use for the comparison
1411 Value *LHS, ///< The left-hand-side of the expression
1412 Value *RHS, ///< The right-hand-side of the expression
1413 const Twine &NameStr = "", ///< Name of the instruction
1414 Instruction *FlagsSource = nullptr
1415 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1416 RHS, NameStr, nullptr, FlagsSource) {
1417 AssertOK();
1418 }
1419
1420 /// @returns true if the predicate of this instruction is EQ or NE.
1421 /// Determine if this is an equality predicate.
1422 static bool isEquality(Predicate Pred) {
1423 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1424 Pred == FCMP_UNE;
1425 }
1426
1427 /// @returns true if the predicate of this instruction is EQ or NE.
1428 /// Determine if this is an equality predicate.
1429 bool isEquality() const { return isEquality(getPredicate()); }
1430
1431 /// @returns true if the predicate of this instruction is commutative.
1432 /// Determine if this is a commutative predicate.
1433 bool isCommutative() const {
1434 return isEquality() ||
1435 getPredicate() == FCMP_FALSE ||
1436 getPredicate() == FCMP_TRUE ||
1437 getPredicate() == FCMP_ORD ||
1438 getPredicate() == FCMP_UNO;
1439 }
1440
1441 /// @returns true if the predicate is relational (not EQ or NE).
1442 /// Determine if this a relational predicate.
1443 bool isRelational() const { return !isEquality(); }
1444
1445 /// Exchange the two operands to this instruction in such a way that it does
1446 /// not modify the semantics of the instruction. The predicate value may be
1447 /// changed to retain the same result if the predicate is order dependent
1448 /// (e.g. ult).
1449 /// Swap operands and adjust predicate.
1450 void swapOperands() {
1451 setPredicate(getSwappedPredicate());
1452 Op<0>().swap(Op<1>());
1453 }
1454
1455 /// Returns the sequence of all FCmp predicates.
1456 ///
1457 static auto predicates() { return FCmpPredicates(); }
1458
1459 /// Return result of `LHS Pred RHS` comparison.
1460 static bool compare(const APFloat &LHS, const APFloat &RHS,
1461 FCmpInst::Predicate Pred);
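A small worked example of compare() with a NaN input (a sketch; the helper is hypothetical): ordered predicates reject NaN, unordered ones accept it.

  #include "llvm/ADT/APFloat.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  bool nanComparisons() {
    APFloat NaN = APFloat::getNaN(APFloat::IEEEsingle());
    APFloat One(1.0f);
    return !FCmpInst::compare(NaN, One, FCmpInst::FCMP_OEQ) && // ordered: false
           FCmpInst::compare(NaN, One, FCmpInst::FCMP_UEQ);    // unordered: true
  }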
1462
1463 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1464 static bool classof(const Instruction *I) {
1465 return I->getOpcode() == Instruction::FCmp;
1466 }
1467 static bool classof(const Value *V) {
1468 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1469 }
1470};
1471
1472//===----------------------------------------------------------------------===//
1473/// This class represents a function call, abstracting a target
1474 /// machine's calling convention. This class uses the low bit of the SubClassData
1475/// field to indicate whether or not this is a tail call. The rest of the bits
1476/// hold the calling convention of the call.
1477///
1478class CallInst : public CallBase {
1479 CallInst(const CallInst &CI);
1480
1481 /// Construct a CallInst given a range of arguments, optionally with
1482 /// operand bundles.
1483 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1484 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1485 Instruction *InsertBefore);
1486
1487 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1488 const Twine &NameStr, Instruction *InsertBefore)
1489 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1490
1491 /// Construct a CallInst given a range of arguments, optionally with
1492 /// operand bundles.
1493 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1494 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1495 BasicBlock *InsertAtEnd);
1496
1497 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1498 Instruction *InsertBefore);
1499
1500 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1501 BasicBlock *InsertAtEnd);
1502
1503 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1504 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1505 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1506
1507 /// Compute the number of operands to allocate.
1508 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1509 // We need one operand for the called function, plus the input operand
1510 // counts provided.
1511 return 1 + NumArgs + NumBundleInputs;
1512 }
1513
1514protected:
1515 // Note: Instruction needs to be a friend here to call cloneImpl.
1516 friend class Instruction;
1517
1518 CallInst *cloneImpl() const;
1519
1520public:
1521 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1522 Instruction *InsertBefore = nullptr) {
1523 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1524 }
1525
1526 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1527 const Twine &NameStr,
1528 Instruction *InsertBefore = nullptr) {
1529 return new (ComputeNumOperands(Args.size()))
1530 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1531 }
1532
1533 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1534 ArrayRef<OperandBundleDef> Bundles = None,
1535 const Twine &NameStr = "",
1536 Instruction *InsertBefore = nullptr) {
1537 const int NumOperands =
1538 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1539 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1540
1541 return new (NumOperands, DescriptorBytes)
1542 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1543 }
1544
1545 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1546 BasicBlock *InsertAtEnd) {
1547 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1548 }
1549
1550 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1551 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1552 return new (ComputeNumOperands(Args.size()))
1553 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1554 }
1555
1556 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1557 ArrayRef<OperandBundleDef> Bundles,
1558 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1559 const int NumOperands =
1560 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1561 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1562
1563 return new (NumOperands, DescriptorBytes)
1564 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1565 }
1566
1567 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1568 Instruction *InsertBefore = nullptr) {
1569 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1570 InsertBefore);
1571 }
1572
1573 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1574 ArrayRef<OperandBundleDef> Bundles = None,
1575 const Twine &NameStr = "",
1576 Instruction *InsertBefore = nullptr) {
1577 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1578 NameStr, InsertBefore);
1579 }
1580
1581 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1582 const Twine &NameStr,
1583 Instruction *InsertBefore = nullptr) {
1584 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1585 InsertBefore);
1586 }
1587
1588 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1589 BasicBlock *InsertAtEnd) {
1590 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1591 InsertAtEnd);
1592 }
1593
1594 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1595 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1596 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1597 InsertAtEnd);
1598 }
1599
1600 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1601 ArrayRef<OperandBundleDef> Bundles,
1602 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1603 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1604 NameStr, InsertAtEnd);
1605 }
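A usage sketch tying the FunctionCallee overloads together (the callee and the helper are illustrative):

  #include "llvm/IR/Instructions.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  CallInst *emitPuts(Module &M, Value *Arg, Instruction *InsertBefore) {
    LLVMContext &Ctx = M.getContext();
    // Look up (or insert) `i32 @puts(i8*)` and call it.
    FunctionCallee Puts = M.getOrInsertFunction(
        "puts", Type::getInt32Ty(Ctx), Type::getInt8PtrTy(Ctx));
    return CallInst::Create(Puts, {Arg}, "calltmp", InsertBefore);
  }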
1606
1607 /// Create a clone of \p CI with a different set of operand bundles and
1608 /// insert it before \p InsertPt.
1609 ///
1610 /// The returned call instruction is identical to \p CI in every way except that
1611 /// the operand bundles for the new instruction are set to the operand bundles
1612 /// in \p Bundles.
1613 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1614 Instruction *InsertPt = nullptr);
1615
1616 /// Generate the IR for a call to malloc:
1617 /// 1. Compute the malloc call's argument as the specified type's size,
1618 /// possibly multiplied by the array size if the array size is not
1619 /// constant 1.
1620 /// 2. Call malloc with that argument.
1621 /// 3. Bitcast the result of the malloc call to the specified type.
1622 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1623 Type *AllocTy, Value *AllocSize,
1624 Value *ArraySize = nullptr,
1625 Function *MallocF = nullptr,
1626 const Twine &Name = "");
1627 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1628 Type *AllocTy, Value *AllocSize,
1629 Value *ArraySize = nullptr,
1630 Function *MallocF = nullptr,
1631 const Twine &Name = "");
1632 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1633 Type *AllocTy, Value *AllocSize,
1634 Value *ArraySize = nullptr,
1635 ArrayRef<OperandBundleDef> Bundles = None,
1636 Function *MallocF = nullptr,
1637 const Twine &Name = "");
1638 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1639 Type *AllocTy, Value *AllocSize,
1640 Value *ArraySize = nullptr,
1641 ArrayRef<OperandBundleDef> Bundles = None,
1642 Function *MallocF = nullptr,
1643 const Twine &Name = "");
1644 /// Generate the IR for a call to the builtin free function.
1645 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1646 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1647 static Instruction *CreateFree(Value *Source,
1648 ArrayRef<OperandBundleDef> Bundles,
1649 Instruction *InsertBefore);
1650 static Instruction *CreateFree(Value *Source,
1651 ArrayRef<OperandBundleDef> Bundles,
1652 BasicBlock *InsertAtEnd);
1653
1654 // Note that 'musttail' implies 'tail'.
1655 enum TailCallKind : unsigned {
1656 TCK_None = 0,
1657 TCK_Tail = 1,
1658 TCK_MustTail = 2,
1659 TCK_NoTail = 3,
1660 TCK_LAST = TCK_NoTail
1661 };
1662
1663 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1664 static_assert(
1665 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1666 "Bitfields must be contiguous");
1667
1668 TailCallKind getTailCallKind() const {
1669 return getSubclassData<TailCallKindField>();
1670 }
1671
1672 bool isTailCall() const {
1673 TailCallKind Kind = getTailCallKind();
1674 return Kind == TCK_Tail || Kind == TCK_MustTail;
1675 }
1676
1677 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1678
1679 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1680
1681 void setTailCallKind(TailCallKind TCK) {
1682 setSubclassData<TailCallKindField>(TCK);
1683 }
1684
1685 void setTailCall(bool IsTc = true) {
1686 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1687 }
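For example (a sketch; the helper is hypothetical):

  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  void forceMustTail(CallInst *CI) {
    CI->setTailCallKind(CallInst::TCK_MustTail);
    // musttail implies tail, so both queries succeed.
    assert(CI->isTailCall() && CI->isMustTailCall());
  }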
1688
1689 /// Return true if the call can return twice
1690 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1691 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1692
1693 // Methods for support type inquiry through isa, cast, and dyn_cast:
1694 static bool classof(const Instruction *I) {
1695 return I->getOpcode() == Instruction::Call;
1696 }
1697 static bool classof(const Value *V) {
1698 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1699 }
1700
1701 /// Updates profile metadata by scaling it by \p S / \p T.
1702 void updateProfWeight(uint64_t S, uint64_t T);
1703
1704private:
1705 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1706 // method so that subclasses cannot accidentally use it.
1707 template <typename Bitfield>
1708 void setSubclassData(typename Bitfield::Type Value) {
1709 Instruction::setSubclassData<Bitfield>(Value);
1710 }
1711};
1712
1713CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1714 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1715 BasicBlock *InsertAtEnd)
1716 : CallBase(Ty->getReturnType(), Instruction::Call,
1717 OperandTraits<CallBase>::op_end(this) -
1718 (Args.size() + CountBundleInputs(Bundles) + 1),
1719 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1720 InsertAtEnd) {
1721 init(Ty, Func, Args, Bundles, NameStr);
1722}
1723
1724CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1725 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1726 Instruction *InsertBefore)
1727 : CallBase(Ty->getReturnType(), Instruction::Call,
1728 OperandTraits<CallBase>::op_end(this) -
1729 (Args.size() + CountBundleInputs(Bundles) + 1),
1730 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1731 InsertBefore) {
1732 init(Ty, Func, Args, Bundles, NameStr);
1733}
1734
1735//===----------------------------------------------------------------------===//
1736// SelectInst Class
1737//===----------------------------------------------------------------------===//
1738
1739/// This class represents the LLVM 'select' instruction.
1740///
1741class SelectInst : public Instruction {
1742 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1743 Instruction *InsertBefore)
1744 : Instruction(S1->getType(), Instruction::Select,
1745 &Op<0>(), 3, InsertBefore) {
1746 init(C, S1, S2);
1747 setName(NameStr);
1748 }
1749
1750 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1751 BasicBlock *InsertAtEnd)
1752 : Instruction(S1->getType(), Instruction::Select,
1753 &Op<0>(), 3, InsertAtEnd) {
1754 init(C, S1, S2);
1755 setName(NameStr);
1756 }
1757
1758 void init(Value *C, Value *S1, Value *S2) {
1759    assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1760 Op<0>() = C;
1761 Op<1>() = S1;
1762 Op<2>() = S2;
1763 }
1764
1765protected:
1766 // Note: Instruction needs to be a friend here to call cloneImpl.
1767 friend class Instruction;
1768
1769 SelectInst *cloneImpl() const;
1770
1771public:
1772 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1773 const Twine &NameStr = "",
1774 Instruction *InsertBefore = nullptr,
1775 Instruction *MDFrom = nullptr) {
1776 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1777 if (MDFrom)
1778 Sel->copyMetadata(*MDFrom);
1779 return Sel;
1780 }
1781
1782 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1783 const Twine &NameStr,
1784 BasicBlock *InsertAtEnd) {
1785 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1786 }
1787
1788 const Value *getCondition() const { return Op<0>(); }
1789 const Value *getTrueValue() const { return Op<1>(); }
1790 const Value *getFalseValue() const { return Op<2>(); }
1791 Value *getCondition() { return Op<0>(); }
1792 Value *getTrueValue() { return Op<1>(); }
1793 Value *getFalseValue() { return Op<2>(); }
1794
1795 void setCondition(Value *V) { Op<0>() = V; }
1796 void setTrueValue(Value *V) { Op<1>() = V; }
1797 void setFalseValue(Value *V) { Op<2>() = V; }
1798
1799 /// Swap the true and false values of the select instruction.
1800 /// This doesn't swap prof metadata.
1801 void swapValues() { Op<1>().swap(Op<2>()); }
1802
1803 /// Return a string if the specified operands are invalid
1804 /// for a select operation, otherwise return null.
1805 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1806
1807 /// Transparently provide more efficient getOperand methods.
1808  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1809
1810 OtherOps getOpcode() const {
1811 return static_cast<OtherOps>(Instruction::getOpcode());
1812 }
1813
1814 // Methods for support type inquiry through isa, cast, and dyn_cast:
1815 static bool classof(const Instruction *I) {
1816 return I->getOpcode() == Instruction::Select;
1817 }
1818 static bool classof(const Value *V) {
1819 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1820 }
1821};
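A minimal creation sketch (assuming Cond is an existing i1 value, A and B share a type, and InsertPt is the instruction to insert before):
  // Sketch: validate the operands, then build 'select i1 %cond, A, B'.
  assert(!SelectInst::areInvalidOperands(Cond, A, B));
  SelectInst *Sel = SelectInst::Create(Cond, A, B, "sel", InsertPt);
  Sel->swapValues(); // now yields B when %cond is true; prof metadata is kept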
1822
1823template <>
1824struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1825};
1826
1827DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1828
1829//===----------------------------------------------------------------------===//
1830// VAArgInst Class
1831//===----------------------------------------------------------------------===//
1832
1833/// This class represents the va_arg llvm instruction, which returns
1834/// an argument of the specified type given a va_list, and increments that list.
1835///
1836class VAArgInst : public UnaryInstruction {
1837protected:
1838 // Note: Instruction needs to be a friend here to call cloneImpl.
1839 friend class Instruction;
1840
1841 VAArgInst *cloneImpl() const;
1842
1843public:
1844 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1845 Instruction *InsertBefore = nullptr)
1846 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1847 setName(NameStr);
1848 }
1849
1850 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1851 BasicBlock *InsertAtEnd)
1852 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1853 setName(NameStr);
1854 }
1855
1856 Value *getPointerOperand() { return getOperand(0); }
1857 const Value *getPointerOperand() const { return getOperand(0); }
1858 static unsigned getPointerOperandIndex() { return 0U; }
1859
1860 // Methods for support type inquiry through isa, cast, and dyn_cast:
1861 static bool classof(const Instruction *I) {
1862 return I->getOpcode() == VAArg;
1863 }
1864 static bool classof(const Value *V) {
1865 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1866 }
1867};
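A usage sketch (assuming VAList points at a valid va_list and Ctx/InsertPt are an existing context and insertion point):
  // Sketch: fetch the next variadic argument as an i32, advancing the list.
  VAArgInst *Arg =
      new VAArgInst(VAList, Type::getInt32Ty(Ctx), "va.arg", InsertPt);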
1868
1869//===----------------------------------------------------------------------===//
1870// ExtractElementInst Class
1871//===----------------------------------------------------------------------===//
1872
1873/// This instruction extracts a single (scalar)
1874/// element from a VectorType value
1875///
1876class ExtractElementInst : public Instruction {
1877 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1878 Instruction *InsertBefore = nullptr);
1879 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1880 BasicBlock *InsertAtEnd);
1881
1882protected:
1883 // Note: Instruction needs to be a friend here to call cloneImpl.
1884 friend class Instruction;
1885
1886 ExtractElementInst *cloneImpl() const;
1887
1888public:
1889 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1890 const Twine &NameStr = "",
1891 Instruction *InsertBefore = nullptr) {
1892 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1893 }
1894
1895 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1896 const Twine &NameStr,
1897 BasicBlock *InsertAtEnd) {
1898 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1899 }
1900
1901 /// Return true if an extractelement instruction can be
1902 /// formed with the specified operands.
1903 static bool isValidOperands(const Value *Vec, const Value *Idx);
1904
1905 Value *getVectorOperand() { return Op<0>(); }
1906 Value *getIndexOperand() { return Op<1>(); }
1907 const Value *getVectorOperand() const { return Op<0>(); }
1908 const Value *getIndexOperand() const { return Op<1>(); }
1909
1910 VectorType *getVectorOperandType() const {
1911 return cast<VectorType>(getVectorOperand()->getType());
1912 }
1913
1914 /// Transparently provide more efficient getOperand methods.
1915  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1916
1917 // Methods for support type inquiry through isa, cast, and dyn_cast:
1918 static bool classof(const Instruction *I) {
1919 return I->getOpcode() == Instruction::ExtractElement;
1920 }
1921 static bool classof(const Value *V) {
1922 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1923 }
1924};
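A usage sketch (Vec, Ctx, and InsertPt are assumed to exist):
  // Sketch: read lane 0 of the vector value Vec.
  Value *Zero = ConstantInt::get(Type::getInt64Ty(Ctx), 0);
  assert(ExtractElementInst::isValidOperands(Vec, Zero));
  Value *Lane0 = ExtractElementInst::Create(Vec, Zero, "lane0", InsertPt);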
1925
1926template <>
1927struct OperandTraits<ExtractElementInst> :
1928 public FixedNumOperandTraits<ExtractElementInst, 2> {
1929};
1930
1931DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1932
1933//===----------------------------------------------------------------------===//
1934// InsertElementInst Class
1935//===----------------------------------------------------------------------===//
1936
1937/// This instruction inserts a single (scalar)
1938/// element into a VectorType value
1939///
1940class InsertElementInst : public Instruction {
1941 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1942 const Twine &NameStr = "",
1943 Instruction *InsertBefore = nullptr);
1944 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1945 BasicBlock *InsertAtEnd);
1946
1947protected:
1948 // Note: Instruction needs to be a friend here to call cloneImpl.
1949 friend class Instruction;
1950
1951 InsertElementInst *cloneImpl() const;
1952
1953public:
1954 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1955 const Twine &NameStr = "",
1956 Instruction *InsertBefore = nullptr) {
1957 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1958 }
1959
1960 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1961 const Twine &NameStr,
1962 BasicBlock *InsertAtEnd) {
1963 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1964 }
1965
1966 /// Return true if an insertelement instruction can be
1967 /// formed with the specified operands.
1968 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1969 const Value *Idx);
1970
1971 /// Overload to return most specific vector type.
1972 ///
1973 VectorType *getType() const {
1974 return cast<VectorType>(Instruction::getType());
1975 }
1976
1977 /// Transparently provide more efficient getOperand methods.
1978  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1979
1980 // Methods for support type inquiry through isa, cast, and dyn_cast:
1981 static bool classof(const Instruction *I) {
1982 return I->getOpcode() == Instruction::InsertElement;
1983 }
1984 static bool classof(const Value *V) {
1985 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1986 }
1987};
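A usage sketch (Vec, Elt, Ctx, and InsertPt are assumed; Elt must match Vec's element type):
  // Sketch: produce a copy of Vec with scalar Elt written into lane 1.
  Value *One = ConstantInt::get(Type::getInt64Ty(Ctx), 1);
  assert(InsertElementInst::isValidOperands(Vec, Elt, One));
  Value *Updated =
      InsertElementInst::Create(Vec, Elt, One, "vec.upd", InsertPt);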
1988
1989template <>
1990struct OperandTraits<InsertElementInst> :
1991 public FixedNumOperandTraits<InsertElementInst, 3> {
1992};
1993
1994DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
1995
1996//===----------------------------------------------------------------------===//
1997// ShuffleVectorInst Class
1998//===----------------------------------------------------------------------===//
1999
2000constexpr int UndefMaskElem = -1;
2001
2002/// This instruction constructs a fixed permutation of two
2003/// input vectors.
2004///
2005/// For each element of the result vector, the shuffle mask selects an element
2006/// from one of the input vectors to copy to the result. Non-negative elements
2007/// in the mask represent an index into the concatenated pair of input vectors.
2008/// UndefMaskElem (-1) specifies that the result element is undefined.
2009///
2010/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2011/// requirement may be relaxed in the future.
2012class ShuffleVectorInst : public Instruction {
2013 SmallVector<int, 4> ShuffleMask;
2014 Constant *ShuffleMaskForBitcode;
2015
2016protected:
2017 // Note: Instruction needs to be a friend here to call cloneImpl.
2018 friend class Instruction;
2019
2020 ShuffleVectorInst *cloneImpl() const;
2021
2022public:
2023 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2024 Instruction *InsertBefore = nullptr);
2025 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2026 BasicBlock *InsertAtEnd);
2027 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2028 Instruction *InsertBefore = nullptr);
2029 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2030 BasicBlock *InsertAtEnd);
2031 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2032 const Twine &NameStr = "",
2033                    Instruction *InsertBefore = nullptr);
2034 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2035 const Twine &NameStr, BasicBlock *InsertAtEnd);
2036 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2037 const Twine &NameStr = "",
2038                    Instruction *InsertBefore = nullptr);
2039 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2040 const Twine &NameStr, BasicBlock *InsertAtEnd);
2041
2042 void *operator new(size_t S) { return User::operator new(S, 2); }
2043 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2044
2045 /// Swap the operands and adjust the mask to preserve the semantics
2046 /// of the instruction.
2047 void commute();
2048
2049 /// Return true if a shufflevector instruction can be
2050 /// formed with the specified operands.
2051 static bool isValidOperands(const Value *V1, const Value *V2,
2052 const Value *Mask);
2053 static bool isValidOperands(const Value *V1, const Value *V2,
2054 ArrayRef<int> Mask);
2055
2056 /// Overload to return most specific vector type.
2057 ///
2058 VectorType *getType() const {
2059 return cast<VectorType>(Instruction::getType());
2060 }
2061
2062 /// Transparently provide more efficient getOperand methods.
2063  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2064
2065 /// Return the shuffle mask value of this instruction for the given element
2066 /// index. Return UndefMaskElem if the element is undef.
2067 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2068
2069 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2070 /// elements of the mask are returned as UndefMaskElem.
2071 static void getShuffleMask(const Constant *Mask,
2072 SmallVectorImpl<int> &Result);
2073
2074 /// Return the mask for this instruction as a vector of integers. Undefined
2075 /// elements of the mask are returned as UndefMaskElem.
2076 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2077 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2078 }
2079
2080 /// Return the mask for this instruction, for use in bitcode.
2081 ///
2082 /// TODO: This is temporary until we decide a new bitcode encoding for
2083 /// shufflevector.
2084 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2085
2086 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2087 Type *ResultTy);
2088
2089 void setShuffleMask(ArrayRef<int> Mask);
2090
2091 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2092
2093 /// Return true if this shuffle returns a vector with a different number of
2094 /// elements than its source vectors.
2095 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2096 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2097 bool changesLength() const {
2098 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2099 ->getElementCount()
2100 .getKnownMinValue();
2101 unsigned NumMaskElts = ShuffleMask.size();
2102 return NumSourceElts != NumMaskElts;
2103 }
2104
2105 /// Return true if this shuffle returns a vector with a greater number of
2106 /// elements than its source vectors.
2107 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2108 bool increasesLength() const {
2109 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2110 ->getElementCount()
2111 .getKnownMinValue();
2112 unsigned NumMaskElts = ShuffleMask.size();
2113 return NumSourceElts < NumMaskElts;
2114 }
2115
2116 /// Return true if this shuffle mask chooses elements from exactly one source
2117 /// vector.
2118 /// Example: <7,5,undef,7>
2119 /// This assumes that vector operands are the same length as the mask.
2120 static bool isSingleSourceMask(ArrayRef<int> Mask);
2121 static bool isSingleSourceMask(const Constant *Mask) {
2122    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2123 SmallVector<int, 16> MaskAsInts;
2124 getShuffleMask(Mask, MaskAsInts);
2125 return isSingleSourceMask(MaskAsInts);
2126 }
2127
2128 /// Return true if this shuffle chooses elements from exactly one source
2129 /// vector without changing the length of that vector.
2130 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2131 /// TODO: Optionally allow length-changing shuffles.
2132 bool isSingleSource() const {
2133 return !changesLength() && isSingleSourceMask(ShuffleMask);
2134 }
2135
2136 /// Return true if this shuffle mask chooses elements from exactly one source
2137 /// vector without lane crossings. A shuffle using this mask is not
2138 /// necessarily a no-op because it may change the number of elements from its
2139 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2140 /// Example: <undef,undef,2,3>
2141 static bool isIdentityMask(ArrayRef<int> Mask);
2142 static bool isIdentityMask(const Constant *Mask) {
2143    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2144 SmallVector<int, 16> MaskAsInts;
2145 getShuffleMask(Mask, MaskAsInts);
2146 return isIdentityMask(MaskAsInts);
2147 }
2148
2149 /// Return true if this shuffle chooses elements from exactly one source
2150 /// vector without lane crossings and does not change the number of elements
2151 /// from its input vectors.
2152 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2153 bool isIdentity() const {
2154 return !changesLength() && isIdentityMask(ShuffleMask);
2155 }
2156
2157 /// Return true if this shuffle lengthens exactly one source vector with
2158 /// undefs in the high elements.
2159 bool isIdentityWithPadding() const;
2160
2161 /// Return true if this shuffle extracts the first N elements of exactly one
2162 /// source vector.
2163 bool isIdentityWithExtract() const;
2164
2165 /// Return true if this shuffle concatenates its 2 source vectors. This
2166 /// returns false if either input is undefined. In that case, the shuffle is
2167  /// better classified as an identity with padding operation.
2168 bool isConcat() const;
2169
2170 /// Return true if this shuffle mask chooses elements from its source vectors
2171 /// without lane crossings. A shuffle using this mask would be
2172 /// equivalent to a vector select with a constant condition operand.
2173 /// Example: <4,1,6,undef>
2174 /// This returns false if the mask does not choose from both input vectors.
2175 /// In that case, the shuffle is better classified as an identity shuffle.
2176 /// This assumes that vector operands are the same length as the mask
2177 /// (a length-changing shuffle can never be equivalent to a vector select).
2178 static bool isSelectMask(ArrayRef<int> Mask);
2179 static bool isSelectMask(const Constant *Mask) {
2180    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2181 SmallVector<int, 16> MaskAsInts;
2182 getShuffleMask(Mask, MaskAsInts);
2183 return isSelectMask(MaskAsInts);
2184 }
2185
2186 /// Return true if this shuffle chooses elements from its source vectors
2187 /// without lane crossings and all operands have the same number of elements.
2188 /// In other words, this shuffle is equivalent to a vector select with a
2189 /// constant condition operand.
2190 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2191 /// This returns false if the mask does not choose from both input vectors.
2192 /// In that case, the shuffle is better classified as an identity shuffle.
2193 /// TODO: Optionally allow length-changing shuffles.
2194 bool isSelect() const {
2195 return !changesLength() && isSelectMask(ShuffleMask);
2196 }
2197
2198 /// Return true if this shuffle mask swaps the order of elements from exactly
2199 /// one source vector.
2200 /// Example: <7,6,undef,4>
2201 /// This assumes that vector operands are the same length as the mask.
2202 static bool isReverseMask(ArrayRef<int> Mask);
2203 static bool isReverseMask(const Constant *Mask) {
2204    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2205 SmallVector<int, 16> MaskAsInts;
2206 getShuffleMask(Mask, MaskAsInts);
2207 return isReverseMask(MaskAsInts);
2208 }
2209
2210 /// Return true if this shuffle swaps the order of elements from exactly
2211 /// one source vector.
2212 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2213 /// TODO: Optionally allow length-changing shuffles.
2214 bool isReverse() const {
2215 return !changesLength() && isReverseMask(ShuffleMask);
2216 }
2217
2218 /// Return true if this shuffle mask chooses all elements with the same value
2219 /// as the first element of exactly one source vector.
2220 /// Example: <4,undef,undef,4>
2221 /// This assumes that vector operands are the same length as the mask.
2222 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2223 static bool isZeroEltSplatMask(const Constant *Mask) {
2224    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2225 SmallVector<int, 16> MaskAsInts;
2226 getShuffleMask(Mask, MaskAsInts);
2227 return isZeroEltSplatMask(MaskAsInts);
2228 }
2229
2230 /// Return true if all elements of this shuffle are the same value as the
2231 /// first element of exactly one source vector without changing the length
2232 /// of that vector.
2233 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2234 /// TODO: Optionally allow length-changing shuffles.
2235 /// TODO: Optionally allow splats from other elements.
2236 bool isZeroEltSplat() const {
2237 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2238 }
2239
2240 /// Return true if this shuffle mask is a transpose mask.
2241 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2242 /// even- or odd-numbered vector elements from two n-dimensional source
2243 /// vectors and write each result into consecutive elements of an
2244 /// n-dimensional destination vector. Two shuffles are necessary to complete
2245 /// the transpose, one for the even elements and another for the odd elements.
2246 /// This description closely follows how the TRN1 and TRN2 AArch64
2247 /// instructions operate.
2248 ///
2249 /// For example, a simple 2x2 matrix can be transposed with:
2250 ///
2251 /// ; Original matrix
2252 /// m0 = < a, b >
2253 /// m1 = < c, d >
2254 ///
2255 /// ; Transposed matrix
2256 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2257 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2258 ///
2259 /// For matrices having greater than n columns, the resulting nx2 transposed
2260 /// matrix is stored in two result vectors such that one vector contains
2261 /// interleaved elements from all the even-numbered rows and the other vector
2262 /// contains interleaved elements from all the odd-numbered rows. For example,
2263 /// a 2x4 matrix can be transposed with:
2264 ///
2265 /// ; Original matrix
2266 /// m0 = < a, b, c, d >
2267 /// m1 = < e, f, g, h >
2268 ///
2269 /// ; Transposed matrix
2270 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2271 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2272 static bool isTransposeMask(ArrayRef<int> Mask);
2273 static bool isTransposeMask(const Constant *Mask) {
2274    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2275 SmallVector<int, 16> MaskAsInts;
2276 getShuffleMask(Mask, MaskAsInts);
2277 return isTransposeMask(MaskAsInts);
2278 }
2279
2280 /// Return true if this shuffle transposes the elements of its inputs without
2281 /// changing the length of the vectors. This operation may also be known as a
2282 /// merge or interleave. See the description for isTransposeMask() for the
2283 /// exact specification.
2284 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2285 bool isTranspose() const {
2286 return !changesLength() && isTransposeMask(ShuffleMask);
2287 }
2288
2289 /// Return true if this shuffle mask is an extract subvector mask.
2290 /// A valid extract subvector mask returns a smaller vector from a single
2291 /// source operand. The base extraction index is returned as well.
2292 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2293 int &Index);
2294 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2295 int &Index) {
2296    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2297 // Not possible to express a shuffle mask for a scalable vector for this
2298 // case.
2299 if (isa<ScalableVectorType>(Mask->getType()))
2300 return false;
2301 SmallVector<int, 16> MaskAsInts;
2302 getShuffleMask(Mask, MaskAsInts);
2303 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2304 }
2305
2306 /// Return true if this shuffle mask is an extract subvector mask.
2307 bool isExtractSubvectorMask(int &Index) const {
2308 // Not possible to express a shuffle mask for a scalable vector for this
2309 // case.
2310 if (isa<ScalableVectorType>(getType()))
2311 return false;
2312
2313 int NumSrcElts =
2314 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2315 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2316 }
2317
2318 /// Return true if this shuffle mask is an insert subvector mask.
2319 /// A valid insert subvector mask inserts the lowest elements of a second
2320  /// source operand into an in-place first source operand.
2321  /// Both the subvector width and the insertion index are returned.
2322 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2323 int &NumSubElts, int &Index);
2324 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2325 int &NumSubElts, int &Index) {
2326    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2327 // Not possible to express a shuffle mask for a scalable vector for this
2328 // case.
2329 if (isa<ScalableVectorType>(Mask->getType()))
2330 return false;
2331 SmallVector<int, 16> MaskAsInts;
2332 getShuffleMask(Mask, MaskAsInts);
2333 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2334 }
2335
2336 /// Return true if this shuffle mask is an insert subvector mask.
2337 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2338 // Not possible to express a shuffle mask for a scalable vector for this
2339 // case.
2340 if (isa<ScalableVectorType>(getType()))
2341 return false;
2342
2343 int NumSrcElts =
2344 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2345 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2346 }
2347
2348 /// Return true if this shuffle mask replicates each of the \p VF elements
2349 /// in a vector \p ReplicationFactor times.
2350 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2351 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2352 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2353 int &VF);
2354 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2355 int &VF) {
2356    assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2357 // Not possible to express a shuffle mask for a scalable vector for this
2358 // case.
2359 if (isa<ScalableVectorType>(Mask->getType()))
2360 return false;
2361 SmallVector<int, 16> MaskAsInts;
2362 getShuffleMask(Mask, MaskAsInts);
2363 return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2364 }
2365
2366 /// Return true if this shuffle mask is a replication mask.
2367 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2368
2369 /// Change values in a shuffle permute mask assuming the two vector operands
2370 /// of length InVecNumElts have swapped position.
2371 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2372 unsigned InVecNumElts) {
2373 for (int &Idx : Mask) {
2374 if (Idx == -1)
2375 continue;
2376 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2377      assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2378             "shufflevector mask index out of range");
2379 }
2380 }
2381
2382 // Methods for support type inquiry through isa, cast, and dyn_cast:
2383 static bool classof(const Instruction *I) {
2384 return I->getOpcode() == Instruction::ShuffleVector;
2385 }
2386 static bool classof(const Value *V) {
2387 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2388 }
2389};
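A usage sketch (assuming A and B are existing <2 x i32> values and InsertPt an insertion point):
  // Sketch: concatenate A and B into a <4 x i32>, then sanity-check the
  // result with the mask classification helpers.
  SmallVector<int, 4> Mask = {0, 1, 2, 3};
  ShuffleVectorInst *Cat = new ShuffleVectorInst(A, B, Mask, "cat", InsertPt);
  assert(Cat->increasesLength() && Cat->isConcat());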
2390
2391template <>
2392struct OperandTraits<ShuffleVectorInst>
2393 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2394
2395DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2396
2397//===----------------------------------------------------------------------===//
2398// ExtractValueInst Class
2399//===----------------------------------------------------------------------===//
2400
2401/// This instruction extracts a struct member or array
2402/// element value from an aggregate value.
2403///
2404class ExtractValueInst : public UnaryInstruction {
2405 SmallVector<unsigned, 4> Indices;
2406
2407 ExtractValueInst(const ExtractValueInst &EVI);
2408
2409  /// Constructors - Create an extractvalue instruction with a base aggregate
2410 /// value and a list of indices. The first ctor can optionally insert before
2411 /// an existing instruction, the second appends the new instruction to the
2412 /// specified BasicBlock.
2413 inline ExtractValueInst(Value *Agg,
2414 ArrayRef<unsigned> Idxs,
2415 const Twine &NameStr,
2416 Instruction *InsertBefore);
2417 inline ExtractValueInst(Value *Agg,
2418 ArrayRef<unsigned> Idxs,
2419 const Twine &NameStr, BasicBlock *InsertAtEnd);
2420
2421 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2422
2423protected:
2424 // Note: Instruction needs to be a friend here to call cloneImpl.
2425 friend class Instruction;
2426
2427 ExtractValueInst *cloneImpl() const;
2428
2429public:
2430 static ExtractValueInst *Create(Value *Agg,
2431 ArrayRef<unsigned> Idxs,
2432 const Twine &NameStr = "",
2433 Instruction *InsertBefore = nullptr) {
2434 return new
2435 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2436 }
2437
2438 static ExtractValueInst *Create(Value *Agg,
2439 ArrayRef<unsigned> Idxs,
2440 const Twine &NameStr,
2441 BasicBlock *InsertAtEnd) {
2442 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2443 }
2444
2445 /// Returns the type of the element that would be extracted
2446 /// with an extractvalue instruction with the specified parameters.
2447 ///
2448 /// Null is returned if the indices are invalid for the specified type.
2449 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2450
2451 using idx_iterator = const unsigned*;
2452
2453 inline idx_iterator idx_begin() const { return Indices.begin(); }
2454 inline idx_iterator idx_end() const { return Indices.end(); }
2455 inline iterator_range<idx_iterator> indices() const {
2456 return make_range(idx_begin(), idx_end());
2457 }
2458
2459 Value *getAggregateOperand() {
2460 return getOperand(0);
2461 }
2462 const Value *getAggregateOperand() const {
2463 return getOperand(0);
2464 }
2465 static unsigned getAggregateOperandIndex() {
2466 return 0U; // get index for modifying correct operand
2467 }
2468
2469 ArrayRef<unsigned> getIndices() const {
2470 return Indices;
2471 }
2472
2473 unsigned getNumIndices() const {
2474 return (unsigned)Indices.size();
2475 }
2476
2477 bool hasIndices() const {
2478 return true;
2479 }
2480
2481 // Methods for support type inquiry through isa, cast, and dyn_cast:
2482 static bool classof(const Instruction *I) {
2483 return I->getOpcode() == Instruction::ExtractValue;
2484 }
2485 static bool classof(const Value *V) {
2486 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2487 }
2488};
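A usage sketch (AggVal is assumed to be an existing {i32, i1} aggregate value, e.g. the result of llvm.sadd.with.overflow.i32):
  // Sketch: extract the overflow bit, field 1 of the pair.
  Value *Ovf = ExtractValueInst::Create(AggVal, {1}, "ovf", InsertPt);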
2489
2490ExtractValueInst::ExtractValueInst(Value *Agg,
2491 ArrayRef<unsigned> Idxs,
2492 const Twine &NameStr,
2493 Instruction *InsertBefore)
2494 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2495 ExtractValue, Agg, InsertBefore) {
2496 init(Idxs, NameStr);
2497}
2498
2499ExtractValueInst::ExtractValueInst(Value *Agg,
2500 ArrayRef<unsigned> Idxs,
2501 const Twine &NameStr,
2502 BasicBlock *InsertAtEnd)
2503 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2504 ExtractValue, Agg, InsertAtEnd) {
2505 init(Idxs, NameStr);
2506}
2507
2508//===----------------------------------------------------------------------===//
2509// InsertValueInst Class
2510//===----------------------------------------------------------------------===//
2511
2512/// This instruction inserts a struct field or array element
2513/// value into an aggregate value.
2514///
2515class InsertValueInst : public Instruction {
2516 SmallVector<unsigned, 4> Indices;
2517
2518 InsertValueInst(const InsertValueInst &IVI);
2519
2520  /// Constructors - Create an insertvalue instruction with a base aggregate
2521 /// value, a value to insert, and a list of indices. The first ctor can
2522 /// optionally insert before an existing instruction, the second appends
2523 /// the new instruction to the specified BasicBlock.
2524 inline InsertValueInst(Value *Agg, Value *Val,
2525 ArrayRef<unsigned> Idxs,
2526 const Twine &NameStr,
2527 Instruction *InsertBefore);
2528 inline InsertValueInst(Value *Agg, Value *Val,
2529 ArrayRef<unsigned> Idxs,
2530 const Twine &NameStr, BasicBlock *InsertAtEnd);
2531
2532 /// Constructors - These two constructors are convenience methods because one
2533 /// and two index insertvalue instructions are so common.
2534 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2535 const Twine &NameStr = "",
2536 Instruction *InsertBefore = nullptr);
2537 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2538 BasicBlock *InsertAtEnd);
2539
2540 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2541 const Twine &NameStr);
2542
2543protected:
2544 // Note: Instruction needs to be a friend here to call cloneImpl.
2545 friend class Instruction;
2546
2547 InsertValueInst *cloneImpl() const;
2548
2549public:
2550 // allocate space for exactly two operands
2551 void *operator new(size_t S) { return User::operator new(S, 2); }
2552 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2553
2554 static InsertValueInst *Create(Value *Agg, Value *Val,
2555 ArrayRef<unsigned> Idxs,
2556 const Twine &NameStr = "",
2557 Instruction *InsertBefore = nullptr) {
2558 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2559 }
2560
2561 static InsertValueInst *Create(Value *Agg, Value *Val,
2562 ArrayRef<unsigned> Idxs,
2563 const Twine &NameStr,
2564 BasicBlock *InsertAtEnd) {
2565 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2566 }
2567
2568 /// Transparently provide more efficient getOperand methods.
2569  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2570
2571 using idx_iterator = const unsigned*;
2572
2573 inline idx_iterator idx_begin() const { return Indices.begin(); }
2574 inline idx_iterator idx_end() const { return Indices.end(); }
2575 inline iterator_range<idx_iterator> indices() const {
2576 return make_range(idx_begin(), idx_end());
2577 }
2578
2579 Value *getAggregateOperand() {
2580 return getOperand(0);
2581 }
2582 const Value *getAggregateOperand() const {
2583 return getOperand(0);
2584 }
2585 static unsigned getAggregateOperandIndex() {
2586 return 0U; // get index for modifying correct operand
2587 }
2588
2589 Value *getInsertedValueOperand() {
2590 return getOperand(1);
2591 }
2592 const Value *getInsertedValueOperand() const {
2593 return getOperand(1);
2594 }
2595 static unsigned getInsertedValueOperandIndex() {
2596 return 1U; // get index for modifying correct operand
2597 }
2598
2599 ArrayRef<unsigned> getIndices() const {
2600 return Indices;
2601 }
2602
2603 unsigned getNumIndices() const {
2604 return (unsigned)Indices.size();
2605 }
2606
2607 bool hasIndices() const {
2608 return true;
2609 }
2610
2611 // Methods for support type inquiry through isa, cast, and dyn_cast:
2612 static bool classof(const Instruction *I) {
2613 return I->getOpcode() == Instruction::InsertValue;
2614 }
2615 static bool classof(const Value *V) {
2616 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2617 }
2618};
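A usage sketch (PairTy, Sum, Ovf, and InsertPt are assumed; PairTy is a {i32, i1} struct type):
  // Sketch: assemble a {i32, i1} aggregate from two scalars.
  Value *Agg = UndefValue::get(PairTy);
  Agg = InsertValueInst::Create(Agg, Sum, {0}, "agg.0", InsertPt);
  Agg = InsertValueInst::Create(Agg, Ovf, {1}, "agg.1", InsertPt);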
2619
2620template <>
2621struct OperandTraits<InsertValueInst> :
2622 public FixedNumOperandTraits<InsertValueInst, 2> {
2623};
2624
2625InsertValueInst::InsertValueInst(Value *Agg,
2626 Value *Val,
2627 ArrayRef<unsigned> Idxs,
2628 const Twine &NameStr,
2629 Instruction *InsertBefore)
2630 : Instruction(Agg->getType(), InsertValue,
2631 OperandTraits<InsertValueInst>::op_begin(this),
2632 2, InsertBefore) {
2633 init(Agg, Val, Idxs, NameStr);
2634}
2635
2636InsertValueInst::InsertValueInst(Value *Agg,
2637 Value *Val,
2638 ArrayRef<unsigned> Idxs,
2639 const Twine &NameStr,
2640 BasicBlock *InsertAtEnd)
2641 : Instruction(Agg->getType(), InsertValue,
2642 OperandTraits<InsertValueInst>::op_begin(this),
2643 2, InsertAtEnd) {
2644 init(Agg, Val, Idxs, NameStr);
2645}
2646
2647DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
2648
2649//===----------------------------------------------------------------------===//
2650// PHINode Class
2651//===----------------------------------------------------------------------===//
2652
2653// PHINode - The PHINode class is used to represent the magical mystical PHI
2654// node, which cannot exist in nature but can be synthesized in a computer
2655// scientist's overactive imagination.
2656//
2657class PHINode : public Instruction {
2658 /// The number of operands actually allocated. NumOperands is
2659 /// the number actually in use.
2660 unsigned ReservedSpace;
2661
2662 PHINode(const PHINode &PN);
2663
2664 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2665 const Twine &NameStr = "",
2666 Instruction *InsertBefore = nullptr)
2667 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2668 ReservedSpace(NumReservedValues) {
2669    assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2670 setName(NameStr);
2671 allocHungoffUses(ReservedSpace);
2672 }
2673
2674 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2675 BasicBlock *InsertAtEnd)
2676 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2677 ReservedSpace(NumReservedValues) {
2678    assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
31: Called C++ object pointer is null
2679 setName(NameStr);
2680 allocHungoffUses(ReservedSpace);
2681 }
2682
2683protected:
2684 // Note: Instruction needs to be a friend here to call cloneImpl.
2685 friend class Instruction;
2686
2687 PHINode *cloneImpl() const;
2688
2689 // allocHungoffUses - this is more complicated than the generic
2690 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2691 // values and pointers to the incoming blocks, all in one allocation.
2692 void allocHungoffUses(unsigned N) {
2693 User::allocHungoffUses(N, /* IsPhi */ true);
2694 }
2695
2696public:
2697 /// Constructors - NumReservedValues is a hint for the number of incoming
2698 /// edges that this phi node will have (use 0 if you really have no idea).
2699 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2700 const Twine &NameStr = "",
2701 Instruction *InsertBefore = nullptr) {
2702 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2703 }
2704
2705 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2706 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2707 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
29: Passing null pointer value via 1st parameter 'Ty'
30: Calling constructor for 'PHINode'
2708 }
2709
2710 /// Provide fast operand accessors
2711  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2712
2713 // Block iterator interface. This provides access to the list of incoming
2714 // basic blocks, which parallels the list of incoming values.
2715
2716 using block_iterator = BasicBlock **;
2717 using const_block_iterator = BasicBlock * const *;
2718
2719 block_iterator block_begin() {
2720 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2721 }
2722
2723 const_block_iterator block_begin() const {
2724 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2725 }
2726
2727 block_iterator block_end() {
2728 return block_begin() + getNumOperands();
2729 }
2730
2731 const_block_iterator block_end() const {
2732 return block_begin() + getNumOperands();
2733 }
2734
2735 iterator_range<block_iterator> blocks() {
2736 return make_range(block_begin(), block_end());
2737 }
2738
2739 iterator_range<const_block_iterator> blocks() const {
2740 return make_range(block_begin(), block_end());
2741 }
2742
2743 op_range incoming_values() { return operands(); }
2744
2745 const_op_range incoming_values() const { return operands(); }
2746
2747 /// Return the number of incoming edges
2748 ///
2749 unsigned getNumIncomingValues() const { return getNumOperands(); }
2750
2751 /// Return incoming value number x
2752 ///
2753 Value *getIncomingValue(unsigned i) const {
2754 return getOperand(i);
2755 }
2756 void setIncomingValue(unsigned i, Value *V) {
2757    assert(V && "PHI node got a null value!");
2758    assert(getType() == V->getType() &&
2759           "All operands to PHI node must be the same type as the PHI node!");
2760 setOperand(i, V);
2761 }
2762
2763 static unsigned getOperandNumForIncomingValue(unsigned i) {
2764 return i;
2765 }
2766
2767 static unsigned getIncomingValueNumForOperand(unsigned i) {
2768 return i;
2769 }
2770
2771 /// Return incoming basic block number @p i.
2772 ///
2773 BasicBlock *getIncomingBlock(unsigned i) const {
2774 return block_begin()[i];
2775 }
2776
2777 /// Return incoming basic block corresponding
2778 /// to an operand of the PHI.
2779 ///
2780 BasicBlock *getIncomingBlock(const Use &U) const {
2781 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2782 return getIncomingBlock(unsigned(&U - op_begin()));
2783 }
2784
2785 /// Return incoming basic block corresponding
2786 /// to value use iterator.
2787 ///
2788 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2789 return getIncomingBlock(I.getUse());
2790 }
2791
2792 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2793 assert(BB && "PHI node got a null basic block!");
2794 block_begin()[i] = BB;
2795 }
2796
2797 /// Replace every incoming basic block \p Old with basic block \p New.
2798 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2799 assert(New && Old && "PHI node got a null basic block!");
2800 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2801 if (getIncomingBlock(Op) == Old)
2802 setIncomingBlock(Op, New);
2803 }
2804
2805 /// Add an incoming value to the end of the PHI list
2806 ///
2807 void addIncoming(Value *V, BasicBlock *BB) {
2808 if (getNumOperands() == ReservedSpace)
2809 growOperands(); // Get more space!
2810 // Initialize some new operands.
2811 setNumHungOffUseOperands(getNumOperands() + 1);
2812 setIncomingValue(getNumOperands() - 1, V);
2813 setIncomingBlock(getNumOperands() - 1, BB);
2814 }
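  // For example (hypothetical 'PN', 'V', 'BB'): each call appends one
  // value/block pair, regrowing the hung-off use list only when
  // ReservedSpace is exhausted.
  //
  //   PN->addIncoming(V, BB);
  //   assert(PN->getIncomingValueForBlock(BB) == V);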
2815
2816 /// Remove an incoming value. This is useful if a
2817 /// predecessor basic block is deleted. The value removed is returned.
2818 ///
2819 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2820 /// is true), the PHI node is destroyed and any uses of it are replaced with
2821 /// dummy values. The only time there should be zero incoming values to a PHI
2822 /// node is when the block is dead, so this strategy is sound.
2823 ///
2824 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2825
2826 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2827 int Idx = getBasicBlockIndex(BB);
2828 assert(Idx >= 0 && "Invalid basic block argument to remove!");
2829 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2830 }
2831
2832 /// Return the first index of the specified basic
2833 /// block in the value list for this PHI. Returns -1 if no instance.
2834 ///
2835 int getBasicBlockIndex(const BasicBlock *BB) const {
2836 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2837 if (block_begin()[i] == BB)
2838 return i;
2839 return -1;
2840 }
2841
2842 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2843 int Idx = getBasicBlockIndex(BB);
2844 assert(Idx >= 0 && "Invalid basic block argument!");
2845 return getIncomingValue(Idx);
2846 }
2847
2848 /// Set every incoming value for block \p BB to \p V.
2849 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2850 assert(BB && "PHI node got a null basic block!");
2851 bool Found = false;
2852 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2853 if (getIncomingBlock(Op) == BB) {
2854 Found = true;
2855 setIncomingValue(Op, V);
2856 }
2857 (void)Found;
2858 assert(Found && "Invalid basic block argument to set!");
2859 }
2860
2861 /// If the specified PHI node always merges together the
2862 /// same value, return the value; otherwise return null.
2863 Value *hasConstantValue() const;
2864
2865 /// Whether the specified PHI node always merges
2866 /// together the same value, assuming undefs are equal to a unique
2867 /// non-undef value.
2868 bool hasConstantOrUndefValue() const;
2869
2870 /// Return true if the PHI node is complete, i.e. every one of its parent's
2871 /// predecessors has an incoming value in this PHI; otherwise return false.
2872 bool isComplete() const {
2873 return llvm::all_of(predecessors(getParent()),
2874 [this](const BasicBlock *Pred) {
2875 return getBasicBlockIndex(Pred) >= 0;
2876 });
2877 }
2878
2879 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2880 static bool classof(const Instruction *I) {
2881 return I->getOpcode() == Instruction::PHI;
2882 }
2883 static bool classof(const Value *V) {
2884 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2885 }
2886
2887private:
2888 void growOperands();
2889};
2890
2891template <>
2892struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2893};
2894
2895DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
2896
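// Putting the PHINode API above together - a hedged sketch, assuming
// hypothetical blocks 'ThenBB' and 'ElseBB', same-typed values 'ThenV' and
// 'ElseV', and a non-empty join block 'MergeBB':
//
//   PHINode *PN = PHINode::Create(ThenV->getType(), /*NumReservedValues=*/2,
//                                 "merge", /*InsertBefore=*/&MergeBB->front());
//   PN->addIncoming(ThenV, ThenBB);
//   PN->addIncoming(ElseV, ElseBB);
//   Value *V = PN->getIncomingValueForBlock(ThenBB); // == ThenV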
2897//===----------------------------------------------------------------------===//
2898// LandingPadInst Class
2899//===----------------------------------------------------------------------===//
2900
2901//===---------------------------------------------------------------------------
2902/// The landingpad instruction holds all of the information
2903/// necessary to generate correct exception handling. The landingpad instruction
2904/// cannot be moved from the top of a landing pad block, which itself is
2905/// accessible only from the 'unwind' edge of an invoke. This uses the
2906/// SubclassData field in Value to store whether or not the landingpad is a
2907/// cleanup.
2908///
2909class LandingPadInst : public Instruction {
2910 using CleanupField = BoolBitfieldElementT<0>;
2911
2912 /// The number of operands actually allocated. NumOperands is
2913 /// the number actually in use.
2914 unsigned ReservedSpace;
2915
2916 LandingPadInst(const LandingPadInst &LP);
2917
2918public:
2919 enum ClauseType { Catch, Filter };
2920
2921private:
2922 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2923 const Twine &NameStr, Instruction *InsertBefore);
2924 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2925 const Twine &NameStr, BasicBlock *InsertAtEnd);
2926
2927 // Allocate space for exactly zero operands.
2928 void *operator new(size_t S) { return User::operator new(S); }
2929
2930 void growOperands(unsigned Size);
2931 void init(unsigned NumReservedValues, const Twine &NameStr);
2932
2933protected:
2934 // Note: Instruction needs to be a friend here to call cloneImpl.
2935 friend class Instruction;
2936
2937 LandingPadInst *cloneImpl() const;
2938
2939public:
2940 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2941
2942 /// Constructors - NumReservedClauses is a hint for the number of incoming
2943 /// clauses that this landingpad will have (use 0 if you really have no idea).
2944 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2945 const Twine &NameStr = "",
2946 Instruction *InsertBefore = nullptr);
2947 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2948 const Twine &NameStr, BasicBlock *InsertAtEnd);
2949
2950 /// Provide fast operand accessors
2951 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2952
2953 /// Return 'true' if this landingpad instruction is a
2954 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2955 /// doesn't catch the exception.
2956 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2957
2958 /// Indicate that this landingpad instruction is a cleanup.
2959 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2960
2961 /// Add a catch or filter clause to the landing pad.
2962 void addClause(Constant *ClauseVal);
2963
2964 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2965 /// determine what type of clause this is.
2966 Constant *getClause(unsigned Idx) const {
2967 return cast<Constant>(getOperandList()[Idx]);
2968 }
2969
2970 /// Return 'true' if the clause at index Idx is a catch clause.
2971 bool isCatch(unsigned Idx) const {
2972 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2973 }
2974
2975 /// Return 'true' if the clause at index Idx is a filter clause.
2976 bool isFilter(unsigned Idx) const {
2977 return isa<ArrayType>(getOperandList()[Idx]->getType());
2978 }
2979
2980 /// Get the number of clauses for this landing pad.
2981 unsigned getNumClauses() const { return getNumOperands(); }
2982
2983 /// Grow the size of the operand list to accommodate the new
2984 /// number of clauses.
2985 void reserveClauses(unsigned Size) { growOperands(Size); }
2986
2987 // Methods for support type inquiry through isa, cast, and dyn_cast:
2988 static bool classof(const Instruction *I) {
2989 return I->getOpcode() == Instruction::LandingPad;
2990 }
2991 static bool classof(const Value *V) {
2992 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2993 }
2994};
2995
2996template <>
2997struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
2998};
2999
3000DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
3001
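// A hedged usage sketch for the LandingPadInst API above, assuming a
// hypothetical result type 'LPadTy', an insertion point 'IP', and a type-info
// constant 'TypeInfo' (typically a pointer-typed global):
//
//   LandingPadInst *LP =
//       LandingPadInst::Create(LPadTy, /*NumReservedClauses=*/1, "lpad", IP);
//   LP->setCleanup(true);    // also run when unwinding past all clauses
//   LP->addClause(TypeInfo); // non-array clause type => catch clause
//   assert(LP->isCatch(0));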
3002//===----------------------------------------------------------------------===//
3003// ReturnInst Class
3004//===----------------------------------------------------------------------===//
3005
3006//===---------------------------------------------------------------------------
3007/// Return a value (possibly void) from a function. Execution
3008/// does not continue in this function any longer.
3009///
3010class ReturnInst : public Instruction {
3011 ReturnInst(const ReturnInst &RI);
3012
3013private:
3014 // ReturnInst constructors:
3015 // ReturnInst() - 'ret void' instruction
3016 // ReturnInst( null) - 'ret void' instruction
3017 // ReturnInst(Value* X) - 'ret X' instruction
3018 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
3019 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
3020 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
3021 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
3022 //
3023 // NOTE: If the Value* passed is of type void then the constructor behaves as
3024 // if it was passed NULL.
3025 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3026 Instruction *InsertBefore = nullptr);
3027 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3028 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3029
3030protected:
3031 // Note: Instruction needs to be a friend here to call cloneImpl.
3032 friend class Instruction;
3033
3034 ReturnInst *cloneImpl() const;
3035
3036public:
3037 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3038 Instruction *InsertBefore = nullptr) {
3039 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3040 }
3041
3042 static ReturnInst* Create(LLVMContext &C, Value *retVal,
3043 BasicBlock *InsertAtEnd) {
3044 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3045 }
3046
3047 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3048 return new(0) ReturnInst(C, InsertAtEnd);
3049 }
3050
3051 /// Provide fast operand accessors
3052 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3053
3054 /// Convenience accessor. Returns null if there is no return value.
3055 Value *getReturnValue() const {
3056 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3057 }
3058
3059 unsigned getNumSuccessors() const { return 0; }
3060
3061 // Methods for support type inquiry through isa, cast, and dyn_cast:
3062 static bool classof(const Instruction *I) {
3063 return (I->getOpcode() == Instruction::Ret);
3064 }
3065 static bool classof(const Value *V) {
3066 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3067 }
3068
3069private:
3070 BasicBlock *getSuccessor(unsigned idx) const {
3071 llvm_unreachable("ReturnInst has no successors!");
3072 }
3073
3074 void setSuccessor(unsigned idx, BasicBlock *B) {
3075 llvm_unreachable("ReturnInst has no successors!");
3076 }
3077};
3078
3079template <>
3080struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3081};
3082
3083DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
3084
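// The ReturnInst::Create overloads above map onto the two IR forms, e.g.
// (hypothetical LLVMContext 'Ctx', Value 'RV', and block 'BB'):
//
//   ReturnInst::Create(Ctx, /*retVal=*/nullptr, BB); // 'ret void'
//   ReturnInst::Create(Ctx, RV, BB);                 // 'ret <ty> RV'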
3085//===----------------------------------------------------------------------===//
3086// BranchInst Class
3087//===----------------------------------------------------------------------===//
3088
3089//===---------------------------------------------------------------------------
3090/// Conditional or Unconditional Branch instruction.
3091///
3092class BranchInst : public Instruction {
3093 /// Ops list - Branches are strange. The operands are ordered:
3094 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3095 /// they don't have to check for cond/uncond branchness. These are mostly
3096 /// accessed relative from op_end().
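  /// For instance, an unconditional 'br %T' stores the single operand {T},
  /// while a conditional 'br %C, %T, %F' stores {C, F, T}; in both cases
  /// Op<-1>() is the true destination, which is why getSuccessor(i) below
  /// can simply read (&Op<-1>() - i).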
3097 BranchInst(const BranchInst &BI);
3098 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3099 // BranchInst(BB *B) - 'br B'
3100 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3101 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3102 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3103 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3104 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3105 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3106 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3107 Instruction *InsertBefore = nullptr);
3108 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3109 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3110 BasicBlock *InsertAtEnd);
3111
3112 void AssertOK();
3113
3114protected:
3115 // Note: Instruction needs to be a friend here to call cloneImpl.
3116 friend class Instruction;
3117
3118 BranchInst *cloneImpl() const;
3119
3120public:
3121 /// Iterator type that casts an operand to a basic block.
3122 ///
3123 /// This only makes sense because the successors are stored as adjacent
3124 /// operands for branch instructions.
3125 struct succ_op_iterator
3126 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3127 std::random_access_iterator_tag, BasicBlock *,
3128 ptrdiff_t, BasicBlock *, BasicBlock *> {
3129 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3130
3131 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3132 BasicBlock *operator->() const { return operator*(); }
3133 };
3134
3135 /// The const version of `succ_op_iterator`.
3136 struct const_succ_op_iterator
3137 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3138 std::random_access_iterator_tag,
3139 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3140 const BasicBlock *> {
3141 explicit const_succ_op_iterator(const_value_op_iterator I)
3142 : iterator_adaptor_base(I) {}
3143
3144 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3145 const BasicBlock *operator->() const { return operator*(); }
3146 };
3147
3148 static BranchInst *Create(BasicBlock *IfTrue,
3149 Instruction *InsertBefore = nullptr) {
3150 return new(1) BranchInst(IfTrue, InsertBefore);
3151 }
3152
3153 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3154 Value *Cond, Instruction *InsertBefore = nullptr) {
3155 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3156 }
3157
3158 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3159 return new(1) BranchInst(IfTrue, InsertAtEnd);
3160 }
3161
3162 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3163 Value *Cond, BasicBlock *InsertAtEnd) {
3164 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3165 }
3166
3167 /// Transparently provide more efficient getOperand methods.
3168 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3169
3170 bool isUnconditional() const { return getNumOperands() == 1; }
3171 bool isConditional() const { return getNumOperands() == 3; }
3172
3173 Value *getCondition() const {
3174 assert(isConditional() && "Cannot get condition of an uncond branch!");
3175 return Op<-3>();
3176 }
3177
3178 void setCondition(Value *V) {
3179 assert(isConditional() && "Cannot set condition of unconditional branch!");
3180 Op<-3>() = V;
3181 }
3182
3183 unsigned getNumSuccessors() const { return 1+isConditional(); }
3184
3185 BasicBlock *getSuccessor(unsigned i) const {
3186 assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3187 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3188 }
3189
3190 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3191 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3192 *(&Op<-1>() - idx) = NewSucc;
3193 }
3194
3195 /// Swap the successors of this branch instruction.
3196 ///
3197 /// Swaps the successors of the branch instruction. This also swaps any
3198 /// branch weight metadata associated with the instruction so that it
3199 /// continues to map correctly to each operand.
3200 void swapSuccessors();
3201
3202 iterator_range<succ_op_iterator> successors() {
3203 return make_range(
3204 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3205 succ_op_iterator(value_op_end()));
3206 }
3207
3208 iterator_range<const_succ_op_iterator> successors() const {
3209 return make_range(const_succ_op_iterator(
3210 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3211 const_succ_op_iterator(value_op_end()));
3212 }
3213
3214 // Methods for support type inquiry through isa, cast, and dyn_cast:
3215 static bool classof(const Instruction *I) {
3216 return (I->getOpcode() == Instruction::Br);
3217 }
3218 static bool classof(const Value *V) {
3219 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3220 }
3221};
3222
3223template <>
3224struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3225};
3226
3227DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
3228
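// A hedged sketch of the two BranchInst forms above, assuming hypothetical
// blocks 'TBB', 'FBB', 'BB1', 'BB2' and an i1-typed value 'Cond':
//
//   BranchInst::Create(TBB, BB1);                             // 'br TBB'
//   BranchInst *BI = BranchInst::Create(TBB, FBB, Cond, BB2); // 'br Cond, TBB, FBB'
//   assert(BI->isConditional() && BI->getSuccessor(1) == FBB);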
3229//===----------------------------------------------------------------------===//
3230// SwitchInst Class
3231//===----------------------------------------------------------------------===//
3232
3233//===---------------------------------------------------------------------------
3234/// Multiway switch
3235///
3236class SwitchInst : public Instruction {
3237 unsigned ReservedSpace;
3238
3239 // Operand[0] = Value to switch on
3240 // Operand[1] = Default basic block destination
3241 // Operand[2n ] = Value to match
3242 // Operand[2n+1] = BasicBlock to go to on match
3243 SwitchInst(const SwitchInst &SI);
3244
3245 /// Create a new switch instruction, specifying a value to switch on and a
3246 /// default destination. The number of additional cases can be specified here
3247 /// to make memory allocation more efficient. This constructor can also
3248 /// auto-insert before another instruction.
3249 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3250 Instruction *InsertBefore);
3251
3252 /// Create a new switch instruction, specifying a value to switch on and a
3253 /// default destination. The number of additional cases can be specified here
3254 /// to make memory allocation more efficient. This constructor also
3255 /// auto-inserts at the end of the specified BasicBlock.
3256 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3257 BasicBlock *InsertAtEnd);
3258
3259 // allocate space for exactly zero operands
3260 void *operator new(size_t S) { return User::operator new(S); }
3261
3262 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3263 void growOperands();
3264
3265protected:
3266 // Note: Instruction needs to be a friend here to call cloneImpl.
3267 friend class Instruction;
3268
3269 SwitchInst *cloneImpl() const;
3270
3271public:
3272 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3273
3274 // Equal to -2 when viewed as a signed value.
3275 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3276
3277 template <typename CaseHandleT> class CaseIteratorImpl;
3278
3279 /// A handle to a particular switch case. It exposes a convenient interface
3280 /// to both the case value and the successor block.
3281 ///
3282 /// We define this as a template and instantiate it to form both a const and
3283 /// non-const handle.
3284 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3285 class CaseHandleImpl {
3286 // Directly befriend both const and non-const iterators.
3287 friend class SwitchInst::CaseIteratorImpl<
3288 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3289
3290 protected:
3291 // Expose the switch type we're parameterized with to the iterator.
3292 using SwitchInstType = SwitchInstT;
3293
3294 SwitchInstT *SI;
3295 ptrdiff_t Index;
3296
3297 CaseHandleImpl() = default;
3298 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3299
3300 public:
3301 /// Resolves case value for current case.
3302 ConstantIntT *getCaseValue() const {
3303 assert((unsigned)Index < SI->getNumCases() &&
3304 "Index out of the number of cases.");
3305 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3306 }
3307
3308 /// Resolves successor for current case.
3309 BasicBlockT *getCaseSuccessor() const {
3310 assert(((unsigned)Index < SI->getNumCases() ||
3311 (unsigned)Index == DefaultPseudoIndex) &&
3312 "Index out of the number of cases.");
3313 return SI->getSuccessor(getSuccessorIndex());
3314 }
3315
3316 /// Returns the index of the current case.
3317 unsigned getCaseIndex() const { return Index; }
3318
3319 /// Returns successor index for current case successor.
3320 unsigned getSuccessorIndex() const {
3321 assert(((unsigned)Index == DefaultPseudoIndex ||
3322 (unsigned)Index < SI->getNumCases()) &&
3323 "Index out of the number of cases.");
3324 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3325 }
3326
3327 bool operator==(const CaseHandleImpl &RHS) const {
3328 assert(SI == RHS.SI && "Incompatible operators.");
3329 return Index == RHS.Index;
3330 }
3331 };
3332
3333 using ConstCaseHandle =
3334 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3335
3336 class CaseHandle
3337 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3338 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3339
3340 public:
3341 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3342
3343 /// Sets the new value for current case.
3344 void setValue(ConstantInt *V) const {
3345 assert((unsigned)Index < SI->getNumCases() &&
3346 "Index out of the number of cases.");
3347 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3348 }
3349
3350 /// Sets the new successor for current case.
3351 void setSuccessor(BasicBlock *S) const {
3352 SI->setSuccessor(getSuccessorIndex(), S);
3353 }
3354 };
3355
3356 template <typename CaseHandleT>
3357 class CaseIteratorImpl
3358 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3359 std::random_access_iterator_tag,
3360 const CaseHandleT> {
3361 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3362
3363 CaseHandleT Case;
3364
3365 public:
3366 /// Default constructed iterator is in an invalid state until assigned to
3367 /// a case for a particular switch.
3368 CaseIteratorImpl() = default;
3369
3370 /// Initializes case iterator for given SwitchInst and for given
3371 /// case number.
3372 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3373
3374 /// Initializes case iterator for given SwitchInst and for given
3375 /// successor index.
3376 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3377 unsigned SuccessorIndex) {
3378 assert(SuccessorIndex < SI->getNumSuccessors() &&
3379 "Successor index # out of range!");
3380 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3381 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3382 }
3383
3384 /// Support converting to the const variant. This will be a no-op for const
3385 /// variant.
3386 operator CaseIteratorImpl<ConstCaseHandle>() const {
3387 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3388 }
3389
3390 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3391 // Check index correctness after addition.
3392 // Note: Index == getNumCases() means end().
3393 assert(Case.Index + N >= 0 &&
3394 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3395 "Case.Index out of the number of cases.");
3396 Case.Index += N;
3397 return *this;
3398 }
3399 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3400 // Check index correctness after subtraction.
3401 // Note: Case.Index == getNumCases() means end().
3402 assert(Case.Index - N >= 0 &&
3403 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3404 "Case.Index out of the number of cases.");
3405 Case.Index -= N;
3406 return *this;
3407 }
3408 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3409 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3410 return Case.Index - RHS.Case.Index;
3411 }
3412 bool operator==(const CaseIteratorImpl &RHS) const {
3413 return Case == RHS.Case;
3414 }
3415 bool operator<(const CaseIteratorImpl &RHS) const {
3416 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3417 return Case.Index < RHS.Case.Index;
3418 }
3419 const CaseHandleT &operator*() const { return Case; }
3420 };
3421
3422 using CaseIt = CaseIteratorImpl<CaseHandle>;
3423 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3424
3425 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3426 unsigned NumCases,
3427 Instruction *InsertBefore = nullptr) {
3428 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3429 }
3430
3431 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3432 unsigned NumCases, BasicBlock *InsertAtEnd) {
3433 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3434 }
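  // For example (hypothetical 'Cond', 'DefaultBB', 'IP'): passing the expected
  // case count up front lets init() reserve the hung-off operand list once.
  //
  //   SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/4, IP);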
3435
3436 /// Provide fast operand accessors
3437 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3438
3439 // Accessor Methods for Switch stmt
3440 Value *getCondition() const { return getOperand(0); }
3441 void setCondition(Value *V) { setOperand(0, V); }
3442
3443 BasicBlock *getDefaultDest() const {
3444 return cast<BasicBlock>(getOperand(1));
3445 }
3446
3447 void setDefaultDest(BasicBlock *DefaultCase) {
3448 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3449 }
3450
3451 /// Return the number of 'cases' in this switch instruction, excluding the
3452 /// default case.
3453 unsigned getNumCases() const {
3454 return getNumOperands()/2 - 1;
3455 }
3456
3457 /// Returns a read/write iterator that points to the first case in the
3458 /// SwitchInst.
3459 CaseIt case_begin() {
3460 return CaseIt(this, 0);
3461 }
3462
3463 /// Returns a read-only iterator that points to the first case in the
3464 /// SwitchInst.
3465 ConstCaseIt case_begin() const {
3466 return ConstCaseIt(this, 0);
3467 }
3468
3469 /// Returns a read/write iterator that points one past the last case in the
3470 /// SwitchInst.
3471 CaseIt case_end() {
3472 return CaseIt(this, getNumCases());
3473 }
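  // Together with case_begin() this supports the usual iteration idiom, e.g.
  // for a hypothetical 'SI' of type SwitchInst*:
  //
  //   for (SwitchInst::CaseIt I = SI->case_begin(), E = SI->case_end();
  //        I != E; ++I) {
  //     ConstantInt *CV = (*I).getCaseValue();
  //     BasicBlock *Dest = (*I).getCaseSuccessor();
  //   }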
3474
3475 /// Returns a read-only iterator that points one past the last case in the
3476 /// SwitchInst.
3477 ConstCaseIt case_end() const {