Bug Summary

File: llvm/include/llvm/IR/Instructions.h
Warning: line 2619, column 5
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name IndirectBrExpandPass.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/lib/CodeGen -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/CodeGen -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem 
/usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/lib/CodeGen -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7=. -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-06-13-111025-38230-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/CodeGen/IndirectBrExpandPass.cpp

/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/CodeGen/IndirectBrExpandPass.cpp

1//===- IndirectBrExpandPass.cpp - Expand indirectbr to switch -------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8/// \file
9///
10/// Implements an expansion pass to turn `indirectbr` instructions in the IR
11/// into `switch` instructions. This works by enumerating the basic blocks in
12/// a dense range of integers, replacing each `blockaddr` constant with the
13/// corresponding integer constant, and then building a switch that maps from
14/// the integers to the actual blocks. All of the indirectbr instructions in the
15/// function are redirected to this common switch.
16///
17/// While this is generically useful if a target is unable to codegen
18/// `indirectbr` natively, it is primarily useful when there is some desire to
19/// get the builtin non-jump-table lowering of a switch even when the input
20/// source contained an explicit indirect branch construct.
21///
22/// Note that it doesn't make any sense to enable this pass unless a target also
23/// disables jump-table lowering of switches. Doing that is likely to pessimize
24/// the code.
25///
26//===----------------------------------------------------------------------===//
27
28#include "llvm/ADT/STLExtras.h"
29#include "llvm/ADT/Sequence.h"
30#include "llvm/ADT/SmallVector.h"
31#include "llvm/Analysis/DomTreeUpdater.h"
32#include "llvm/CodeGen/TargetPassConfig.h"
33#include "llvm/CodeGen/TargetSubtargetInfo.h"
34#include "llvm/IR/BasicBlock.h"
35#include "llvm/IR/Dominators.h"
36#include "llvm/IR/Function.h"
37#include "llvm/IR/IRBuilder.h"
38#include "llvm/IR/InstIterator.h"
39#include "llvm/IR/Instruction.h"
40#include "llvm/IR/Instructions.h"
41#include "llvm/InitializePasses.h"
42#include "llvm/Pass.h"
43#include "llvm/Support/Debug.h"
44#include "llvm/Support/ErrorHandling.h"
45#include "llvm/Support/raw_ostream.h"
46#include "llvm/Target/TargetMachine.h"
47
48using namespace llvm;
49
50#define DEBUG_TYPE"indirectbr-expand" "indirectbr-expand"
51
52namespace {
53
54class IndirectBrExpandPass : public FunctionPass {
55 const TargetLowering *TLI = nullptr;
56
57public:
58 static char ID; // Pass identification, replacement for typeid
59
60 IndirectBrExpandPass() : FunctionPass(ID) {
61 initializeIndirectBrExpandPassPass(*PassRegistry::getPassRegistry());
62 }
63
64 void getAnalysisUsage(AnalysisUsage &AU) const override {
65 AU.addPreserved<DominatorTreeWrapperPass>();
66 }
67
68 bool runOnFunction(Function &F) override;
69};
70
71} // end anonymous namespace
72
73char IndirectBrExpandPass::ID = 0;
74
75INITIALIZE_PASS_BEGIN(IndirectBrExpandPass, DEBUG_TYPE,static void *initializeIndirectBrExpandPassPassOnce(PassRegistry
&Registry) {
76 "Expand indirectbr instructions", false, false)static void *initializeIndirectBrExpandPassPassOnce(PassRegistry
&Registry) {
77INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)initializeDominatorTreeWrapperPassPass(Registry);
78INITIALIZE_PASS_END(IndirectBrExpandPass, DEBUG_TYPE,PassInfo *PI = new PassInfo( "Expand indirectbr instructions"
, "indirectbr-expand", &IndirectBrExpandPass::ID, PassInfo
::NormalCtor_t(callDefaultCtor<IndirectBrExpandPass>), false
, false); Registry.registerPass(*PI, true); return PI; } static
llvm::once_flag InitializeIndirectBrExpandPassPassFlag; void
llvm::initializeIndirectBrExpandPassPass(PassRegistry &Registry
) { llvm::call_once(InitializeIndirectBrExpandPassPassFlag, initializeIndirectBrExpandPassPassOnce
, std::ref(Registry)); }
79 "Expand indirectbr instructions", false, false)PassInfo *PI = new PassInfo( "Expand indirectbr instructions"
, "indirectbr-expand", &IndirectBrExpandPass::ID, PassInfo
::NormalCtor_t(callDefaultCtor<IndirectBrExpandPass>), false
, false); Registry.registerPass(*PI, true); return PI; } static
llvm::once_flag InitializeIndirectBrExpandPassPassFlag; void
llvm::initializeIndirectBrExpandPassPass(PassRegistry &Registry
) { llvm::call_once(InitializeIndirectBrExpandPassPassFlag, initializeIndirectBrExpandPassPassOnce
, std::ref(Registry)); }
80
81FunctionPass *llvm::createIndirectBrExpandPass() {
82 return new IndirectBrExpandPass();
83}
84
85bool IndirectBrExpandPass::runOnFunction(Function &F) {
86 auto &DL = F.getParent()->getDataLayout();
87 auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
1
Calling 'Pass::getAnalysisIfAvailable'
7
Returning from 'Pass::getAnalysisIfAvailable'
88 if (!TPC)
8
Assuming 'TPC' is non-null
9
Taking false branch
89 return false;
90
91 auto &TM = TPC->getTM<TargetMachine>();
92 auto &STI = *TM.getSubtargetImpl(F);
93 if (!STI.enableIndirectBrExpand())
10
Assuming the condition is false
11
Taking false branch
94 return false;
95 TLI = STI.getTargetLowering();
96
97 Optional<DomTreeUpdater> DTU;
98 if (auto *DTWP
11.1
'DTWP' is null
11.1
'DTWP' is null
11.1
'DTWP' is null
11.1
'DTWP' is null
= getAnalysisIfAvailable<DominatorTreeWrapperPass>())
12
Taking false branch
99 DTU.emplace(DTWP->getDomTree(), DomTreeUpdater::UpdateStrategy::Lazy);
100
101 SmallVector<IndirectBrInst *, 1> IndirectBrs;
102
103 // Set of all potential successors for indirectbr instructions.
104 SmallPtrSet<BasicBlock *, 4> IndirectBrSuccs;
105
106 // Build a list of indirectbrs that we want to rewrite.
107 for (BasicBlock &BB : F)
108 if (auto *IBr = dyn_cast<IndirectBrInst>(BB.getTerminator())) {
109 // Handle the degenerate case of no successors by replacing the indirectbr
110 // with unreachable as there is no successor available.
111 if (IBr->getNumSuccessors() == 0) {
112 (void)new UnreachableInst(F.getContext(), IBr);
113 IBr->eraseFromParent();
114 continue;
115 }
116
117 IndirectBrs.push_back(IBr);
118 for (BasicBlock *SuccBB : IBr->successors())
119 IndirectBrSuccs.insert(SuccBB);
120 }
121
122 if (IndirectBrs.empty())
13
Calling 'SmallVectorBase::empty'
16
Returning from 'SmallVectorBase::empty'
17
Taking false branch
123 return false;
124
125 // If we need to replace any indirectbrs we need to establish integer
126 // constants that will correspond to each of the basic blocks in the function
127 // whose address escapes. We do that here and rewrite all the blockaddress
128 // constants to just be those integer constants cast to a pointer type.
129 SmallVector<BasicBlock *, 4> BBs;
130
131 for (BasicBlock &BB : F) {
132 // Skip blocks that aren't successors to an indirectbr we're going to
133 // rewrite.
134 if (!IndirectBrSuccs.count(&BB))
18
Assuming the condition is true
19
Taking true branch
135 continue;
20
Execution continues on line 131
136
137 auto IsBlockAddressUse = [&](const Use &U) {
138 return isa<BlockAddress>(U.getUser());
139 };
140 auto BlockAddressUseIt = llvm::find_if(BB.uses(), IsBlockAddressUse);
141 if (BlockAddressUseIt == BB.use_end())
142 continue;
143
144 assert(std::find_if(std::next(BlockAddressUseIt), BB.use_end(),(static_cast <bool> (std::find_if(std::next(BlockAddressUseIt
), BB.use_end(), IsBlockAddressUse) == BB.use_end() &&
"There should only ever be a single blockaddress use because it is "
"a constant and should be uniqued.") ? void (0) : __assert_fail
("std::find_if(std::next(BlockAddressUseIt), BB.use_end(), IsBlockAddressUse) == BB.use_end() && \"There should only ever be a single blockaddress use because it is \" \"a constant and should be uniqued.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/CodeGen/IndirectBrExpandPass.cpp"
, 147, __extension__ __PRETTY_FUNCTION__))
145 IsBlockAddressUse) == BB.use_end() &&(static_cast <bool> (std::find_if(std::next(BlockAddressUseIt
), BB.use_end(), IsBlockAddressUse) == BB.use_end() &&
"There should only ever be a single blockaddress use because it is "
"a constant and should be uniqued.") ? void (0) : __assert_fail
("std::find_if(std::next(BlockAddressUseIt), BB.use_end(), IsBlockAddressUse) == BB.use_end() && \"There should only ever be a single blockaddress use because it is \" \"a constant and should be uniqued.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/CodeGen/IndirectBrExpandPass.cpp"
, 147, __extension__ __PRETTY_FUNCTION__))
146 "There should only ever be a single blockaddress use because it is "(static_cast <bool> (std::find_if(std::next(BlockAddressUseIt
), BB.use_end(), IsBlockAddressUse) == BB.use_end() &&
"There should only ever be a single blockaddress use because it is "
"a constant and should be uniqued.") ? void (0) : __assert_fail
("std::find_if(std::next(BlockAddressUseIt), BB.use_end(), IsBlockAddressUse) == BB.use_end() && \"There should only ever be a single blockaddress use because it is \" \"a constant and should be uniqued.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/CodeGen/IndirectBrExpandPass.cpp"
, 147, __extension__ __PRETTY_FUNCTION__))
147 "a constant and should be uniqued.")(static_cast <bool> (std::find_if(std::next(BlockAddressUseIt
), BB.use_end(), IsBlockAddressUse) == BB.use_end() &&
"There should only ever be a single blockaddress use because it is "
"a constant and should be uniqued.") ? void (0) : __assert_fail
("std::find_if(std::next(BlockAddressUseIt), BB.use_end(), IsBlockAddressUse) == BB.use_end() && \"There should only ever be a single blockaddress use because it is \" \"a constant and should be uniqued.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/CodeGen/IndirectBrExpandPass.cpp"
, 147, __extension__ __PRETTY_FUNCTION__))
;
148
149 auto *BA = cast<BlockAddress>(BlockAddressUseIt->getUser());
150
151 // Skip if the constant was formed but ended up not being used (due to DCE
152 // or whatever).
153 if (!BA->isConstantUsed())
154 continue;
155
156 // Compute the index we want to use for this basic block. We can't use zero
157 // because null can be compared with block addresses.
158 int BBIndex = BBs.size() + 1;
159 BBs.push_back(&BB);
160
161 auto *ITy = cast<IntegerType>(DL.getIntPtrType(BA->getType()));
162 ConstantInt *BBIndexC = ConstantInt::get(ITy, BBIndex);
163
164 // Now rewrite the blockaddress to an integer constant based on the index.
165 // FIXME: This part doesn't properly recognize other uses of blockaddress
166 // expressions, for instance, where they are used to pass labels to
167 // asm-goto. This part of the pass needs a rework.
168 BA->replaceAllUsesWith(ConstantExpr::getIntToPtr(BBIndexC, BA->getType()));
169 }
170
171 if (BBs.empty()) {
21
Calling 'SmallVectorBase::empty'
24
Returning from 'SmallVectorBase::empty'
25
Taking false branch
172 // There are no blocks whose address is taken, so any indirectbr instruction
173 // cannot get a valid input and we can replace all of them with unreachable.
174 SmallVector<DominatorTree::UpdateType, 8> Updates;
175 if (DTU)
176 Updates.reserve(IndirectBrSuccs.size());
177 for (auto *IBr : IndirectBrs) {
178 if (DTU) {
179 for (BasicBlock *SuccBB : IBr->successors())
180 Updates.push_back({DominatorTree::Delete, IBr->getParent(), SuccBB});
181 }
182 (void)new UnreachableInst(F.getContext(), IBr);
183 IBr->eraseFromParent();
184 }
185 if (DTU) {
186 assert(Updates.size() == IndirectBrSuccs.size() &&(static_cast <bool> (Updates.size() == IndirectBrSuccs.
size() && "Got unexpected update count.") ? void (0) :
__assert_fail ("Updates.size() == IndirectBrSuccs.size() && \"Got unexpected update count.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/CodeGen/IndirectBrExpandPass.cpp"
, 187, __extension__ __PRETTY_FUNCTION__))
187 "Got unexpected update count.")(static_cast <bool> (Updates.size() == IndirectBrSuccs.
size() && "Got unexpected update count.") ? void (0) :
__assert_fail ("Updates.size() == IndirectBrSuccs.size() && \"Got unexpected update count.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/CodeGen/IndirectBrExpandPass.cpp"
, 187, __extension__ __PRETTY_FUNCTION__))
;
188 DTU->applyUpdates(Updates);
189 }
190 return true;
191 }
192
193 BasicBlock *SwitchBB;
194 Value *SwitchValue;
195
196 // Compute a common integer type across all the indirectbr instructions.
197 IntegerType *CommonITy = nullptr;
26
'CommonITy' initialized to a null pointer value
198 for (auto *IBr : IndirectBrs) {
27
Assuming '__begin1' is equal to '__end1'
199 auto *ITy =
200 cast<IntegerType>(DL.getIntPtrType(IBr->getAddress()->getType()));
201 if (!CommonITy || ITy->getBitWidth() > CommonITy->getBitWidth())
202 CommonITy = ITy;
203 }
204
205 auto GetSwitchValue = [DL, CommonITy](IndirectBrInst *IBr) {
206 return CastInst::CreatePointerCast(
207 IBr->getAddress(), CommonITy,
208 Twine(IBr->getAddress()->getName()) + ".switch_cast", IBr);
209 };
210
211 SmallVector<DominatorTree::UpdateType, 8> Updates;
212
213 if (IndirectBrs.size() == 1) {
28
Assuming the condition is false
29
Taking false branch
214 // If we only have one indirectbr, we can just directly replace it within
215 // its block.
216 IndirectBrInst *IBr = IndirectBrs[0];
217 SwitchBB = IBr->getParent();
218 SwitchValue = GetSwitchValue(IBr);
219 if (DTU) {
220 Updates.reserve(IndirectBrSuccs.size());
221 for (BasicBlock *SuccBB : IBr->successors())
222 Updates.push_back({DominatorTree::Delete, IBr->getParent(), SuccBB});
223 assert(Updates.size() == IndirectBrSuccs.size() &&(static_cast <bool> (Updates.size() == IndirectBrSuccs.
size() && "Got unexpected update count.") ? void (0) :
__assert_fail ("Updates.size() == IndirectBrSuccs.size() && \"Got unexpected update count.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/CodeGen/IndirectBrExpandPass.cpp"
, 224, __extension__ __PRETTY_FUNCTION__))
224 "Got unexpected update count.")(static_cast <bool> (Updates.size() == IndirectBrSuccs.
size() && "Got unexpected update count.") ? void (0) :
__assert_fail ("Updates.size() == IndirectBrSuccs.size() && \"Got unexpected update count.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/CodeGen/IndirectBrExpandPass.cpp"
, 224, __extension__ __PRETTY_FUNCTION__))
;
225 }
226 IBr->eraseFromParent();
227 } else {
228 // Otherwise we need to create a new block to hold the switch across BBs,
229 // jump to that block instead of each indirectbr, and phi together the
230 // values for the switch.
231 SwitchBB = BasicBlock::Create(F.getContext(), "switch_bb", &F);
232 auto *SwitchPN = PHINode::Create(CommonITy, IndirectBrs.size(),
30
Passing null pointer value via 1st parameter 'Ty'
31
Calling 'PHINode::Create'
233 "switch_value_phi", SwitchBB);
234 SwitchValue = SwitchPN;
235
236 // Now replace the indirectbr instructions with direct branches to the
237 // switch block and fill out the PHI operands.
238 if (DTU)
239 Updates.reserve(IndirectBrs.size() + 2 * IndirectBrSuccs.size());
240 for (auto *IBr : IndirectBrs) {
241 SwitchPN->addIncoming(GetSwitchValue(IBr), IBr->getParent());
242 BranchInst::Create(SwitchBB, IBr);
243 if (DTU) {
244 Updates.push_back({DominatorTree::Insert, IBr->getParent(), SwitchBB});
245 for (BasicBlock *SuccBB : IBr->successors())
246 Updates.push_back({DominatorTree::Delete, IBr->getParent(), SuccBB});
247 }
248 IBr->eraseFromParent();
249 }
250 }
251
252 // Now build the switch in the block. The block will have no terminator
253 // already.
254 auto *SI = SwitchInst::Create(SwitchValue, BBs[0], BBs.size(), SwitchBB);
255
256 // Add a case for each block.
257 for (int i : llvm::seq<int>(1, BBs.size()))
258 SI->addCase(ConstantInt::get(CommonITy, i + 1), BBs[i]);
259
260 if (DTU) {
261 // If there were multiple indirectbr's, they may have common successors,
262 // but in the dominator tree, we only track unique edges.
263 SmallPtrSet<BasicBlock *, 8> UniqueSuccessors(BBs.begin(), BBs.end());
264 Updates.reserve(Updates.size() + UniqueSuccessors.size());
265 for (BasicBlock *BB : UniqueSuccessors)
266 Updates.push_back({DominatorTree::Insert, SwitchBB, BB});
267 DTU->applyUpdates(Updates);
268 }
269
270 return true;
271}

/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/PassAnalysisSupport.h

1//===- llvm/PassAnalysisSupport.h - Analysis Pass Support code --*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines stuff that is used to define and "use" Analysis Passes.
10// This file is automatically #included by Pass.h, so:
11//
12// NO .CPP FILES SHOULD INCLUDE THIS FILE DIRECTLY
13//
14// Instead, #include Pass.h
15//
16//===----------------------------------------------------------------------===//
17
18#if !defined(LLVM_PASS_H) || defined(LLVM_PASSANALYSISSUPPORT_H)
19#error "Do not include <PassAnalysisSupport.h>; include <Pass.h> instead"
20#endif
21
22#ifndef LLVM_PASSANALYSISSUPPORT_H
23#define LLVM_PASSANALYSISSUPPORT_H
24
25#include "llvm/ADT/STLExtras.h"
26#include "llvm/ADT/SmallVector.h"
27#include <cassert>
28#include <tuple>
29#include <utility>
30#include <vector>
31
32namespace llvm {
33
34class Function;
35class Pass;
36class PMDataManager;
37class StringRef;
38
39//===----------------------------------------------------------------------===//
40/// Represent the analysis usage information of a pass. This tracks analyses
41/// that the pass REQUIRES (must be available when the pass runs), REQUIRES
42/// TRANSITIVE (must be available throughout the lifetime of the pass), and
43/// analyses that the pass PRESERVES (the pass does not invalidate the results
44/// of these analyses). This information is provided by a pass to the Pass
45/// infrastructure through the getAnalysisUsage virtual function.
46///
47class AnalysisUsage {
48public:
49 using VectorType = SmallVectorImpl<AnalysisID>;
50
51private:
52 /// Sets of analyses required and preserved by a pass
53 // TODO: It's not clear that SmallVector is an appropriate data structure for
54 // this usecase. The sizes were picked to minimize wasted space, but are
55 // otherwise fairly meaningless.
56 SmallVector<AnalysisID, 8> Required;
57 SmallVector<AnalysisID, 2> RequiredTransitive;
58 SmallVector<AnalysisID, 2> Preserved;
59 SmallVector<AnalysisID, 0> Used;
60 bool PreservesAll = false;
61
62 void pushUnique(VectorType &Set, AnalysisID ID) {
63 if (!llvm::is_contained(Set, ID))
64 Set.push_back(ID);
65 }
66
67public:
68 AnalysisUsage() = default;
69
70 ///@{
71 /// Add the specified ID to the required set of the usage info for a pass.
72 AnalysisUsage &addRequiredID(const void *ID);
73 AnalysisUsage &addRequiredID(char &ID);
74 template<class PassClass>
75 AnalysisUsage &addRequired() {
76 return addRequiredID(PassClass::ID);
77 }
78
79 AnalysisUsage &addRequiredTransitiveID(char &ID);
80 template<class PassClass>
81 AnalysisUsage &addRequiredTransitive() {
82 return addRequiredTransitiveID(PassClass::ID);
83 }
84 ///@}
85
86 ///@{
87 /// Add the specified ID to the set of analyses preserved by this pass.
88 AnalysisUsage &addPreservedID(const void *ID) {
89 pushUnique(Preserved, ID);
90 return *this;
91 }
92 AnalysisUsage &addPreservedID(char &ID) {
93 pushUnique(Preserved, &ID);
94 return *this;
95 }
96 /// Add the specified Pass class to the set of analyses preserved by this pass.
97 template<class PassClass>
98 AnalysisUsage &addPreserved() {
99 pushUnique(Preserved, &PassClass::ID);
100 return *this;
101 }
102 ///@}
103
104 ///@{
105 /// Add the specified ID to the set of analyses used by this pass if they are
106 /// available..
107 AnalysisUsage &addUsedIfAvailableID(const void *ID) {
108 pushUnique(Used, ID);
109 return *this;
110 }
111 AnalysisUsage &addUsedIfAvailableID(char &ID) {
112 pushUnique(Used, &ID);
113 return *this;
114 }
115 /// Add the specified Pass class to the set of analyses used by this pass.
116 template<class PassClass>
117 AnalysisUsage &addUsedIfAvailable() {
118 pushUnique(Used, &PassClass::ID);
119 return *this;
120 }
121 ///@}
122
123 /// Add the Pass with the specified argument string to the set of analyses
124 /// preserved by this pass. If no such Pass exists, do nothing. This can be
125 /// useful when a pass is trivially preserved, but may not be linked in. Be
126 /// careful about spelling!
127 AnalysisUsage &addPreserved(StringRef Arg);
128
129 /// Set by analyses that do not transform their input at all
130 void setPreservesAll() { PreservesAll = true; }
131
132 /// Determine whether a pass said it does not transform its input at all
133 bool getPreservesAll() const { return PreservesAll; }
134
135 /// This function should be called by the pass, iff they do not:
136 ///
137 /// 1. Add or remove basic blocks from the function
138 /// 2. Modify terminator instructions in any way.
139 ///
140 /// This function annotates the AnalysisUsage info object to say that analyses
141 /// that only depend on the CFG are preserved by this pass.
142 void setPreservesCFG();
143
144 const VectorType &getRequiredSet() const { return Required; }
145 const VectorType &getRequiredTransitiveSet() const {
146 return RequiredTransitive;
147 }
148 const VectorType &getPreservedSet() const { return Preserved; }
149 const VectorType &getUsedSet() const { return Used; }
150};
151
152//===----------------------------------------------------------------------===//
153/// AnalysisResolver - Simple interface used by Pass objects to pull all
154/// analysis information out of pass manager that is responsible to manage
155/// the pass.
156///
157class AnalysisResolver {
158public:
159 AnalysisResolver() = delete;
160 explicit AnalysisResolver(PMDataManager &P) : PM(P) {}
161
162 PMDataManager &getPMDataManager() { return PM; }
163
164 /// Find pass that is implementing PI.
165 Pass *findImplPass(AnalysisID PI) {
166 Pass *ResultPass = nullptr;
167 for (const auto &AnalysisImpl : AnalysisImpls) {
168 if (AnalysisImpl.first == PI) {
169 ResultPass = AnalysisImpl.second;
170 break;
171 }
172 }
173 return ResultPass;
174 }
175
176 /// Find pass that is implementing PI. Initialize pass for Function F.
177 std::tuple<Pass *, bool> findImplPass(Pass *P, AnalysisID PI, Function &F);
178
179 void addAnalysisImplsPair(AnalysisID PI, Pass *P) {
180 if (findImplPass(PI) == P)
181 return;
182 std::pair<AnalysisID, Pass*> pir = std::make_pair(PI,P);
183 AnalysisImpls.push_back(pir);
184 }
185
186 /// Clear cache that is used to connect a pass to the analysis (PassInfo).
187 void clearAnalysisImpls() {
188 AnalysisImpls.clear();
189 }
190
191 /// Return analysis result or null if it doesn't exist.
192 Pass *getAnalysisIfAvailable(AnalysisID ID) const;
193
194private:
195 /// This keeps track of which passes implements the interfaces that are
196 /// required by the current pass (to implement getAnalysis()).
197 std::vector<std::pair<AnalysisID, Pass *>> AnalysisImpls;
198
199 /// PassManager that is used to resolve analysis info
200 PMDataManager &PM;
201};
202
203/// getAnalysisIfAvailable<AnalysisType>() - Subclasses use this function to
204/// get analysis information that might be around, for example to update it.
205/// This is different than getAnalysis in that it can fail (if the analysis
206/// results haven't been computed), so should only be used if you can handle
207/// the case when the analysis is not available. This method is often used by
208/// transformation APIs to update analysis results for a pass automatically as
209/// the transform is performed.
210template<typename AnalysisType>
211AnalysisType *Pass::getAnalysisIfAvailable() const {
212 assert(Resolver && "Pass not resident in a PassManager object!")(static_cast <bool> (Resolver && "Pass not resident in a PassManager object!"
) ? void (0) : __assert_fail ("Resolver && \"Pass not resident in a PassManager object!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/PassAnalysisSupport.h"
, 212, __extension__ __PRETTY_FUNCTION__))
;
2
Assuming field 'Resolver' is non-null
3
'?' condition is true
213
214 const void *PI = &AnalysisType::ID;
215
216 Pass *ResultPass = Resolver->getAnalysisIfAvailable(PI);
217 if (!ResultPass) return nullptr;
4
Assuming 'ResultPass' is non-null
5
Taking false branch
218
219 // Because the AnalysisType may not be a subclass of pass (for
220 // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
221 // adjust the return pointer (because the class may multiply inherit, once
222 // from pass, once from AnalysisType).
223 return (AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
6
Returning pointer, which participates in a condition later
224}
225
226/// getAnalysis<AnalysisType>() - This function is used by subclasses to get
227/// to the analysis information that they claim to use by overriding the
228/// getAnalysisUsage function.
229template<typename AnalysisType>
230AnalysisType &Pass::getAnalysis() const {
231 assert(Resolver && "Pass has not been inserted into a PassManager object!")(static_cast <bool> (Resolver && "Pass has not been inserted into a PassManager object!"
) ? void (0) : __assert_fail ("Resolver && \"Pass has not been inserted into a PassManager object!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/PassAnalysisSupport.h"
, 231, __extension__ __PRETTY_FUNCTION__))
;
232 return getAnalysisID<AnalysisType>(&AnalysisType::ID);
233}
234
235template<typename AnalysisType>
236AnalysisType &Pass::getAnalysisID(AnalysisID PI) const {
237 assert(PI && "getAnalysis for unregistered pass!")(static_cast <bool> (PI && "getAnalysis for unregistered pass!"
) ? void (0) : __assert_fail ("PI && \"getAnalysis for unregistered pass!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/PassAnalysisSupport.h"
, 237, __extension__ __PRETTY_FUNCTION__))
;
238 assert(Resolver&&"Pass has not been inserted into a PassManager object!")(static_cast <bool> (Resolver&&"Pass has not been inserted into a PassManager object!"
) ? void (0) : __assert_fail ("Resolver&&\"Pass has not been inserted into a PassManager object!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/PassAnalysisSupport.h"
, 238, __extension__ __PRETTY_FUNCTION__))
;
239 // PI *must* appear in AnalysisImpls. Because the number of passes used
240 // should be a small number, we just do a linear search over a (dense)
241 // vector.
242 Pass *ResultPass = Resolver->findImplPass(PI);
243 assert(ResultPass &&(static_cast <bool> (ResultPass && "getAnalysis*() called on an analysis that was not "
"'required' by pass!") ? void (0) : __assert_fail ("ResultPass && \"getAnalysis*() called on an analysis that was not \" \"'required' by pass!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/PassAnalysisSupport.h"
, 245, __extension__ __PRETTY_FUNCTION__))
244 "getAnalysis*() called on an analysis that was not "(static_cast <bool> (ResultPass && "getAnalysis*() called on an analysis that was not "
"'required' by pass!") ? void (0) : __assert_fail ("ResultPass && \"getAnalysis*() called on an analysis that was not \" \"'required' by pass!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/PassAnalysisSupport.h"
, 245, __extension__ __PRETTY_FUNCTION__))
245 "'required' by pass!")(static_cast <bool> (ResultPass && "getAnalysis*() called on an analysis that was not "
"'required' by pass!") ? void (0) : __assert_fail ("ResultPass && \"getAnalysis*() called on an analysis that was not \" \"'required' by pass!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/PassAnalysisSupport.h"
, 245, __extension__ __PRETTY_FUNCTION__))
;
246
247 // Because the AnalysisType may not be a subclass of pass (for
248 // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
249 // adjust the return pointer (because the class may multiply inherit, once
250 // from pass, once from AnalysisType).
251 return *(AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
252}
253
254/// getAnalysis<AnalysisType>() - This function is used by subclasses to get
255/// to the analysis information that they claim to use by overriding the
256/// getAnalysisUsage function. If as part of the dependencies, an IR
257/// transformation is triggered (e.g. because the analysis requires
258/// BreakCriticalEdges), and Changed is non null, *Changed is updated.
259template <typename AnalysisType>
260AnalysisType &Pass::getAnalysis(Function &F, bool *Changed) {
261 assert(Resolver &&"Pass has not been inserted into a PassManager object!")(static_cast <bool> (Resolver &&"Pass has not been inserted into a PassManager object!"
) ? void (0) : __assert_fail ("Resolver &&\"Pass has not been inserted into a PassManager object!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/PassAnalysisSupport.h"
, 261, __extension__ __PRETTY_FUNCTION__))
;
262
263 return getAnalysisID<AnalysisType>(&AnalysisType::ID, F, Changed);
264}
265
266template <typename AnalysisType>
267AnalysisType &Pass::getAnalysisID(AnalysisID PI, Function &F, bool *Changed) {
268 assert(PI && "getAnalysis for unregistered pass!")(static_cast <bool> (PI && "getAnalysis for unregistered pass!"
) ? void (0) : __assert_fail ("PI && \"getAnalysis for unregistered pass!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/PassAnalysisSupport.h"
, 268, __extension__ __PRETTY_FUNCTION__))
;
269 assert(Resolver && "Pass has not been inserted into a PassManager object!")(static_cast <bool> (Resolver && "Pass has not been inserted into a PassManager object!"
) ? void (0) : __assert_fail ("Resolver && \"Pass has not been inserted into a PassManager object!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/PassAnalysisSupport.h"
, 269, __extension__ __PRETTY_FUNCTION__))
;
270 // PI *must* appear in AnalysisImpls. Because the number of passes used
271 // should be a small number, we just do a linear search over a (dense)
272 // vector.
273 Pass *ResultPass;
274 bool LocalChanged;
275 std::tie(ResultPass, LocalChanged) = Resolver->findImplPass(this, PI, F);
276
277 assert(ResultPass && "Unable to find requested analysis info")(static_cast <bool> (ResultPass && "Unable to find requested analysis info"
) ? void (0) : __assert_fail ("ResultPass && \"Unable to find requested analysis info\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/PassAnalysisSupport.h"
, 277, __extension__ __PRETTY_FUNCTION__))
;
278 if (Changed)
279 *Changed |= LocalChanged;
280 else
281 assert(!LocalChanged &&(static_cast <bool> (!LocalChanged && "A pass trigged a code update but the update status is lost"
) ? void (0) : __assert_fail ("!LocalChanged && \"A pass trigged a code update but the update status is lost\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/PassAnalysisSupport.h"
, 282, __extension__ __PRETTY_FUNCTION__))
282 "A pass trigged a code update but the update status is lost")(static_cast <bool> (!LocalChanged && "A pass trigged a code update but the update status is lost"
) ? void (0) : __assert_fail ("!LocalChanged && \"A pass trigged a code update but the update status is lost\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/PassAnalysisSupport.h"
, 282, __extension__ __PRETTY_FUNCTION__))
;
283
284 // Because the AnalysisType may not be a subclass of pass (for
285 // AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially
286 // adjust the return pointer (because the class may multiply inherit, once
287 // from pass, once from AnalysisType).
288 return *(AnalysisType*)ResultPass->getAdjustedAnalysisPointer(PI);
289}
290
291} // end namespace llvm
292
293#endif // LLVM_PASSANALYSISSUPPORT_H

/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h

1//===- llvm/ADT/SmallVector.h - 'Normally small' vectors --------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the SmallVector class.
10//
11//===----------------------------------------------------------------------===//
12
13#ifndef LLVM_ADT_SMALLVECTOR_H
14#define LLVM_ADT_SMALLVECTOR_H
15
16#include "llvm/ADT/iterator_range.h"
17#include "llvm/Support/Compiler.h"
18#include "llvm/Support/ErrorHandling.h"
19#include "llvm/Support/MemAlloc.h"
20#include "llvm/Support/type_traits.h"
21#include <algorithm>
22#include <cassert>
23#include <cstddef>
24#include <cstdlib>
25#include <cstring>
26#include <initializer_list>
27#include <iterator>
28#include <limits>
29#include <memory>
30#include <new>
31#include <type_traits>
32#include <utility>
33
34namespace llvm {
35
36/// This is all the stuff common to all SmallVectors.
37///
38/// The template parameter specifies the type which should be used to hold the
39/// Size and Capacity of the SmallVector, so it can be adjusted.
40/// Using 32 bit size is desirable to shrink the size of the SmallVector.
41/// Using 64 bit size is desirable for cases like SmallVector<char>, where a
42/// 32 bit size would limit the vector to ~4GB. SmallVectors are used for
43/// buffering bitcode output - which can exceed 4GB.
44template <class Size_T> class SmallVectorBase {
45protected:
46 void *BeginX;
47 Size_T Size = 0, Capacity;
48
49 /// The maximum value of the Size_T used.
50 static constexpr size_t SizeTypeMax() {
51 return std::numeric_limits<Size_T>::max();
52 }
53
54 SmallVectorBase() = delete;
55 SmallVectorBase(void *FirstEl, size_t TotalCapacity)
56 : BeginX(FirstEl), Capacity(TotalCapacity) {}
57
58 /// This is a helper for \a grow() that's out of line to reduce code
59 /// duplication. This function will report a fatal error if it can't grow at
60 /// least to \p MinSize.
61 void *mallocForGrow(size_t MinSize, size_t TSize, size_t &NewCapacity);
62
63 /// This is an implementation of the grow() method which only works
64 /// on POD-like data types and is out of line to reduce code duplication.
65 /// This function will report a fatal error if it cannot increase capacity.
66 void grow_pod(void *FirstEl, size_t MinSize, size_t TSize);
67
68public:
69 size_t size() const { return Size; }
70 size_t capacity() const { return Capacity; }
71
72 LLVM_NODISCARD[[clang::warn_unused_result]] bool empty() const { return !Size; }
14
Assuming field 'Size' is not equal to 0
15
Returning zero, which participates in a condition later
22
Assuming field 'Size' is not equal to 0
23
Returning zero, which participates in a condition later
73
74 /// Set the array size to \p N, which the current array must have enough
75 /// capacity for.
76 ///
77 /// This does not construct or destroy any elements in the vector.
78 ///
79 /// Clients can use this in conjunction with capacity() to write past the end
80 /// of the buffer when they know that more elements are available, and only
81 /// update the size later. This avoids the cost of value initializing elements
82 /// which will only be overwritten.
83 void set_size(size_t N) {
84 assert(N <= capacity())(static_cast <bool> (N <= capacity()) ? void (0) : __assert_fail
("N <= capacity()", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h"
, 84, __extension__ __PRETTY_FUNCTION__))
;
85 Size = N;
86 }
87};
88
89template <class T>
90using SmallVectorSizeType =
91 typename std::conditional<sizeof(T) < 4 && sizeof(void *) >= 8, uint64_t,
92 uint32_t>::type;
93
94/// Figure out the offset of the first element.
95template <class T, typename = void> struct SmallVectorAlignmentAndSize {
96 alignas(SmallVectorBase<SmallVectorSizeType<T>>) char Base[sizeof(
97 SmallVectorBase<SmallVectorSizeType<T>>)];
98 alignas(T) char FirstEl[sizeof(T)];
99};
100
101/// This is the part of SmallVectorTemplateBase which does not depend on whether
102/// the type T is a POD. The extra dummy template argument is used by ArrayRef
103/// to avoid unnecessarily requiring T to be complete.
104template <typename T, typename = void>
105class SmallVectorTemplateCommon
106 : public SmallVectorBase<SmallVectorSizeType<T>> {
107 using Base = SmallVectorBase<SmallVectorSizeType<T>>;
108
109 /// Find the address of the first element. For this pointer math to be valid
110 /// with small-size of 0 for T with lots of alignment, it's important that
111 /// SmallVectorStorage is properly-aligned even for small-size of 0.
112 void *getFirstEl() const {
113 return const_cast<void *>(reinterpret_cast<const void *>(
114 reinterpret_cast<const char *>(this) +
115 offsetof(SmallVectorAlignmentAndSize<T>, FirstEl)__builtin_offsetof(SmallVectorAlignmentAndSize<T>, FirstEl
)
));
116 }
117 // Space after 'FirstEl' is clobbered, do not add any instance vars after it.
118
119protected:
120 SmallVectorTemplateCommon(size_t Size) : Base(getFirstEl(), Size) {}
121
122 void grow_pod(size_t MinSize, size_t TSize) {
123 Base::grow_pod(getFirstEl(), MinSize, TSize);
124 }
125
126 /// Return true if this is a smallvector which has not had dynamic
127 /// memory allocated for it.
128 bool isSmall() const { return this->BeginX == getFirstEl(); }
129
130 /// Put this vector in a state of being small.
131 void resetToSmall() {
132 this->BeginX = getFirstEl();
133 this->Size = this->Capacity = 0; // FIXME: Setting Capacity to 0 is suspect.
134 }
135
136 /// Return true if V is an internal reference to the given range.
137 bool isReferenceToRange(const void *V, const void *First, const void *Last) const {
138 // Use std::less to avoid UB.
139 std::less<> LessThan;
140 return !LessThan(V, First) && LessThan(V, Last);
141 }
142
143 /// Return true if V is an internal reference to this vector.
144 bool isReferenceToStorage(const void *V) const {
145 return isReferenceToRange(V, this->begin(), this->end());
146 }
147
148 /// Return true if First and Last form a valid (possibly empty) range in this
149 /// vector's storage.
150 bool isRangeInStorage(const void *First, const void *Last) const {
151 // Use std::less to avoid UB.
152 std::less<> LessThan;
153 return !LessThan(First, this->begin()) && !LessThan(Last, First) &&
154 !LessThan(this->end(), Last);
155 }
156
157 /// Return true unless Elt will be invalidated by resizing the vector to
158 /// NewSize.
159 bool isSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
160 // Past the end.
161 if (LLVM_LIKELY(!isReferenceToStorage(Elt))__builtin_expect((bool)(!isReferenceToStorage(Elt)), true))
162 return true;
163
164 // Return false if Elt will be destroyed by shrinking.
165 if (NewSize <= this->size())
166 return Elt < this->begin() + NewSize;
167
168 // Return false if we need to grow.
169 return NewSize <= this->capacity();
170 }
171
172 /// Check whether Elt will be invalidated by resizing the vector to NewSize.
173 void assertSafeToReferenceAfterResize(const void *Elt, size_t NewSize) {
174 assert(isSafeToReferenceAfterResize(Elt, NewSize) &&(static_cast <bool> (isSafeToReferenceAfterResize(Elt, NewSize
) && "Attempting to reference an element of the vector in an operation "
"that invalidates it") ? void (0) : __assert_fail ("isSafeToReferenceAfterResize(Elt, NewSize) && \"Attempting to reference an element of the vector in an operation \" \"that invalidates it\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h"
, 176, __extension__ __PRETTY_FUNCTION__))
175 "Attempting to reference an element of the vector in an operation "(static_cast <bool> (isSafeToReferenceAfterResize(Elt, NewSize
) && "Attempting to reference an element of the vector in an operation "
"that invalidates it") ? void (0) : __assert_fail ("isSafeToReferenceAfterResize(Elt, NewSize) && \"Attempting to reference an element of the vector in an operation \" \"that invalidates it\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h"
, 176, __extension__ __PRETTY_FUNCTION__))
176 "that invalidates it")(static_cast <bool> (isSafeToReferenceAfterResize(Elt, NewSize
) && "Attempting to reference an element of the vector in an operation "
"that invalidates it") ? void (0) : __assert_fail ("isSafeToReferenceAfterResize(Elt, NewSize) && \"Attempting to reference an element of the vector in an operation \" \"that invalidates it\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h"
, 176, __extension__ __PRETTY_FUNCTION__))
;
177 }
178
179 /// Check whether Elt will be invalidated by increasing the size of the
180 /// vector by N.
181 void assertSafeToAdd(const void *Elt, size_t N = 1) {
182 this->assertSafeToReferenceAfterResize(Elt, this->size() + N);
183 }
184
185 /// Check whether any part of the range will be invalidated by clearing.
186 void assertSafeToReferenceAfterClear(const T *From, const T *To) {
187 if (From == To)
188 return;
189 this->assertSafeToReferenceAfterResize(From, 0);
190 this->assertSafeToReferenceAfterResize(To - 1, 0);
191 }
192 template <
193 class ItTy,
194 std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
195 bool> = false>
196 void assertSafeToReferenceAfterClear(ItTy, ItTy) {}
197
198 /// Check whether any part of the range will be invalidated by growing.
199 void assertSafeToAddRange(const T *From, const T *To) {
200 if (From == To)
201 return;
202 this->assertSafeToAdd(From, To - From);
203 this->assertSafeToAdd(To - 1, To - From);
204 }
205 template <
206 class ItTy,
207 std::enable_if_t<!std::is_same<std::remove_const_t<ItTy>, T *>::value,
208 bool> = false>
209 void assertSafeToAddRange(ItTy, ItTy) {}
210
211 /// Reserve enough space to add one element, and return the updated element
212 /// pointer in case it was a reference to the storage.
213 template <class U>
214 static const T *reserveForParamAndGetAddressImpl(U *This, const T &Elt,
215 size_t N) {
216 size_t NewSize = This->size() + N;
217 if (LLVM_LIKELY(NewSize <= This->capacity())__builtin_expect((bool)(NewSize <= This->capacity()), true
)
)
218 return &Elt;
219
220 bool ReferencesStorage = false;
221 int64_t Index = -1;
222 if (!U::TakesParamByValue) {
223 if (LLVM_UNLIKELY(This->isReferenceToStorage(&Elt))__builtin_expect((bool)(This->isReferenceToStorage(&Elt
)), false)
) {
224 ReferencesStorage = true;
225 Index = &Elt - This->begin();
226 }
227 }
228 This->grow(NewSize);
229 return ReferencesStorage ? This->begin() + Index : &Elt;
230 }
231
232public:
233 using size_type = size_t;
234 using difference_type = ptrdiff_t;
235 using value_type = T;
236 using iterator = T *;
237 using const_iterator = const T *;
238
239 using const_reverse_iterator = std::reverse_iterator<const_iterator>;
240 using reverse_iterator = std::reverse_iterator<iterator>;
241
242 using reference = T &;
243 using const_reference = const T &;
244 using pointer = T *;
245 using const_pointer = const T *;
246
247 using Base::capacity;
248 using Base::empty;
249 using Base::size;
250
251 // forward iterator creation methods.
252 iterator begin() { return (iterator)this->BeginX; }
253 const_iterator begin() const { return (const_iterator)this->BeginX; }
254 iterator end() { return begin() + size(); }
255 const_iterator end() const { return begin() + size(); }
256
257 // reverse iterator creation methods.
258 reverse_iterator rbegin() { return reverse_iterator(end()); }
259 const_reverse_iterator rbegin() const{ return const_reverse_iterator(end()); }
260 reverse_iterator rend() { return reverse_iterator(begin()); }
261 const_reverse_iterator rend() const { return const_reverse_iterator(begin());}
262
263 size_type size_in_bytes() const { return size() * sizeof(T); }
264 size_type max_size() const {
265 return std::min(this->SizeTypeMax(), size_type(-1) / sizeof(T));
266 }
267
268 size_t capacity_in_bytes() const { return capacity() * sizeof(T); }
269
270 /// Return a pointer to the vector's buffer, even if empty().
271 pointer data() { return pointer(begin()); }
272 /// Return a pointer to the vector's buffer, even if empty().
273 const_pointer data() const { return const_pointer(begin()); }
274
275 reference operator[](size_type idx) {
276 assert(idx < size())(static_cast <bool> (idx < size()) ? void (0) : __assert_fail
("idx < size()", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h"
, 276, __extension__ __PRETTY_FUNCTION__))
;
277 return begin()[idx];
278 }
279 const_reference operator[](size_type idx) const {
280 assert(idx < size())(static_cast <bool> (idx < size()) ? void (0) : __assert_fail
("idx < size()", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h"
, 280, __extension__ __PRETTY_FUNCTION__))
;
281 return begin()[idx];
282 }
283
284 reference front() {
285 assert(!empty())(static_cast <bool> (!empty()) ? void (0) : __assert_fail
("!empty()", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h"
, 285, __extension__ __PRETTY_FUNCTION__))
;
286 return begin()[0];
287 }
288 const_reference front() const {
289 assert(!empty())(static_cast <bool> (!empty()) ? void (0) : __assert_fail
("!empty()", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h"
, 289, __extension__ __PRETTY_FUNCTION__))
;
290 return begin()[0];
291 }
292
293 reference back() {
294 assert(!empty())(static_cast <bool> (!empty()) ? void (0) : __assert_fail
("!empty()", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h"
, 294, __extension__ __PRETTY_FUNCTION__))
;
295 return end()[-1];
296 }
297 const_reference back() const {
298 assert(!empty())(static_cast <bool> (!empty()) ? void (0) : __assert_fail
("!empty()", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h"
, 298, __extension__ __PRETTY_FUNCTION__))
;
299 return end()[-1];
300 }
301};
302
303/// SmallVectorTemplateBase<TriviallyCopyable = false> - This is where we put
304/// method implementations that are designed to work with non-trivial T's.
305///
306/// We approximate is_trivially_copyable with trivial move/copy construction and
307/// trivial destruction. While the standard doesn't specify that you're allowed
308/// copy these types with memcpy, there is no way for the type to observe this.
309/// This catches the important case of std::pair<POD, POD>, which is not
310/// trivially assignable.
311template <typename T, bool = (is_trivially_copy_constructible<T>::value) &&
312 (is_trivially_move_constructible<T>::value) &&
313 std::is_trivially_destructible<T>::value>
314class SmallVectorTemplateBase : public SmallVectorTemplateCommon<T> {
315 friend class SmallVectorTemplateCommon<T>;
316
317protected:
318 static constexpr bool TakesParamByValue = false;
319 using ValueParamT = const T &;
320
321 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
322
323 static void destroy_range(T *S, T *E) {
324 while (S != E) {
325 --E;
326 E->~T();
327 }
328 }
329
330 /// Move the range [I, E) into the uninitialized memory starting with "Dest",
331 /// constructing elements as needed.
332 template<typename It1, typename It2>
333 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
334 std::uninitialized_copy(std::make_move_iterator(I),
335 std::make_move_iterator(E), Dest);
336 }
337
338 /// Copy the range [I, E) onto the uninitialized memory starting with "Dest",
339 /// constructing elements as needed.
340 template<typename It1, typename It2>
341 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
342 std::uninitialized_copy(I, E, Dest);
343 }
344
345 /// Grow the allocated memory (without initializing new elements), doubling
346 /// the size of the allocated memory. Guarantees space for at least one more
347 /// element, or MinSize more elements if specified.
348 void grow(size_t MinSize = 0);
349
350 /// Create a new allocation big enough for \p MinSize and pass back its size
351 /// in \p NewCapacity. This is the first section of \a grow().
352 T *mallocForGrow(size_t MinSize, size_t &NewCapacity) {
353 return static_cast<T *>(
354 SmallVectorBase<SmallVectorSizeType<T>>::mallocForGrow(
355 MinSize, sizeof(T), NewCapacity));
356 }
357
358 /// Move existing elements over to the new allocation \p NewElts, the middle
359 /// section of \a grow().
360 void moveElementsForGrow(T *NewElts);
361
362 /// Transfer ownership of the allocation, finishing up \a grow().
363 void takeAllocationForGrow(T *NewElts, size_t NewCapacity);
364
365 /// Reserve enough space to add one element, and return the updated element
366 /// pointer in case it was a reference to the storage.
367 const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
368 return this->reserveForParamAndGetAddressImpl(this, Elt, N);
369 }
370
371 /// Reserve enough space to add one element, and return the updated element
372 /// pointer in case it was a reference to the storage.
373 T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
374 return const_cast<T *>(
375 this->reserveForParamAndGetAddressImpl(this, Elt, N));
376 }
377
378 static T &&forward_value_param(T &&V) { return std::move(V); }
379 static const T &forward_value_param(const T &V) { return V; }
380
381 void growAndAssign(size_t NumElts, const T &Elt) {
382 // Grow manually in case Elt is an internal reference.
383 size_t NewCapacity;
384 T *NewElts = mallocForGrow(NumElts, NewCapacity);
385 std::uninitialized_fill_n(NewElts, NumElts, Elt);
386 this->destroy_range(this->begin(), this->end());
387 takeAllocationForGrow(NewElts, NewCapacity);
388 this->set_size(NumElts);
389 }
390
391 template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) {
392 // Grow manually in case one of Args is an internal reference.
393 size_t NewCapacity;
394 T *NewElts = mallocForGrow(0, NewCapacity);
395 ::new ((void *)(NewElts + this->size())) T(std::forward<ArgTypes>(Args)...);
396 moveElementsForGrow(NewElts);
397 takeAllocationForGrow(NewElts, NewCapacity);
398 this->set_size(this->size() + 1);
399 return this->back();
400 }
401
402public:
403 void push_back(const T &Elt) {
404 const T *EltPtr = reserveForParamAndGetAddress(Elt);
405 ::new ((void *)this->end()) T(*EltPtr);
406 this->set_size(this->size() + 1);
407 }
408
409 void push_back(T &&Elt) {
410 T *EltPtr = reserveForParamAndGetAddress(Elt);
411 ::new ((void *)this->end()) T(::std::move(*EltPtr));
412 this->set_size(this->size() + 1);
413 }
414
415 void pop_back() {
416 this->set_size(this->size() - 1);
417 this->end()->~T();
418 }
419};
420
421// Define this out-of-line to dissuade the C++ compiler from inlining it.
422template <typename T, bool TriviallyCopyable>
423void SmallVectorTemplateBase<T, TriviallyCopyable>::grow(size_t MinSize) {
424 size_t NewCapacity;
425 T *NewElts = mallocForGrow(MinSize, NewCapacity);
426 moveElementsForGrow(NewElts);
427 takeAllocationForGrow(NewElts, NewCapacity);
428}
429
430// Define this out-of-line to dissuade the C++ compiler from inlining it.
431template <typename T, bool TriviallyCopyable>
432void SmallVectorTemplateBase<T, TriviallyCopyable>::moveElementsForGrow(
433 T *NewElts) {
434 // Move the elements over.
435 this->uninitialized_move(this->begin(), this->end(), NewElts);
436
437 // Destroy the original elements.
438 destroy_range(this->begin(), this->end());
439}
440
441// Define this out-of-line to dissuade the C++ compiler from inlining it.
442template <typename T, bool TriviallyCopyable>
443void SmallVectorTemplateBase<T, TriviallyCopyable>::takeAllocationForGrow(
444 T *NewElts, size_t NewCapacity) {
445 // If this wasn't grown from the inline copy, deallocate the old space.
446 if (!this->isSmall())
447 free(this->begin());
448
449 this->BeginX = NewElts;
450 this->Capacity = NewCapacity;
451}
452
453/// SmallVectorTemplateBase<TriviallyCopyable = true> - This is where we put
454/// method implementations that are designed to work with trivially copyable
455/// T's. This allows using memcpy in place of copy/move construction and
456/// skipping destruction.
457template <typename T>
458class SmallVectorTemplateBase<T, true> : public SmallVectorTemplateCommon<T> {
459 friend class SmallVectorTemplateCommon<T>;
460
461protected:
462 /// True if it's cheap enough to take parameters by value. Doing so avoids
463 /// overhead related to mitigations for reference invalidation.
464 static constexpr bool TakesParamByValue = sizeof(T) <= 2 * sizeof(void *);
465
466 /// Either const T& or T, depending on whether it's cheap enough to take
467 /// parameters by value.
468 using ValueParamT =
469 typename std::conditional<TakesParamByValue, T, const T &>::type;
470
471 SmallVectorTemplateBase(size_t Size) : SmallVectorTemplateCommon<T>(Size) {}
472
473 // No need to do a destroy loop for POD's.
474 static void destroy_range(T *, T *) {}
475
476 /// Move the range [I, E) onto the uninitialized memory
477 /// starting with "Dest", constructing elements into it as needed.
478 template<typename It1, typename It2>
479 static void uninitialized_move(It1 I, It1 E, It2 Dest) {
480 // Just do a copy.
481 uninitialized_copy(I, E, Dest);
482 }
483
484 /// Copy the range [I, E) onto the uninitialized memory
485 /// starting with "Dest", constructing elements into it as needed.
486 template<typename It1, typename It2>
487 static void uninitialized_copy(It1 I, It1 E, It2 Dest) {
488 // Arbitrary iterator types; just use the basic implementation.
489 std::uninitialized_copy(I, E, Dest);
490 }
491
492 /// Copy the range [I, E) onto the uninitialized memory
493 /// starting with "Dest", constructing elements into it as needed.
494 template <typename T1, typename T2>
495 static void uninitialized_copy(
496 T1 *I, T1 *E, T2 *Dest,
497 std::enable_if_t<std::is_same<typename std::remove_const<T1>::type,
498 T2>::value> * = nullptr) {
499 // Use memcpy for PODs iterated by pointers (which includes SmallVector
500 // iterators): std::uninitialized_copy optimizes to memmove, but we can
501 // use memcpy here. Note that I and E are iterators and thus might be
502 // invalid for memcpy if they are equal.
503 if (I != E)
504 memcpy(reinterpret_cast<void *>(Dest), I, (E - I) * sizeof(T));
505 }
506
507 /// Double the size of the allocated memory, guaranteeing space for at
508 /// least one more element or MinSize if specified.
509 void grow(size_t MinSize = 0) { this->grow_pod(MinSize, sizeof(T)); }
510
511 /// Reserve enough space to add one element, and return the updated element
512 /// pointer in case it was a reference to the storage.
513 const T *reserveForParamAndGetAddress(const T &Elt, size_t N = 1) {
514 return this->reserveForParamAndGetAddressImpl(this, Elt, N);
515 }
516
517 /// Reserve enough space to add one element, and return the updated element
518 /// pointer in case it was a reference to the storage.
519 T *reserveForParamAndGetAddress(T &Elt, size_t N = 1) {
520 return const_cast<T *>(
521 this->reserveForParamAndGetAddressImpl(this, Elt, N));
522 }
523
524 /// Copy \p V or return a reference, depending on \a ValueParamT.
525 static ValueParamT forward_value_param(ValueParamT V) { return V; }
526
527 void growAndAssign(size_t NumElts, T Elt) {
528 // Elt has been copied in case it's an internal reference, side-stepping
529 // reference invalidation problems without losing the realloc optimization.
530 this->set_size(0);
531 this->grow(NumElts);
532 std::uninitialized_fill_n(this->begin(), NumElts, Elt);
533 this->set_size(NumElts);
534 }
535
536 template <typename... ArgTypes> T &growAndEmplaceBack(ArgTypes &&... Args) {
537 // Use push_back with a copy in case Args has an internal reference,
538 // side-stepping reference invalidation problems without losing the realloc
539 // optimization.
540 push_back(T(std::forward<ArgTypes>(Args)...));
541 return this->back();
542 }
543
544public:
545 void push_back(ValueParamT Elt) {
546 const T *EltPtr = reserveForParamAndGetAddress(Elt);
547 memcpy(reinterpret_cast<void *>(this->end()), EltPtr, sizeof(T));
548 this->set_size(this->size() + 1);
549 }
550
551 void pop_back() { this->set_size(this->size() - 1); }
552};
553
554/// This class consists of common code factored out of the SmallVector class to
555/// reduce code duplication based on the SmallVector 'N' template parameter.
556template <typename T>
557class SmallVectorImpl : public SmallVectorTemplateBase<T> {
558 using SuperClass = SmallVectorTemplateBase<T>;
559
560public:
561 using iterator = typename SuperClass::iterator;
562 using const_iterator = typename SuperClass::const_iterator;
563 using reference = typename SuperClass::reference;
564 using size_type = typename SuperClass::size_type;
565
566protected:
567 using SmallVectorTemplateBase<T>::TakesParamByValue;
568 using ValueParamT = typename SuperClass::ValueParamT;
569
570 // Default ctor - Initialize to empty.
571 explicit SmallVectorImpl(unsigned N)
572 : SmallVectorTemplateBase<T>(N) {}
573
574public:
575 SmallVectorImpl(const SmallVectorImpl &) = delete;
576
577 ~SmallVectorImpl() {
578 // Subclass has already destructed this vector's elements.
579 // If this wasn't grown from the inline copy, deallocate the old space.
580 if (!this->isSmall())
581 free(this->begin());
582 }
583
584 void clear() {
585 this->destroy_range(this->begin(), this->end());
586 this->Size = 0;
587 }
588
589private:
590 template <bool ForOverwrite> void resizeImpl(size_type N) {
591 if (N < this->size()) {
592 this->pop_back_n(this->size() - N);
593 } else if (N > this->size()) {
594 this->reserve(N);
595 for (auto I = this->end(), E = this->begin() + N; I != E; ++I)
596 if (ForOverwrite)
597 new (&*I) T;
598 else
599 new (&*I) T();
600 this->set_size(N);
601 }
602 }
603
604public:
605 void resize(size_type N) { resizeImpl<false>(N); }
606
607 /// Like resize, but \ref T is POD, the new values won't be initialized.
608 void resize_for_overwrite(size_type N) { resizeImpl<true>(N); }
609
610 void resize(size_type N, ValueParamT NV) {
611 if (N == this->size())
612 return;
613
614 if (N < this->size()) {
615 this->pop_back_n(this->size() - N);
616 return;
617 }
618
619 // N > this->size(). Defer to append.
620 this->append(N - this->size(), NV);
621 }
622
623 void reserve(size_type N) {
624 if (this->capacity() < N)
625 this->grow(N);
626 }
627
628 void pop_back_n(size_type NumItems) {
629 assert(this->size() >= NumItems)(static_cast <bool> (this->size() >= NumItems) ? void
(0) : __assert_fail ("this->size() >= NumItems", "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h"
, 629, __extension__ __PRETTY_FUNCTION__))
;
630 this->destroy_range(this->end() - NumItems, this->end());
631 this->set_size(this->size() - NumItems);
632 }
633
634 LLVM_NODISCARD[[clang::warn_unused_result]] T pop_back_val() {
635 T Result = ::std::move(this->back());
636 this->pop_back();
637 return Result;
638 }
639
640 void swap(SmallVectorImpl &RHS);
641
642 /// Add the specified range to the end of the SmallVector.
643 template <typename in_iter,
644 typename = std::enable_if_t<std::is_convertible<
645 typename std::iterator_traits<in_iter>::iterator_category,
646 std::input_iterator_tag>::value>>
647 void append(in_iter in_start, in_iter in_end) {
648 this->assertSafeToAddRange(in_start, in_end);
649 size_type NumInputs = std::distance(in_start, in_end);
650 this->reserve(this->size() + NumInputs);
651 this->uninitialized_copy(in_start, in_end, this->end());
652 this->set_size(this->size() + NumInputs);
653 }
654
655 /// Append \p NumInputs copies of \p Elt to the end.
656 void append(size_type NumInputs, ValueParamT Elt) {
657 const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumInputs);
658 std::uninitialized_fill_n(this->end(), NumInputs, *EltPtr);
659 this->set_size(this->size() + NumInputs);
660 }
661
662 void append(std::initializer_list<T> IL) {
663 append(IL.begin(), IL.end());
664 }
665
666 void append(const SmallVectorImpl &RHS) { append(RHS.begin(), RHS.end()); }
667
668 void assign(size_type NumElts, ValueParamT Elt) {
669 // Note that Elt could be an internal reference.
670 if (NumElts > this->capacity()) {
671 this->growAndAssign(NumElts, Elt);
672 return;
673 }
674
675 // Assign over existing elements.
676 std::fill_n(this->begin(), std::min(NumElts, this->size()), Elt);
677 if (NumElts > this->size())
678 std::uninitialized_fill_n(this->end(), NumElts - this->size(), Elt);
679 else if (NumElts < this->size())
680 this->destroy_range(this->begin() + NumElts, this->end());
681 this->set_size(NumElts);
682 }
683
684 // FIXME: Consider assigning over existing elements, rather than clearing &
685 // re-initializing them - for all assign(...) variants.
686
687 template <typename in_iter,
688 typename = std::enable_if_t<std::is_convertible<
689 typename std::iterator_traits<in_iter>::iterator_category,
690 std::input_iterator_tag>::value>>
691 void assign(in_iter in_start, in_iter in_end) {
692 this->assertSafeToReferenceAfterClear(in_start, in_end);
693 clear();
694 append(in_start, in_end);
695 }
696
697 void assign(std::initializer_list<T> IL) {
698 clear();
699 append(IL);
700 }
701
702 void assign(const SmallVectorImpl &RHS) { assign(RHS.begin(), RHS.end()); }
703
// Erase the single element at \p CI; returns an iterator to the element that
// followed it (now occupying the erased slot).
704 iterator erase(const_iterator CI) {
705 // Just cast away constness because this is a non-const member function.
706 iterator I = const_cast<iterator>(CI);
707
708 assert(this->isReferenceToStorage(CI) && "Iterator to erase is out of bounds.")(static_cast <bool> (this->isReferenceToStorage(CI) &&
"Iterator to erase is out of bounds.") ? void (0) : __assert_fail
("this->isReferenceToStorage(CI) && \"Iterator to erase is out of bounds.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h"
, 708, __extension__ __PRETTY_FUNCTION__))
;
709
710 iterator N = I;
711 // Shift all elts down one.
712 std::move(I+1, this->end(), I);
713 // Drop the last elt.
714 this->pop_back();
715 return(N);
716 }
717
// Erase the range [CS, CE); returns an iterator positioned where the range
// began (now the first element after the erased span).
718 iterator erase(const_iterator CS, const_iterator CE) {
719 // Just cast away constness because this is a non-const member function.
720 iterator S = const_cast<iterator>(CS);
721 iterator E = const_cast<iterator>(CE);
722
723 assert(this->isRangeInStorage(S, E) && "Range to erase is out of bounds.")(static_cast <bool> (this->isRangeInStorage(S, E) &&
"Range to erase is out of bounds.") ? void (0) : __assert_fail
("this->isRangeInStorage(S, E) && \"Range to erase is out of bounds.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h"
, 723, __extension__ __PRETTY_FUNCTION__))
;
724
725 iterator N = S;
726 // Shift all elts down.
727 iterator I = std::move(E, this->end(), S);
728 // Drop the last elts.
729 this->destroy_range(I, this->end());
730 this->set_size(I - this->begin());
731 return(N);
732 }
733
734private:
// Shared implementation of the single-element insert() overloads: inserts
// Elt before I and returns an iterator to the inserted element. Handles the
// case where Elt references an element of this vector.
735 template <class ArgType> iterator insert_one_impl(iterator I, ArgType &&Elt) {
736 // Callers ensure that ArgType is derived from T.
737 static_assert(
738 std::is_same<std::remove_const_t<std::remove_reference_t<ArgType>>,
739 T>::value,
740 "ArgType must be derived from T!");
741
742 if (I == this->end()) { // Important special case for empty vector.
743 this->push_back(::std::forward<ArgType>(Elt));
744 return this->end()-1;
745 }
746
747 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.")(static_cast <bool> (this->isReferenceToStorage(I) &&
"Insertion iterator is out of bounds.") ? void (0) : __assert_fail
("this->isReferenceToStorage(I) && \"Insertion iterator is out of bounds.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h"
, 747, __extension__ __PRETTY_FUNCTION__))
;
748
749 // Grow if necessary.
750 size_t Index = I - this->begin();
751 std::remove_reference_t<ArgType> *EltPtr =
752 this->reserveForParamAndGetAddress(Elt);
// Recompute I: reserve may have reallocated the buffer.
753 I = this->begin() + Index;
754
// Move-construct the old back element one slot past the end, then shift
// the tail right by one to open a hole at I.
755 ::new ((void*) this->end()) T(::std::move(this->back()));
756 // Push everything else over.
757 std::move_backward(I, this->end()-1, this->end());
758 this->set_size(this->size() + 1);
759
760 // If we just moved the element we're inserting, be sure to update
761 // the reference (never happens if TakesParamByValue).
762 static_assert(!TakesParamByValue || std::is_same<ArgType, T>::value,
763 "ArgType must be 'T' when taking by value!");
764 if (!TakesParamByValue && this->isReferenceToRange(EltPtr, I, this->end()))
765 ++EltPtr;
766
767 *I = ::std::forward<ArgType>(*EltPtr);
768 return I;
769 }
770
771public:
772 iterator insert(iterator I, T &&Elt) {
773 return insert_one_impl(I, this->forward_value_param(std::move(Elt)));
774 }
775
776 iterator insert(iterator I, const T &Elt) {
777 return insert_one_impl(I, this->forward_value_param(Elt));
778 }
779
// Insert \p NumToInsert copies of \p Elt before \p I; returns an iterator to
// the first inserted copy. Handles Elt aliasing this vector's storage.
780 iterator insert(iterator I, size_type NumToInsert, ValueParamT Elt) {
781 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
782 size_t InsertElt = I - this->begin();
783
784 if (I == this->end()) { // Important special case for empty vector.
785 append(NumToInsert, Elt);
786 return this->begin()+InsertElt;
787 }
788
789 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.")(static_cast <bool> (this->isReferenceToStorage(I) &&
"Insertion iterator is out of bounds.") ? void (0) : __assert_fail
("this->isReferenceToStorage(I) && \"Insertion iterator is out of bounds.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h"
, 789, __extension__ __PRETTY_FUNCTION__))
;
790
791 // Ensure there is enough space, and get the (maybe updated) address of
792 // Elt.
793 const T *EltPtr = this->reserveForParamAndGetAddress(Elt, NumToInsert);
794
795 // Uninvalidate the iterator.
796 I = this->begin()+InsertElt;
797
798 // If there are more elements between the insertion point and the end of the
799 // range than there are being inserted, we can use a simple approach to
800 // insertion. Since we already reserved space, we know that this won't
801 // reallocate the vector.
802 if (size_t(this->end()-I) >= NumToInsert) {
803 T *OldEnd = this->end();
804 append(std::move_iterator<iterator>(this->end() - NumToInsert),
805 std::move_iterator<iterator>(this->end()));
806
807 // Copy the existing elements that get replaced.
808 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
809
810 // If we just moved the element we're inserting, be sure to update
811 // the reference (never happens if TakesParamByValue).
812 if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
813 EltPtr += NumToInsert;
814
815 std::fill_n(I, NumToInsert, *EltPtr);
816 return I;
817 }
818
819 // Otherwise, we're inserting more elements than exist already, and we're
820 // not inserting at the end.
821
822 // Move over the elements that we're about to overwrite.
823 T *OldEnd = this->end();
824 this->set_size(this->size() + NumToInsert);
825 size_t NumOverwritten = OldEnd-I;
826 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
827
828 // If we just moved the element we're inserting, be sure to update
829 // the reference (never happens if TakesParamByValue).
830 if (!TakesParamByValue && I <= EltPtr && EltPtr < this->end())
831 EltPtr += NumToInsert;
832
833 // Replace the overwritten part.
834 std::fill_n(I, NumOverwritten, *EltPtr);
835
836 // Insert the non-overwritten middle part.
837 std::uninitialized_fill_n(OldEnd, NumToInsert - NumOverwritten, *EltPtr);
838 return I;
839 }
840
// Insert the range [From, To) before \p I; returns an iterator to the first
// inserted element. The source range must not alias this vector's storage.
841 template <typename ItTy,
842 typename = std::enable_if_t<std::is_convertible<
843 typename std::iterator_traits<ItTy>::iterator_category,
844 std::input_iterator_tag>::value>>
845 iterator insert(iterator I, ItTy From, ItTy To) {
846 // Convert iterator to elt# to avoid invalidating iterator when we reserve()
847 size_t InsertElt = I - this->begin();
848
849 if (I == this->end()) { // Important special case for empty vector.
850 append(From, To);
851 return this->begin()+InsertElt;
852 }
853
854 assert(this->isReferenceToStorage(I) && "Insertion iterator is out of bounds.")(static_cast <bool> (this->isReferenceToStorage(I) &&
"Insertion iterator is out of bounds.") ? void (0) : __assert_fail
("this->isReferenceToStorage(I) && \"Insertion iterator is out of bounds.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/ADT/SmallVector.h"
, 854, __extension__ __PRETTY_FUNCTION__))
;
855
856 // Check that the reserve that follows doesn't invalidate the iterators.
857 this->assertSafeToAddRange(From, To);
858
859 size_t NumToInsert = std::distance(From, To);
860
861 // Ensure there is enough space.
862 reserve(this->size() + NumToInsert);
863
864 // Uninvalidate the iterator.
865 I = this->begin()+InsertElt;
866
867 // If there are more elements between the insertion point and the end of the
868 // range than there are being inserted, we can use a simple approach to
869 // insertion. Since we already reserved space, we know that this won't
870 // reallocate the vector.
871 if (size_t(this->end()-I) >= NumToInsert) {
872 T *OldEnd = this->end();
873 append(std::move_iterator<iterator>(this->end() - NumToInsert),
874 std::move_iterator<iterator>(this->end()));
875
876 // Copy the existing elements that get replaced.
877 std::move_backward(I, OldEnd-NumToInsert, OldEnd);
878
879 std::copy(From, To, I);
880 return I;
881 }
882
883 // Otherwise, we're inserting more elements than exist already, and we're
884 // not inserting at the end.
885
886 // Move over the elements that we're about to overwrite.
887 T *OldEnd = this->end();
888 this->set_size(this->size() + NumToInsert);
889 size_t NumOverwritten = OldEnd-I;
890 this->uninitialized_move(I, OldEnd, this->end()-NumOverwritten);
891
892 // Replace the overwritten part.
893 for (T *J = I; NumOverwritten > 0; --NumOverwritten) {
894 *J = *From;
895 ++J; ++From;
896 }
897
898 // Insert the non-overwritten middle part.
899 this->uninitialized_copy(From, To, OldEnd);
900 return I;
901 }
902
903 void insert(iterator I, std::initializer_list<T> IL) {
904 insert(I, IL.begin(), IL.end());
905 }
906
// Construct a new element in place at the end from \p Args; returns a
// reference to it. Takes the grow path only when at capacity.
907 template <typename... ArgTypes> reference emplace_back(ArgTypes &&... Args) {
908 if (LLVM_UNLIKELY(this->size() >= this->capacity())__builtin_expect((bool)(this->size() >= this->capacity
()), false)
)
909 return this->growAndEmplaceBack(std::forward<ArgTypes>(Args)...);
910
// Placement-new directly into the uninitialized slot past the end.
911 ::new ((void *)this->end()) T(std::forward<ArgTypes>(Args)...);
912 this->set_size(this->size() + 1);
913 return this->back();
914 }
915
916 SmallVectorImpl &operator=(const SmallVectorImpl &RHS);
917
918 SmallVectorImpl &operator=(SmallVectorImpl &&RHS);
919
920 bool operator==(const SmallVectorImpl &RHS) const {
921 if (this->size() != RHS.size()) return false;
922 return std::equal(this->begin(), this->end(), RHS.begin());
923 }
924 bool operator!=(const SmallVectorImpl &RHS) const {
925 return !(*this == RHS);
926 }
927
928 bool operator<(const SmallVectorImpl &RHS) const {
929 return std::lexicographical_compare(this->begin(), this->end(),
930 RHS.begin(), RHS.end());
931 }
932};
933
// Swap the contents of *this and RHS. When neither vector is using its
// inline storage, this is an O(1) exchange of buffer pointer/size/capacity;
// otherwise elements are swapped/copied individually.
934template <typename T>
935void SmallVectorImpl<T>::swap(SmallVectorImpl<T> &RHS) {
936 if (this == &RHS) return;
937
938 // We can only avoid copying elements if neither vector is small.
939 if (!this->isSmall() && !RHS.isSmall()) {
940 std::swap(this->BeginX, RHS.BeginX);
941 std::swap(this->Size, RHS.Size);
942 std::swap(this->Capacity, RHS.Capacity);
943 return;
944 }
// Make sure each side can hold the other's elements before copying.
945 this->reserve(RHS.size());
946 RHS.reserve(this->size());
947
948 // Swap the shared elements.
949 size_t NumShared = this->size();
950 if (NumShared > RHS.size()) NumShared = RHS.size();
951 for (size_type i = 0; i != NumShared; ++i)
952 std::swap((*this)[i], RHS[i]);
953
954 // Copy over the extra elts.
955 if (this->size() > RHS.size()) {
956 size_t EltDiff = this->size() - RHS.size();
957 this->uninitialized_copy(this->begin()+NumShared, this->end(), RHS.end());
958 RHS.set_size(RHS.size() + EltDiff);
959 this->destroy_range(this->begin()+NumShared, this->end());
960 this->set_size(NumShared);
961 } else if (RHS.size() > this->size()) {
962 size_t EltDiff = RHS.size() - this->size();
963 this->uninitialized_copy(RHS.begin()+NumShared, RHS.end(), this->end());
964 this->set_size(this->size() + EltDiff);
965 this->destroy_range(RHS.begin()+NumShared, RHS.end());
966 RHS.set_size(NumShared);
967 }
968}
969
// Copy-assignment: reuse existing storage where possible, assigning over
// already-constructed elements and copy-constructing the rest.
970template <typename T>
971SmallVectorImpl<T> &SmallVectorImpl<T>::
972 operator=(const SmallVectorImpl<T> &RHS) {
973 // Avoid self-assignment.
974 if (this == &RHS) return *this;
975
976 // If we already have sufficient space, assign the common elements, then
977 // destroy any excess.
978 size_t RHSSize = RHS.size();
979 size_t CurSize = this->size();
980 if (CurSize >= RHSSize) {
981 // Assign common elements.
982 iterator NewEnd;
983 if (RHSSize)
984 NewEnd = std::copy(RHS.begin(), RHS.begin()+RHSSize, this->begin());
985 else
986 NewEnd = this->begin();
987
988 // Destroy excess elements.
989 this->destroy_range(NewEnd, this->end());
990
991 // Trim.
992 this->set_size(RHSSize);
993 return *this;
994 }
995
996 // If we have to grow to have enough elements, destroy the current elements.
997 // This allows us to avoid copying them during the grow.
998 // FIXME: don't do this if they're efficiently moveable.
999 if (this->capacity() < RHSSize) {
1000 // Destroy current elements.
1001 this->clear();
1002 CurSize = 0;
1003 this->grow(RHSSize);
1004 } else if (CurSize) {
1005 // Otherwise, use assignment for the already-constructed elements.
1006 std::copy(RHS.begin(), RHS.begin()+CurSize, this->begin());
1007 }
1008
1009 // Copy construct the new elements in place.
1010 this->uninitialized_copy(RHS.begin()+CurSize, RHS.end(),
1011 this->begin()+CurSize);
1012
1013 // Set end.
1014 this->set_size(RHSSize);
1015 return *this;
1016}
1017
// Move-assignment: steal RHS's heap buffer outright when it has one;
// otherwise move elements across, reusing constructed slots where possible.
// RHS is left empty (cleared or reset to its inline storage).
1018template <typename T>
1019SmallVectorImpl<T> &SmallVectorImpl<T>::operator=(SmallVectorImpl<T> &&RHS) {
1020 // Avoid self-assignment.
1021 if (this == &RHS) return *this;
1022
1023 // If the RHS isn't small, clear this vector and then steal its buffer.
1024 if (!RHS.isSmall()) {
1025 this->destroy_range(this->begin(), this->end());
1026 if (!this->isSmall()) free(this->begin());
1027 this->BeginX = RHS.BeginX;
1028 this->Size = RHS.Size;
1029 this->Capacity = RHS.Capacity;
1030 RHS.resetToSmall();
1031 return *this;
1032 }
1033
1034 // If we already have sufficient space, assign the common elements, then
1035 // destroy any excess.
1036 size_t RHSSize = RHS.size();
1037 size_t CurSize = this->size();
1038 if (CurSize >= RHSSize) {
1039 // Assign common elements.
1040 iterator NewEnd = this->begin();
1041 if (RHSSize)
1042 NewEnd = std::move(RHS.begin(), RHS.end(), NewEnd);
1043
1044 // Destroy excess elements and trim the bounds.
1045 this->destroy_range(NewEnd, this->end());
1046 this->set_size(RHSSize);
1047
1048 // Clear the RHS.
1049 RHS.clear();
1050
1051 return *this;
1052 }
1053
1054 // If we have to grow to have enough elements, destroy the current elements.
1055 // This allows us to avoid copying them during the grow.
1056 // FIXME: this may not actually make any sense if we can efficiently move
1057 // elements.
1058 if (this->capacity() < RHSSize) {
1059 // Destroy current elements.
1060 this->clear();
1061 CurSize = 0;
1062 this->grow(RHSSize);
1063 } else if (CurSize) {
1064 // Otherwise, use assignment for the already-constructed elements.
1065 std::move(RHS.begin(), RHS.begin()+CurSize, this->begin());
1066 }
1067
1068 // Move-construct the new elements in place.
1069 this->uninitialized_move(RHS.begin()+CurSize, RHS.end(),
1070 this->begin()+CurSize);
1071
1072 // Set end.
1073 this->set_size(RHSSize);
1074
1075 RHS.clear();
1076 return *this;
1077}
1078
1079/// Storage for the SmallVector elements. This is specialized for the N=0 case
1080/// to avoid allocating unnecessary storage.
1081template <typename T, unsigned N>
1082struct SmallVectorStorage {
// Uninitialized, suitably aligned raw bytes for N elements of type T.
1083 alignas(T) char InlineElts[N * sizeof(T)];
1084};
1085
1086/// We need the storage to be properly aligned even for small-size of 0 so that
1087/// the pointer math in \a SmallVectorTemplateCommon::getFirstEl() is
1088/// well-defined.
1089template <typename T> struct alignas(T) SmallVectorStorage<T, 0> {};
1090
1091/// Forward declaration of SmallVector so that
1092/// calculateSmallVectorDefaultInlinedElements can reference
1093/// `sizeof(SmallVector<T, 0>)`.
1094template <typename T, unsigned N> class LLVM_GSL_OWNER[[gsl::Owner]] SmallVector;
1095
1096/// Helper class for calculating the default number of inline elements for
1097/// `SmallVector<T>`.
1098///
1099/// This should be migrated to a constexpr function when our minimum
1100/// compiler support is enough for multi-statement constexpr functions.
1101template <typename T> struct CalculateSmallVectorDefaultInlinedElements {
1102 // Parameter controlling the default number of inlined elements
1103 // for `SmallVector<T>`.
1104 //
1105 // The default number of inlined elements ensures that
1106 // 1. There is at least one inlined element.
1107 // 2. `sizeof(SmallVector<T>) <= kPreferredSmallVectorSizeof` unless
1108 // it contradicts 1.
1109 static constexpr size_t kPreferredSmallVectorSizeof = 64;
1110
1111 // static_assert that sizeof(T) is not "too big".
1112 //
1113 // Because our policy guarantees at least one inlined element, it is possible
1114 // for an arbitrarily large inlined element to allocate an arbitrarily large
1115 // amount of inline storage. We generally consider it an antipattern for a
1116 // SmallVector to allocate an excessive amount of inline storage, so we want
1117 // to call attention to these cases and make sure that users are making an
1118 // intentional decision if they request a lot of inline storage.
1119 //
1120 // We want this assertion to trigger in pathological cases, but otherwise
1121 // not be too easy to hit. To accomplish that, the cutoff is actually somewhat
1122 // larger than kPreferredSmallVectorSizeof (otherwise,
1123 // `SmallVector<SmallVector<T>>` would be one easy way to trip it, and that
1124 // pattern seems useful in practice).
1125 //
1126 // One wrinkle is that this assertion is in theory non-portable, since
1127 // sizeof(T) is in general platform-dependent. However, we don't expect this
1128 // to be much of an issue, because most LLVM development happens on 64-bit
1129 // hosts, and therefore sizeof(T) is expected to *decrease* when compiled for
1130 // 32-bit hosts, dodging the issue. The reverse situation, where development
1131 // happens on a 32-bit host and then fails due to sizeof(T) *increasing* on a
1132 // 64-bit host, is expected to be very rare.
1133 static_assert(
1134 sizeof(T) <= 256,
1135 "You are trying to use a default number of inlined elements for "
1136 "`SmallVector<T>` but `sizeof(T)` is really big! Please use an "
1137 "explicit number of inlined elements with `SmallVector<T, N>` to make "
1138 "sure you really want that much inline storage.");
1139
1140 // Discount the size of the header itself when calculating the maximum inline
1141 // bytes.
1142 static constexpr size_t PreferredInlineBytes =
1143 kPreferredSmallVectorSizeof - sizeof(SmallVector<T, 0>);
1144 static constexpr size_t NumElementsThatFit = PreferredInlineBytes / sizeof(T);
// Guarantee at least one inline element even when a single T exceeds the
// preferred byte budget.
1145 static constexpr size_t value =
1146 NumElementsThatFit == 0 ? 1 : NumElementsThatFit;
1147};
1148
1149/// This is a 'vector' (really, a variable-sized array), optimized
1150/// for the case when the array is small. It contains some number of elements
1151/// in-place, which allows it to avoid heap allocation when the actual number of
1152/// elements is below that threshold. This allows normal "small" cases to be
1153/// fast without losing generality for large inputs.
1154///
1155/// \note
1156/// In the absence of a well-motivated choice for the number of inlined
1157/// elements \p N, it is recommended to use \c SmallVector<T> (that is,
1158/// omitting the \p N). This will choose a default number of inlined elements
1159/// reasonable for allocation on the stack (for example, trying to keep \c
1160/// sizeof(SmallVector<T>) around 64 bytes).
1161///
1162/// \warning This does not attempt to be exception safe.
1163///
1164/// \see https://llvm.org/docs/ProgrammersManual.html#llvm-adt-smallvector-h
1165template <typename T,
1166 unsigned N = CalculateSmallVectorDefaultInlinedElements<T>::value>
1167class LLVM_GSL_OWNER[[gsl::Owner]] SmallVector : public SmallVectorImpl<T>,
1168 SmallVectorStorage<T, N> {
1169public:
// Construct an empty vector backed by the N-element inline storage.
1170 SmallVector() : SmallVectorImpl<T>(N) {}
1171
1172 ~SmallVector() {
1173 // Destroy the constructed elements in the vector.
1174 this->destroy_range(this->begin(), this->end());
1175 }
1176
// Construct with Size copies of Value.
1177 explicit SmallVector(size_t Size, const T &Value = T())
1178 : SmallVectorImpl<T>(N) {
1179 this->assign(Size, Value);
1180 }
1181
// Construct from an iterator range [S, E).
1182 template <typename ItTy,
1183 typename = std::enable_if_t<std::is_convertible<
1184 typename std::iterator_traits<ItTy>::iterator_category,
1185 std::input_iterator_tag>::value>>
1186 SmallVector(ItTy S, ItTy E) : SmallVectorImpl<T>(N) {
1187 this->append(S, E);
1188 }
1189
// Construct from an llvm::iterator_range.
1190 template <typename RangeTy>
1191 explicit SmallVector(const iterator_range<RangeTy> &R)
1192 : SmallVectorImpl<T>(N) {
1193 this->append(R.begin(), R.end());
1194 }
1195
1196 SmallVector(std::initializer_list<T> IL) : SmallVectorImpl<T>(N) {
1197 this->assign(IL);
1198 }
1199
// Copy construction/assignment delegate to SmallVectorImpl<T>::operator=,
// so copies between SmallVectors of different inline sizes also work.
1200 SmallVector(const SmallVector &RHS) : SmallVectorImpl<T>(N) {
1201 if (!RHS.empty())
1202 SmallVectorImpl<T>::operator=(RHS);
1203 }
1204
1205 SmallVector &operator=(const SmallVector &RHS) {
1206 SmallVectorImpl<T>::operator=(RHS);
1207 return *this;
1208 }
1209
// Move construction/assignment likewise delegate to the Impl operator=,
// which steals the heap buffer when RHS is not using inline storage.
1210 SmallVector(SmallVector &&RHS) : SmallVectorImpl<T>(N) {
1211 if (!RHS.empty())
1212 SmallVectorImpl<T>::operator=(::std::move(RHS));
1213 }
1214
1215 SmallVector(SmallVectorImpl<T> &&RHS) : SmallVectorImpl<T>(N) {
1216 if (!RHS.empty())
1217 SmallVectorImpl<T>::operator=(::std::move(RHS));
1218 }
1219
1220 SmallVector &operator=(SmallVector &&RHS) {
1221 SmallVectorImpl<T>::operator=(::std::move(RHS));
1222 return *this;
1223 }
1224
1225 SmallVector &operator=(SmallVectorImpl<T> &&RHS) {
1226 SmallVectorImpl<T>::operator=(::std::move(RHS));
1227 return *this;
1228 }
1229
1230 SmallVector &operator=(std::initializer_list<T> IL) {
1231 this->assign(IL);
1232 return *this;
1233 }
1234};
1235
// Free-function convenience wrapper around the member capacity_in_bytes().
1236template <typename T, unsigned N>
1237inline size_t capacity_in_bytes(const SmallVector<T, N> &X) {
1238 return X.capacity_in_bytes();
1239}
1240
1241/// Given a range of type R, iterate the entire range and return a
1242/// SmallVector with elements of the vector. This is useful, for example,
1243/// when you want to iterate a range and then sort the results.
1244template <unsigned Size, typename R>
// Element type: the decayed (non-const, non-reference) type obtained by
// dereferencing the range's iterator.
1245SmallVector<typename std::remove_const<typename std::remove_reference<
1246 decltype(*std::begin(std::declval<R &>()))>::type>::type,
1247 Size>
1248to_vector(R &&Range) {
1249 return {std::begin(Range), std::end(Range)};
1250}
1251
1252} // end namespace llvm
1253
1254namespace std {
1255
1256 /// Implement std::swap in terms of SmallVector swap.
// Delegates to the member swap defined above.
1257 template<typename T>
1258 inline void
1259 swap(llvm::SmallVectorImpl<T> &LHS, llvm::SmallVectorImpl<T> &RHS) {
1260 LHS.swap(RHS);
1261 }
1262
1263 /// Implement std::swap in terms of SmallVector swap.
// Overload for concrete SmallVector<T, N> so that std::swap on two such
// vectors also uses the member swap rather than the generic triple-move.
1264 template<typename T, unsigned N>
1265 inline void
1266 swap(llvm::SmallVector<T, N> &LHS, llvm::SmallVector<T, N> &RHS) {
1267 LHS.swap(RHS);
1268 }
1269
1270} // end namespace std
1271
1272#endif // LLVM_ADT_SMALLVECTOR_H

/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/None.h"
21#include "llvm/ADT/STLExtras.h"
22#include "llvm/ADT/SmallVector.h"
23#include "llvm/ADT/StringRef.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/Attributes.h"
28#include "llvm/IR/BasicBlock.h"
29#include "llvm/IR/CallingConv.h"
30#include "llvm/IR/CFG.h"
31#include "llvm/IR/Constant.h"
32#include "llvm/IR/DerivedTypes.h"
33#include "llvm/IR/Function.h"
34#include "llvm/IR/InstrTypes.h"
35#include "llvm/IR/Instruction.h"
36#include "llvm/IR/OperandTraits.h"
37#include "llvm/IR/Type.h"
38#include "llvm/IR/Use.h"
39#include "llvm/IR/User.h"
40#include "llvm/IR/Value.h"
41#include "llvm/Support/AtomicOrdering.h"
42#include "llvm/Support/Casting.h"
43#include "llvm/Support/ErrorHandling.h"
44#include <cassert>
45#include <cstddef>
46#include <cstdint>
47#include <iterator>
48
49namespace llvm {
50
51class APInt;
52class ConstantInt;
53class DataLayout;
54class LLVMContext;
55
56//===----------------------------------------------------------------------===//
57// AllocaInst Class
58//===----------------------------------------------------------------------===//
59
60/// an instruction to allocate memory on the stack
61class AllocaInst : public UnaryInstruction {
// The type of each allocated element (the pointee of getType()).
62 Type *AllocatedType;
63
// Bitfield layout within the instruction's subclass data word.
64 using AlignmentField = AlignmentBitfieldElementT<0>;
65 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
66 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
67 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
68 SwiftErrorField>(),
69 "Bitfields must be contiguous");
70
71protected:
72 // Note: Instruction needs to be a friend here to call cloneImpl.
73 friend class Instruction;
74
75 AllocaInst *cloneImpl() const;
76
77public:
// Constructors differ in whether an array size, an explicit alignment, and
// an insertion position (before an instruction / at end of a block) are
// supplied; bodies are defined out of line.
78 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
79 const Twine &Name, Instruction *InsertBefore);
80 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
81 const Twine &Name, BasicBlock *InsertAtEnd);
82
83 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
84 Instruction *InsertBefore);
85 AllocaInst(Type *Ty, unsigned AddrSpace,
86 const Twine &Name, BasicBlock *InsertAtEnd);
87
88 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
89 const Twine &Name = "", Instruction *InsertBefore = nullptr);
90 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
91 const Twine &Name, BasicBlock *InsertAtEnd);
92
93 /// Return true if there is an allocation size parameter to the allocation
94 /// instruction that is not 1.
95 bool isArrayAllocation() const;
96
97 /// Get the number of elements allocated. For a simple allocation of a single
98 /// element, this will return a constant 1 value.
99 const Value *getArraySize() const { return getOperand(0); }
100 Value *getArraySize() { return getOperand(0); }
101
102 /// Overload to return most specific pointer type.
103 PointerType *getType() const {
104 return cast<PointerType>(Instruction::getType());
105 }
106
107 /// Get allocation size in bits. Returns None if size can't be determined,
108 /// e.g. in case of a VLA.
109 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
110
111 /// Return the type that is being allocated by the instruction.
112 Type *getAllocatedType() const { return AllocatedType; }
113 /// for use only in special circumstances that need to generically
114 /// transform a whole instruction (eg: IR linking and vectorization).
115 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
116
117 /// Return the alignment of the memory that is being allocated by the
118 /// instruction.
// The bitfield stores log2 of the alignment; decode here.
119 Align getAlign() const {
120 return Align(1ULL << getSubclassData<AlignmentField>());
121 }
122
123 void setAlignment(Align Align) {
124 setSubclassData<AlignmentField>(Log2(Align));
125 }
126
127 // FIXME: Remove this one transition to Align is over.
128 unsigned getAlignment() const { return getAlign().value(); }
129
130 /// Return true if this alloca is in the entry block of the function and is a
131 /// constant size. If so, the code generator will fold it into the
132 /// prolog/epilog code, so it is basically free.
133 bool isStaticAlloca() const;
134
135 /// Return true if this alloca is used as an inalloca argument to a call. Such
136 /// allocas are never considered static even if they are in the entry block.
137 bool isUsedWithInAlloca() const {
138 return getSubclassData<UsedWithInAllocaField>();
139 }
140
141 /// Specify whether this alloca is used to represent the arguments to a call.
142 void setUsedWithInAlloca(bool V) {
143 setSubclassData<UsedWithInAllocaField>(V);
144 }
145
146 /// Return true if this alloca is used as a swifterror argument to a call.
147 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
148 /// Specify whether this alloca is used to represent a swifterror.
149 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
150
151 // Methods for support type inquiry through isa, cast, and dyn_cast:
152 static bool classof(const Instruction *I) {
153 return (I->getOpcode() == Instruction::Alloca);
154 }
155 static bool classof(const Value *V) {
156 return isa<Instruction>(V) && classof(cast<Instruction>(V));
157 }
158
159private:
160 // Shadow Instruction::setInstructionSubclassData with a private forwarding
161 // method so that subclasses cannot accidentally use it.
162 template <typename Bitfield>
163 void setSubclassData(typename Bitfield::Type Value) {
164 Instruction::setSubclassData<Bitfield>(Value);
165 }
166};
167
168//===----------------------------------------------------------------------===//
169// LoadInst Class
170//===----------------------------------------------------------------------===//
171
172/// An instruction for reading from memory. This uses the SubclassData field in
173/// Value to store whether or not the load is volatile.
174class LoadInst : public UnaryInstruction {
175 using VolatileField = BoolBitfieldElementT<0>;
176 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
177 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
178 static_assert(
179 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
180 "Bitfields must be contiguous");
181
182 void AssertOK();
183
184protected:
185 // Note: Instruction needs to be a friend here to call cloneImpl.
186 friend class Instruction;
187
188 LoadInst *cloneImpl() const;
189
190public:
191 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
192 Instruction *InsertBefore);
193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
195 Instruction *InsertBefore);
196 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
197 BasicBlock *InsertAtEnd);
198 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
199 Align Align, Instruction *InsertBefore = nullptr);
200 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
201 Align Align, BasicBlock *InsertAtEnd);
202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
203 Align Align, AtomicOrdering Order,
204 SyncScope::ID SSID = SyncScope::System,
205 Instruction *InsertBefore = nullptr);
206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
207 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
208 BasicBlock *InsertAtEnd);
209
210 /// Return true if this is a load from a volatile memory location.
211 bool isVolatile() const { return getSubclassData<VolatileField>(); }
212
213 /// Specify whether this is a volatile load or not.
214 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
215
216 /// Return the alignment of the access that is being performed.
217 /// FIXME: Remove this function once transition to Align is over.
218 /// Use getAlign() instead.
219 unsigned getAlignment() const { return getAlign().value(); }
220
221 /// Return the alignment of the access that is being performed.
222 Align getAlign() const {
223 return Align(1ULL << (getSubclassData<AlignmentField>()));
224 }
225
226 void setAlignment(Align Align) {
227 setSubclassData<AlignmentField>(Log2(Align));
228 }
229
230 /// Returns the ordering constraint of this load instruction.
231 AtomicOrdering getOrdering() const {
232 return getSubclassData<OrderingField>();
233 }
234 /// Sets the ordering constraint of this load instruction. May not be Release
235 /// or AcquireRelease.
236 void setOrdering(AtomicOrdering Ordering) {
237 setSubclassData<OrderingField>(Ordering);
238 }
239
240 /// Returns the synchronization scope ID of this load instruction.
241 SyncScope::ID getSyncScopeID() const {
242 return SSID;
243 }
244
245 /// Sets the synchronization scope ID of this load instruction.
246 void setSyncScopeID(SyncScope::ID SSID) {
247 this->SSID = SSID;
248 }
249
250 /// Sets the ordering constraint and the synchronization scope ID of this load
251 /// instruction.
252 void setAtomic(AtomicOrdering Ordering,
253 SyncScope::ID SSID = SyncScope::System) {
254 setOrdering(Ordering);
255 setSyncScopeID(SSID);
256 }
257
258 bool isSimple() const { return !isAtomic() && !isVolatile(); }
259
260 bool isUnordered() const {
261 return (getOrdering() == AtomicOrdering::NotAtomic ||
262 getOrdering() == AtomicOrdering::Unordered) &&
263 !isVolatile();
264 }
265
266 Value *getPointerOperand() { return getOperand(0); }
267 const Value *getPointerOperand() const { return getOperand(0); }
268 static unsigned getPointerOperandIndex() { return 0U; }
269 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
270
271 /// Returns the address space of the pointer operand.
272 unsigned getPointerAddressSpace() const {
273 return getPointerOperandType()->getPointerAddressSpace();
274 }
275
276 // Methods for support type inquiry through isa, cast, and dyn_cast:
277 static bool classof(const Instruction *I) {
278 return I->getOpcode() == Instruction::Load;
279 }
280 static bool classof(const Value *V) {
281 return isa<Instruction>(V) && classof(cast<Instruction>(V));
282 }
283
284private:
285 // Shadow Instruction::setInstructionSubclassData with a private forwarding
286 // method so that subclasses cannot accidentally use it.
287 template <typename Bitfield>
288 void setSubclassData(typename Bitfield::Type Value) {
289 Instruction::setSubclassData<Bitfield>(Value);
290 }
291
292 /// The synchronization scope ID of this load instruction. Not quite enough
293 /// room in SubClassData for everything, so synchronization scope ID gets its
294 /// own field.
295 SyncScope::ID SSID;
296};
297
//===----------------------------------------------------------------------===//
// StoreInst Class
//===----------------------------------------------------------------------===//

302/// An instruction for storing to memory.
303class StoreInst : public Instruction {
304 using VolatileField = BoolBitfieldElementT<0>;
305 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
306 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
307 static_assert(
308 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
309 "Bitfields must be contiguous");
310
311 void AssertOK();
312
313protected:
314 // Note: Instruction needs to be a friend here to call cloneImpl.
315 friend class Instruction;
316
317 StoreInst *cloneImpl() const;
318
319public:
320 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
321 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
322 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
323 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
324 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
325 Instruction *InsertBefore = nullptr);
326 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
327 BasicBlock *InsertAtEnd);
328 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
329 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
330 Instruction *InsertBefore = nullptr);
331 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
332 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
333
334 // allocate space for exactly two operands
335 void *operator new(size_t s) {
336 return User::operator new(s, 2);
337 }
338
339 /// Return true if this is a store to a volatile memory location.
340 bool isVolatile() const { return getSubclassData<VolatileField>(); }
341
342 /// Specify whether this is a volatile store or not.
343 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
344
345 /// Transparently provide more efficient getOperand methods.
346 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
347
348 /// Return the alignment of the access that is being performed
349 /// FIXME: Remove this function once transition to Align is over.
350 /// Use getAlign() instead.
351 unsigned getAlignment() const { return getAlign().value(); }
352
353 Align getAlign() const {
354 return Align(1ULL << (getSubclassData<AlignmentField>()));
355 }
356
357 void setAlignment(Align Align) {
358 setSubclassData<AlignmentField>(Log2(Align));
359 }
360
361 /// Returns the ordering constraint of this store instruction.
362 AtomicOrdering getOrdering() const {
363 return getSubclassData<OrderingField>();
364 }
365
366 /// Sets the ordering constraint of this store instruction. May not be
367 /// Acquire or AcquireRelease.
368 void setOrdering(AtomicOrdering Ordering) {
369 setSubclassData<OrderingField>(Ordering);
370 }
371
372 /// Returns the synchronization scope ID of this store instruction.
373 SyncScope::ID getSyncScopeID() const {
374 return SSID;
375 }
376
377 /// Sets the synchronization scope ID of this store instruction.
378 void setSyncScopeID(SyncScope::ID SSID) {
379 this->SSID = SSID;
380 }
381
382 /// Sets the ordering constraint and the synchronization scope ID of this
383 /// store instruction.
384 void setAtomic(AtomicOrdering Ordering,
385 SyncScope::ID SSID = SyncScope::System) {
386 setOrdering(Ordering);
387 setSyncScopeID(SSID);
388 }
389
390 bool isSimple() const { return !isAtomic() && !isVolatile(); }
391
392 bool isUnordered() const {
393 return (getOrdering() == AtomicOrdering::NotAtomic ||
394 getOrdering() == AtomicOrdering::Unordered) &&
395 !isVolatile();
396 }
397
398 Value *getValueOperand() { return getOperand(0); }
399 const Value *getValueOperand() const { return getOperand(0); }
400
401 Value *getPointerOperand() { return getOperand(1); }
402 const Value *getPointerOperand() const { return getOperand(1); }
403 static unsigned getPointerOperandIndex() { return 1U; }
404 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
405
406 /// Returns the address space of the pointer operand.
407 unsigned getPointerAddressSpace() const {
408 return getPointerOperandType()->getPointerAddressSpace();
409 }
410
411 // Methods for support type inquiry through isa, cast, and dyn_cast:
412 static bool classof(const Instruction *I) {
413 return I->getOpcode() == Instruction::Store;
414 }
415 static bool classof(const Value *V) {
416 return isa<Instruction>(V) && classof(cast<Instruction>(V));
417 }
418
419private:
420 // Shadow Instruction::setInstructionSubclassData with a private forwarding
421 // method so that subclasses cannot accidentally use it.
422 template <typename Bitfield>
423 void setSubclassData(typename Bitfield::Type Value) {
424 Instruction::setSubclassData<Bitfield>(Value);
425 }
426
427 /// The synchronization scope ID of this store instruction. Not quite enough
428 /// room in SubClassData for everything, so synchronization scope ID gets its
429 /// own field.
430 SyncScope::ID SSID;
431};
432
433template <>
434struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
435};
436
437DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)StoreInst::op_iterator StoreInst::op_begin() { return OperandTraits
<StoreInst>::op_begin(this); } StoreInst::const_op_iterator
StoreInst::op_begin() const { return OperandTraits<StoreInst
>::op_begin(const_cast<StoreInst*>(this)); } StoreInst
::op_iterator StoreInst::op_end() { return OperandTraits<StoreInst
>::op_end(this); } StoreInst::const_op_iterator StoreInst::
op_end() const { return OperandTraits<StoreInst>::op_end
(const_cast<StoreInst*>(this)); } Value *StoreInst::getOperand
(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture
< OperandTraits<StoreInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 437, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<StoreInst>::op_begin(const_cast
<StoreInst*>(this))[i_nocapture].get()); } void StoreInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<StoreInst>
::operands(this) && "setOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 437, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
StoreInst>::op_begin(this)[i_nocapture] = Val_nocapture; }
unsigned StoreInst::getNumOperands() const { return OperandTraits
<StoreInst>::operands(this); } template <int Idx_nocapture
> Use &StoreInst::Op() { return this->OpFrom<Idx_nocapture
>(this); } template <int Idx_nocapture> const Use &
StoreInst::Op() const { return this->OpFrom<Idx_nocapture
>(this); }
438
//===----------------------------------------------------------------------===//
// FenceInst Class
//===----------------------------------------------------------------------===//

443/// An instruction for ordering other memory operations.
444class FenceInst : public Instruction {
445 using OrderingField = AtomicOrderingBitfieldElementT<0>;
446
447 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
448
449protected:
450 // Note: Instruction needs to be a friend here to call cloneImpl.
451 friend class Instruction;
452
453 FenceInst *cloneImpl() const;
454
455public:
456 // Ordering may only be Acquire, Release, AcquireRelease, or
457 // SequentiallyConsistent.
458 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
459 SyncScope::ID SSID = SyncScope::System,
460 Instruction *InsertBefore = nullptr);
461 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
462 BasicBlock *InsertAtEnd);
463
464 // allocate space for exactly zero operands
465 void *operator new(size_t s) {
466 return User::operator new(s, 0);
467 }
468
469 /// Returns the ordering constraint of this fence instruction.
470 AtomicOrdering getOrdering() const {
471 return getSubclassData<OrderingField>();
472 }
473
474 /// Sets the ordering constraint of this fence instruction. May only be
475 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
476 void setOrdering(AtomicOrdering Ordering) {
477 setSubclassData<OrderingField>(Ordering);
478 }
479
480 /// Returns the synchronization scope ID of this fence instruction.
481 SyncScope::ID getSyncScopeID() const {
482 return SSID;
483 }
484
485 /// Sets the synchronization scope ID of this fence instruction.
486 void setSyncScopeID(SyncScope::ID SSID) {
487 this->SSID = SSID;
488 }
489
490 // Methods for support type inquiry through isa, cast, and dyn_cast:
491 static bool classof(const Instruction *I) {
492 return I->getOpcode() == Instruction::Fence;
493 }
494 static bool classof(const Value *V) {
495 return isa<Instruction>(V) && classof(cast<Instruction>(V));
496 }
497
498private:
499 // Shadow Instruction::setInstructionSubclassData with a private forwarding
500 // method so that subclasses cannot accidentally use it.
501 template <typename Bitfield>
502 void setSubclassData(typename Bitfield::Type Value) {
503 Instruction::setSubclassData<Bitfield>(Value);
504 }
505
506 /// The synchronization scope ID of this fence instruction. Not quite enough
507 /// room in SubClassData for everything, so synchronization scope ID gets its
508 /// own field.
509 SyncScope::ID SSID;
510};
511
//===----------------------------------------------------------------------===//
// AtomicCmpXchgInst Class
//===----------------------------------------------------------------------===//

516/// An instruction that atomically checks whether a
517/// specified value is in a memory location, and, if it is, stores a new value
518/// there. The value returned by this instruction is a pair containing the
519/// original value as first element, and an i1 indicating success (true) or
520/// failure (false) as second element.
521///
522class AtomicCmpXchgInst : public Instruction {
523 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
524 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
525 SyncScope::ID SSID);
526
527 template <unsigned Offset>
528 using AtomicOrderingBitfieldElement =
529 typename Bitfield::Element<AtomicOrdering, Offset, 3,
530 AtomicOrdering::LAST>;
531
532protected:
533 // Note: Instruction needs to be a friend here to call cloneImpl.
534 friend class Instruction;
535
536 AtomicCmpXchgInst *cloneImpl() const;
537
538public:
539 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
540 AtomicOrdering SuccessOrdering,
541 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
542 Instruction *InsertBefore = nullptr);
543 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
544 AtomicOrdering SuccessOrdering,
545 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
546 BasicBlock *InsertAtEnd);
547
548 // allocate space for exactly three operands
549 void *operator new(size_t s) {
550 return User::operator new(s, 3);
551 }
552
553 using VolatileField = BoolBitfieldElementT<0>;
554 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
555 using SuccessOrderingField =
556 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
557 using FailureOrderingField =
558 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
559 using AlignmentField =
560 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
561 static_assert(
562 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
563 FailureOrderingField, AlignmentField>(),
564 "Bitfields must be contiguous");
565
566 /// Return the alignment of the memory that is being allocated by the
567 /// instruction.
568 Align getAlign() const {
569 return Align(1ULL << getSubclassData<AlignmentField>());
570 }
571
572 void setAlignment(Align Align) {
573 setSubclassData<AlignmentField>(Log2(Align));
574 }
575
576 /// Return true if this is a cmpxchg from a volatile memory
577 /// location.
578 ///
579 bool isVolatile() const { return getSubclassData<VolatileField>(); }
580
581 /// Specify whether this is a volatile cmpxchg.
582 ///
583 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
584
585 /// Return true if this cmpxchg may spuriously fail.
586 bool isWeak() const { return getSubclassData<WeakField>(); }
587
588 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
589
590 /// Transparently provide more efficient getOperand methods.
591 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
592
593 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
594 return Ordering != AtomicOrdering::NotAtomic &&
595 Ordering != AtomicOrdering::Unordered;
596 }
597
598 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
599 return Ordering != AtomicOrdering::NotAtomic &&
600 Ordering != AtomicOrdering::Unordered &&
601 Ordering != AtomicOrdering::AcquireRelease &&
602 Ordering != AtomicOrdering::Release;
603 }
604
605 /// Returns the success ordering constraint of this cmpxchg instruction.
606 AtomicOrdering getSuccessOrdering() const {
607 return getSubclassData<SuccessOrderingField>();
608 }
609
610 /// Sets the success ordering constraint of this cmpxchg instruction.
611 void setSuccessOrdering(AtomicOrdering Ordering) {
612 assert(isValidSuccessOrdering(Ordering) &&(static_cast <bool> (isValidSuccessOrdering(Ordering) &&
"invalid CmpXchg success ordering") ? void (0) : __assert_fail
("isValidSuccessOrdering(Ordering) && \"invalid CmpXchg success ordering\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 613, __extension__ __PRETTY_FUNCTION__))
613 "invalid CmpXchg success ordering")(static_cast <bool> (isValidSuccessOrdering(Ordering) &&
"invalid CmpXchg success ordering") ? void (0) : __assert_fail
("isValidSuccessOrdering(Ordering) && \"invalid CmpXchg success ordering\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 613, __extension__ __PRETTY_FUNCTION__))
;
614 setSubclassData<SuccessOrderingField>(Ordering);
615 }
616
617 /// Returns the failure ordering constraint of this cmpxchg instruction.
618 AtomicOrdering getFailureOrdering() const {
619 return getSubclassData<FailureOrderingField>();
620 }
621
622 /// Sets the failure ordering constraint of this cmpxchg instruction.
623 void setFailureOrdering(AtomicOrdering Ordering) {
624 assert(isValidFailureOrdering(Ordering) &&(static_cast <bool> (isValidFailureOrdering(Ordering) &&
"invalid CmpXchg failure ordering") ? void (0) : __assert_fail
("isValidFailureOrdering(Ordering) && \"invalid CmpXchg failure ordering\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 625, __extension__ __PRETTY_FUNCTION__))
625 "invalid CmpXchg failure ordering")(static_cast <bool> (isValidFailureOrdering(Ordering) &&
"invalid CmpXchg failure ordering") ? void (0) : __assert_fail
("isValidFailureOrdering(Ordering) && \"invalid CmpXchg failure ordering\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 625, __extension__ __PRETTY_FUNCTION__))
;
626 setSubclassData<FailureOrderingField>(Ordering);
627 }
628
629 /// Returns a single ordering which is at least as strong as both the
630 /// success and failure orderings for this cmpxchg.
631 AtomicOrdering getMergedOrdering() const {
632 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
633 return AtomicOrdering::SequentiallyConsistent;
634 if (getFailureOrdering() == AtomicOrdering::Acquire) {
635 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
636 return AtomicOrdering::Acquire;
637 if (getSuccessOrdering() == AtomicOrdering::Release)
638 return AtomicOrdering::AcquireRelease;
639 }
640 return getSuccessOrdering();
641 }
642
643 /// Returns the synchronization scope ID of this cmpxchg instruction.
644 SyncScope::ID getSyncScopeID() const {
645 return SSID;
646 }
647
648 /// Sets the synchronization scope ID of this cmpxchg instruction.
649 void setSyncScopeID(SyncScope::ID SSID) {
650 this->SSID = SSID;
651 }
652
653 Value *getPointerOperand() { return getOperand(0); }
654 const Value *getPointerOperand() const { return getOperand(0); }
655 static unsigned getPointerOperandIndex() { return 0U; }
656
657 Value *getCompareOperand() { return getOperand(1); }
658 const Value *getCompareOperand() const { return getOperand(1); }
659
660 Value *getNewValOperand() { return getOperand(2); }
661 const Value *getNewValOperand() const { return getOperand(2); }
662
663 /// Returns the address space of the pointer operand.
664 unsigned getPointerAddressSpace() const {
665 return getPointerOperand()->getType()->getPointerAddressSpace();
666 }
667
668 /// Returns the strongest permitted ordering on failure, given the
669 /// desired ordering on success.
670 ///
671 /// If the comparison in a cmpxchg operation fails, there is no atomic store
672 /// so release semantics cannot be provided. So this function drops explicit
673 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
674 /// operation would remain SequentiallyConsistent.
675 static AtomicOrdering
676 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
677 switch (SuccessOrdering) {
678 default:
679 llvm_unreachable("invalid cmpxchg success ordering")::llvm::llvm_unreachable_internal("invalid cmpxchg success ordering"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 679)
;
680 case AtomicOrdering::Release:
681 case AtomicOrdering::Monotonic:
682 return AtomicOrdering::Monotonic;
683 case AtomicOrdering::AcquireRelease:
684 case AtomicOrdering::Acquire:
685 return AtomicOrdering::Acquire;
686 case AtomicOrdering::SequentiallyConsistent:
687 return AtomicOrdering::SequentiallyConsistent;
688 }
689 }
690
691 // Methods for support type inquiry through isa, cast, and dyn_cast:
692 static bool classof(const Instruction *I) {
693 return I->getOpcode() == Instruction::AtomicCmpXchg;
694 }
695 static bool classof(const Value *V) {
696 return isa<Instruction>(V) && classof(cast<Instruction>(V));
697 }
698
699private:
700 // Shadow Instruction::setInstructionSubclassData with a private forwarding
701 // method so that subclasses cannot accidentally use it.
702 template <typename Bitfield>
703 void setSubclassData(typename Bitfield::Type Value) {
704 Instruction::setSubclassData<Bitfield>(Value);
705 }
706
707 /// The synchronization scope ID of this cmpxchg instruction. Not quite
708 /// enough room in SubClassData for everything, so synchronization scope ID
709 /// gets its own field.
710 SyncScope::ID SSID;
711};
712
713template <>
714struct OperandTraits<AtomicCmpXchgInst> :
715 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
716};
717
718DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() {
return OperandTraits<AtomicCmpXchgInst>::op_begin(this
); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst::
op_begin() const { return OperandTraits<AtomicCmpXchgInst>
::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst
::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits
<AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst::
const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits
<AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst
*>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 718, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<AtomicCmpXchgInst>::op_begin
(const_cast<AtomicCmpXchgInst*>(this))[i_nocapture].get
()); } void AtomicCmpXchgInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicCmpXchgInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 718, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
AtomicCmpXchgInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicCmpXchgInst::getNumOperands() const { return
OperandTraits<AtomicCmpXchgInst>::operands(this); } template
<int Idx_nocapture> Use &AtomicCmpXchgInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &AtomicCmpXchgInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
719
//===----------------------------------------------------------------------===//
// AtomicRMWInst Class
//===----------------------------------------------------------------------===//

724/// an instruction that atomically reads a memory location,
725/// combines it with another value, and then stores the result back. Returns
726/// the old value.
727///
728class AtomicRMWInst : public Instruction {
729protected:
730 // Note: Instruction needs to be a friend here to call cloneImpl.
731 friend class Instruction;
732
733 AtomicRMWInst *cloneImpl() const;
734
735public:
736 /// This enumeration lists the possible modifications atomicrmw can make. In
737 /// the descriptions, 'p' is the pointer to the instruction's memory location,
738 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
739 /// instruction. These instructions always return 'old'.
740 enum BinOp : unsigned {
741 /// *p = v
742 Xchg,
743 /// *p = old + v
744 Add,
745 /// *p = old - v
746 Sub,
747 /// *p = old & v
748 And,
749 /// *p = ~(old & v)
750 Nand,
751 /// *p = old | v
752 Or,
753 /// *p = old ^ v
754 Xor,
755 /// *p = old >signed v ? old : v
756 Max,
757 /// *p = old <signed v ? old : v
758 Min,
759 /// *p = old >unsigned v ? old : v
760 UMax,
761 /// *p = old <unsigned v ? old : v
762 UMin,
763
764 /// *p = old + v
765 FAdd,
766
767 /// *p = old - v
768 FSub,
769
770 FIRST_BINOP = Xchg,
771 LAST_BINOP = FSub,
772 BAD_BINOP
773 };
774
775private:
776 template <unsigned Offset>
777 using AtomicOrderingBitfieldElement =
778 typename Bitfield::Element<AtomicOrdering, Offset, 3,
779 AtomicOrdering::LAST>;
780
781 template <unsigned Offset>
782 using BinOpBitfieldElement =
783 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
784
785public:
786 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
787 AtomicOrdering Ordering, SyncScope::ID SSID,
788 Instruction *InsertBefore = nullptr);
789 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
790 AtomicOrdering Ordering, SyncScope::ID SSID,
791 BasicBlock *InsertAtEnd);
792
793 // allocate space for exactly two operands
794 void *operator new(size_t s) {
795 return User::operator new(s, 2);
796 }
797
798 using VolatileField = BoolBitfieldElementT<0>;
799 using AtomicOrderingField =
800 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
801 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
802 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
803 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
804 OperationField, AlignmentField>(),
805 "Bitfields must be contiguous");
806
807 BinOp getOperation() const { return getSubclassData<OperationField>(); }
808
809 static StringRef getOperationName(BinOp Op);
810
811 static bool isFPOperation(BinOp Op) {
812 switch (Op) {
813 case AtomicRMWInst::FAdd:
814 case AtomicRMWInst::FSub:
815 return true;
816 default:
817 return false;
818 }
819 }
820
821 void setOperation(BinOp Operation) {
822 setSubclassData<OperationField>(Operation);
823 }
824
825 /// Return the alignment of the memory that is being allocated by the
826 /// instruction.
827 Align getAlign() const {
828 return Align(1ULL << getSubclassData<AlignmentField>());
829 }
830
831 void setAlignment(Align Align) {
832 setSubclassData<AlignmentField>(Log2(Align));
833 }
834
835 /// Return true if this is a RMW on a volatile memory location.
836 ///
837 bool isVolatile() const { return getSubclassData<VolatileField>(); }
838
839 /// Specify whether this is a volatile RMW or not.
840 ///
841 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
842
843 /// Transparently provide more efficient getOperand methods.
844 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
845
846 /// Returns the ordering constraint of this rmw instruction.
847 AtomicOrdering getOrdering() const {
848 return getSubclassData<AtomicOrderingField>();
849 }
850
851 /// Sets the ordering constraint of this rmw instruction.
852 void setOrdering(AtomicOrdering Ordering) {
853 assert(Ordering != AtomicOrdering::NotAtomic &&(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 854, __extension__ __PRETTY_FUNCTION__))
854 "atomicrmw instructions can only be atomic.")(static_cast <bool> (Ordering != AtomicOrdering::NotAtomic
&& "atomicrmw instructions can only be atomic.") ? void
(0) : __assert_fail ("Ordering != AtomicOrdering::NotAtomic && \"atomicrmw instructions can only be atomic.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 854, __extension__ __PRETTY_FUNCTION__))
;
855 setSubclassData<AtomicOrderingField>(Ordering);
856 }
857
858 /// Returns the synchronization scope ID of this rmw instruction.
859 SyncScope::ID getSyncScopeID() const {
860 return SSID;
861 }
862
863 /// Sets the synchronization scope ID of this rmw instruction.
864 void setSyncScopeID(SyncScope::ID SSID) {
865 this->SSID = SSID;
866 }
867
868 Value *getPointerOperand() { return getOperand(0); }
869 const Value *getPointerOperand() const { return getOperand(0); }
870 static unsigned getPointerOperandIndex() { return 0U; }
871
872 Value *getValOperand() { return getOperand(1); }
873 const Value *getValOperand() const { return getOperand(1); }
874
875 /// Returns the address space of the pointer operand.
876 unsigned getPointerAddressSpace() const {
877 return getPointerOperand()->getType()->getPointerAddressSpace();
878 }
879
880 bool isFloatingPointOperation() const {
881 return isFPOperation(getOperation());
882 }
883
884 // Methods for support type inquiry through isa, cast, and dyn_cast:
885 static bool classof(const Instruction *I) {
886 return I->getOpcode() == Instruction::AtomicRMW;
887 }
888 static bool classof(const Value *V) {
889 return isa<Instruction>(V) && classof(cast<Instruction>(V));
890 }
891
892private:
893 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
894 AtomicOrdering Ordering, SyncScope::ID SSID);
895
896 // Shadow Instruction::setInstructionSubclassData with a private forwarding
897 // method so that subclasses cannot accidentally use it.
898 template <typename Bitfield>
899 void setSubclassData(typename Bitfield::Type Value) {
900 Instruction::setSubclassData<Bitfield>(Value);
901 }
902
903 /// The synchronization scope ID of this rmw instruction. Not quite enough
904 /// room in SubClassData for everything, so synchronization scope ID gets its
905 /// own field.
906 SyncScope::ID SSID;
907};
908
909template <>
910struct OperandTraits<AtomicRMWInst>
911 : public FixedNumOperandTraits<AtomicRMWInst,2> {
912};
913
914DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return
OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst
::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits
<AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*>
(this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end()
{ return OperandTraits<AtomicRMWInst>::op_end(this); }
AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const
{ return OperandTraits<AtomicRMWInst>::op_end(const_cast
<AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand
(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture
< OperandTraits<AtomicRMWInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 914, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<AtomicRMWInst>::op_begin(const_cast
<AtomicRMWInst*>(this))[i_nocapture].get()); } void AtomicRMWInst
::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast
<bool> (i_nocapture < OperandTraits<AtomicRMWInst
>::operands(this) && "setOperand() out of range!")
? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 914, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
AtomicRMWInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned AtomicRMWInst::getNumOperands() const { return OperandTraits
<AtomicRMWInst>::operands(this); } template <int Idx_nocapture
> Use &AtomicRMWInst::Op() { return this->OpFrom<
Idx_nocapture>(this); } template <int Idx_nocapture>
const Use &AtomicRMWInst::Op() const { return this->OpFrom
<Idx_nocapture>(this); }
915
916//===----------------------------------------------------------------------===//
917// GetElementPtrInst Class
918//===----------------------------------------------------------------------===//
919
920// checkGEPType - Simple wrapper function to give a better assertion failure
921// message on bad indexes for a gep instruction.
922//
923inline Type *checkGEPType(Type *Ty) {
924 assert(Ty && "Invalid GetElementPtrInst indices for type!")(static_cast <bool> (Ty && "Invalid GetElementPtrInst indices for type!"
) ? void (0) : __assert_fail ("Ty && \"Invalid GetElementPtrInst indices for type!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 924, __extension__ __PRETTY_FUNCTION__))
;
925 return Ty;
926}
927
928/// an instruction for type-safe pointer arithmetic to
929/// access elements of arrays and structs
930///
931class GetElementPtrInst : public Instruction {
932 Type *SourceElementType;
933 Type *ResultElementType;
934
935 GetElementPtrInst(const GetElementPtrInst &GEPI);
936
937 /// Constructors - Create a getelementptr instruction with a base pointer an
938 /// list of indices. The first ctor can optionally insert before an existing
939 /// instruction, the second appends the new instruction to the specified
940 /// BasicBlock.
941 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
942 ArrayRef<Value *> IdxList, unsigned Values,
943 const Twine &NameStr, Instruction *InsertBefore);
944 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
945 ArrayRef<Value *> IdxList, unsigned Values,
946 const Twine &NameStr, BasicBlock *InsertAtEnd);
947
948 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
949
950protected:
951 // Note: Instruction needs to be a friend here to call cloneImpl.
952 friend class Instruction;
953
954 GetElementPtrInst *cloneImpl() const;
955
956public:
957 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
958 ArrayRef<Value *> IdxList,
959 const Twine &NameStr = "",
960 Instruction *InsertBefore = nullptr) {
961 unsigned Values = 1 + unsigned(IdxList.size());
962 if (!PointeeType) {
963 PointeeType =
964 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
965 } else {
966 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 967, __extension__ __PRETTY_FUNCTION__))
967 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 967, __extension__ __PRETTY_FUNCTION__))
;
968 }
969 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
970 NameStr, InsertBefore);
971 }
972
973 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
974 ArrayRef<Value *> IdxList,
975 const Twine &NameStr,
976 BasicBlock *InsertAtEnd) {
977 unsigned Values = 1 + unsigned(IdxList.size());
978 if (!PointeeType) {
979 PointeeType =
980 cast<PointerType>(Ptr->getType()->getScalarType())->getElementType();
981 } else {
982 assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 983, __extension__ __PRETTY_FUNCTION__))
983 ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType
()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType
)) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 983, __extension__ __PRETTY_FUNCTION__))
;
984 }
985 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
986 NameStr, InsertAtEnd);
987 }
988
989 /// Create an "inbounds" getelementptr. See the documentation for the
990 /// "inbounds" flag in LangRef.html for details.
991 static GetElementPtrInst *CreateInBounds(Value *Ptr,
992 ArrayRef<Value *> IdxList,
993 const Twine &NameStr = "",
994 Instruction *InsertBefore = nullptr){
995 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore);
996 }
997
998 static GetElementPtrInst *
999 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
1000 const Twine &NameStr = "",
1001 Instruction *InsertBefore = nullptr) {
1002 GetElementPtrInst *GEP =
1003 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
1004 GEP->setIsInBounds(true);
1005 return GEP;
1006 }
1007
1008 static GetElementPtrInst *CreateInBounds(Value *Ptr,
1009 ArrayRef<Value *> IdxList,
1010 const Twine &NameStr,
1011 BasicBlock *InsertAtEnd) {
1012 return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd);
1013 }
1014
1015 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
1016 ArrayRef<Value *> IdxList,
1017 const Twine &NameStr,
1018 BasicBlock *InsertAtEnd) {
1019 GetElementPtrInst *GEP =
1020 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
1021 GEP->setIsInBounds(true);
1022 return GEP;
1023 }
1024
1025 /// Transparently provide more efficient getOperand methods.
1026 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
1027
1028 Type *getSourceElementType() const { return SourceElementType; }
1029
1030 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1031 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1032
1033 Type *getResultElementType() const {
1034 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1035, __extension__ __PRETTY_FUNCTION__))
1035 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1035, __extension__ __PRETTY_FUNCTION__))
;
1036 return ResultElementType;
1037 }
1038
1039 /// Returns the address space of this instruction's pointer type.
1040 unsigned getAddressSpace() const {
1041 // Note that this is always the same as the pointer operand's address space
1042 // and that is cheaper to compute, so cheat here.
1043 return getPointerAddressSpace();
1044 }
1045
1046 /// Returns the result type of a getelementptr with the given source
1047 /// element type and indexes.
1048 ///
1049 /// Null is returned if the indices are invalid for the specified
1050 /// source element type.
1051 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1052 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1053 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1054
1055 /// Return the type of the element at the given index of an indexable
1056 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1057 ///
1058 /// Returns null if the type can't be indexed, or the given index is not
1059 /// legal for the given type.
1060 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1061 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1062
1063 inline op_iterator idx_begin() { return op_begin()+1; }
1064 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1065 inline op_iterator idx_end() { return op_end(); }
1066 inline const_op_iterator idx_end() const { return op_end(); }
1067
1068 inline iterator_range<op_iterator> indices() {
1069 return make_range(idx_begin(), idx_end());
1070 }
1071
1072 inline iterator_range<const_op_iterator> indices() const {
1073 return make_range(idx_begin(), idx_end());
1074 }
1075
1076 Value *getPointerOperand() {
1077 return getOperand(0);
1078 }
1079 const Value *getPointerOperand() const {
1080 return getOperand(0);
1081 }
1082 static unsigned getPointerOperandIndex() {
1083 return 0U; // get index for modifying correct operand.
1084 }
1085
1086 /// Method to return the pointer operand as a
1087 /// PointerType.
1088 Type *getPointerOperandType() const {
1089 return getPointerOperand()->getType();
1090 }
1091
1092 /// Returns the address space of the pointer operand.
1093 unsigned getPointerAddressSpace() const {
1094 return getPointerOperandType()->getPointerAddressSpace();
1095 }
1096
1097 /// Returns the pointer type returned by the GEP
1098 /// instruction, which may be a vector of pointers.
1099 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1100 ArrayRef<Value *> IdxList) {
1101 Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)),
1102 Ptr->getType()->getPointerAddressSpace());
1103 // Vector GEP
1104 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1105 ElementCount EltCount = PtrVTy->getElementCount();
1106 return VectorType::get(PtrTy, EltCount);
1107 }
1108 for (Value *Index : IdxList)
1109 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1110 ElementCount EltCount = IndexVTy->getElementCount();
1111 return VectorType::get(PtrTy, EltCount);
1112 }
1113 // Scalar GEP
1114 return PtrTy;
1115 }
1116
1117 unsigned getNumIndices() const { // Note: always non-negative
1118 return getNumOperands() - 1;
1119 }
1120
1121 bool hasIndices() const {
1122 return getNumOperands() > 1;
1123 }
1124
1125 /// Return true if all of the indices of this GEP are
1126 /// zeros. If so, the result pointer and the first operand have the same
1127 /// value, just potentially different types.
1128 bool hasAllZeroIndices() const;
1129
1130 /// Return true if all of the indices of this GEP are
1131 /// constant integers. If so, the result pointer and the first operand have
1132 /// a constant offset between them.
1133 bool hasAllConstantIndices() const;
1134
1135 /// Set or clear the inbounds flag on this GEP instruction.
1136 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1137 void setIsInBounds(bool b = true);
1138
1139 /// Determine whether the GEP has the inbounds flag.
1140 bool isInBounds() const;
1141
1142 /// Accumulate the constant address offset of this GEP if possible.
1143 ///
1144 /// This routine accepts an APInt into which it will accumulate the constant
1145 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1146 /// all-constant, it returns false and the value of the offset APInt is
1147 /// undefined (it is *not* preserved!). The APInt passed into this routine
1148 /// must be at least as wide as the IntPtr type for the address space of
1149 /// the base GEP pointer.
1150 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1151
1152 // Methods for support type inquiry through isa, cast, and dyn_cast:
1153 static bool classof(const Instruction *I) {
1154 return (I->getOpcode() == Instruction::GetElementPtr);
1155 }
1156 static bool classof(const Value *V) {
1157 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1158 }
1159};
1160
1161template <>
1162struct OperandTraits<GetElementPtrInst> :
1163 public VariadicOperandTraits<GetElementPtrInst, 1> {
1164};
1165
1166GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1167 ArrayRef<Value *> IdxList, unsigned Values,
1168 const Twine &NameStr,
1169 Instruction *InsertBefore)
1170 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1171 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1172 Values, InsertBefore),
1173 SourceElementType(PointeeType),
1174 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1175 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1176, __extension__ __PRETTY_FUNCTION__))
1176 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1176, __extension__ __PRETTY_FUNCTION__))
;
1177 init(Ptr, IdxList, NameStr);
1178}
1179
1180GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1181 ArrayRef<Value *> IdxList, unsigned Values,
1182 const Twine &NameStr,
1183 BasicBlock *InsertAtEnd)
1184 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1185 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1186 Values, InsertAtEnd),
1187 SourceElementType(PointeeType),
1188 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1189 assert(cast<PointerType>(getType()->getScalarType())(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1190, __extension__ __PRETTY_FUNCTION__))
1190 ->isOpaqueOrPointeeTypeMatches(ResultElementType))(static_cast <bool> (cast<PointerType>(getType()->
getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType
)) ? void (0) : __assert_fail ("cast<PointerType>(getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(ResultElementType)"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1190, __extension__ __PRETTY_FUNCTION__))
;
1191 init(Ptr, IdxList, NameStr);
1192}
1193
1194DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)GetElementPtrInst::op_iterator GetElementPtrInst::op_begin() {
return OperandTraits<GetElementPtrInst>::op_begin(this
); } GetElementPtrInst::const_op_iterator GetElementPtrInst::
op_begin() const { return OperandTraits<GetElementPtrInst>
::op_begin(const_cast<GetElementPtrInst*>(this)); } GetElementPtrInst
::op_iterator GetElementPtrInst::op_end() { return OperandTraits
<GetElementPtrInst>::op_end(this); } GetElementPtrInst::
const_op_iterator GetElementPtrInst::op_end() const { return OperandTraits
<GetElementPtrInst>::op_end(const_cast<GetElementPtrInst
*>(this)); } Value *GetElementPtrInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<GetElementPtrInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1194, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<GetElementPtrInst>::op_begin
(const_cast<GetElementPtrInst*>(this))[i_nocapture].get
()); } void GetElementPtrInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<GetElementPtrInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<GetElementPtrInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1194, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
GetElementPtrInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned GetElementPtrInst::getNumOperands() const { return
OperandTraits<GetElementPtrInst>::operands(this); } template
<int Idx_nocapture> Use &GetElementPtrInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &GetElementPtrInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
1195
1196//===----------------------------------------------------------------------===//
1197// ICmpInst Class
1198//===----------------------------------------------------------------------===//
1199
1200/// This instruction compares its operands according to the predicate given
1201/// to the constructor. It only operates on integers or pointers. The operands
1202/// must be identical types.
1203/// Represent an integer comparison operator.
1204class ICmpInst: public CmpInst {
1205 void AssertOK() {
1206 assert(isIntPredicate() &&(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1207, __extension__ __PRETTY_FUNCTION__))
1207 "Invalid ICmp predicate value")(static_cast <bool> (isIntPredicate() && "Invalid ICmp predicate value"
) ? void (0) : __assert_fail ("isIntPredicate() && \"Invalid ICmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1207, __extension__ __PRETTY_FUNCTION__))
;
1208 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1209, __extension__ __PRETTY_FUNCTION__))
1209 "Both operands to ICmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to ICmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to ICmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1209, __extension__ __PRETTY_FUNCTION__))
;
1210 // Check that the operands are the right type
1211 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1213, __extension__ __PRETTY_FUNCTION__))
1212 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1213, __extension__ __PRETTY_FUNCTION__))
1213 "Invalid operand types for ICmp instruction")(static_cast <bool> ((getOperand(0)->getType()->isIntOrIntVectorTy
() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
"Invalid operand types for ICmp instruction") ? void (0) : __assert_fail
("(getOperand(0)->getType()->isIntOrIntVectorTy() || getOperand(0)->getType()->isPtrOrPtrVectorTy()) && \"Invalid operand types for ICmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1213, __extension__ __PRETTY_FUNCTION__))
;
1214 }
1215
1216protected:
1217 // Note: Instruction needs to be a friend here to call cloneImpl.
1218 friend class Instruction;
1219
1220 /// Clone an identical ICmpInst
1221 ICmpInst *cloneImpl() const;
1222
1223public:
1224 /// Constructor with insert-before-instruction semantics.
1225 ICmpInst(
1226 Instruction *InsertBefore, ///< Where to insert
1227 Predicate pred, ///< The predicate to use for the comparison
1228 Value *LHS, ///< The left-hand-side of the expression
1229 Value *RHS, ///< The right-hand-side of the expression
1230 const Twine &NameStr = "" ///< Name of the instruction
1231 ) : CmpInst(makeCmpResultType(LHS->getType()),
1232 Instruction::ICmp, pred, LHS, RHS, NameStr,
1233 InsertBefore) {
1234#ifndef NDEBUG
1235 AssertOK();
1236#endif
1237 }
1238
1239 /// Constructor with insert-at-end semantics.
1240 ICmpInst(
1241 BasicBlock &InsertAtEnd, ///< Block to insert into.
1242 Predicate pred, ///< The predicate to use for the comparison
1243 Value *LHS, ///< The left-hand-side of the expression
1244 Value *RHS, ///< The right-hand-side of the expression
1245 const Twine &NameStr = "" ///< Name of the instruction
1246 ) : CmpInst(makeCmpResultType(LHS->getType()),
1247 Instruction::ICmp, pred, LHS, RHS, NameStr,
1248 &InsertAtEnd) {
1249#ifndef NDEBUG
1250 AssertOK();
1251#endif
1252 }
1253
1254 /// Constructor with no-insertion semantics
1255 ICmpInst(
1256 Predicate pred, ///< The predicate to use for the comparison
1257 Value *LHS, ///< The left-hand-side of the expression
1258 Value *RHS, ///< The right-hand-side of the expression
1259 const Twine &NameStr = "" ///< Name of the instruction
1260 ) : CmpInst(makeCmpResultType(LHS->getType()),
1261 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1262#ifndef NDEBUG
1263 AssertOK();
1264#endif
1265 }
1266
1267 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1268 /// @returns the predicate that would be the result if the operand were
1269 /// regarded as signed.
1270 /// Return the signed version of the predicate
1271 Predicate getSignedPredicate() const {
1272 return getSignedPredicate(getPredicate());
1273 }
1274
1275 /// This is a static version that you can use without an instruction.
1276 /// Return the signed version of the predicate.
1277 static Predicate getSignedPredicate(Predicate pred);
1278
1279 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1280 /// @returns the predicate that would be the result if the operand were
1281 /// regarded as unsigned.
1282 /// Return the unsigned version of the predicate
1283 Predicate getUnsignedPredicate() const {
1284 return getUnsignedPredicate(getPredicate());
1285 }
1286
1287 /// This is a static version that you can use without an instruction.
1288 /// Return the unsigned version of the predicate.
1289 static Predicate getUnsignedPredicate(Predicate pred);
1290
1291 /// Return true if this predicate is either EQ or NE. This also
1292 /// tests for commutativity.
1293 static bool isEquality(Predicate P) {
1294 return P == ICMP_EQ || P == ICMP_NE;
1295 }
1296
1297 /// Return true if this predicate is either EQ or NE. This also
1298 /// tests for commutativity.
1299 bool isEquality() const {
1300 return isEquality(getPredicate());
1301 }
1302
1303 /// @returns true if the predicate of this ICmpInst is commutative
1304 /// Determine if this relation is commutative.
1305 bool isCommutative() const { return isEquality(); }
1306
1307 /// Return true if the predicate is relational (not EQ or NE).
1308 ///
1309 bool isRelational() const {
1310 return !isEquality();
1311 }
1312
1313 /// Return true if the predicate is relational (not EQ or NE).
1314 ///
1315 static bool isRelational(Predicate P) {
1316 return !isEquality(P);
1317 }
1318
1319 /// Return true if the predicate is SGT or UGT.
1320 ///
1321 static bool isGT(Predicate P) {
1322 return P == ICMP_SGT || P == ICMP_UGT;
1323 }
1324
1325 /// Return true if the predicate is SLT or ULT.
1326 ///
1327 static bool isLT(Predicate P) {
1328 return P == ICMP_SLT || P == ICMP_ULT;
1329 }
1330
1331 /// Return true if the predicate is SGE or UGE.
1332 ///
1333 static bool isGE(Predicate P) {
1334 return P == ICMP_SGE || P == ICMP_UGE;
1335 }
1336
1337 /// Return true if the predicate is SLE or ULE.
1338 ///
1339 static bool isLE(Predicate P) {
1340 return P == ICMP_SLE || P == ICMP_ULE;
1341 }
1342
1343 /// Exchange the two operands to this instruction in such a way that it does
1344 /// not modify the semantics of the instruction. The predicate value may be
1345 /// changed to retain the same result if the predicate is order dependent
1346 /// (e.g. ult).
1347 /// Swap operands and adjust predicate.
1348 void swapOperands() {
1349 setPredicate(getSwappedPredicate());
1350 Op<0>().swap(Op<1>());
1351 }
1352
1353 // Methods for support type inquiry through isa, cast, and dyn_cast:
1354 static bool classof(const Instruction *I) {
1355 return I->getOpcode() == Instruction::ICmp;
1356 }
1357 static bool classof(const Value *V) {
1358 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1359 }
1360};
1361
1362//===----------------------------------------------------------------------===//
1363// FCmpInst Class
1364//===----------------------------------------------------------------------===//
1365
1366/// This instruction compares its operands according to the predicate given
1367/// to the constructor. It only operates on floating point values or packed
1368/// vectors of floating point values. The operands must be identical types.
1369/// Represents a floating point comparison operator.
1370class FCmpInst: public CmpInst {
1371 void AssertOK() {
1372 assert(isFPPredicate() && "Invalid FCmp predicate value")(static_cast <bool> (isFPPredicate() && "Invalid FCmp predicate value"
) ? void (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1372, __extension__ __PRETTY_FUNCTION__))
;
1373 assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1374, __extension__ __PRETTY_FUNCTION__))
1374 "Both operands to FCmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand
(1)->getType() && "Both operands to FCmp instruction are not of the same type!"
) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1374, __extension__ __PRETTY_FUNCTION__))
;
1375 // Check that the operands are the right type
1376 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1377, __extension__ __PRETTY_FUNCTION__))
1377 "Invalid operand types for FCmp instruction")(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy
() && "Invalid operand types for FCmp instruction") ?
void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 1377, __extension__ __PRETTY_FUNCTION__))
;
1378 }
1379
1380protected:
1381 // Note: Instruction needs to be a friend here to call cloneImpl.
1382 friend class Instruction;
1383
1384 /// Clone an identical FCmpInst
1385 FCmpInst *cloneImpl() const;
1386
1387public:
1388 /// Constructor with insert-before-instruction semantics.
1389 FCmpInst(
1390 Instruction *InsertBefore, ///< Where to insert
1391 Predicate pred, ///< The predicate to use for the comparison
1392 Value *LHS, ///< The left-hand-side of the expression
1393 Value *RHS, ///< The right-hand-side of the expression
1394 const Twine &NameStr = "" ///< Name of the instruction
1395 ) : CmpInst(makeCmpResultType(LHS->getType()),
1396 Instruction::FCmp, pred, LHS, RHS, NameStr,
1397 InsertBefore) {
1398 AssertOK();
1399 }
1400
1401 /// Constructor with insert-at-end semantics.
1402 FCmpInst(
1403 BasicBlock &InsertAtEnd, ///< Block to insert into.
1404 Predicate pred, ///< The predicate to use for the comparison
1405 Value *LHS, ///< The left-hand-side of the expression
1406 Value *RHS, ///< The right-hand-side of the expression
1407 const Twine &NameStr = "" ///< Name of the instruction
1408 ) : CmpInst(makeCmpResultType(LHS->getType()),
1409 Instruction::FCmp, pred, LHS, RHS, NameStr,
1410 &InsertAtEnd) {
1411 AssertOK();
1412 }
1413
1414 /// Constructor with no-insertion semantics
1415 FCmpInst(
1416 Predicate Pred, ///< The predicate to use for the comparison
1417 Value *LHS, ///< The left-hand-side of the expression
1418 Value *RHS, ///< The right-hand-side of the expression
1419 const Twine &NameStr = "", ///< Name of the instruction
1420 Instruction *FlagsSource = nullptr
1421 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1422 RHS, NameStr, nullptr, FlagsSource) {
1423 AssertOK();
1424 }
1425
1426 /// @returns true if the predicate of this instruction is EQ or NE.
1427 /// Determine if this is an equality predicate.
1428 static bool isEquality(Predicate Pred) {
1429 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1430 Pred == FCMP_UNE;
1431 }
1432
1433 /// @returns true if the predicate of this instruction is EQ or NE.
1434 /// Determine if this is an equality predicate.
1435 bool isEquality() const { return isEquality(getPredicate()); }
1436
1437 /// @returns true if the predicate of this instruction is commutative.
1438 /// Determine if this is a commutative predicate.
1439 bool isCommutative() const {
1440 return isEquality() ||
1441 getPredicate() == FCMP_FALSE ||
1442 getPredicate() == FCMP_TRUE ||
1443 getPredicate() == FCMP_ORD ||
1444 getPredicate() == FCMP_UNO;
1445 }
1446
1447 /// @returns true if the predicate is relational (not EQ or NE).
1448 /// Determine if this a relational predicate.
1449 bool isRelational() const { return !isEquality(); }
1450
1451 /// Exchange the two operands to this instruction in such a way that it does
1452 /// not modify the semantics of the instruction. The predicate value may be
1453 /// changed to retain the same result if the predicate is order dependent
1454 /// (e.g. ult).
1455 /// Swap operands and adjust predicate.
1456 void swapOperands() {
1457 setPredicate(getSwappedPredicate());
1458 Op<0>().swap(Op<1>());
1459 }
1460
1461 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1462 static bool classof(const Instruction *I) {
1463 return I->getOpcode() == Instruction::FCmp;
1464 }
1465 static bool classof(const Value *V) {
1466 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1467 }
1468};
1469
//===----------------------------------------------------------------------===//
/// This class represents a function call, abstracting a target
/// machine's calling convention. This class uses low bit of the SubClassData
/// field to indicate whether or not this is a tail call. The rest of the bits
/// hold the calling convention of the call.
///
class CallInst : public CallBase {
  // Copying goes through cloneImpl(); the copy constructor stays private.
  CallInst(const CallInst &CI);

  /// Construct a CallInst given a range of arguments.
  /// Construct a CallInst from a range of arguments
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  Instruction *InsertBefore);

  // Convenience overload: no operand bundles.
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  const Twine &NameStr, Instruction *InsertBefore)
      : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}

  /// Construct a CallInst given a range of arguments.
  /// Construct a CallInst from a range of arguments
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
                    Instruction *InsertBefore);

  CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
           BasicBlock *InsertAtEnd);

  void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
  void init(FunctionType *FTy, Value *Func, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus the input operand
    // counts provided.
    return 1 + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical CallInst.
  CallInst *cloneImpl() const;

public:
  // Create() factories: each one sizes the hung-off operand allocation with
  // ComputeNumOperands (and, with bundles, extra descriptor bytes) before
  // placement-constructing the instruction.
  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = None,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    // Bundles need per-bundle descriptor storage beyond the operand slots.
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
  }

  // FunctionCallee convenience overloads: unpack the callee's function type
  // and value and forward to the (FunctionType*, Value*) factories above.
  static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = None,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertAtEnd);
  }

  /// Create a clone of \p CI with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned call instruction is identical \p CI in every way except that
  /// the operand bundles for the new instruction are set to the operand bundles
  /// in \p Bundles.
  static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
                          Instruction *InsertPt = nullptr);

  /// Generate the IR for a call to malloc:
  /// 1. Compute the malloc call's argument as the specified type's size,
  ///    possibly multiplied by the array size if the array size is not
  ///    constant 1.
  /// 2. Call malloc with that argument.
  /// 3. Bitcast the result of the malloc call to the specified type.
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  /// Generate the IR for a call to the builtin free function.
  static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 BasicBlock *InsertAtEnd);

  // Note that 'musttail' implies 'tail'.
  enum TailCallKind : unsigned {
    TCK_None = 0,
    TCK_Tail = 1,
    TCK_MustTail = 2,
    TCK_NoTail = 3,
    TCK_LAST = TCK_NoTail
  };

  // The tail-call kind lives in the low 2 subclass-data bits, directly below
  // the calling-convention field declared by CallBase.
  using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
  static_assert(
      Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
      "Bitfields must be contiguous");

  TailCallKind getTailCallKind() const {
    return getSubclassData<TailCallKindField>();
  }

  bool isTailCall() const {
    // TCK_MustTail implies tail; TCK_NoTail does not count as a tail call.
    TailCallKind Kind = getTailCallKind();
    return Kind == TCK_Tail || Kind == TCK_MustTail;
  }

  bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }

  bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }

  void setTailCallKind(TailCallKind TCK) {
    setSubclassData<TailCallKindField>(TCK);
  }

  void setTailCall(bool IsTc = true) {
    setTailCallKind(IsTc ? TCK_Tail : TCK_None);
  }

  /// Return true if the call can return twice
  bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
  void setCanReturnTwice() {
    addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Call;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Updates profile metadata by scaling it by \p S / \p T.
  void updateProfWeight(uint64_t S, uint64_t T);

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};
1712
// Out-of-line inline definition: insert-at-end variant.
// The operand list is hung off before `this`: op_end(this) minus the total
// operand count (args + bundle inputs + 1 slot for the callee) yields the
// first Use; keep this arithmetic in sync with ComputeNumOperands.
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertAtEnd) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1723
// Out-of-line inline definition: insert-before variant. Mirrors the
// insert-at-end constructor above except for the insertion point.
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertBefore) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1734
//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//

/// This class represents the LLVM 'select' instruction.
/// The result type is taken from the true value (S1); operand validity is
/// checked by areInvalidOperands() in init().
///
class SelectInst : public Instruction {
  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             Instruction *InsertBefore)
    : Instruction(S1->getType(), Instruction::Select,
                  &Op<0>(), 3, InsertBefore) {
    init(C, S1, S2);
    setName(NameStr);
  }

  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             BasicBlock *InsertAtEnd)
    : Instruction(S1->getType(), Instruction::Select,
                  &Op<0>(), 3, InsertAtEnd) {
    init(C, S1, S2);
    setName(NameStr);
  }

  // Store the three operands after validating them.
  void init(Value *C, Value *S1, Value *S2) {
    assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
    Op<0>() = C;
    Op<1>() = S1;
    Op<2>() = S2;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SelectInst.
  SelectInst *cloneImpl() const;

public:
  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr,
                            Instruction *MDFrom = nullptr) {
    // Optionally copy metadata (e.g. prof weights) from MDFrom.
    SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
    if (MDFrom)
      Sel->copyMetadata(*MDFrom);
    return Sel;
  }

  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
  }

  // Operand accessors: 0 = condition, 1 = true value, 2 = false value.
  const Value *getCondition() const { return Op<0>(); }
  const Value *getTrueValue() const { return Op<1>(); }
  const Value *getFalseValue() const { return Op<2>(); }
  Value *getCondition() { return Op<0>(); }
  Value *getTrueValue() { return Op<1>(); }
  Value *getFalseValue() { return Op<2>(); }

  void setCondition(Value *V) { Op<0>() = V; }
  void setTrueValue(Value *V) { Op<1>() = V; }
  void setFalseValue(Value *V) { Op<2>() = V; }

  /// Swap the true and false values of the select instruction.
  /// This doesn't swap prof metadata.
  void swapValues() { Op<1>().swap(Op<2>()); }

  /// Return a string if the specified operands are invalid
  /// for a select operation, otherwise return null.
  static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  OtherOps getOpcode() const {
    return static_cast<OtherOps>(Instruction::getOpcode());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Select;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1822
// SelectInst always has exactly three operands (cond, true, false), so it
// uses the fixed-operand traits; the macro below stamps out the out-of-line
// getOperand/setOperand/op_begin/op_end accessor definitions.
template <>
struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1828
//===----------------------------------------------------------------------===//
// VAArgInst Class
//===----------------------------------------------------------------------===//

/// This class represents the va_arg llvm instruction, which returns
/// an argument of the specified type given a va_list and increments that list
///
class VAArgInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical VAArgInst.
  VAArgInst *cloneImpl() const;

public:
  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
            Instruction *InsertBefore = nullptr)
    : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
    setName(NameStr);
  }

  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
            BasicBlock *InsertAtEnd)
    : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
    setName(NameStr);
  }

  // The single operand is the va_list pointer.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == VAArg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1868
//===----------------------------------------------------------------------===//
// ExtractElementInst Class
//===----------------------------------------------------------------------===//

/// This instruction extracts a single (scalar)
/// element from a VectorType value
///
class ExtractElementInst : public Instruction {
  // Constructors are private; use the Create() factories, which size the
  // two-operand allocation via placement new(2).
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
                     Instruction *InsertBefore = nullptr);
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
                     BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ExtractElementInst.
  ExtractElementInst *cloneImpl() const;

public:
  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                    const Twine &NameStr = "",
                                    Instruction *InsertBefore = nullptr) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
  }

  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                    const Twine &NameStr,
                                    BasicBlock *InsertAtEnd) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an extractelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *Idx);

  // Operand 0 is the vector, operand 1 is the index.
  Value *getVectorOperand() { return Op<0>(); }
  Value *getIndexOperand() { return Op<1>(); }
  const Value *getVectorOperand() const { return Op<0>(); }
  const Value *getIndexOperand() const { return Op<1>(); }

  VectorType *getVectorOperandType() const {
    return cast<VectorType>(getVectorOperand()->getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1925
// ExtractElementInst always has exactly two operands (vector, index); the
// macro stamps out the transparent accessor definitions declared in-class.
template <>
struct OperandTraits<ExtractElementInst> :
  public FixedNumOperandTraits<ExtractElementInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1932
//===----------------------------------------------------------------------===//
// InsertElementInst Class
//===----------------------------------------------------------------------===//

/// This instruction inserts a single (scalar)
/// element into a VectorType value
///
class InsertElementInst : public Instruction {
  // Constructors are private; use the Create() factories, which size the
  // three-operand allocation via placement new(3).
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
                    const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr);
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
                    BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical InsertElementInst.
  InsertElementInst *cloneImpl() const;

public:
  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
  }

  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an insertelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *NewElt,
                              const Value *Idx);

  /// Overload to return most specific vector type.
  ///
  VectorType *getType() const {
    return cast<VectorType>(Instruction::getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1988
// InsertElementInst always has exactly three operands (vector, element,
// index); the macro stamps out the transparent accessor definitions.
template <>
struct OperandTraits<InsertElementInst> :
  public FixedNumOperandTraits<InsertElementInst, 3> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
1995
1996//===----------------------------------------------------------------------===//
1997// ShuffleVectorInst Class
1998//===----------------------------------------------------------------------===//
1999
// Sentinel shuffle-mask element: marks the corresponding result element as
// undefined (see the ShuffleVectorInst documentation below).
constexpr int UndefMaskElem = -1;
2001
2002/// This instruction constructs a fixed permutation of two
2003/// input vectors.
2004///
2005/// For each element of the result vector, the shuffle mask selects an element
2006/// from one of the input vectors to copy to the result. Non-negative elements
2007/// in the mask represent an index into the concatenated pair of input vectors.
2008/// UndefMaskElem (-1) specifies that the result element is undefined.
2009///
2010/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2011/// requirement may be relaxed in the future.
2012class ShuffleVectorInst : public Instruction {
2013 SmallVector<int, 4> ShuffleMask;
2014 Constant *ShuffleMaskForBitcode;
2015
2016protected:
2017 // Note: Instruction needs to be a friend here to call cloneImpl.
2018 friend class Instruction;
2019
2020 ShuffleVectorInst *cloneImpl() const;
2021
2022public:
2023 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2024 const Twine &NameStr = "",
2025 Instruction *InsertBefor = nullptr);
2026 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2027 const Twine &NameStr, BasicBlock *InsertAtEnd);
2028 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2029 const Twine &NameStr = "",
2030 Instruction *InsertBefor = nullptr);
2031 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2032 const Twine &NameStr, BasicBlock *InsertAtEnd);
2033
2034 void *operator new(size_t s) { return User::operator new(s, 2); }
2035
2036 /// Swap the operands and adjust the mask to preserve the semantics
2037 /// of the instruction.
2038 void commute();
2039
2040 /// Return true if a shufflevector instruction can be
2041 /// formed with the specified operands.
2042 static bool isValidOperands(const Value *V1, const Value *V2,
2043 const Value *Mask);
2044 static bool isValidOperands(const Value *V1, const Value *V2,
2045 ArrayRef<int> Mask);
2046
2047 /// Overload to return most specific vector type.
2048 ///
2049 VectorType *getType() const {
2050 return cast<VectorType>(Instruction::getType());
2051 }
2052
2053 /// Transparently provide more efficient getOperand methods.
2054 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2055
2056 /// Return the shuffle mask value of this instruction for the given element
2057 /// index. Return UndefMaskElem if the element is undef.
2058 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2059
2060 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2061 /// elements of the mask are returned as UndefMaskElem.
2062 static void getShuffleMask(const Constant *Mask,
2063 SmallVectorImpl<int> &Result);
2064
2065 /// Return the mask for this instruction as a vector of integers. Undefined
2066 /// elements of the mask are returned as UndefMaskElem.
2067 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2068 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2069 }
2070
2071 /// Return the mask for this instruction, for use in bitcode.
2072 ///
2073 /// TODO: This is temporary until we decide a new bitcode encoding for
2074 /// shufflevector.
2075 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2076
2077 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2078 Type *ResultTy);
2079
2080 void setShuffleMask(ArrayRef<int> Mask);
2081
2082 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2083
2084 /// Return true if this shuffle returns a vector with a different number of
2085 /// elements than its source vectors.
2086 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2087 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2088 bool changesLength() const {
2089 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2090 ->getElementCount()
2091 .getKnownMinValue();
2092 unsigned NumMaskElts = ShuffleMask.size();
2093 return NumSourceElts != NumMaskElts;
2094 }
2095
2096 /// Return true if this shuffle returns a vector with a greater number of
2097 /// elements than its source vectors.
2098 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2099 bool increasesLength() const {
2100 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2101 ->getElementCount()
2102 .getKnownMinValue();
2103 unsigned NumMaskElts = ShuffleMask.size();
2104 return NumSourceElts < NumMaskElts;
2105 }
2106
2107 /// Return true if this shuffle mask chooses elements from exactly one source
2108 /// vector.
2109 /// Example: <7,5,undef,7>
2110 /// This assumes that vector operands are the same length as the mask.
2111 static bool isSingleSourceMask(ArrayRef<int> Mask);
2112 static bool isSingleSourceMask(const Constant *Mask) {
2113 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2113, __extension__ __PRETTY_FUNCTION__))
;
2114 SmallVector<int, 16> MaskAsInts;
2115 getShuffleMask(Mask, MaskAsInts);
2116 return isSingleSourceMask(MaskAsInts);
2117 }
2118
2119 /// Return true if this shuffle chooses elements from exactly one source
2120 /// vector without changing the length of that vector.
2121 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2122 /// TODO: Optionally allow length-changing shuffles.
2123 bool isSingleSource() const {
2124 return !changesLength() && isSingleSourceMask(ShuffleMask);
2125 }
2126
2127 /// Return true if this shuffle mask chooses elements from exactly one source
2128 /// vector without lane crossings. A shuffle using this mask is not
2129 /// necessarily a no-op because it may change the number of elements from its
2130 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2131 /// Example: <undef,undef,2,3>
2132 static bool isIdentityMask(ArrayRef<int> Mask);
2133 static bool isIdentityMask(const Constant *Mask) {
2134 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2134, __extension__ __PRETTY_FUNCTION__))
;
2135 SmallVector<int, 16> MaskAsInts;
2136 getShuffleMask(Mask, MaskAsInts);
2137 return isIdentityMask(MaskAsInts);
2138 }
2139
2140 /// Return true if this shuffle chooses elements from exactly one source
2141 /// vector without lane crossings and does not change the number of elements
2142 /// from its input vectors.
2143 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2144 bool isIdentity() const {
2145 return !changesLength() && isIdentityMask(ShuffleMask);
2146 }
2147
2148 /// Return true if this shuffle lengthens exactly one source vector with
2149 /// undefs in the high elements.
2150 bool isIdentityWithPadding() const;
2151
2152 /// Return true if this shuffle extracts the first N elements of exactly one
2153 /// source vector.
2154 bool isIdentityWithExtract() const;
2155
2156 /// Return true if this shuffle concatenates its 2 source vectors. This
2157 /// returns false if either input is undefined. In that case, the shuffle is
2158 /// is better classified as an identity with padding operation.
2159 bool isConcat() const;
2160
2161 /// Return true if this shuffle mask chooses elements from its source vectors
2162 /// without lane crossings. A shuffle using this mask would be
2163 /// equivalent to a vector select with a constant condition operand.
2164 /// Example: <4,1,6,undef>
2165 /// This returns false if the mask does not choose from both input vectors.
2166 /// In that case, the shuffle is better classified as an identity shuffle.
2167 /// This assumes that vector operands are the same length as the mask
2168 /// (a length-changing shuffle can never be equivalent to a vector select).
2169 static bool isSelectMask(ArrayRef<int> Mask);
2170 static bool isSelectMask(const Constant *Mask) {
2171 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2171, __extension__ __PRETTY_FUNCTION__))
;
2172 SmallVector<int, 16> MaskAsInts;
2173 getShuffleMask(Mask, MaskAsInts);
2174 return isSelectMask(MaskAsInts);
2175 }
2176
2177 /// Return true if this shuffle chooses elements from its source vectors
2178 /// without lane crossings and all operands have the same number of elements.
2179 /// In other words, this shuffle is equivalent to a vector select with a
2180 /// constant condition operand.
2181 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2182 /// This returns false if the mask does not choose from both input vectors.
2183 /// In that case, the shuffle is better classified as an identity shuffle.
2184 /// TODO: Optionally allow length-changing shuffles.
2185 bool isSelect() const {
2186 return !changesLength() && isSelectMask(ShuffleMask);
2187 }
2188
2189 /// Return true if this shuffle mask swaps the order of elements from exactly
2190 /// one source vector.
2191 /// Example: <7,6,undef,4>
2192 /// This assumes that vector operands are the same length as the mask.
2193 static bool isReverseMask(ArrayRef<int> Mask);
2194 static bool isReverseMask(const Constant *Mask) {
2195 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2195, __extension__ __PRETTY_FUNCTION__))
;
2196 SmallVector<int, 16> MaskAsInts;
2197 getShuffleMask(Mask, MaskAsInts);
2198 return isReverseMask(MaskAsInts);
2199 }
2200
2201 /// Return true if this shuffle swaps the order of elements from exactly
2202 /// one source vector.
2203 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2204 /// TODO: Optionally allow length-changing shuffles.
2205 bool isReverse() const {
2206 return !changesLength() && isReverseMask(ShuffleMask);
2207 }
2208
2209 /// Return true if this shuffle mask chooses all elements with the same value
2210 /// as the first element of exactly one source vector.
2211 /// Example: <4,undef,undef,4>
2212 /// This assumes that vector operands are the same length as the mask.
2213 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2214 static bool isZeroEltSplatMask(const Constant *Mask) {
2215 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2215, __extension__ __PRETTY_FUNCTION__))
;
2216 SmallVector<int, 16> MaskAsInts;
2217 getShuffleMask(Mask, MaskAsInts);
2218 return isZeroEltSplatMask(MaskAsInts);
2219 }
2220
2221 /// Return true if all elements of this shuffle are the same value as the
2222 /// first element of exactly one source vector without changing the length
2223 /// of that vector.
2224 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2225 /// TODO: Optionally allow length-changing shuffles.
2226 /// TODO: Optionally allow splats from other elements.
2227 bool isZeroEltSplat() const {
2228 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2229 }
2230
2231 /// Return true if this shuffle mask is a transpose mask.
2232 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2233 /// even- or odd-numbered vector elements from two n-dimensional source
2234 /// vectors and write each result into consecutive elements of an
2235 /// n-dimensional destination vector. Two shuffles are necessary to complete
2236 /// the transpose, one for the even elements and another for the odd elements.
2237 /// This description closely follows how the TRN1 and TRN2 AArch64
2238 /// instructions operate.
2239 ///
2240 /// For example, a simple 2x2 matrix can be transposed with:
2241 ///
2242 /// ; Original matrix
2243 /// m0 = < a, b >
2244 /// m1 = < c, d >
2245 ///
2246 /// ; Transposed matrix
2247 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2248 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2249 ///
2250 /// For matrices having greater than n columns, the resulting nx2 transposed
2251 /// matrix is stored in two result vectors such that one vector contains
2252 /// interleaved elements from all the even-numbered rows and the other vector
2253 /// contains interleaved elements from all the odd-numbered rows. For example,
2254 /// a 2x4 matrix can be transposed with:
2255 ///
2256 /// ; Original matrix
2257 /// m0 = < a, b, c, d >
2258 /// m1 = < e, f, g, h >
2259 ///
2260 /// ; Transposed matrix
2261 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2262 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2263 static bool isTransposeMask(ArrayRef<int> Mask);
2264 static bool isTransposeMask(const Constant *Mask) {
2265 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2265, __extension__ __PRETTY_FUNCTION__))
;
2266 SmallVector<int, 16> MaskAsInts;
2267 getShuffleMask(Mask, MaskAsInts);
2268 return isTransposeMask(MaskAsInts);
2269 }
2270
2271 /// Return true if this shuffle transposes the elements of its inputs without
2272 /// changing the length of the vectors. This operation may also be known as a
2273 /// merge or interleave. See the description for isTransposeMask() for the
2274 /// exact specification.
2275 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2276 bool isTranspose() const {
2277 return !changesLength() && isTransposeMask(ShuffleMask);
2278 }
2279
2280 /// Return true if this shuffle mask is an extract subvector mask.
2281 /// A valid extract subvector mask returns a smaller vector from a single
2282 /// source operand. The base extraction index is returned as well.
2283 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2284 int &Index);
2285 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2286 int &Index) {
2287 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy(
) && "Shuffle needs vector constant.") ? void (0) : __assert_fail
("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2287, __extension__ __PRETTY_FUNCTION__))
;
2288 // Not possible to express a shuffle mask for a scalable vector for this
2289 // case.
2290 if (isa<ScalableVectorType>(Mask->getType()))
2291 return false;
2292 SmallVector<int, 16> MaskAsInts;
2293 getShuffleMask(Mask, MaskAsInts);
2294 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2295 }
2296
2297 /// Return true if this shuffle mask is an extract subvector mask.
2298 bool isExtractSubvectorMask(int &Index) const {
2299 // Not possible to express a shuffle mask for a scalable vector for this
2300 // case.
2301 if (isa<ScalableVectorType>(getType()))
2302 return false;
2303
2304 int NumSrcElts =
2305 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2306 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2307 }
2308
2309 /// Change values in a shuffle permute mask assuming the two vector operands
2310 /// of length InVecNumElts have swapped position.
2311 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2312 unsigned InVecNumElts) {
2313 for (int &Idx : Mask) {
2314 if (Idx == -1)
2315 continue;
2316 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2317 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2318, __extension__ __PRETTY_FUNCTION__))
2318 "shufflevector mask index out of range")(static_cast <bool> (Idx >= 0 && Idx < (int
)InVecNumElts * 2 && "shufflevector mask index out of range"
) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2318, __extension__ __PRETTY_FUNCTION__))
;
2319 }
2320 }
2321
2322 // Methods for support type inquiry through isa, cast, and dyn_cast:
2323 static bool classof(const Instruction *I) {
2324 return I->getOpcode() == Instruction::ShuffleVector;
2325 }
2326 static bool classof(const Value *V) {
2327 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2328 }
2329};
2330
2331template <>
2332struct OperandTraits<ShuffleVectorInst>
2333 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2334
2335DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)ShuffleVectorInst::op_iterator ShuffleVectorInst::op_begin() {
return OperandTraits<ShuffleVectorInst>::op_begin(this
); } ShuffleVectorInst::const_op_iterator ShuffleVectorInst::
op_begin() const { return OperandTraits<ShuffleVectorInst>
::op_begin(const_cast<ShuffleVectorInst*>(this)); } ShuffleVectorInst
::op_iterator ShuffleVectorInst::op_end() { return OperandTraits
<ShuffleVectorInst>::op_end(this); } ShuffleVectorInst::
const_op_iterator ShuffleVectorInst::op_end() const { return OperandTraits
<ShuffleVectorInst>::op_end(const_cast<ShuffleVectorInst
*>(this)); } Value *ShuffleVectorInst::getOperand(unsigned
i_nocapture) const { (static_cast <bool> (i_nocapture <
OperandTraits<ShuffleVectorInst>::operands(this) &&
"getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2335, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<ShuffleVectorInst>::op_begin
(const_cast<ShuffleVectorInst*>(this))[i_nocapture].get
()); } void ShuffleVectorInst::setOperand(unsigned i_nocapture
, Value *Val_nocapture) { (static_cast <bool> (i_nocapture
< OperandTraits<ShuffleVectorInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ShuffleVectorInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2335, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
ShuffleVectorInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned ShuffleVectorInst::getNumOperands() const { return
OperandTraits<ShuffleVectorInst>::operands(this); } template
<int Idx_nocapture> Use &ShuffleVectorInst::Op() {
return this->OpFrom<Idx_nocapture>(this); } template
<int Idx_nocapture> const Use &ShuffleVectorInst::
Op() const { return this->OpFrom<Idx_nocapture>(this
); }
2336
2337//===----------------------------------------------------------------------===//
2338// ExtractValueInst Class
2339//===----------------------------------------------------------------------===//
2340
2341/// This instruction extracts a struct member or array
2342/// element value from an aggregate value.
2343///
2344class ExtractValueInst : public UnaryInstruction {
2345 SmallVector<unsigned, 4> Indices;
2346
2347 ExtractValueInst(const ExtractValueInst &EVI);
2348
2349 /// Constructors - Create a extractvalue instruction with a base aggregate
2350 /// value and a list of indices. The first ctor can optionally insert before
2351 /// an existing instruction, the second appends the new instruction to the
2352 /// specified BasicBlock.
2353 inline ExtractValueInst(Value *Agg,
2354 ArrayRef<unsigned> Idxs,
2355 const Twine &NameStr,
2356 Instruction *InsertBefore);
2357 inline ExtractValueInst(Value *Agg,
2358 ArrayRef<unsigned> Idxs,
2359 const Twine &NameStr, BasicBlock *InsertAtEnd);
2360
2361 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2362
2363protected:
2364 // Note: Instruction needs to be a friend here to call cloneImpl.
2365 friend class Instruction;
2366
2367 ExtractValueInst *cloneImpl() const;
2368
2369public:
2370 static ExtractValueInst *Create(Value *Agg,
2371 ArrayRef<unsigned> Idxs,
2372 const Twine &NameStr = "",
2373 Instruction *InsertBefore = nullptr) {
2374 return new
2375 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2376 }
2377
2378 static ExtractValueInst *Create(Value *Agg,
2379 ArrayRef<unsigned> Idxs,
2380 const Twine &NameStr,
2381 BasicBlock *InsertAtEnd) {
2382 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2383 }
2384
2385 /// Returns the type of the element that would be extracted
2386 /// with an extractvalue instruction with the specified parameters.
2387 ///
2388 /// Null is returned if the indices are invalid for the specified type.
2389 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2390
2391 using idx_iterator = const unsigned*;
2392
2393 inline idx_iterator idx_begin() const { return Indices.begin(); }
2394 inline idx_iterator idx_end() const { return Indices.end(); }
2395 inline iterator_range<idx_iterator> indices() const {
2396 return make_range(idx_begin(), idx_end());
2397 }
2398
2399 Value *getAggregateOperand() {
2400 return getOperand(0);
2401 }
2402 const Value *getAggregateOperand() const {
2403 return getOperand(0);
2404 }
2405 static unsigned getAggregateOperandIndex() {
2406 return 0U; // get index for modifying correct operand
2407 }
2408
2409 ArrayRef<unsigned> getIndices() const {
2410 return Indices;
2411 }
2412
2413 unsigned getNumIndices() const {
2414 return (unsigned)Indices.size();
2415 }
2416
2417 bool hasIndices() const {
2418 return true;
2419 }
2420
2421 // Methods for support type inquiry through isa, cast, and dyn_cast:
2422 static bool classof(const Instruction *I) {
2423 return I->getOpcode() == Instruction::ExtractValue;
2424 }
2425 static bool classof(const Value *V) {
2426 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2427 }
2428};
2429
2430ExtractValueInst::ExtractValueInst(Value *Agg,
2431 ArrayRef<unsigned> Idxs,
2432 const Twine &NameStr,
2433 Instruction *InsertBefore)
2434 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2435 ExtractValue, Agg, InsertBefore) {
2436 init(Idxs, NameStr);
2437}
2438
2439ExtractValueInst::ExtractValueInst(Value *Agg,
2440 ArrayRef<unsigned> Idxs,
2441 const Twine &NameStr,
2442 BasicBlock *InsertAtEnd)
2443 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2444 ExtractValue, Agg, InsertAtEnd) {
2445 init(Idxs, NameStr);
2446}
2447
2448//===----------------------------------------------------------------------===//
2449// InsertValueInst Class
2450//===----------------------------------------------------------------------===//
2451
2452/// This instruction inserts a struct field of array element
2453/// value into an aggregate value.
2454///
2455class InsertValueInst : public Instruction {
2456 SmallVector<unsigned, 4> Indices;
2457
2458 InsertValueInst(const InsertValueInst &IVI);
2459
2460 /// Constructors - Create a insertvalue instruction with a base aggregate
2461 /// value, a value to insert, and a list of indices. The first ctor can
2462 /// optionally insert before an existing instruction, the second appends
2463 /// the new instruction to the specified BasicBlock.
2464 inline InsertValueInst(Value *Agg, Value *Val,
2465 ArrayRef<unsigned> Idxs,
2466 const Twine &NameStr,
2467 Instruction *InsertBefore);
2468 inline InsertValueInst(Value *Agg, Value *Val,
2469 ArrayRef<unsigned> Idxs,
2470 const Twine &NameStr, BasicBlock *InsertAtEnd);
2471
2472 /// Constructors - These two constructors are convenience methods because one
2473 /// and two index insertvalue instructions are so common.
2474 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2475 const Twine &NameStr = "",
2476 Instruction *InsertBefore = nullptr);
2477 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2478 BasicBlock *InsertAtEnd);
2479
2480 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2481 const Twine &NameStr);
2482
2483protected:
2484 // Note: Instruction needs to be a friend here to call cloneImpl.
2485 friend class Instruction;
2486
2487 InsertValueInst *cloneImpl() const;
2488
2489public:
2490 // allocate space for exactly two operands
2491 void *operator new(size_t s) {
2492 return User::operator new(s, 2);
2493 }
2494
2495 static InsertValueInst *Create(Value *Agg, Value *Val,
2496 ArrayRef<unsigned> Idxs,
2497 const Twine &NameStr = "",
2498 Instruction *InsertBefore = nullptr) {
2499 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2500 }
2501
2502 static InsertValueInst *Create(Value *Agg, Value *Val,
2503 ArrayRef<unsigned> Idxs,
2504 const Twine &NameStr,
2505 BasicBlock *InsertAtEnd) {
2506 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2507 }
2508
2509 /// Transparently provide more efficient getOperand methods.
2510 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2511
2512 using idx_iterator = const unsigned*;
2513
2514 inline idx_iterator idx_begin() const { return Indices.begin(); }
2515 inline idx_iterator idx_end() const { return Indices.end(); }
2516 inline iterator_range<idx_iterator> indices() const {
2517 return make_range(idx_begin(), idx_end());
2518 }
2519
2520 Value *getAggregateOperand() {
2521 return getOperand(0);
2522 }
2523 const Value *getAggregateOperand() const {
2524 return getOperand(0);
2525 }
2526 static unsigned getAggregateOperandIndex() {
2527 return 0U; // get index for modifying correct operand
2528 }
2529
2530 Value *getInsertedValueOperand() {
2531 return getOperand(1);
2532 }
2533 const Value *getInsertedValueOperand() const {
2534 return getOperand(1);
2535 }
2536 static unsigned getInsertedValueOperandIndex() {
2537 return 1U; // get index for modifying correct operand
2538 }
2539
2540 ArrayRef<unsigned> getIndices() const {
2541 return Indices;
2542 }
2543
2544 unsigned getNumIndices() const {
2545 return (unsigned)Indices.size();
2546 }
2547
2548 bool hasIndices() const {
2549 return true;
2550 }
2551
2552 // Methods for support type inquiry through isa, cast, and dyn_cast:
2553 static bool classof(const Instruction *I) {
2554 return I->getOpcode() == Instruction::InsertValue;
2555 }
2556 static bool classof(const Value *V) {
2557 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2558 }
2559};
2560
2561template <>
2562struct OperandTraits<InsertValueInst> :
2563 public FixedNumOperandTraits<InsertValueInst, 2> {
2564};
2565
2566InsertValueInst::InsertValueInst(Value *Agg,
2567 Value *Val,
2568 ArrayRef<unsigned> Idxs,
2569 const Twine &NameStr,
2570 Instruction *InsertBefore)
2571 : Instruction(Agg->getType(), InsertValue,
2572 OperandTraits<InsertValueInst>::op_begin(this),
2573 2, InsertBefore) {
2574 init(Agg, Val, Idxs, NameStr);
2575}
2576
2577InsertValueInst::InsertValueInst(Value *Agg,
2578 Value *Val,
2579 ArrayRef<unsigned> Idxs,
2580 const Twine &NameStr,
2581 BasicBlock *InsertAtEnd)
2582 : Instruction(Agg->getType(), InsertValue,
2583 OperandTraits<InsertValueInst>::op_begin(this),
2584 2, InsertAtEnd) {
2585 init(Agg, Val, Idxs, NameStr);
2586}
2587
2588DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return
OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst
::const_op_iterator InsertValueInst::op_begin() const { return
OperandTraits<InsertValueInst>::op_begin(const_cast<
InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst
::op_end() { return OperandTraits<InsertValueInst>::op_end
(this); } InsertValueInst::const_op_iterator InsertValueInst::
op_end() const { return OperandTraits<InsertValueInst>::
op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst
::getOperand(unsigned i_nocapture) const { (static_cast <bool
> (i_nocapture < OperandTraits<InsertValueInst>::
operands(this) && "getOperand() out of range!") ? void
(0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"getOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2588, __extension__ __PRETTY_FUNCTION__)); return cast_or_null
<Value>( OperandTraits<InsertValueInst>::op_begin
(const_cast<InsertValueInst*>(this))[i_nocapture].get()
); } void InsertValueInst::setOperand(unsigned i_nocapture, Value
*Val_nocapture) { (static_cast <bool> (i_nocapture <
OperandTraits<InsertValueInst>::operands(this) &&
"setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"setOperand() out of range!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2588, __extension__ __PRETTY_FUNCTION__)); OperandTraits<
InsertValueInst>::op_begin(this)[i_nocapture] = Val_nocapture
; } unsigned InsertValueInst::getNumOperands() const { return
OperandTraits<InsertValueInst>::operands(this); } template
<int Idx_nocapture> Use &InsertValueInst::Op() { return
this->OpFrom<Idx_nocapture>(this); } template <int
Idx_nocapture> const Use &InsertValueInst::Op() const
{ return this->OpFrom<Idx_nocapture>(this); }
2589
2590//===----------------------------------------------------------------------===//
2591// PHINode Class
2592//===----------------------------------------------------------------------===//
2593
2594// PHINode - The PHINode class is used to represent the magical mystical PHI
2595// node, that can not exist in nature, but can be synthesized in a computer
2596// scientist's overactive imagination.
2597//
2598class PHINode : public Instruction {
2599 /// The number of operands actually allocated. NumOperands is
2600 /// the number actually in use.
2601 unsigned ReservedSpace;
2602
2603 PHINode(const PHINode &PN);
2604
2605 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2606 const Twine &NameStr = "",
2607 Instruction *InsertBefore = nullptr)
2608 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2609 ReservedSpace(NumReservedValues) {
2610 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!"
) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2610, __extension__ __PRETTY_FUNCTION__))
;
2611 setName(NameStr);
2612 allocHungoffUses(ReservedSpace);
2613 }
2614
2615 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2616 BasicBlock *InsertAtEnd)
2617 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2618 ReservedSpace(NumReservedValues) {
2619 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!"
) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2619, __extension__ __PRETTY_FUNCTION__))
;
34
Called C++ object pointer is null
2620 setName(NameStr);
2621 allocHungoffUses(ReservedSpace);
2622 }
2623
2624protected:
2625 // Note: Instruction needs to be a friend here to call cloneImpl.
2626 friend class Instruction;
2627
2628 PHINode *cloneImpl() const;
2629
2630 // allocHungoffUses - this is more complicated than the generic
2631 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2632 // values and pointers to the incoming blocks, all in one allocation.
2633 void allocHungoffUses(unsigned N) {
2634 User::allocHungoffUses(N, /* IsPhi */ true);
2635 }
2636
2637public:
2638 /// Constructors - NumReservedValues is a hint for the number of incoming
2639 /// edges that this phi node will have (use 0 if you really have no idea).
2640 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2641 const Twine &NameStr = "",
2642 Instruction *InsertBefore = nullptr) {
2643 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2644 }
2645
2646 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2647 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2648 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
32
Passing null pointer value via 1st parameter 'Ty'
33
Calling constructor for 'PHINode'
2649 }
2650
2651 /// Provide fast operand accessors
2652 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2653
2654 // Block iterator interface. This provides access to the list of incoming
2655 // basic blocks, which parallels the list of incoming values.
2656
2657 using block_iterator = BasicBlock **;
2658 using const_block_iterator = BasicBlock * const *;
2659
2660 block_iterator block_begin() {
2661 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2662 }
2663
2664 const_block_iterator block_begin() const {
2665 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2666 }
2667
2668 block_iterator block_end() {
2669 return block_begin() + getNumOperands();
2670 }
2671
2672 const_block_iterator block_end() const {
2673 return block_begin() + getNumOperands();
2674 }
2675
2676 iterator_range<block_iterator> blocks() {
2677 return make_range(block_begin(), block_end());
2678 }
2679
2680 iterator_range<const_block_iterator> blocks() const {
2681 return make_range(block_begin(), block_end());
2682 }
2683
2684 op_range incoming_values() { return operands(); }
2685
2686 const_op_range incoming_values() const { return operands(); }
2687
2688 /// Return the number of incoming edges
2689 ///
2690 unsigned getNumIncomingValues() const { return getNumOperands(); }
2691
2692 /// Return incoming value number x
2693 ///
2694 Value *getIncomingValue(unsigned i) const {
2695 return getOperand(i);
2696 }
2697 void setIncomingValue(unsigned i, Value *V) {
2698 assert(V && "PHI node got a null value!")(static_cast <bool> (V && "PHI node got a null value!"
) ? void (0) : __assert_fail ("V && \"PHI node got a null value!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2698, __extension__ __PRETTY_FUNCTION__))
;
2699 assert(getType() == V->getType() &&(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2700, __extension__ __PRETTY_FUNCTION__))
2700 "All operands to PHI node must be the same type as the PHI node!")(static_cast <bool> (getType() == V->getType() &&
"All operands to PHI node must be the same type as the PHI node!"
) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2700, __extension__ __PRETTY_FUNCTION__))
;
2701 setOperand(i, V);
2702 }
2703
2704 static unsigned getOperandNumForIncomingValue(unsigned i) {
2705 return i;
2706 }
2707
2708 static unsigned getIncomingValueNumForOperand(unsigned i) {
2709 return i;
2710 }
2711
2712 /// Return incoming basic block number @p i.
2713 ///
2714 BasicBlock *getIncomingBlock(unsigned i) const {
2715 return block_begin()[i];
2716 }
2717
2718 /// Return incoming basic block corresponding
2719 /// to an operand of the PHI.
2720 ///
2721 BasicBlock *getIncomingBlock(const Use &U) const {
2722 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")(static_cast <bool> (this == U.getUser() && "Iterator doesn't point to PHI's Uses?"
) ? void (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2722, __extension__ __PRETTY_FUNCTION__))
;
2723 return getIncomingBlock(unsigned(&U - op_begin()));
2724 }
2725
2726 /// Return incoming basic block corresponding
2727 /// to value use iterator.
2728 ///
2729 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2730 return getIncomingBlock(I.getUse());
2731 }
2732
2733 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2734 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2734, __extension__ __PRETTY_FUNCTION__))
;
2735 block_begin()[i] = BB;
2736 }
2737
2738 /// Replace every incoming basic block \p Old to basic block \p New.
2739 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2740 assert(New && Old && "PHI node got a null basic block!")(static_cast <bool> (New && Old && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("New && Old && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2740, __extension__ __PRETTY_FUNCTION__))
;
2741 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2742 if (getIncomingBlock(Op) == Old)
2743 setIncomingBlock(Op, New);
2744 }
2745
2746 /// Add an incoming value to the end of the PHI list
2747 ///
2748 void addIncoming(Value *V, BasicBlock *BB) {
2749 if (getNumOperands() == ReservedSpace)
2750 growOperands(); // Get more space!
2751 // Initialize some new operands.
2752 setNumHungOffUseOperands(getNumOperands() + 1);
2753 setIncomingValue(getNumOperands() - 1, V);
2754 setIncomingBlock(getNumOperands() - 1, BB);
2755 }
2756
2757 /// Remove an incoming value. This is useful if a
2758 /// predecessor basic block is deleted. The value removed is returned.
2759 ///
2760 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2761 /// is true), the PHI node is destroyed and any uses of it are replaced with
2762 /// dummy values. The only time there should be zero incoming values to a PHI
2763 /// node is when the block is dead, so this strategy is sound.
2764 ///
2765 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2766
2767 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2768 int Idx = getBasicBlockIndex(BB);
2769 assert(Idx >= 0 && "Invalid basic block argument to remove!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument to remove!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2769, __extension__ __PRETTY_FUNCTION__))
;
2770 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2771 }
2772
2773 /// Return the first index of the specified basic
2774 /// block in the value list for this PHI. Returns -1 if no instance.
2775 ///
2776 int getBasicBlockIndex(const BasicBlock *BB) const {
2777 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2778 if (block_begin()[i] == BB)
2779 return i;
2780 return -1;
2781 }
2782
2783 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2784 int Idx = getBasicBlockIndex(BB);
2785 assert(Idx >= 0 && "Invalid basic block argument!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument!"
) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2785, __extension__ __PRETTY_FUNCTION__))
;
2786 return getIncomingValue(Idx);
2787 }
2788
2789 /// Set every incoming value(s) for block \p BB to \p V.
2790 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2791 assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!"
) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2791, __extension__ __PRETTY_FUNCTION__))
;
2792 bool Found = false;
2793 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2794 if (getIncomingBlock(Op) == BB) {
2795 Found = true;
2796 setIncomingValue(Op, V);
2797 }
2798 (void)Found;
2799 assert(Found && "Invalid basic block argument to set!")(static_cast <bool> (Found && "Invalid basic block argument to set!"
) ? void (0) : __assert_fail ("Found && \"Invalid basic block argument to set!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 2799, __extension__ __PRETTY_FUNCTION__))
;
2800 }
2801
2802 /// If the specified PHI node always merges together the
2803 /// same value, return the value, otherwise return null.
2804 Value *hasConstantValue() const;
2805
2806 /// Whether the specified PHI node always merges
2807 /// together the same value, assuming undefs are equal to a unique
2808 /// non-undef value.
2809 bool hasConstantOrUndefValue() const;
2810
2811 /// If the PHI node is complete which means all of its parent's predecessors
2812 /// have incoming value in this PHI, return true, otherwise return false.
2813 bool isComplete() const {
2814 return llvm::all_of(predecessors(getParent()),
2815 [this](const BasicBlock *Pred) {
2816 return getBasicBlockIndex(Pred) >= 0;
2817 });
2818 }
2819
2820 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2821 static bool classof(const Instruction *I) {
2822 return I->getOpcode() == Instruction::PHI;
2823 }
2824 static bool classof(const Value *V) {
2825 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2826 }
2827
2828private:
2829 void growOperands();
2830};
2831
2832template <>
2833struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2834};
2835
// Out-of-line transparent operand accessors for PHINode (hand-expanded form
// of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)).
PHINode::op_iterator PHINode::op_begin() {
  return OperandTraits<PHINode>::op_begin(this);
}
PHINode::const_op_iterator PHINode::op_begin() const {
  return OperandTraits<PHINode>::op_begin(const_cast<PHINode *>(this));
}
PHINode::op_iterator PHINode::op_end() {
  return OperandTraits<PHINode>::op_end(this);
}
PHINode::const_op_iterator PHINode::op_end() const {
  return OperandTraits<PHINode>::op_end(const_cast<PHINode *>(this));
}
Value *PHINode::getOperand(unsigned Idx) const {
  assert(Idx < OperandTraits<PHINode>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(
      OperandTraits<PHINode>::op_begin(const_cast<PHINode *>(this))[Idx].get());
}
void PHINode::setOperand(unsigned Idx, Value *Val) {
  assert(Idx < OperandTraits<PHINode>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<PHINode>::op_begin(this)[Idx] = Val;
}
unsigned PHINode::getNumOperands() const {
  return OperandTraits<PHINode>::operands(this);
}
template <int Idx> Use &PHINode::Op() { return this->OpFrom<Idx>(this); }
template <int Idx> const Use &PHINode::Op() const {
  return this->OpFrom<Idx>(this);
}

//===----------------------------------------------------------------------===//
// LandingPadInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// The landingpad instruction holds all of the information
/// necessary to generate correct exception handling. The landingpad instruction
/// cannot be moved from the top of a landing pad block, which itself is
/// accessible only from the 'unwind' edge of an invoke. This uses the
/// SubclassData field in Value to store whether or not the landingpad is a
/// cleanup.
///
2850class LandingPadInst : public Instruction {
2851 using CleanupField = BoolBitfieldElementT<0>;
2852
2853 /// The number of operands actually allocated. NumOperands is
2854 /// the number actually in use.
2855 unsigned ReservedSpace;
2856
2857 LandingPadInst(const LandingPadInst &LP);
2858
2859public:
2860 enum ClauseType { Catch, Filter };
2861
2862private:
2863 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2864 const Twine &NameStr, Instruction *InsertBefore);
2865 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2866 const Twine &NameStr, BasicBlock *InsertAtEnd);
2867
2868 // Allocate space for exactly zero operands.
2869 void *operator new(size_t s) {
2870 return User::operator new(s);
2871 }
2872
2873 void growOperands(unsigned Size);
2874 void init(unsigned NumReservedValues, const Twine &NameStr);
2875
2876protected:
2877 // Note: Instruction needs to be a friend here to call cloneImpl.
2878 friend class Instruction;
2879
2880 LandingPadInst *cloneImpl() const;
2881
2882public:
2883 /// Constructors - NumReservedClauses is a hint for the number of incoming
2884 /// clauses that this landingpad will have (use 0 if you really have no idea).
2885 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2886 const Twine &NameStr = "",
2887 Instruction *InsertBefore = nullptr);
2888 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2889 const Twine &NameStr, BasicBlock *InsertAtEnd);
2890
2891 /// Provide fast operand accessors
2892 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2893
2894 /// Return 'true' if this landingpad instruction is a
2895 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2896 /// doesn't catch the exception.
2897 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2898
2899 /// Indicate that this landingpad instruction is a cleanup.
2900 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2901
2902 /// Add a catch or filter clause to the landing pad.
2903 void addClause(Constant *ClauseVal);
2904
2905 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2906 /// determine what type of clause this is.
2907 Constant *getClause(unsigned Idx) const {
2908 return cast<Constant>(getOperandList()[Idx]);
2909 }
2910
2911 /// Return 'true' if the clause and index Idx is a catch clause.
2912 bool isCatch(unsigned Idx) const {
2913 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2914 }
2915
2916 /// Return 'true' if the clause and index Idx is a filter clause.
2917 bool isFilter(unsigned Idx) const {
2918 return isa<ArrayType>(getOperandList()[Idx]->getType());
2919 }
2920
2921 /// Get the number of clauses for this landing pad.
2922 unsigned getNumClauses() const { return getNumOperands(); }
2923
2924 /// Grow the size of the operand list to accommodate the new
2925 /// number of clauses.
2926 void reserveClauses(unsigned Size) { growOperands(Size); }
2927
2928 // Methods for support type inquiry through isa, cast, and dyn_cast:
2929 static bool classof(const Instruction *I) {
2930 return I->getOpcode() == Instruction::LandingPad;
2931 }
2932 static bool classof(const Value *V) {
2933 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2934 }
2935};
2936
2937template <>
2938struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
2939};
2940
// Out-of-line transparent operand accessors for LandingPadInst (hand-expanded
// form of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)).
LandingPadInst::op_iterator LandingPadInst::op_begin() {
  return OperandTraits<LandingPadInst>::op_begin(this);
}
LandingPadInst::const_op_iterator LandingPadInst::op_begin() const {
  return OperandTraits<LandingPadInst>::op_begin(
      const_cast<LandingPadInst *>(this));
}
LandingPadInst::op_iterator LandingPadInst::op_end() {
  return OperandTraits<LandingPadInst>::op_end(this);
}
LandingPadInst::const_op_iterator LandingPadInst::op_end() const {
  return OperandTraits<LandingPadInst>::op_end(
      const_cast<LandingPadInst *>(this));
}
Value *LandingPadInst::getOperand(unsigned Idx) const {
  assert(Idx < OperandTraits<LandingPadInst>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(OperandTraits<LandingPadInst>::op_begin(
      const_cast<LandingPadInst *>(this))[Idx].get());
}
void LandingPadInst::setOperand(unsigned Idx, Value *Val) {
  assert(Idx < OperandTraits<LandingPadInst>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<LandingPadInst>::op_begin(this)[Idx] = Val;
}
unsigned LandingPadInst::getNumOperands() const {
  return OperandTraits<LandingPadInst>::operands(this);
}
template <int Idx> Use &LandingPadInst::Op() {
  return this->OpFrom<Idx>(this);
}
template <int Idx> const Use &LandingPadInst::Op() const {
  return this->OpFrom<Idx>(this);
}

//===----------------------------------------------------------------------===//
// ReturnInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Return a value (possibly void), from a function. Execution
/// does not continue in this function any longer.
///
2951class ReturnInst : public Instruction {
2952 ReturnInst(const ReturnInst &RI);
2953
2954private:
2955 // ReturnInst constructors:
2956 // ReturnInst() - 'ret void' instruction
2957 // ReturnInst( null) - 'ret void' instruction
2958 // ReturnInst(Value* X) - 'ret X' instruction
2959 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
2960 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
2961 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
2962 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
2963 //
2964 // NOTE: If the Value* passed is of type void then the constructor behaves as
2965 // if it was passed NULL.
2966 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
2967 Instruction *InsertBefore = nullptr);
2968 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
2969 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
2970
2971protected:
2972 // Note: Instruction needs to be a friend here to call cloneImpl.
2973 friend class Instruction;
2974
2975 ReturnInst *cloneImpl() const;
2976
2977public:
2978 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
2979 Instruction *InsertBefore = nullptr) {
2980 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
2981 }
2982
2983 static ReturnInst* Create(LLVMContext &C, Value *retVal,
2984 BasicBlock *InsertAtEnd) {
2985 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
2986 }
2987
2988 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
2989 return new(0) ReturnInst(C, InsertAtEnd);
2990 }
2991
2992 /// Provide fast operand accessors
2993 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
2994
2995 /// Convenience accessor. Returns null if there is no return value.
2996 Value *getReturnValue() const {
2997 return getNumOperands() != 0 ? getOperand(0) : nullptr;
2998 }
2999
3000 unsigned getNumSuccessors() const { return 0; }
3001
3002 // Methods for support type inquiry through isa, cast, and dyn_cast:
3003 static bool classof(const Instruction *I) {
3004 return (I->getOpcode() == Instruction::Ret);
3005 }
3006 static bool classof(const Value *V) {
3007 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3008 }
3009
3010private:
3011 BasicBlock *getSuccessor(unsigned idx) const {
3012 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3012)
;
3013 }
3014
3015 void setSuccessor(unsigned idx, BasicBlock *B) {
3016 llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!"
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3016)
;
3017 }
3018};
3019
3020template <>
3021struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3022};
3023
// Out-of-line transparent operand accessors for ReturnInst (hand-expanded
// form of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)).
ReturnInst::op_iterator ReturnInst::op_begin() {
  return OperandTraits<ReturnInst>::op_begin(this);
}
ReturnInst::const_op_iterator ReturnInst::op_begin() const {
  return OperandTraits<ReturnInst>::op_begin(const_cast<ReturnInst *>(this));
}
ReturnInst::op_iterator ReturnInst::op_end() {
  return OperandTraits<ReturnInst>::op_end(this);
}
ReturnInst::const_op_iterator ReturnInst::op_end() const {
  return OperandTraits<ReturnInst>::op_end(const_cast<ReturnInst *>(this));
}
Value *ReturnInst::getOperand(unsigned Idx) const {
  assert(Idx < OperandTraits<ReturnInst>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(OperandTraits<ReturnInst>::op_begin(
      const_cast<ReturnInst *>(this))[Idx].get());
}
void ReturnInst::setOperand(unsigned Idx, Value *Val) {
  assert(Idx < OperandTraits<ReturnInst>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<ReturnInst>::op_begin(this)[Idx] = Val;
}
unsigned ReturnInst::getNumOperands() const {
  return OperandTraits<ReturnInst>::operands(this);
}
template <int Idx> Use &ReturnInst::Op() { return this->OpFrom<Idx>(this); }
template <int Idx> const Use &ReturnInst::Op() const {
  return this->OpFrom<Idx>(this);
}

//===----------------------------------------------------------------------===//
// BranchInst Class
//===----------------------------------------------------------------------===//

//===---------------------------------------------------------------------------
/// Conditional or Unconditional Branch instruction.
///
3033class BranchInst : public Instruction {
3034 /// Ops list - Branches are strange. The operands are ordered:
3035 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3036 /// they don't have to check for cond/uncond branchness. These are mostly
3037 /// accessed relative from op_end().
3038 BranchInst(const BranchInst &BI);
3039 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3040 // BranchInst(BB *B) - 'br B'
3041 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3042 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3043 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3044 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3045 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3046 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3047 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3048 Instruction *InsertBefore = nullptr);
3049 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3050 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3051 BasicBlock *InsertAtEnd);
3052
3053 void AssertOK();
3054
3055protected:
3056 // Note: Instruction needs to be a friend here to call cloneImpl.
3057 friend class Instruction;
3058
3059 BranchInst *cloneImpl() const;
3060
3061public:
3062 /// Iterator type that casts an operand to a basic block.
3063 ///
3064 /// This only makes sense because the successors are stored as adjacent
3065 /// operands for branch instructions.
3066 struct succ_op_iterator
3067 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3068 std::random_access_iterator_tag, BasicBlock *,
3069 ptrdiff_t, BasicBlock *, BasicBlock *> {
3070 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3071
3072 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3073 BasicBlock *operator->() const { return operator*(); }
3074 };
3075
3076 /// The const version of `succ_op_iterator`.
3077 struct const_succ_op_iterator
3078 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3079 std::random_access_iterator_tag,
3080 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3081 const BasicBlock *> {
3082 explicit const_succ_op_iterator(const_value_op_iterator I)
3083 : iterator_adaptor_base(I) {}
3084
3085 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3086 const BasicBlock *operator->() const { return operator*(); }
3087 };
3088
3089 static BranchInst *Create(BasicBlock *IfTrue,
3090 Instruction *InsertBefore = nullptr) {
3091 return new(1) BranchInst(IfTrue, InsertBefore);
3092 }
3093
3094 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3095 Value *Cond, Instruction *InsertBefore = nullptr) {
3096 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3097 }
3098
3099 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3100 return new(1) BranchInst(IfTrue, InsertAtEnd);
3101 }
3102
3103 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3104 Value *Cond, BasicBlock *InsertAtEnd) {
3105 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3106 }
3107
3108 /// Transparently provide more efficient getOperand methods.
3109 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void
setOperand(unsigned, Value*); inline op_iterator op_begin();
inline const_op_iterator op_begin() const; inline op_iterator
op_end(); inline const_op_iterator op_end() const; protected
: template <int> inline Use &Op(); template <int
> inline const Use &Op() const; public: inline unsigned
getNumOperands() const
;
3110
3111 bool isUnconditional() const { return getNumOperands() == 1; }
3112 bool isConditional() const { return getNumOperands() == 3; }
3113
3114 Value *getCondition() const {
3115 assert(isConditional() && "Cannot get condition of an uncond branch!")(static_cast <bool> (isConditional() && "Cannot get condition of an uncond branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot get condition of an uncond branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3115, __extension__ __PRETTY_FUNCTION__))
;
3116 return Op<-3>();
3117 }
3118
3119 void setCondition(Value *V) {
3120 assert(isConditional() && "Cannot set condition of unconditional branch!")(static_cast <bool> (isConditional() && "Cannot set condition of unconditional branch!"
) ? void (0) : __assert_fail ("isConditional() && \"Cannot set condition of unconditional branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3120, __extension__ __PRETTY_FUNCTION__))
;
3121 Op<-3>() = V;
3122 }
3123
3124 unsigned getNumSuccessors() const { return 1+isConditional(); }
3125
3126 BasicBlock *getSuccessor(unsigned i) const {
3127 assert(i < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (i < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("i < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3127, __extension__ __PRETTY_FUNCTION__))
;
3128 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3129 }
3130
3131 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3132 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (idx < getNumSuccessors() &&
"Successor # out of range for Branch!") ? void (0) : __assert_fail
("idx < getNumSuccessors() && \"Successor # out of range for Branch!\""
, "/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/Instructions.h"
, 3132, __extension__ __PRETTY_FUNCTION__))
;
3133 *(&Op<-1>() - idx) = NewSucc;
3134 }
3135
3136 /// Swap the successors of this branch instruction.
3137 ///
3138 /// Swaps the successors of the branch instruction. This also swaps any
3139 /// branch weight metadata associated with the instruction so that it
3140 /// continues to map correctly to each operand.
3141 void swapSuccessors();
3142
3143 iterator_range<succ_op_iterator> successors() {
3144 return make_range(
3145 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3146 succ_op_iterator(value_op_end()));
3147 }
3148
3149 iterator_range<const_succ_op_iterator> successors() const {
3150 return make_range(const_succ_op_iterator(
3151 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3152 const_succ_op_iterator(value_op_end()));
3153 }
3154
3155 // Methods for support type inquiry through isa, cast, and dyn_cast:
3156 static bool classof(const Instruction *I) {
3157 return (I->getOpcode() == Instruction::Br);
3158 }
3159 static bool classof(const Value *V) {
3160 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3161 }
3162};
3163
3164template <>
3165struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3166};
3167
// Out-of-line transparent operand accessors for BranchInst (hand-expanded
// form of DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)).
BranchInst::op_iterator BranchInst::op_begin() {
  return OperandTraits<BranchInst>::op_begin(this);
}
BranchInst::const_op_iterator BranchInst::op_begin() const {
  return OperandTraits<BranchInst>::op_begin(const_cast<BranchInst *>(this));
}
BranchInst::op_iterator BranchInst::op_end() {
  return OperandTraits<BranchInst>::op_end(this);
}
BranchInst::const_op_iterator BranchInst::op_end() const {
  return OperandTraits<BranchInst>::op_end(const_cast<BranchInst *>(this));
}
Value *BranchInst::getOperand(unsigned Idx) const {
  assert(Idx < OperandTraits<BranchInst>::operands(this) &&
         "getOperand() out of range!");
  return cast_or_null<Value>(OperandTraits<BranchInst>::op_begin(
      const_cast<BranchInst *>(this))[Idx].get());
}
void BranchInst::setOperand(unsigned Idx, Value *Val) {
  assert(Idx < OperandTraits<BranchInst>::operands(this) &&
         "setOperand() out of range!");
  OperandTraits<BranchInst>::op_begin(this)[Idx] = Val;
}
unsigned BranchInst::getNumOperands() const {
  return OperandTraits<BranchInst>::operands(this);
}
template <int Idx> Use &BranchInst::Op() { return this->OpFrom<Idx>(this); }
template <int Idx> const Use &BranchInst::Op() const {
  return this->OpFrom<Idx>(this);
}
3169
3170//===----------------------------------------------------------------------===//
3171// SwitchInst Class
3172//===----------------------------------------------------------------------===//
3173
3174//===---------------------------------------------------------------------------
3175/// Multiway switch
3176///
class SwitchInst : public Instruction {
  // NOTE(review): appears to track the allocated (hung-off) operand capacity
  // grown by growOperands() so cases can be appended without reallocating each
  // time — confirm in Instructions.cpp.
  unsigned ReservedSpace;

  // Operand[0] = Value to switch on
  // Operand[1] = Default basic block destination
  // Operand[2n ] = Value to match
  // Operand[2n+1] = BasicBlock to go to on match
  SwitchInst(const SwitchInst &SI);

  /// Create a new switch instruction, specifying a value to switch on and a
  /// default destination. The number of additional cases can be specified here
  /// to make memory allocation more efficient. This constructor can also
  /// auto-insert before another instruction.
  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
             Instruction *InsertBefore);

  /// Create a new switch instruction, specifying a value to switch on and a
  /// default destination. The number of additional cases can be specified here
  /// to make memory allocation more efficient. This constructor also
  /// auto-inserts at the end of the specified BasicBlock.
  SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
             BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands; real operand storage is
  // hung off separately (see OperandTraits<SwitchInst> below)
  void *operator new(size_t s) {
    return User::operator new(s);
  }

  void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
  void growOperands();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  SwitchInst *cloneImpl() const;

public:
  // -2 as unsigned: a sentinel case index that denotes the default
  // destination (successor index 0, no case value).
  static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);

  template <typename CaseHandleT> class CaseIteratorImpl;

  /// A handle to a particular switch case. It exposes a convenient interface
  /// to both the case value and the successor block.
  ///
  /// We define this as a template and instantiate it to form both a const and
  /// non-const handle.
  template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
  class CaseHandleImpl {
    // Directly befriend both const and non-const iterators.
    friend class SwitchInst::CaseIteratorImpl<
        CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;

  protected:
    // Expose the switch type we're parameterized with to the iterator.
    using SwitchInstType = SwitchInstT;

    SwitchInstT *SI;
    ptrdiff_t Index;

    CaseHandleImpl() = default;
    CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}

  public:
    /// Resolves case value for current case. Asserts (and is invalid) for the
    /// default pseudo-case, which has no value operand.
    ConstantIntT *getCaseValue() const {
      assert((unsigned)Index < SI->getNumCases() &&
             "Index out the number of cases.");
      return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
    }

    /// Resolves successor for current case (also valid for the default
    /// pseudo-case).
    BasicBlockT *getCaseSuccessor() const {
      assert(((unsigned)Index < SI->getNumCases() ||
              (unsigned)Index == DefaultPseudoIndex) &&
             "Index out the number of cases.");
      return SI->getSuccessor(getSuccessorIndex());
    }

    /// Returns number of current case.
    unsigned getCaseIndex() const { return Index; }

    /// Returns successor index for current case successor: 0 for the default
    /// destination, case index + 1 otherwise.
    unsigned getSuccessorIndex() const {
      assert(((unsigned)Index == DefaultPseudoIndex ||
              (unsigned)Index < SI->getNumCases()) &&
             "Index out the number of cases.");
      return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
    }

    bool operator==(const CaseHandleImpl &RHS) const {
      assert(SI == RHS.SI && "Incompatible operators.");
      return Index == RHS.Index;
    }
  };

  using ConstCaseHandle =
      CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;

  class CaseHandle
      : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
    friend class SwitchInst::CaseIteratorImpl<CaseHandle>;

  public:
    CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}

    /// Sets the new value for current case. Asserts for the default
    /// pseudo-case.
    void setValue(ConstantInt *V) {
      assert((unsigned)Index < SI->getNumCases() &&
             "Index out the number of cases.");
      SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
    }

    /// Sets the new successor for current case.
    void setSuccessor(BasicBlock *S) {
      SI->setSuccessor(getSuccessorIndex(), S);
    }
  };

  template <typename CaseHandleT>
  class CaseIteratorImpl
      : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
                                    std::random_access_iterator_tag,
                                    CaseHandleT> {
    using SwitchInstT = typename CaseHandleT::SwitchInstType;

    CaseHandleT Case;

  public:
    /// Default constructed iterator is in an invalid state until assigned to
    /// a case for a particular switch.
    CaseIteratorImpl() = default;

    /// Initializes case iterator for given SwitchInst and for given
    /// case number.
    CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}

    /// Initializes case iterator for given SwitchInst and for given
    /// successor index (successor 0 maps to the default pseudo-case).
    static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
                                               unsigned SuccessorIndex) {
      assert(SuccessorIndex < SI->getNumSuccessors() &&
             "Successor index # out of range!");
      return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
                                 : CaseIteratorImpl(SI, DefaultPseudoIndex);
    }

    /// Support converting to the const variant. This will be a no-op for const
    /// variant.
    operator CaseIteratorImpl<ConstCaseHandle>() const {
      return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
    }

    CaseIteratorImpl &operator+=(ptrdiff_t N) {
      // Check index correctness after addition.
      // Note: Index == getNumCases() means end().
      assert(Case.Index + N >= 0 &&
             (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
             "Case.Index out the number of cases.");
      Case.Index += N;
      return *this;
    }
    CaseIteratorImpl &operator-=(ptrdiff_t N) {
      // Check index correctness after subtraction.
      // Note: Case.Index == getNumCases() means end().
      assert(Case.Index - N >= 0 &&
             (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
             "Case.Index out the number of cases.");
      Case.Index -= N;
      return *this;
    }
    ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
      return Case.Index - RHS.Case.Index;
    }
    bool operator==(const CaseIteratorImpl &RHS) const {
      return Case == RHS.Case;
    }
    bool operator<(const CaseIteratorImpl &RHS) const {
      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
      return Case.Index < RHS.Case.Index;
    }
    CaseHandleT &operator*() { return Case; }
    const CaseHandleT &operator*() const { return Case; }
  };

  using CaseIt = CaseIteratorImpl<CaseHandle>;
  using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;

  static SwitchInst *Create(Value *Value, BasicBlock *Default,
                            unsigned NumCases,
                            Instruction *InsertBefore = nullptr) {
    return new SwitchInst(Value, Default, NumCases, InsertBefore);
  }

  static SwitchInst *Create(Value *Value, BasicBlock *Default,
                            unsigned NumCases, BasicBlock *InsertAtEnd) {
    return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  // Accessor Methods for Switch stmt
  Value *getCondition() const { return getOperand(0); }
  void setCondition(Value *V) { setOperand(0, V); }

  BasicBlock *getDefaultDest() const {
    return cast<BasicBlock>(getOperand(1));
  }

  void setDefaultDest(BasicBlock *DefaultCase) {
    setOperand(1, reinterpret_cast<Value*>(DefaultCase));
  }

  /// Return the number of 'cases' in this switch instruction, excluding the
  /// default case. (Each case occupies two operands; operands 0 and 1 are the
  /// condition and default destination.)
  unsigned getNumCases() const {
    return getNumOperands()/2 - 1;
  }

  /// Returns a read/write iterator that points to the first case in the
  /// SwitchInst.
  CaseIt case_begin() {
    return CaseIt(this, 0);
  }

  /// Returns a read-only iterator that points to the first case in the
  /// SwitchInst.
  ConstCaseIt case_begin() const {
    return ConstCaseIt(this, 0);
  }

  /// Returns a read/write iterator that points one past the last in the
  /// SwitchInst.
  CaseIt case_end() {
    return CaseIt(this, getNumCases());
  }

  /// Returns a read-only iterator that points one past the last in the
  /// SwitchInst.
  ConstCaseIt case_end() const {
    return ConstCaseIt(this, getNumCases());
  }

  /// Iteration adapter for range-for loops.
  iterator_range<CaseIt> cases() {
    return make_range(case_begin(), case_end());
  }

  /// Constant iteration adapter for range-for loops.
  iterator_range<ConstCaseIt> cases() const {
    return make_range(case_begin(), case_end());
  }

  /// Returns an iterator that points to the default case.
  /// Note: this iterator allows to resolve successor only. Attempt
  /// to resolve case value causes an assertion.
  /// Also note, that increment and decrement also causes an assertion and
  /// makes iterator invalid.
  CaseIt case_default() {
    return CaseIt(this, DefaultPseudoIndex);
  }
  ConstCaseIt case_default() const {
    return ConstCaseIt(this, DefaultPseudoIndex);
  }

  /// Search all of the case values for the specified constant. If it is
  /// explicitly handled, return the case iterator of it, otherwise return
  /// default case iterator to indicate that it is handled by the default
  /// handler.
  CaseIt findCaseValue(const ConstantInt *C) {
    CaseIt I = llvm::find_if(
        cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; });
    if (I != case_end())
      return I;

    return case_default();
  }
  ConstCaseIt findCaseValue(const ConstantInt *C) const {
    ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) {
      return Case.getCaseValue() == C;
    });
    if (I != case_end())
      return I;

    return case_default();
  }

  /// Finds the unique case value for a given successor. Returns null if the
  /// successor is not found, not unique, or is the default case.
  ConstantInt *findCaseDest(BasicBlock *BB) {
    if (BB == getDefaultDest())
      return nullptr;

    ConstantInt *CI = nullptr;
    for (auto Case : cases()) {
      if (Case.getCaseSuccessor() != BB)
        continue;

      if (CI)
        return nullptr; // Multiple cases lead to BB.

      CI = Case.getCaseValue();
    }

    return CI;
  }

  /// Add an entry to the switch instruction.
  /// Note:
  /// This action invalidates case_end(). Old case_end() iterator will
  /// point to the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest);

  /// This method removes the specified case and its successor from the switch
  /// instruction. Note that this operation may reorder the remaining cases at
  /// index idx and above.
  /// Note:
  /// This action invalidates iterators for all cases following the one removed,
  /// including the case_end() iterator. It returns an iterator for the next
  /// case.
  CaseIt removeCase(CaseIt I);

  // Successors are the default destination plus one block per case.
  unsigned getNumSuccessors() const { return getNumOperands()/2; }
  BasicBlock *getSuccessor(unsigned idx) const {
    assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
    return cast<BasicBlock>(getOperand(idx*2+1));
  }
  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
    setOperand(idx * 2 + 1, NewSucc);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Switch;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
3520
3521/// A wrapper class to simplify modification of SwitchInst cases along with
3522/// their prof branch_weights metadata.
class SwitchInstProfUpdateWrapper {
  SwitchInst &SI;
  // Branch weights kept in sync with SI's cases while the wrapper is live.
  // None when unset; presumably populated by init() from the prof
  // branch_weights metadata — init()'s body is not visible here, confirm in
  // Instructions.cpp.
  Optional<SmallVector<uint32_t, 8> > Weights = None;
  // Set when the weights were modified; the destructor only writes the
  // metadata back if this is true.
  bool Changed = false;

protected:
  static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);

  MDNode *buildProfBranchWeightsMD();

  void init();

public:
  using CaseWeightOpt = Optional<uint32_t>;
  // Transparent access to the wrapped SwitchInst.
  SwitchInst *operator->() { return &SI; }
  SwitchInst &operator*() { return SI; }
  operator SwitchInst *() { return &SI; }

  SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }

  // Flush any accumulated weight changes back into the prof metadata.
  ~SwitchInstProfUpdateWrapper() {
    if (Changed)
      SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
  }

  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
  /// correspondent branch weight.
  SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);

  /// Delegate the call to the underlying SwitchInst::addCase() and set the
  /// specified branch weight for the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);

  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
  /// this object to not touch the underlying SwitchInst in destructor.
  SymbolTableList<Instruction>::iterator eraseFromParent();

  void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
  CaseWeightOpt getSuccessorWeight(unsigned idx);

  static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
};
3565
// Operand-storage traits for SwitchInst: operands live in a separately
// allocated ("hung off") use list so the list can grow as cases are added.
// NOTE(review): the `2` is presumably the initial/minimum operand count
// (condition + default destination) — confirm against llvm/IR/OperandTraits.h.
template <>
struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
};
3569
// Expands to the out-of-line definitions of SwitchInst's operand accessors
// declared via DECLARE_TRANSPARENT_OPERAND_ACCESSORS inside the class:
// op_begin/op_end (const and non-const), getOperand/setOperand (with range
// asserts), getNumOperands, and the templated Op<Idx>() helpers — all routed
// through OperandTraits<SwitchInst>.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3571
3572//===----------------------------------------------------------------------===//
3573// IndirectBrInst Class
3574//===----------------------------------------------------------------------===//
3575
3576//===---------------------------------------------------------------------------
3577/// Indirect Branch Instruction.
3578///
3579class IndirectBrInst : public Instruction {
3580 unsigned ReservedSpace;
3581
3582 // Operand[0] = Address to jump to
3583 // Operand[n+1] = n-th destination
3584 IndirectBrInst(const IndirectBrInst &IBI);
3585
3586 /// Create a new indirectbr instruction, specifying an
3587 /// Address to jump to. The number of expected destinations can be specified
3588 /// here to make memory allocation more efficient. This constructor can also
3589 /// autoinsert before another instruction.
3590 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3591
3592 /// Create a new indirectbr instruction, specifying an
3593 /// Address to jump to. The number of expected destinations can be specified
3594 /// here to make memory allocation more efficient. This constructor also
3595 /// autoinserts at the end of the specified BasicBlock.
3596 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3597
3598 // allocate space for exactly zero operands
3599 void *operator new(size_t s) {
3600 return User::operator new(s);
3601 }
3602
3603 void init(Value *Address, unsigned NumDests);
3604 void growOperands();
3605
3606protected:
3607 // Note: Instruction needs to be a friend here to call cloneImpl.