Bug Summary

File: build/llvm-toolchain-snapshot-15~++20220407100720+1c9415806ba6/llvm/lib/Transforms/Scalar/LICM.cpp
Warning: line 1196, column 33
Called C++ object pointer is null
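
This diagnostic means the analyzer found a path on which a member function is called through a pointer it can prove to be null. Below is a minimal, self-contained sketch of that defect class; the types and names are hypothetical stand-ins and do not reproduce the actual code at line 1196 (which lies outside this excerpt). The pattern is modeled loosely on APIs like llvm::Loop::getLoopPreheader(), which may return nullptr.

#include <cstdio>

// Hypothetical stand-ins; not the real LLVM API.
struct BasicBlock {
  void print() { std::puts("block"); }
};

struct Loop {
  BasicBlock *Preheader = nullptr;
  // Assume this may return nullptr, as getLoopPreheader() can.
  BasicBlock *getPreheader() { return Preheader; }
};

void transform(Loop &L) {
  // Unchecked form the checker flags when Preheader can be null:
  //   L.getPreheader()->print();  // warning: Called C++ object pointer is null
  // Guarded form the checker accepts:
  if (BasicBlock *PH = L.getPreheader())
    PH->print();
}

int main() {
  Loop L;
  transform(L);
}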

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name LICM.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220407100720+1c9415806ba6/build-llvm -resource-dir /usr/lib/llvm-15/lib/clang/15.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-15~++20220407100720+1c9415806ba6/llvm/lib/Transforms/Scalar -I include -I /build/llvm-toolchain-snapshot-15~++20220407100720+1c9415806ba6/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-15/lib/clang/15.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220407100720+1c9415806ba6/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-15~++20220407100720+1c9415806ba6/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220407100720+1c9415806ba6/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-15~++20220407100720+1c9415806ba6/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-15~++20220407100720+1c9415806ba6/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220407100720+1c9415806ba6/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-15~++20220407100720+1c9415806ba6/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-04-07-233643-126351-1 -x c++ /build/llvm-toolchain-snapshot-15~++20220407100720+1c9415806ba6/llvm/lib/Transforms/Scalar/LICM.cpp

/build/llvm-toolchain-snapshot-15~++20220407100720+1c9415806ba6/llvm/lib/Transforms/Scalar/LICM.cpp

1//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This pass performs loop invariant code motion, attempting to remove as much
10// code from the body of a loop as possible. It does this by either hoisting
11// code into the preheader block, or by sinking code to the exit blocks if it is
12// safe. This pass also promotes must-aliased memory locations in the loop to
13// live in registers, thus hoisting and sinking "invariant" loads and stores.
14//
15// Hoisting operations out of loops is a canonicalization transform. It
16// enables and simplifies subsequent optimizations in the middle-end.
17// Rematerialization of hoisted instructions to reduce register pressure is the
18// responsibility of the back-end, which has more accurate information about
19// register pressure and also handles other optimizations than LICM that
20// increase live-ranges.
21//
22// This pass uses alias analysis for two purposes:
23//
24// 1. Moving loop invariant loads and calls out of loops. If we can determine
25// that a load or call inside of a loop never aliases anything stored to,
26// we can hoist it or sink it like any other instruction.
27// 2. Scalar Promotion of Memory - If there is a store instruction inside of
28// the loop, we try to move the store to happen AFTER the loop instead of
29// inside of the loop. This can only happen if a few conditions are true:
30// A. The pointer stored through is loop invariant
31// B. There are no stores or loads in the loop which _may_ alias the
32// pointer. There are no calls in the loop which mod/ref the pointer.
33// If these conditions are true, we can promote the loads and stores in the
34// loop of the pointer to use a temporary alloca'd variable. We then use
35// the SSAUpdater to construct the appropriate SSA form for the value.
36//
37//===----------------------------------------------------------------------===//
38
39#include "llvm/Transforms/Scalar/LICM.h"
40#include "llvm/ADT/PriorityWorklist.h"
41#include "llvm/ADT/SetOperations.h"
42#include "llvm/ADT/Statistic.h"
43#include "llvm/Analysis/AliasAnalysis.h"
44#include "llvm/Analysis/AliasSetTracker.h"
45#include "llvm/Analysis/CaptureTracking.h"
46#include "llvm/Analysis/ConstantFolding.h"
47#include "llvm/Analysis/GuardUtils.h"
48#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
49#include "llvm/Analysis/Loads.h"
50#include "llvm/Analysis/LoopInfo.h"
51#include "llvm/Analysis/LoopIterator.h"
52#include "llvm/Analysis/LoopNestAnalysis.h"
53#include "llvm/Analysis/LoopPass.h"
54#include "llvm/Analysis/MemorySSA.h"
55#include "llvm/Analysis/MemorySSAUpdater.h"
56#include "llvm/Analysis/MustExecute.h"
57#include "llvm/Analysis/OptimizationRemarkEmitter.h"
58#include "llvm/Analysis/ScalarEvolution.h"
59#include "llvm/Analysis/TargetLibraryInfo.h"
60#include "llvm/Analysis/TargetTransformInfo.h"
61#include "llvm/Analysis/ValueTracking.h"
62#include "llvm/IR/CFG.h"
63#include "llvm/IR/Constants.h"
64#include "llvm/IR/DataLayout.h"
65#include "llvm/IR/DebugInfoMetadata.h"
66#include "llvm/IR/DerivedTypes.h"
67#include "llvm/IR/Dominators.h"
68#include "llvm/IR/Instructions.h"
69#include "llvm/IR/IntrinsicInst.h"
70#include "llvm/IR/LLVMContext.h"
71#include "llvm/IR/Metadata.h"
72#include "llvm/IR/PatternMatch.h"
73#include "llvm/IR/PredIteratorCache.h"
74#include "llvm/InitializePasses.h"
75#include "llvm/Support/CommandLine.h"
76#include "llvm/Support/Debug.h"
77#include "llvm/Support/raw_ostream.h"
78#include "llvm/Transforms/Scalar.h"
79#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
80#include "llvm/Transforms/Utils/BasicBlockUtils.h"
81#include "llvm/Transforms/Utils/Local.h"
82#include "llvm/Transforms/Utils/LoopUtils.h"
83#include "llvm/Transforms/Utils/SSAUpdater.h"
84#include <algorithm>
85#include <utility>
86using namespace llvm;
87
88namespace llvm {
89class BlockFrequencyInfo;
90class LPMUpdater;
91} // namespace llvm
92
93#define DEBUG_TYPE "licm"
94
95STATISTIC(NumCreatedBlocks, "Number of blocks created");
96STATISTIC(NumClonedBranches, "Number of branches cloned");
97STATISTIC(NumSunk, "Number of instructions sunk out of loop");
98STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
99STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
100STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
101STATISTIC(NumPromoted, "Number of memory locations promoted to registers");
102
103/// Memory promotion is enabled by default.
104static cl::opt<bool>
105 DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
106 cl::desc("Disable memory promotion in LICM pass"));
107
108static cl::opt<bool> ControlFlowHoisting(
109 "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
110 cl::desc("Enable control flow (and PHI) hoisting in LICM"));
111
112static cl::opt<uint32_t> MaxNumUsesTraversed(
113 "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
114 cl::desc("Max num uses visited for identifying load "
115 "invariance in loop using invariant start (default = 8)"));
116
117// Experimental option to allow imprecision in LICM in pathological cases, in
118// exchange for faster compile. This is to be removed if MemorySSA starts to
119// address the same issue. This flag applies only when LICM uses MemorySSA
120// instead of AliasSetTracker. LICM calls MemorySSAWalker's
121// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
122// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
123// which may not be precise, since optimizeUses is capped. The result is
124// correct, but we may not get as "far up" as possible to get which access is
125// clobbering the one queried.
126cl::opt<unsigned> llvm::SetLicmMssaOptCap(
127 "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
128 cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
129 "for faster compile. Caps the MemorySSA clobbering calls."));
130
131// Experimentally, memory promotion carries less importance than sinking and
132// hoisting. Limit when we do promotion when using MemorySSA, in order to save
133// compile time.
134cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
135 "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
136 cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
137 "effect. When MSSA in LICM is enabled, then this is the maximum "
138 "number of accesses allowed to be present in a loop in order to "
139 "enable memory promotion."));
140
141static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
142static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
143 const LoopSafetyInfo *SafetyInfo,
144 TargetTransformInfo *TTI, bool &FreeInLoop,
145 bool LoopNestMode);
146static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
147 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
148 MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
149 OptimizationRemarkEmitter *ORE);
150static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
151 BlockFrequencyInfo *BFI, const Loop *CurLoop,
152 ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
153 OptimizationRemarkEmitter *ORE);
154static bool isSafeToExecuteUnconditionally(
155 Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
156 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
157 OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
158 bool AllowSpeculation);
159static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
160 AliasSetTracker *CurAST, Loop *CurLoop,
161 AAResults *AA);
162static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
163 Loop *CurLoop, Instruction &I,
164 SinkAndHoistLICMFlags &Flags);
165static bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
166 MemoryUse &MU);
167static Instruction *cloneInstructionInExitBlock(
168 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
169 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);
170
171static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
172 MemorySSAUpdater *MSSAU);
173
174static void moveInstructionBefore(Instruction &I, Instruction &Dest,
175 ICFLoopSafetyInfo &SafetyInfo,
176 MemorySSAUpdater *MSSAU, ScalarEvolution *SE);
177
178static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
179 function_ref<void(Instruction *)> Fn);
180static SmallVector<SmallSetVector<Value *, 8>, 0>
181collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L);
182
183namespace {
184struct LoopInvariantCodeMotion {
185 bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
186 BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI,
187 TargetTransformInfo *TTI, ScalarEvolution *SE, MemorySSA *MSSA,
188 OptimizationRemarkEmitter *ORE, bool LoopNestMode = false);
189
190 LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
191 unsigned LicmMssaNoAccForPromotionCap,
192 bool LicmAllowSpeculation)
193 : LicmMssaOptCap(LicmMssaOptCap),
194 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
195 LicmAllowSpeculation(LicmAllowSpeculation) {}
196
197private:
198 unsigned LicmMssaOptCap;
199 unsigned LicmMssaNoAccForPromotionCap;
200 bool LicmAllowSpeculation;
201};
202
203struct LegacyLICMPass : public LoopPass {
204 static char ID; // Pass identification, replacement for typeid
205 LegacyLICMPass(
206 unsigned LicmMssaOptCap = SetLicmMssaOptCap,
207 unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap,
208 bool LicmAllowSpeculation = true)
209 : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
210 LicmAllowSpeculation) {
211 initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
212 }
213
214 bool runOnLoop(Loop *L, LPPassManager &LPM) override {
215 if (skipLoop(L))
216 return false;
217
218 LLVM_DEBUG(dbgs() << "Perform LICM on Loop with header at block "
219 << L->getHeader()->getNameOrAsOperand() << "\n");
220
221 auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
222 MemorySSA *MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
223 bool hasProfileData = L->getHeader()->getParent()->hasProfileData();
224 BlockFrequencyInfo *BFI =
225 hasProfileData ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI()
226 : nullptr;
227 // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
228 // pass. Function analyses need to be preserved across loop transformations
229 // but ORE cannot be preserved (see comment before the pass definition).
230 OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
231 return LICM.runOnLoop(
232 L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
233 &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
234 &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), BFI,
235 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
236 *L->getHeader()->getParent()),
237 &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
238 *L->getHeader()->getParent()),
239 SE ? &SE->getSE() : nullptr, MSSA, &ORE);
240 }
241
242 /// This transformation requires natural loop information & requires that
243 /// loop preheaders be inserted into the CFG...
244 ///
245 void getAnalysisUsage(AnalysisUsage &AU) const override {
246 AU.addPreserved<DominatorTreeWrapperPass>();
247 AU.addPreserved<LoopInfoWrapperPass>();
248 AU.addRequired<TargetLibraryInfoWrapperPass>();
249 AU.addRequired<MemorySSAWrapperPass>();
250 AU.addPreserved<MemorySSAWrapperPass>();
251 AU.addRequired<TargetTransformInfoWrapperPass>();
252 getLoopAnalysisUsage(AU);
253 LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
254 AU.addPreserved<LazyBlockFrequencyInfoPass>();
255 AU.addPreserved<LazyBranchProbabilityInfoPass>();
256 }
257
258private:
259 LoopInvariantCodeMotion LICM;
260};
261} // namespace
262
263PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
264 LoopStandardAnalysisResults &AR, LPMUpdater &) {
265 if (!AR.MSSA)
266 report_fatal_error("LICM requires MemorySSA (loop-mssa)");
267
268 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
269 // pass. Function analyses need to be preserved across loop transformations
270 // but ORE cannot be preserved (see comment before the pass definition).
271 OptimizationRemarkEmitter ORE(L.getHeader()->getParent());
272
273 LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
274 Opts.AllowSpeculation);
275 if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, AR.BFI, &AR.TLI, &AR.TTI,
276 &AR.SE, AR.MSSA, &ORE))
277 return PreservedAnalyses::all();
278
279 auto PA = getLoopPassPreservedAnalyses();
280
281 PA.preserve<DominatorTreeAnalysis>();
282 PA.preserve<LoopAnalysis>();
283 PA.preserve<MemorySSAAnalysis>();
284
285 return PA;
286}
287
288void LICMPass::printPipeline(
289 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
290 static_cast<PassInfoMixin<LICMPass> *>(this)->printPipeline(
291 OS, MapClassName2PassName);
292
293 OS << "<";
294 OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
295 OS << ">";
296}
297
298PreservedAnalyses LNICMPass::run(LoopNest &LN, LoopAnalysisManager &AM,
299 LoopStandardAnalysisResults &AR,
300 LPMUpdater &) {
301 if (!AR.MSSA)
302 report_fatal_error("LNICM requires MemorySSA (loop-mssa)");
303
304 // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
305 // pass. Function analyses need to be preserved across loop transformations
306 // but ORE cannot be preserved (see comment before the pass definition).
307 OptimizationRemarkEmitter ORE(LN.getParent());
308
309 LoopInvariantCodeMotion LICM(Opts.MssaOptCap, Opts.MssaNoAccForPromotionCap,
310 Opts.AllowSpeculation);
311
312 Loop &OutermostLoop = LN.getOutermostLoop();
313 bool Changed = LICM.runOnLoop(&OutermostLoop, &AR.AA, &AR.LI, &AR.DT, AR.BFI,
314 &AR.TLI, &AR.TTI, &AR.SE, AR.MSSA, &ORE, true);
315
316 if (!Changed)
317 return PreservedAnalyses::all();
318
319 auto PA = getLoopPassPreservedAnalyses();
320
321 PA.preserve<DominatorTreeAnalysis>();
322 PA.preserve<LoopAnalysis>();
323 PA.preserve<MemorySSAAnalysis>();
324
325 return PA;
326}
327
328void LNICMPass::printPipeline(
329 raw_ostream &OS, function_ref<StringRef(StringRef)> MapClassName2PassName) {
330 static_cast<PassInfoMixin<LNICMPass> *>(this)->printPipeline(
331 OS, MapClassName2PassName);
332
333 OS << "<";
334 OS << (Opts.AllowSpeculation ? "" : "no-") << "allowspeculation";
335 OS << ">";
336}
337
338char LegacyLICMPass::ID = 0;
339INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
340 false, false)
341INITIALIZE_PASS_DEPENDENCY(LoopPass)
342INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
343INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
344INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
345INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
346INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
347 false)
348
349Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
350Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
351 unsigned LicmMssaNoAccForPromotionCap,
352 bool LicmAllowSpeculation) {
353 return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
354 LicmAllowSpeculation);
355}
356
357llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(bool IsSink, Loop *L,
358 MemorySSA *MSSA)
359 : SinkAndHoistLICMFlags(SetLicmMssaOptCap, SetLicmMssaNoAccForPromotionCap,
360 IsSink, L, MSSA) {}
361
362llvm::SinkAndHoistLICMFlags::SinkAndHoistLICMFlags(
363 unsigned LicmMssaOptCap, unsigned LicmMssaNoAccForPromotionCap, bool IsSink,
364 Loop *L, MemorySSA *MSSA)
365 : LicmMssaOptCap(LicmMssaOptCap),
366 LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap),
367 IsSink(IsSink) {
368 assert(((L != nullptr) == (MSSA != nullptr)) &&
369 "Unexpected values for SinkAndHoistLICMFlags");
370 if (!MSSA)
371 return;
372
373 unsigned AccessCapCount = 0;
374 for (auto *BB : L->getBlocks())
375 if (const auto *Accesses = MSSA->getBlockAccesses(BB))
376 for (const auto &MA : *Accesses) {
377 (void)MA;
378 ++AccessCapCount;
379 if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
380 NoOfMemAccTooLarge = true;
381 return;
382 }
383 }
384}
385
386/// Hoist expressions out of the specified loop. Note, alias info for inner
387/// loop is not preserved so it is not a good idea to run LICM multiple
388/// times on one loop.
389bool LoopInvariantCodeMotion::runOnLoop(
390 Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
391 BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
392 ScalarEvolution *SE, MemorySSA *MSSA, OptimizationRemarkEmitter *ORE,
393 bool LoopNestMode) {
394 bool Changed = false;
395
396 assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");
397 MSSA->ensureOptimizedUses();
398
399 // If this loop has metadata indicating that LICM is not to be performed then
400 // just exit.
401 if (hasDisableLICMTransformsHint(L)) {
402 return false;
403 }
404
405 // Don't sink stores from loops with coroutine suspend instructions.
406 // LICM would sink instructions into the default destination of
407 // the coroutine switch. The default destination of the switch is to
408 // handle the case where the coroutine is suspended, by which point the
409 // coroutine frame may have been destroyed. No instruction can be sunk there.
410 // FIXME: This would unfortunately hurt the performance of coroutines, however
411 // there is currently no general solution for this. Similar issues could also
412 // potentially happen in other passes where instructions are being moved
413 // across that edge.
414 bool HasCoroSuspendInst = llvm::any_of(L->getBlocks(), [](BasicBlock *BB) {
415 return llvm::any_of(*BB, [](Instruction &I) {
416 IntrinsicInst *II = dyn_cast<IntrinsicInst>(&I);
417 return II && II->getIntrinsicID() == Intrinsic::coro_suspend;
418 });
419 });
420
421 MemorySSAUpdater MSSAU(MSSA);
422 SinkAndHoistLICMFlags Flags(LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
423 /*IsSink=*/true, L, MSSA);
424
425 // Get the preheader block to move instructions into...
426 BasicBlock *Preheader = L->getLoopPreheader();
427
428 // Compute loop safety information.
429 ICFLoopSafetyInfo SafetyInfo;
430 SafetyInfo.computeLoopSafetyInfo(L);
431
432 // We want to visit all of the instructions in this loop... that are not part
433 // of our subloops (they have already had their invariants hoisted out of
434 // their loop, into this loop, so there is no need to process the BODIES of
435 // the subloops).
436 //
437 // Traverse the body of the loop in depth first order on the dominator tree so
438 // that we are guaranteed to see definitions before we see uses. This allows
439 // us to sink instructions in one pass, without iteration. After sinking
440 // instructions, we perform another pass to hoist them out of the loop.
441 if (L->hasDedicatedExits())
442 Changed |= LoopNestMode
443 ? sinkRegionForLoopNest(DT->getNode(L->getHeader()), AA, LI,
444 DT, BFI, TLI, TTI, L, &MSSAU,
445 &SafetyInfo, Flags, ORE)
446 : sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI,
447 TLI, TTI, L, &MSSAU, &SafetyInfo, Flags, ORE);
448 Flags.setIsSink(false);
449 if (Preheader)
450 Changed |= hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, L,
451 &MSSAU, SE, &SafetyInfo, Flags, ORE, LoopNestMode,
452 LicmAllowSpeculation);
453
454 // Now that all loop invariants have been removed from the loop, promote any
455 // memory references to scalars that we can.
456 // Don't sink stores from loops without dedicated block exits. Exits
457 // containing indirect branches are not transformed by loop simplify,
458 // make sure we catch that. An additional load may be generated in the
459 // preheader for SSA updater, so also avoid sinking when no preheader
460 // is available.
461 if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
462 !Flags.tooManyMemoryAccesses() && !HasCoroSuspendInst) {
463 // Figure out the loop exits and their insertion points
464 SmallVector<BasicBlock *, 8> ExitBlocks;
465 L->getUniqueExitBlocks(ExitBlocks);
466
467 // We can't insert into a catchswitch.
468 bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
469 return isa<CatchSwitchInst>(Exit->getTerminator());
470 });
471
472 if (!HasCatchSwitch) {
473 SmallVector<Instruction *, 8> InsertPts;
474 SmallVector<MemoryAccess *, 8> MSSAInsertPts;
475 InsertPts.reserve(ExitBlocks.size());
476 MSSAInsertPts.reserve(ExitBlocks.size());
477 for (BasicBlock *ExitBlock : ExitBlocks) {
478 InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
479 MSSAInsertPts.push_back(nullptr);
480 }
481
482 PredIteratorCache PIC;
483
484 // Promoting one set of accesses may make the pointers for another set
485 // loop invariant, so run this in a loop.
486 bool Promoted = false;
487 bool LocalPromoted;
488 do {
489 LocalPromoted = false;
490 for (const SmallSetVector<Value *, 8> &PointerMustAliases :
491 collectPromotionCandidates(MSSA, AA, L)) {
492 LocalPromoted |= promoteLoopAccessesToScalars(
493 PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
494 DT, TLI, L, &MSSAU, &SafetyInfo, ORE, LicmAllowSpeculation);
495 }
496 Promoted |= LocalPromoted;
497 } while (LocalPromoted);
498
499 // Once we have promoted values across the loop body we have to
500 // recursively reform LCSSA as any nested loop may now have values defined
501 // within the loop used in the outer loop.
502 // FIXME: This is really heavy handed. It would be a bit better to use an
503 // SSAUpdater strategy during promotion that was LCSSA aware and reformed
504 // it as it went.
505 if (Promoted)
506 formLCSSARecursively(*L, *DT, LI, SE);
507
508 Changed |= Promoted;
509 }
510 }
511
512 // Check that neither this loop nor its parent have had LCSSA broken. LICM is
513 // specifically moving instructions across the loop boundary and so it is
514 // especially in need of basic functional correctness checking here.
515 assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
516 assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
517 "Parent loop not left in LCSSA form after LICM!");
518
519 if (VerifyMemorySSA)
520 MSSA->verifyMemorySSA();
521
522 if (Changed && SE)
523 SE->forgetLoopDispositions(L);
524 return Changed;
525}
526
527/// Walk the specified region of the CFG (defined by all blocks dominated by
528/// the specified block, and that are in the current loop) in reverse depth
529/// first order w.r.t the DominatorTree. This allows us to visit uses before
530/// definitions, allowing us to sink a loop body in one pass without iteration.
531///
532bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
533 DominatorTree *DT, BlockFrequencyInfo *BFI,
534 TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
535 Loop *CurLoop, MemorySSAUpdater *MSSAU,
536 ICFLoopSafetyInfo *SafetyInfo,
537 SinkAndHoistLICMFlags &Flags,
538 OptimizationRemarkEmitter *ORE, Loop *OutermostLoop) {
539
540 // Verify inputs.
541 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
542 CurLoop != nullptr && MSSAU != nullptr && SafetyInfo != nullptr &&
543 "Unexpected input to sinkRegion.");
544
545 // We want to visit children before parents. We will enqueue all the parents
546 // before their children in the worklist and process the worklist in reverse
547 // order.
548 SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);
549
550 bool Changed = false;
551 for (DomTreeNode *DTN : reverse(Worklist)) {
552 BasicBlock *BB = DTN->getBlock();
553 // Only need to process the contents of this block if it is not part of a
554 // subloop (which would already have been processed).
555 if (inSubLoop(BB, CurLoop, LI))
556 continue;
557
558 for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
559 Instruction &I = *--II;
560
561 // The instruction is not used in the loop if it is dead. In this case,
562 // we just delete it instead of sinking it.
563 if (isInstructionTriviallyDead(&I, TLI)) {
564 LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
565 salvageKnowledge(&I);
566 salvageDebugInfo(I);
567 ++II;
568 eraseInstruction(I, *SafetyInfo, MSSAU);
569 Changed = true;
570 continue;
571 }
572
573 // Check to see if we can sink this instruction to the exit blocks
574 // of the loop. We can do this if the all users of the instruction are
575 // outside of the loop. In this case, it doesn't even matter if the
576 // operands of the instruction are loop invariant.
577 //
578 bool FreeInLoop = false;
579 bool LoopNestMode = OutermostLoop != nullptr;
580 if (!I.mayHaveSideEffects() &&
581 isNotUsedOrFreeInLoop(I, LoopNestMode ? OutermostLoop : CurLoop,
582 SafetyInfo, TTI, FreeInLoop, LoopNestMode) &&
583 canSinkOrHoistInst(I, AA, DT, CurLoop, /*CurAST*/nullptr, MSSAU, true,
584 &Flags, ORE)) {
585 if (sink(I, LI, DT, BFI, CurLoop, SafetyInfo, MSSAU, ORE)) {
586 if (!FreeInLoop) {
587 ++II;
588 salvageDebugInfo(I);
589 eraseInstruction(I, *SafetyInfo, MSSAU);
590 }
591 Changed = true;
592 }
593 }
594 }
595 }
596 if (VerifyMemorySSA)
597 MSSAU->getMemorySSA()->verifyMemorySSA();
598 return Changed;
599}
600
601bool llvm::sinkRegionForLoopNest(
602 DomTreeNode *N, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
603 BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
604 Loop *CurLoop, MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo,
605 SinkAndHoistLICMFlags &Flags, OptimizationRemarkEmitter *ORE) {
606
607 bool Changed = false;
608 SmallPriorityWorklist<Loop *, 4> Worklist;
609 Worklist.insert(CurLoop);
610 appendLoopsToWorklist(*CurLoop, Worklist);
611 while (!Worklist.empty()) {
612 Loop *L = Worklist.pop_back_val();
613 Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI,
614 TTI, L, MSSAU, SafetyInfo, Flags, ORE, CurLoop);
615 }
616 return Changed;
617}
618
619namespace {
620// This is a helper class for hoistRegion to make it able to hoist control flow
621// in order to be able to hoist phis. The way this works is that we initially
622// start hoisting to the loop preheader, and when we see a loop invariant branch
623// we make note of this. When we then come to hoist an instruction that's
624// conditional on such a branch we duplicate the branch and the relevant control
625// flow, then hoist the instruction into the block corresponding to its original
626// block in the duplicated control flow.
627class ControlFlowHoister {
628private:
629 // Information about the loop we are hoisting from
630 LoopInfo *LI;
631 DominatorTree *DT;
632 Loop *CurLoop;
633 MemorySSAUpdater *MSSAU;
634
635 // A map of blocks in the loop to the block their instructions will be hoisted
636 // to.
637 DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;
638
639 // The branches that we can hoist, mapped to the block that marks a
640 // convergence point of their control flow.
641 DenseMap<BranchInst *, BasicBlock *> HoistableBranches;
642
643public:
644 ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
645 MemorySSAUpdater *MSSAU)
646 : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}
647
648 void registerPossiblyHoistableBranch(BranchInst *BI) {
649 // We can only hoist conditional branches with loop invariant operands.
650 if (!ControlFlowHoisting || !BI->isConditional() ||
651 !CurLoop->hasLoopInvariantOperands(BI))
652 return;
653
654 // The branch destinations need to be in the loop, and we don't gain
655 // anything by duplicating conditional branches with duplicate successors,
656 // as it's essentially the same as an unconditional branch.
657 BasicBlock *TrueDest = BI->getSuccessor(0);
658 BasicBlock *FalseDest = BI->getSuccessor(1);
659 if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
660 TrueDest == FalseDest)
661 return;
662
663 // We can hoist BI if one branch destination is the successor of the other,
664 // or both have a common successor, which we check by seeing if the
665 // intersection of their successors is non-empty.
666 // TODO: This could be expanded to allowing branches where both ends
667 // eventually converge to a single block.
668 SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
669 TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
670 FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
671 BasicBlock *CommonSucc = nullptr;
672 if (TrueDestSucc.count(FalseDest)) {
673 CommonSucc = FalseDest;
674 } else if (FalseDestSucc.count(TrueDest)) {
675 CommonSucc = TrueDest;
676 } else {
677 set_intersect(TrueDestSucc, FalseDestSucc);
678 // If there's one common successor use that.
679 if (TrueDestSucc.size() == 1)
680 CommonSucc = *TrueDestSucc.begin();
681 // If there's more than one, pick whichever appears first in the block list
682 // (we can't use the value returned by TrueDestSucc.begin() as it's
683 // unpredictable which element gets returned).
684 else if (!TrueDestSucc.empty()) {
685 Function *F = TrueDest->getParent();
686 auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
687 auto It = llvm::find_if(*F, IsSucc);
688 assert(It != F->end() && "Could not find successor in function");
689 CommonSucc = &*It;
690 }
691 }
692 // The common successor has to be dominated by the branch, as otherwise
693 // there will be some other path to the successor that will not be
694 // controlled by this branch so any phi we hoist would be controlled by the
695 // wrong condition. This also takes care of avoiding hoisting of loop back
696 // edges.
697 // TODO: In some cases this could be relaxed if the successor is dominated
698 // by another block that's been hoisted and we can guarantee that the
699 // control flow has been replicated exactly.
700 if (CommonSucc && DT->dominates(BI, CommonSucc))
701 HoistableBranches[BI] = CommonSucc;
702 }
703
704 bool canHoistPHI(PHINode *PN) {
705 // The phi must have loop invariant operands.
706 if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
707 return false;
708 // We can hoist phis if the block they are in is the target of hoistable
709 // branches which cover all of the predecessors of the block.
710 SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
711 BasicBlock *BB = PN->getParent();
712 for (BasicBlock *PredBB : predecessors(BB))
713 PredecessorBlocks.insert(PredBB);
714 // If we have fewer predecessor blocks than predecessors, then the phi will
715 // have more than one incoming value for the same block which we can't
716 // handle.
717 // TODO: This could be handled by erasing some of the duplicate incoming
718 // values.
719 if (PredecessorBlocks.size() != pred_size(BB))
720 return false;
721 for (auto &Pair : HoistableBranches) {
722 if (Pair.second == BB) {
723 // Which blocks are predecessors via this branch depends on if the
724 // branch is triangle-like or diamond-like.
725 if (Pair.first->getSuccessor(0) == BB) {
726 PredecessorBlocks.erase(Pair.first->getParent());
727 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
728 } else if (Pair.first->getSuccessor(1) == BB) {
729 PredecessorBlocks.erase(Pair.first->getParent());
730 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
731 } else {
732 PredecessorBlocks.erase(Pair.first->getSuccessor(0));
733 PredecessorBlocks.erase(Pair.first->getSuccessor(1));
734 }
735 }
736 }
737 // PredecessorBlocks will now be empty if for every predecessor of BB we
738 // found a hoistable branch source.
739 return PredecessorBlocks.empty();
740 }
741
742 BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
743 if (!ControlFlowHoisting)
744 return CurLoop->getLoopPreheader();
745 // If BB has already been hoisted, return that
746 if (HoistDestinationMap.count(BB))
747 return HoistDestinationMap[BB];
748
749 // Check if this block is conditional based on a pending branch
750 auto HasBBAsSuccessor =
751 [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
752 return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
753 Pair.first->getSuccessor(1) == BB);
754 };
755 auto It = llvm::find_if(HoistableBranches, HasBBAsSuccessor);
756
757 // If not involved in a pending branch, hoist to preheader
758 BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
759 if (It == HoistableBranches.end()) {
760 LLVM_DEBUG(dbgs() << "LICM using "
761 << InitialPreheader->getNameOrAsOperand()
762 << " as hoist destination for "
763 << BB->getNameOrAsOperand() << "\n");
764 HoistDestinationMap[BB] = InitialPreheader;
765 return InitialPreheader;
766 }
767 BranchInst *BI = It->first;
768 assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
769 HoistableBranches.end() &&
770 "BB is expected to be the target of at most one branch");
771
772 LLVMContext &C = BB->getContext();
773 BasicBlock *TrueDest = BI->getSuccessor(0);
774 BasicBlock *FalseDest = BI->getSuccessor(1);
775 BasicBlock *CommonSucc = HoistableBranches[BI];
776 BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());
777
778 // Create hoisted versions of blocks that currently don't have them
779 auto CreateHoistedBlock = [&](BasicBlock *Orig) {
780 if (HoistDestinationMap.count(Orig))
781 return HoistDestinationMap[Orig];
782 BasicBlock *New =
783 BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
784 HoistDestinationMap[Orig] = New;
785 DT->addNewBlock(New, HoistTarget);
786 if (CurLoop->getParentLoop())
787 CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
788 ++NumCreatedBlocks;
789 LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
790 << " as hoist destination for " << Orig->getName()
791 << "\n");
792 return New;
793 };
794 BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
795 BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
796 BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);
797
798 // Link up these blocks with branches.
799 if (!HoistCommonSucc->getTerminator()) {
800 // The new common successor we've generated will branch to whatever that
801 // hoist target branched to.
802 BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
803 assert(TargetSucc && "Expected hoist target to have a single successor");
804 HoistCommonSucc->moveBefore(TargetSucc);
805 BranchInst::Create(TargetSucc, HoistCommonSucc);
806 }
807 if (!HoistTrueDest->getTerminator()) {
808 HoistTrueDest->moveBefore(HoistCommonSucc);
809 BranchInst::Create(HoistCommonSucc, HoistTrueDest);
810 }
811 if (!HoistFalseDest->getTerminator()) {
812 HoistFalseDest->moveBefore(HoistCommonSucc);
813 BranchInst::Create(HoistCommonSucc, HoistFalseDest);
814 }
815
816 // If BI is being cloned to what was originally the preheader then
817 // HoistCommonSucc will now be the new preheader.
818 if (HoistTarget == InitialPreheader) {
819 // Phis in the loop header now need to use the new preheader.
820 InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
821 MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
822 HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
823 // The new preheader dominates the loop header.
824 DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
825 DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
826 DT->changeImmediateDominator(HeaderNode, PreheaderNode);
827 // The preheader hoist destination is now the new preheader, with the
828 // exception of the hoist destination of this branch.
829 for (auto &Pair : HoistDestinationMap)
830 if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
831 Pair.second = HoistCommonSucc;
832 }
833
834 // Now finally clone BI.
835 ReplaceInstWithInst(
836 HoistTarget->getTerminator(),
837 BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
838 ++NumClonedBranches;
839
840 assert(CurLoop->getLoopPreheader() &&
841 "Hoisting blocks should not have destroyed preheader");
842 return HoistDestinationMap[BB];
843 }
844};
845} // namespace
846
847/// Walk the specified region of the CFG (defined by all blocks dominated by
848/// the specified block, and that are in the current loop) in depth first
849/// order w.r.t the DominatorTree. This allows us to visit definitions before
850/// uses, allowing us to hoist a loop body in one pass without iteration.
851///
852bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
853 DominatorTree *DT, BlockFrequencyInfo *BFI,
854 TargetLibraryInfo *TLI, Loop *CurLoop,
855 MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
856 ICFLoopSafetyInfo *SafetyInfo,
857 SinkAndHoistLICMFlags &Flags,
858 OptimizationRemarkEmitter *ORE, bool LoopNestMode,
859 bool AllowSpeculation) {
860 // Verify inputs.
861 assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
862 CurLoop != nullptr && MSSAU != nullptr && SafetyInfo != nullptr &&
863 "Unexpected input to hoistRegion.");
864
865 ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);
866
867 // Keep track of instructions that have been hoisted, as they may need to be
868 // re-hoisted if they end up not dominating all of their uses.
869 SmallVector<Instruction *, 16> HoistedInstructions;
870
871 // For PHI hoisting to work we need to hoist blocks before their successors.
872 // We can do this by iterating through the blocks in the loop in reverse
873 // post-order.
874 LoopBlocksRPO Worklist(CurLoop);
875 Worklist.perform(LI);
876 bool Changed = false;
877 for (BasicBlock *BB : Worklist) {
878 // Only need to process the contents of this block if it is not part of a
879 // subloop (which would already have been processed).
880 if (!LoopNestMode && inSubLoop(BB, CurLoop, LI))
881 continue;
882
883 for (Instruction &I : llvm::make_early_inc_range(*BB)) {
884 // Try constant folding this instruction. If all the operands are
885 // constants, it is technically hoistable, but it would be better to
886 // just fold it.
887 if (Constant *C = ConstantFoldInstruction(
888 &I, I.getModule()->getDataLayout(), TLI)) {
889 LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C
890 << '\n');
891 // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
892 I.replaceAllUsesWith(C);
893 if (isInstructionTriviallyDead(&I, TLI))
894 eraseInstruction(I, *SafetyInfo, MSSAU);
895 Changed = true;
896 continue;
897 }
898
899 // Try hoisting the instruction out to the preheader. We can only do
900 // this if all of the operands of the instruction are loop invariant and
901 // if it is safe to hoist the instruction. We also check block frequency
902 // to make sure instruction only gets hoisted into colder blocks.
903 // TODO: It may be safe to hoist if we are hoisting to a conditional block
904 // and we have accurately duplicated the control flow from the loop header
905 // to that block.
906 if (CurLoop->hasLoopInvariantOperands(&I) &&
907 canSinkOrHoistInst(I, AA, DT, CurLoop, /*CurAST*/ nullptr, MSSAU,
908 true, &Flags, ORE) &&
909 isSafeToExecuteUnconditionally(
910 I, DT, TLI, CurLoop, SafetyInfo, ORE,
911 CurLoop->getLoopPreheader()->getTerminator(), AllowSpeculation)) {
912 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
913 MSSAU, SE, ORE);
914 HoistedInstructions.push_back(&I);
915 Changed = true;
916 continue;
917 }
918
919 // Attempt to remove floating point division out of the loop by
920 // converting it to a reciprocal multiplication.
921 if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() &&
922 CurLoop->isLoopInvariant(I.getOperand(1))) {
923 auto Divisor = I.getOperand(1);
924 auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
925 auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
926 ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
927 SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
928 ReciprocalDivisor->insertBefore(&I);
929
930 auto Product =
931 BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
932 Product->setFastMathFlags(I.getFastMathFlags());
933 SafetyInfo->insertInstructionTo(Product, I.getParent());
934 Product->insertAfter(&I);
935 I.replaceAllUsesWith(Product);
936 eraseInstruction(I, *SafetyInfo, MSSAU);
937
938 hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
939 SafetyInfo, MSSAU, SE, ORE);
940 HoistedInstructions.push_back(ReciprocalDivisor);
941 Changed = true;
942 continue;
943 }
944
945 auto IsInvariantStart = [&](Instruction &I) {
946 using namespace PatternMatch;
947 return I.use_empty() &&
948 match(&I, m_Intrinsic<Intrinsic::invariant_start>());
949 };
950 auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
951 return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
952 SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
953 };
954 if ((IsInvariantStart(I) || isGuard(&I)) &&
955 CurLoop->hasLoopInvariantOperands(&I) &&
956 MustExecuteWithoutWritesBefore(I)) {
957 hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
958 MSSAU, SE, ORE);
959 HoistedInstructions.push_back(&I);
960 Changed = true;
961 continue;
962 }
963
964 if (PHINode *PN = dyn_cast<PHINode>(&I)) {
965 if (CFH.canHoistPHI(PN)) {
966 // Redirect incoming blocks first to ensure that we create hoisted
967 // versions of those blocks before we hoist the phi.
968 for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
969 PN->setIncomingBlock(
970 i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
971 hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
972 MSSAU, SE, ORE);
973 assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
974 Changed = true;
975 continue;
976 }
977 }
978
979 // Remember possibly hoistable branches so we can actually hoist them
980 // later if needed.
981 if (BranchInst *BI = dyn_cast<BranchInst>(&I))
982 CFH.registerPossiblyHoistableBranch(BI);
983 }
984 }
985
986 // If we hoisted instructions to a conditional block they may not dominate
987 // their uses that weren't hoisted (such as phis where some operands are not
988 // loop invariant). If so make them unconditional by moving them to their
989 // immediate dominator. We iterate through the instructions in reverse order
990 // which ensures that when we rehoist an instruction we rehoist its operands,
991 // and also keep track of where in the block we are rehoisting to, to make sure
992 // that we rehoist instructions before the instructions that use them.
993 Instruction *HoistPoint = nullptr;
994 if (ControlFlowHoisting) {
995 for (Instruction *I : reverse(HoistedInstructions)) {
996 if (!llvm::all_of(I->uses(),
997 [&](Use &U) { return DT->dominates(I, U); })) {
998 BasicBlock *Dominator =
999 DT->getNode(I->getParent())->getIDom()->getBlock();
1000 if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
1001 if (HoistPoint)
1002 assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
1003 "New hoist point expected to dominate old hoist point");
1004 HoistPoint = Dominator->getTerminator();
1005 }
1006 LLVM_DEBUG(dbgs() << "LICM rehoisting to "
1007 << HoistPoint->getParent()->getNameOrAsOperand()
1008 << ": " << *I << "\n");
1009 moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
1010 HoistPoint = I;
1011 Changed = true;
1012 }
1013 }
1014 }
1015 if (VerifyMemorySSA)
1016 MSSAU->getMemorySSA()->verifyMemorySSA();
1017
1018 // Now that we've finished hoisting make sure that LI and DT are still
1019 // valid.
1020#ifdef EXPENSIVE_CHECKS
1021 if (Changed) {
1022 assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
1023 "Dominator tree verification failed");
1024 LI->verify(*DT);
1025 }
1026#endif
1027
1028 return Changed;
1029}
1030
1031// Return true if LI is invariant within scope of the loop. LI is invariant if
1032// CurLoop is dominated by an invariant.start representing the same memory
1033// location and size as the memory location LI loads from, and also the
1034// invariant.start has no uses.
1035static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
1036 Loop *CurLoop) {
1037 Value *Addr = LI->getOperand(0);
1038 const DataLayout &DL = LI->getModule()->getDataLayout();
1039 const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType());
1040
1041 // It is not currently possible for clang to generate an invariant.start
1042 // intrinsic with scalable vector types because we don't support thread local
1043 // sizeless types and we don't permit sizeless types in structs or classes.
1044 // Furthermore, even if support is added for this in the future, the intrinsic
1045 // itself is defined to have a size of -1 for variable sized objects. This
1046 // makes it impossible to verify if the intrinsic envelops our region of
1047 // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8>
1048 // types would have a -1 parameter, but the former is clearly double the size
1049 // of the latter.
1050 if (LocSizeInBits.isScalable())
29: Calling 'LinearPolySize::isScalable'
32: Returning from 'LinearPolySize::isScalable'
33: Taking true branch
1051 return false;
34: Returning zero, which participates in a condition later
1052
1053 // if the type is i8 addrspace(x)*, we know this is the type of
1054 // llvm.invariant.start operand
1055 auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
1056 LI->getPointerAddressSpace());
1057 unsigned BitcastsVisited = 0;
1058 // Look through bitcasts until we reach the i8* type (this is the invariant.start
1059 // operand type).
1060 while (Addr->getType() != PtrInt8Ty) {
1061 auto *BC = dyn_cast<BitCastInst>(Addr);
1062 // Avoid traversing a high number of bitcast uses.
1063 if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
1064 return false;
1065 Addr = BC->getOperand(0);
1066 }
1067 // If we've ended up at a global/constant, bail. We shouldn't be looking at
1068 // uselists for non-local Values in a loop pass.
1069 if (isa<Constant>(Addr))
1070 return false;
1071
1072 unsigned UsesVisited = 0;
1073 // Traverse all uses of the load operand value, to see if invariant.start is
1074 // one of the uses, and whether it dominates the load instruction.
1075 for (auto *U : Addr->users()) {
1076 // Avoid traversing a load operand with a high number of users.
1077 if (++UsesVisited > MaxNumUsesTraversed)
1078 return false;
1079 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
1080 // If there are escaping uses of the invariant.start instruction, the load
1081 // may be non-invariant.
1082 if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
1083 !II->use_empty())
1084 continue;
1085 ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0));
1086 // The intrinsic supports having a -1 argument for variable sized objects
1087 // so we should check for that here.
1088 if (InvariantSize->isNegative())
1089 continue;
1090 uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8;
1091 // Confirm the invariant.start location size contains the load operand size
1092 // in bits. Also, the invariant.start should dominate the load, and we
1093 // should not hoist the load out of a loop that contains this dominating
1094 // invariant.start.
1095 if (LocSizeInBits.getFixedSize() <= InvariantSizeInBits &&
1096 DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
1097 return true;
1098 }
1099
1100 return false;
1101}
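// An illustrative sketch (hypothetical IR names, not from this report) of the
// pattern this function recognizes:
//   %a = alloca i32
//   %p = bitcast i32* %a to i8*
//   call {}* @llvm.invariant.start.p0i8(i64 4, i8* %p)
//   ; a loop whose header is dominated by the call may treat i32 loads from
//   ; %a as invariant, since the 4-byte size covers the 4-byte load.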
1102
1103namespace {
1104/// Return true if-and-only-if we know how to (mechanically) both hoist and
1105/// sink a given instruction out of a loop. Does not address legality
1106/// concerns such as aliasing or speculation safety.
1107bool isHoistableAndSinkableInst(Instruction &I) {
1108 // Only these instructions are hoistable/sinkable.
1109 return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
5: Assuming 'I' is a 'LoadInst'
6: Returning the value 1, which participates in a condition later
1110 isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) ||
1111 isa<BinaryOperator>(I) || isa<SelectInst>(I) ||
1112 isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
1113 isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
1114 isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
1115 isa<InsertValueInst>(I) || isa<FreezeInst>(I));
1116}
1117/// Return true if all of the alias sets within this AST are known not to
1118/// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop.
1119bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
1120 const Loop *L) {
1121 if (CurAST) {
1122 for (AliasSet &AS : *CurAST) {
1123 if (!AS.isForwardingAliasSet() && AS.isMod()) {
1124 return false;
1125 }
1126 }
1127 return true;
1128 } else { /*MSSAU*/
1129 for (auto *BB : L->getBlocks())
1130 if (MSSAU->getMemorySSA()->getBlockDefs(BB))
1131 return false;
1132 return true;
1133 }
1134}
1135
1136/// Return true if I is the only Instruction with a MemoryAccess in L.
1137bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
1138 const MemorySSAUpdater *MSSAU) {
1139 for (auto *BB : L->getBlocks())
1140 if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
1141 int NotAPhi = 0;
1142 for (const auto &Acc : *Accs) {
1143 if (isa<MemoryPhi>(&Acc))
1144 continue;
1145 const auto *MUD = cast<MemoryUseOrDef>(&Acc);
1146 if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
1147 return false;
1148 }
1149 }
1150 return true;
1151}
1152}
1153
1154bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
1155 Loop *CurLoop, AliasSetTracker *CurAST,
1156 MemorySSAUpdater *MSSAU,
1157 bool TargetExecutesOncePerLoop,
1158 SinkAndHoistLICMFlags *Flags,
1159 OptimizationRemarkEmitter *ORE) {
1160 assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
1161 "Either AliasSetTracker or MemorySSA should be initialized.");
1: Assuming pointer value is null
2: Assuming the condition is true
3: '?' condition is true
1162
1163 // If we don't understand the instruction, bail early.
1164 if (!isHoistableAndSinkableInst(I))
4: Calling 'isHoistableAndSinkableInst'
7: Returning from 'isHoistableAndSinkableInst'
1165 return false;
1166
1167 MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
8: Taking false branch
8.1: 'MSSAU' is non-null
9: '?' condition is true
10: 'MSSA' initialized here
1168 if (MSSA)
11: Assuming 'MSSA' is null
12: Taking false branch
1169 assert(Flags != nullptr && "Flags cannot be null.");
1170
1171 // Loads have extra constraints we have to verify before we can hoist them.
1172 if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
13: Assuming the object is a 'LoadInst'
13.1: 'LI' is non-null
14: Taking true branch
1173 if (!LI->isUnordered())
15: Calling 'LoadInst::isUnordered'
19: Returning from 'LoadInst::isUnordered'
20: Taking false branch
1174 return false; // Don't sink/hoist volatile or ordered atomic loads!
1175
1176 // Loads from constant memory are always safe to move, even if they end up
1177 // in the same alias set as something that ends up being modified.
1178 if (AA->pointsToConstantMemory(LI->getOperand(0)))
21: Assuming the condition is false
22: Taking false branch
1179 return true;
1180 if (LI->hasMetadata(LLVMContext::MD_invariant_load))
23: Calling 'Instruction::hasMetadata'
26: Returning from 'Instruction::hasMetadata'
1181 return true;
1182
1183 if (LI->isAtomic() && !TargetExecutesOncePerLoop)
27: Assuming the condition is false
1184 return false; // Don't risk duplicating unordered loads
1185
1186 // This checks for an invariant.start dominating the load.
1187 if (isLoadInvariantInLoop(LI, DT, CurLoop))
28: Calling 'isLoadInvariantInLoop'
35: Returning from 'isLoadInvariantInLoop'
36: Taking false branch
1188 return true;
1189
1190 bool Invalidated;
1191 if (CurAST)
36.1: 'CurAST' is null
37: Taking false branch
1192 Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
1193 CurLoop, AA);
1194 else
1195 Invalidated = pointerInvalidatedByLoopWithMSSA(
1196 MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, I, *Flags);
38: Called C++ object pointer is null
1197 // Check loop-invariant address because this may also be a sinkable load
1198 // whose address is not necessarily loop-invariant.
1199 if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1200 ORE->emit([&]() {
1201 return OptimizationRemarkMissed(
1202 DEBUG_TYPE"licm", "LoadWithLoopInvariantAddressInvalidated", LI)
1203 << "failed to move load with loop-invariant address "
1204 "because the loop may invalidate its value";
1205 });
1206
1207 return !Invalidated;
1208 } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
1209 // Don't sink or hoist dbg info; it's legal, but not useful.
1210 if (isa<DbgInfoIntrinsic>(I))
1211 return false;
1212
1213 // Don't sink calls which can throw.
1214 if (CI->mayThrow())
1215 return false;
1216
1217 // Convergent attribute has been used on operations that involve
1218 // inter-thread communication whose results are implicitly affected by the
1219 // enclosing control flow. It is not safe to hoist or sink such operations
1220 // across control flow.
1221 if (CI->isConvergent())
1222 return false;
1223
1224 using namespace PatternMatch;
1225 if (match(CI, m_Intrinsic<Intrinsic::assume>()))
1226 // Assumes don't actually alias anything or throw
1227 return true;
1228
1229 if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>()))
1230 // Widenable conditions don't actually alias anything or throw
1231 return true;
1232
1233 // Handle simple cases by querying alias analysis.
1234 FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
1235 if (Behavior == FMRB_DoesNotAccessMemory)
1236 return true;
1237 if (AAResults::onlyReadsMemory(Behavior)) {
1238 // A readonly argmemonly function only reads from memory pointed to by
1239 // its arguments with arbitrary offsets. If we can prove there are no
1240 // writes to this memory in the loop, we can hoist or sink.
1241 if (AAResults::onlyAccessesArgPointees(Behavior)) {
1242 // TODO: expand to writeable arguments
1243 for (Value *Op : CI->args())
1244 if (Op->getType()->isPointerTy()) {
1245 bool Invalidated;
1246 if (CurAST)
1247 Invalidated = pointerInvalidatedByLoop(
1248 MemoryLocation::getBeforeOrAfter(Op), CurAST, CurLoop, AA);
1249 else
1250 Invalidated = pointerInvalidatedByLoopWithMSSA(
1251 MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, I,
1252 *Flags);
1253 if (Invalidated)
1254 return false;
1255 }
1256 return true;
1257 }
1258
1259 // If this call only reads from memory and there are no writes to memory
1260 // in the loop, we can hoist or sink the call as appropriate.
1261 if (isReadOnly(CurAST, MSSAU, CurLoop))
1262 return true;
1263 }
1264
1265 // FIXME: This should use mod/ref information to see if we can hoist or
1266 // sink the call.
1267
1268 return false;
1269 } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
1270 // Fences alias (most) everything to provide ordering. For the moment,
1271 // just give up if there are any other memory operations in the loop.
1272 if (CurAST) {
1273 auto Begin = CurAST->begin();
1274 assert(Begin != CurAST->end() && "must contain FI");
1275 if (std::next(Begin) != CurAST->end())
1276 // constant memory for instance, TODO: handle better
1277 return false;
1278 auto *UniqueI = Begin->getUniqueInstruction();
1279 if (!UniqueI)
1280 // other memory op, give up
1281 return false;
1282 (void)FI; // suppress unused variable warning
1283 assert(UniqueI == FI && "AS must contain FI");
1284 return true;
1285 } else // MSSAU
1286 return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
1287 } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
1288 if (!SI->isUnordered())
1289 return false; // Don't sink/hoist volatile or ordered atomic store!
1290
1291 // We can only hoist a store that we can prove writes a value which is not
1292 // read or overwritten within the loop. For those cases, we fall back to
1293 // load store promotion instead. TODO: We can extend this to cases where
1294 // there is exactly one write to the location and that write dominates an
1295 // arbitrary number of reads in the loop.
1296 if (CurAST) {
1297 auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));
1298
1299 if (AS.isRef() || !AS.isMustAlias())
1300 // Quick exit test, handled by the full path below as well.
1301 return false;
1302 auto *UniqueI = AS.getUniqueInstruction();
1303 if (!UniqueI)
1304 // other memory op, give up
1305 return false;
1306 assert(UniqueI == SI && "AS must contain SI");
1307 return true;
1308 } else { // MSSAU
1309 if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
1310 return true;
1311 // If there are more accesses than the Promotion cap or no "quota" to
1312 // check clobber, then give up as we're not walking a list that long.
1313 if (Flags->tooManyMemoryAccesses() || Flags->tooManyClobberingCalls())
1314 return false;
1315 // If there are interfering Uses (i.e. their defining access is in the
1316 // loop), or ordered loads (stored as Defs!), don't move this store.
1317 // Could do better here, but this is conservatively correct.
1318 // TODO: Cache set of Uses on the first walk in runOnLoop, update when
1319 // moving accesses. Can also extend to dominating uses.
1320 auto *SIMD = MSSA->getMemoryAccess(SI);
1321 for (auto *BB : CurLoop->getBlocks())
1322 if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
1323 for (const auto &MA : *Accesses)
1324 if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
1325 auto *MD = MU->getDefiningAccess();
1326 if (!MSSA->isLiveOnEntryDef(MD) &&
1327 CurLoop->contains(MD->getBlock()))
1328 return false;
1329 // Disable hoisting past potentially interfering loads. Optimized
1330 // Uses may point to an access outside the loop, as getClobbering
1331 // checks the previous iteration when walking the backedge.
1332 // FIXME: More precise: no Uses that alias SI.
1333 if (!Flags->getIsSink() && !MSSA->dominates(SIMD, MU))
1334 return false;
1335 } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
1336 if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
1337 (void)LI; // Silence warning.
1338 assert(!LI->isUnordered() && "Expected unordered load");
1339 return false;
1340 }
1341 // Any call, while it may not be clobbering SI, may be a use.
1342 if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
1343 // Check if the call may read from the memory location written
1344 // to by SI. Check CI's attributes and arguments; the number of
1345 // such checks performed is limited above by NoOfMemAccTooLarge.
1346 ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI));
1347 if (isModOrRefSet(MRI))
1348 return false;
1349 }
1350 }
1351 }
1352 auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
1353 Flags->incrementClobberingCalls();
1354 // If there are no clobbering Defs in the loop, store is safe to hoist.
1355 return MSSA->isLiveOnEntryDef(Source) ||
1356 !CurLoop->contains(Source->getBlock());
1357 }
1358 }
1359
1360 assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");
1361
1362 // We've established mechanical ability and aliasing; it's up to the caller
1363 // to check fault safety.
1364 return true;
1365}
1366
1367 /// Returns true if a PHINode is trivially replaceable with an
1368/// Instruction.
1369/// This is true when all incoming values are that instruction.
1370/// This pattern occurs most often with LCSSA PHI nodes.
1371///
1372static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
1373 for (const Value *IncValue : PN.incoming_values())
1374 if (IncValue != &I)
1375 return false;
1376
1377 return true;
1378}
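// E.g., a sketch with hypothetical IR names: the LCSSA phi
//   %p = phi double [ %v, %loop.body ], [ %v, %loop.latch ]
// has %v as its only incoming value, so sinking the instruction that
// defines %v lets %p simply be replaced by the sunk clone.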
1379
1380/// Return true if the instruction is free in the loop.
1381static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
1382 const TargetTransformInfo *TTI) {
1383
1384 if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
1385 if (TTI->getUserCost(GEP, TargetTransformInfo::TCK_SizeAndLatency) !=
1386 TargetTransformInfo::TCC_Free)
1387 return false;
1388 // For a GEP, we cannot simply use getUserCost because currently it
1389 // optimistically assumes that a GEP will fold into addressing mode
1390 // regardless of its users.
1391 const BasicBlock *BB = GEP->getParent();
1392 for (const User *U : GEP->users()) {
1393 const Instruction *UI = cast<Instruction>(U);
1394 if (CurLoop->contains(UI) &&
1395 (BB != UI->getParent() ||
1396 (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
1397 return false;
1398 }
1399 return true;
1400 } else
1401 return TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) ==
1402 TargetTransformInfo::TCC_Free;
1403}
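// E.g., a sketch with hypothetical IR names: a GEP such as
//   %gep = getelementptr i32, i32* %base, i64 %i
//   %v = load i32, i32* %gep
// whose only user is a load/store in the same block typically folds into
// the addressing mode and is considered free here.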
1404
1405/// Return true if the only users of this instruction are outside of
1406/// the loop. If this is true, we can sink the instruction to the exit
1407/// blocks of the loop.
1408///
1409/// We also return true if the instruction could be folded away in lowering.
1410/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
1411static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
1412 const LoopSafetyInfo *SafetyInfo,
1413 TargetTransformInfo *TTI, bool &FreeInLoop,
1414 bool LoopNestMode) {
1415 const auto &BlockColors = SafetyInfo->getBlockColors();
1416 bool IsFree = isFreeInLoop(I, CurLoop, TTI);
1417 for (const User *U : I.users()) {
1418 const Instruction *UI = cast<Instruction>(U);
1419 if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
1420 const BasicBlock *BB = PN->getParent();
1421 // We cannot sink uses in catchswitches.
1422 if (isa<CatchSwitchInst>(BB->getTerminator()))
1423 return false;
1424
1425 // We need to sink a callsite to a unique funclet. Avoid sinking if the
1426 // phi use is too muddled.
1427 if (isa<CallInst>(I))
1428 if (!BlockColors.empty() &&
1429 BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
1430 return false;
1431
1432 if (LoopNestMode) {
1433 while (isa<PHINode>(UI) && UI->hasOneUser() &&
1434 UI->getNumOperands() == 1) {
1435 if (!CurLoop->contains(UI))
1436 break;
1437 UI = cast<Instruction>(UI->user_back());
1438 }
1439 }
1440 }
1441
1442 if (CurLoop->contains(UI)) {
1443 if (IsFree) {
1444 FreeInLoop = true;
1445 continue;
1446 }
1447 return false;
1448 }
1449 }
1450 return true;
1451}
1452
1453static Instruction *cloneInstructionInExitBlock(
1454 Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
1455 const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
1456 Instruction *New;
1457 if (auto *CI = dyn_cast<CallInst>(&I)) {
1458 const auto &BlockColors = SafetyInfo->getBlockColors();
1459
1460 // Sinking call-sites need to be handled differently from other
1461 // instructions. The cloned call-site needs a funclet bundle operand
1462 // appropriate for its location in the CFG.
1463 SmallVector<OperandBundleDef, 1> OpBundles;
1464 for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
1465 BundleIdx != BundleEnd; ++BundleIdx) {
1466 OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
1467 if (Bundle.getTagID() == LLVMContext::OB_funclet)
1468 continue;
1469
1470 OpBundles.emplace_back(Bundle);
1471 }
1472
1473 if (!BlockColors.empty()) {
1474 const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
1475 assert(CV.size() == 1 && "non-unique color for exit block!");
1476 BasicBlock *BBColor = CV.front();
1477 Instruction *EHPad = BBColor->getFirstNonPHI();
1478 if (EHPad->isEHPad())
1479 OpBundles.emplace_back("funclet", EHPad);
1480 }
1481
1482 New = CallInst::Create(CI, OpBundles);
1483 } else {
1484 New = I.clone();
1485 }
1486
1487 ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
1488 if (!I.getName().empty())
1489 New->setName(I.getName() + ".le");
1490
1491 if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
1492 // Create a new MemoryAccess and let MemorySSA set its defining access.
1493 MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
1494 New, nullptr, New->getParent(), MemorySSA::Beginning);
1495 if (NewMemAcc) {
1496 if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
1497 MSSAU->insertDef(MemDef, /*RenameUses=*/true);
1498 else {
1499 auto *MemUse = cast<MemoryUse>(NewMemAcc);
1500 MSSAU->insertUse(MemUse, /*RenameUses=*/true);
1501 }
1502 }
1503 }
1504
1505 // Build LCSSA PHI nodes for any in-loop operands (if legal). Note that
1506 // this is particularly cheap because we can rip off the PHI node that we're
1507 // replacing for the number and blocks of the predecessors.
1508 // OPT: If this shows up in a profile, we can instead finish sinking all
1509 // invariant instructions, and then walk their operands to re-establish
1510 // LCSSA. That will eliminate creating PHI nodes just to nuke them when
1511 // sinking bottom-up.
1512 for (Use &Op : New->operands())
1513 if (LI->wouldBeOutOfLoopUseRequiringLCSSA(Op.get(), PN.getParent())) {
1514 auto *OInst = cast<Instruction>(Op.get());
1515 PHINode *OpPN =
1516 PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
1517 OInst->getName() + ".lcssa", &ExitBlock.front());
1518 for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
1519 OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
1520 Op = OpPN;
1521 }
1522 return New;
1523}
1524
1525static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
1526 MemorySSAUpdater *MSSAU) {
1527 if (MSSAU)
1528 MSSAU->removeMemoryAccess(&I);
1529 SafetyInfo.removeInstruction(&I);
1530 I.eraseFromParent();
1531}
1532
1533static void moveInstructionBefore(Instruction &I, Instruction &Dest,
1534 ICFLoopSafetyInfo &SafetyInfo,
1535 MemorySSAUpdater *MSSAU,
1536 ScalarEvolution *SE) {
1537 SafetyInfo.removeInstruction(&I);
1538 SafetyInfo.insertInstructionTo(&I, Dest.getParent());
1539 I.moveBefore(&Dest);
1540 if (MSSAU)
1541 if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
1542 MSSAU->getMemorySSA()->getMemoryAccess(&I)))
1543 MSSAU->moveToPlace(OldMemAcc, Dest.getParent(),
1544 MemorySSA::BeforeTerminator);
1545 if (SE)
1546 SE->forgetValue(&I);
1547}
1548
1549static Instruction *sinkThroughTriviallyReplaceablePHI(
1550 PHINode *TPN, Instruction *I, LoopInfo *LI,
1551 SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
1552 const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
1553 MemorySSAUpdater *MSSAU) {
1554 assert(isTriviallyReplaceablePHI(*TPN, *I) &&
1555 "Expect only trivially replaceable PHI");
1556 BasicBlock *ExitBlock = TPN->getParent();
1557 Instruction *New;
1558 auto It = SunkCopies.find(ExitBlock);
1559 if (It != SunkCopies.end())
1560 New = It->second;
1561 else
1562 New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock(
1563 *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
1564 return New;
1565}
1566
1567static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
1568 BasicBlock *BB = PN->getParent();
1569 if (!BB->canSplitPredecessors())
1570 return false;
1571 // It's not impossible to split EHPad blocks, but if BlockColors already exist
1572 // it requires updating BlockColors for all offspring blocks accordingly. By
1573 // skipping such corner cases, we can make updating BlockColors after splitting
1574 // a predecessor fairly simple.
1575 if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
1576 return false;
1577 for (BasicBlock *BBPred : predecessors(BB)) {
1578 if (isa<IndirectBrInst>(BBPred->getTerminator()) ||
1579 isa<CallBrInst>(BBPred->getTerminator()))
1580 return false;
1581 }
1582 return true;
1583}
1584
1585static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
1586 LoopInfo *LI, const Loop *CurLoop,
1587 LoopSafetyInfo *SafetyInfo,
1588 MemorySSAUpdater *MSSAU) {
1589#ifndef NDEBUG
1590 SmallVector<BasicBlock *, 32> ExitBlocks;
1591 CurLoop->getUniqueExitBlocks(ExitBlocks);
1592 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1593 ExitBlocks.end());
1594#endif
1595 BasicBlock *ExitBB = PN->getParent();
1596 assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");
1597
1598 // Split predecessors of the loop exit so that instructions in the loop are
1599 // exposed to exit blocks through trivially replaceable PHIs while keeping the
1600 // loop in the canonical form where each predecessor of each exit block should
1601 // be contained within the loop. For example, this will convert the loop below
1602 // from
1603 //
1604 // LB1:
1605 // %v1 =
1606 // br %LE, %LB2
1607 // LB2:
1608 // %v2 =
1609 // br %LE, %LB1
1610 // LE:
1611 // %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
1612 //
1613 // to
1614 //
1615 // LB1:
1616 // %v1 =
1617 // br %LE.split, %LB2
1618 // LB2:
1619 // %v2 =
1620 // br %LE.split2, %LB1
1621 // LE.split:
1622 // %p1 = phi [%v1, %LB1] <-- trivially replaceable
1623 // br %LE
1624 // LE.split2:
1625 // %p2 = phi [%v2, %LB2] <-- trivially replaceable
1626 // br %LE
1627 // LE:
1628 // %p = phi [%p1, %LE.split], [%p2, %LE.split2]
1629 //
1630 const auto &BlockColors = SafetyInfo->getBlockColors();
1631 SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
1632 while (!PredBBs.empty()) {
1633 BasicBlock *PredBB = *PredBBs.begin();
1634 assert(CurLoop->contains(PredBB) &&
1635 "Expect all predecessors are in the loop");
1636 if (PN->getBasicBlockIndex(PredBB) >= 0) {
1637 BasicBlock *NewPred = SplitBlockPredecessors(
1638 ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
1639 // Since we do not allow splitting EH-block with BlockColors in
1640 // canSplitPredecessors(), we can simply assign predecessor's color to
1641 // the new block.
1642 if (!BlockColors.empty())
1643 // Grab a reference to the ColorVector to be inserted before getting the
1644 // reference to the vector we are copying because inserting the new
1645 // element in BlockColors might cause the map to be reallocated.
1646 SafetyInfo->copyColors(NewPred, PredBB);
1647 }
1648 PredBBs.remove(PredBB);
1649 }
1650}
1651
1652/// When an instruction is found to only be used outside of the loop, this
1653/// function moves it to the exit blocks and patches up SSA form as needed.
1654/// This method is guaranteed to remove the original instruction from its
1655/// position, and may either delete it or move it to outside of the loop.
1656///
1657static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
1658 BlockFrequencyInfo *BFI, const Loop *CurLoop,
1659 ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
1660 OptimizationRemarkEmitter *ORE) {
1661 bool Changed = false;
1662 LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
1663
1664 // Iterate over users to be ready for actual sinking. Replace uses reached via
1665 // unreachable blocks with undef and make all user PHIs trivially replaceable.
1666 SmallPtrSet<Instruction *, 8> VisitedUsers;
1667 for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
1668 auto *User = cast<Instruction>(*UI);
1669 Use &U = UI.getUse();
1670 ++UI;
1671
1672 if (VisitedUsers.count(User) || CurLoop->contains(User))
1673 continue;
1674
1675 if (!DT->isReachableFromEntry(User->getParent())) {
1676 U = UndefValue::get(I.getType());
1677 Changed = true;
1678 continue;
1679 }
1680
1681 // The user must be a PHI node.
1682 PHINode *PN = cast<PHINode>(User);
1683
1684 // Surprisingly, instructions can be used outside of loops without any
1685 // exits. This can only happen in PHI nodes if the incoming block is
1686 // unreachable.
1687 BasicBlock *BB = PN->getIncomingBlock(U);
1688 if (!DT->isReachableFromEntry(BB)) {
1689 U = UndefValue::get(I.getType());
1690 Changed = true;
1691 continue;
1692 }
1693
1694 VisitedUsers.insert(PN);
1695 if (isTriviallyReplaceablePHI(*PN, I))
1696 continue;
1697
1698 if (!canSplitPredecessors(PN, SafetyInfo))
1699 return Changed;
1700
1701 // Split predecessors of the PHI so that we can make users trivially
1702 // replaceable.
1703 splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);
1704
1705 // Should rebuild the iterators, as they may be invalidated by
1706 // splitPredecessorsOfLoopExit().
1707 UI = I.user_begin();
1708 UE = I.user_end();
1709 }
1710
1711 if (VisitedUsers.empty())
1712 return Changed;
1713
1714 ORE->emit([&]() {
1715 return OptimizationRemark(DEBUG_TYPE"licm", "InstSunk", &I)
1716 << "sinking " << ore::NV("Inst", &I);
1717 });
1718 if (isa<LoadInst>(I))
1719 ++NumMovedLoads;
1720 else if (isa<CallInst>(I))
1721 ++NumMovedCalls;
1722 ++NumSunk;
1723
1724#ifndef NDEBUG
1725 SmallVector<BasicBlock *, 32> ExitBlocks;
1726 CurLoop->getUniqueExitBlocks(ExitBlocks);
1727 SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
1728 ExitBlocks.end());
1729#endif
1730
1731 // Clones of this instruction. Don't create more than one per exit block!
1732 SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;
1733
1734 // If this instruction is only used outside of the loop, then all users are
1735 // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
1736 // the instruction.
1737 // First check if I is worth sinking for all uses. Sink only when it is worthwhile
1738 // across all uses.
1739 SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
1740 for (auto *UI : Users) {
1741 auto *User = cast<Instruction>(UI);
1742
1743 if (CurLoop->contains(User))
1744 continue;
1745
1746 PHINode *PN = cast<PHINode>(User);
1747 assert(ExitBlockSet.count(PN->getParent()) &&
1748 "The LCSSA PHI is not in an exit block!");
1749
1750 // The PHI must be trivially replaceable.
1751 Instruction *New = sinkThroughTriviallyReplaceablePHI(
1752 PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
1753 PN->replaceAllUsesWith(New);
1754 eraseInstruction(*PN, *SafetyInfo, nullptr);
1755 Changed = true;
1756 }
1757 return Changed;
1758}
1759
1760 /// When an instruction is found to use only loop-invariant operands and is
1761 /// safe to hoist, this function is called to do the dirty work.
1762///
1763static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
1764 BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
1765 MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
1766 OptimizationRemarkEmitter *ORE) {
1767 LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getNameOrAsOperand() << ": "
1768 << I << "\n");
1769 ORE->emit([&]() {
1770 return OptimizationRemark(DEBUG_TYPE"licm", "Hoisted", &I) << "hoisting "
1771 << ore::NV("Inst", &I);
1772 });
1773
1774 // Metadata can be dependent on conditions we are hoisting above.
1775 // Conservatively strip all metadata on the instruction unless we were
1776 // guaranteed to execute I if we entered the loop, in which case the metadata
1777 // is valid in the loop preheader.
1778 // Similarly, if I is a call and it is not guaranteed to execute in the loop,
1779 // then moving to the preheader means we should strip attributes on the call
1780 // that can cause UB since we may be hoisting above conditions that allowed
1781 // inferring those attributes. They may not be valid at the preheader.
1782 if ((I.hasMetadataOtherThanDebugLoc() || isa<CallInst>(I)) &&
1783 // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
1784 // time in isGuaranteedToExecute if we don't actually have anything to
1785 // drop. It is a compile time optimization, not required for correctness.
1786 !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
1787 I.dropUndefImplyingAttrsAndUnknownMetadata();
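// E.g. (illustrative sketch, hypothetical IR): hoisting
//   %v = load i32, i32* %p, !range !0
// from a conditionally executed block into the preheader must drop !range,
// since that fact was only established under the branch being hoisted over.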
1788
1789 if (isa<PHINode>(I))
1790 // Move the new node to the end of the phi list in the destination block.
1791 moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
1792 else
1793 // Move the new node to the destination block, before its terminator.
1794 moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);
1795
1796 I.updateLocationAfterHoist();
1797
1798 if (isa<LoadInst>(I))
1799 ++NumMovedLoads;
1800 else if (isa<CallInst>(I))
1801 ++NumMovedCalls;
1802 ++NumHoisted;
1803}
1804
1805/// Only sink or hoist an instruction if it is not a trapping instruction,
1806 /// or if the instruction is known not to trap when moved to the preheader,
1807/// or if it is a trapping instruction and is guaranteed to execute.
1808static bool isSafeToExecuteUnconditionally(
1809 Instruction &Inst, const DominatorTree *DT, const TargetLibraryInfo *TLI,
1810 const Loop *CurLoop, const LoopSafetyInfo *SafetyInfo,
1811 OptimizationRemarkEmitter *ORE, const Instruction *CtxI,
1812 bool AllowSpeculation) {
1813 if (AllowSpeculation && isSafeToSpeculativelyExecute(&Inst, CtxI, DT, TLI))
1814 return true;
1815
1816 bool GuaranteedToExecute =
1817 SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);
1818
1819 if (!GuaranteedToExecute) {
1820 auto *LI = dyn_cast<LoadInst>(&Inst);
1821 if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
1822 ORE->emit([&]() {
1823 return OptimizationRemarkMissed(
1824 DEBUG_TYPE"licm", "LoadWithLoopInvariantAddressCondExecuted", LI)
1825 << "failed to hoist load with loop-invariant address "
1826 "because load is conditionally executed";
1827 });
1828 }
1829
1830 return GuaranteedToExecute;
1831}
1832
1833namespace {
1834class LoopPromoter : public LoadAndStorePromoter {
1835 Value *SomePtr; // Designated pointer to store to.
1836 const SmallSetVector<Value *, 8> &PointerMustAliases;
1837 SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
1838 SmallVectorImpl<Instruction *> &LoopInsertPts;
1839 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
1840 PredIteratorCache &PredCache;
1841 MemorySSAUpdater *MSSAU;
1842 LoopInfo &LI;
1843 DebugLoc DL;
1844 Align Alignment;
1845 bool UnorderedAtomic;
1846 AAMDNodes AATags;
1847 ICFLoopSafetyInfo &SafetyInfo;
1848 bool CanInsertStoresInExitBlocks;
1849
1850 // We're about to add a use of V in a loop exit block. Insert an LCSSA phi
1851 // (if legal) if doing so would add an out-of-loop use to an instruction
1852 // defined in-loop.
1853 Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
1854 if (!LI.wouldBeOutOfLoopUseRequiringLCSSA(V, BB))
1855 return V;
1856
1857 Instruction *I = cast<Instruction>(V);
1858 // We need to create an LCSSA PHI node for the incoming value and
1859 // store that.
1860 PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
1861 I->getName() + ".lcssa", &BB->front());
1862 for (BasicBlock *Pred : PredCache.get(BB))
1863 PN->addIncoming(I, Pred);
1864 return PN;
1865 }
1866
1867public:
1868 LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
1869 const SmallSetVector<Value *, 8> &PMA,
1870 SmallVectorImpl<BasicBlock *> &LEB,
1871 SmallVectorImpl<Instruction *> &LIP,
1872 SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
1873 MemorySSAUpdater *MSSAU, LoopInfo &li, DebugLoc dl,
1874 Align Alignment, bool UnorderedAtomic, const AAMDNodes &AATags,
1875 ICFLoopSafetyInfo &SafetyInfo, bool CanInsertStoresInExitBlocks)
1876 : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
1877 LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP),
1878 PredCache(PIC), MSSAU(MSSAU), LI(li), DL(std::move(dl)),
1879 Alignment(Alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags),
1880 SafetyInfo(SafetyInfo),
1881 CanInsertStoresInExitBlocks(CanInsertStoresInExitBlocks) {}
1882
1883 bool isInstInList(Instruction *I,
1884 const SmallVectorImpl<Instruction *> &) const override {
1885 Value *Ptr;
1886 if (LoadInst *LI = dyn_cast<LoadInst>(I))
1887 Ptr = LI->getOperand(0);
1888 else
1889 Ptr = cast<StoreInst>(I)->getPointerOperand();
1890 return PointerMustAliases.count(Ptr);
1891 }
1892
1893 void insertStoresInLoopExitBlocks() {
1894 // Insert stores in the loop exit blocks. Each exit block gets a
1895 // store of the live-out values that feed them. Since we've already told
1896 // the SSA updater about the defs in the loop and the preheader
1897 // definition, it is all set and we can start using it.
1898 for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
1899 BasicBlock *ExitBlock = LoopExitBlocks[i];
1900 Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
1901 LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
1902 Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
1903 Instruction *InsertPos = LoopInsertPts[i];
1904 StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
1905 if (UnorderedAtomic)
1906 NewSI->setOrdering(AtomicOrdering::Unordered);
1907 NewSI->setAlignment(Alignment);
1908 NewSI->setDebugLoc(DL);
1909 if (AATags)
1910 NewSI->setAAMetadata(AATags);
1911
1912 MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
1913 MemoryAccess *NewMemAcc;
1914 if (!MSSAInsertPoint) {
1915 NewMemAcc = MSSAU->createMemoryAccessInBB(
1916 NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
1917 } else {
1918 NewMemAcc =
1919 MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
1920 }
1921 MSSAInsertPts[i] = NewMemAcc;
1922 MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
1923 // FIXME: true for safety, false may still be correct.
1924 }
1925 }
1926
1927 void doExtraRewritesBeforeFinalDeletion() override {
1928 if (CanInsertStoresInExitBlocks)
1929 insertStoresInLoopExitBlocks();
1930 }
1931
1932 void instructionDeleted(Instruction *I) const override {
1933 SafetyInfo.removeInstruction(I);
1934 MSSAU->removeMemoryAccess(I);
1935 }
1936
1937 bool shouldDelete(Instruction *I) const override {
1938 if (isa<StoreInst>(I))
1939 return CanInsertStoresInExitBlocks;
1940 return true;
1941 }
1942};
1943
1944bool isNotCapturedBeforeOrInLoop(const Value *V, const Loop *L,
1945 DominatorTree *DT) {
1946 // We can perform the captured-before check against any instruction in the
1947 // loop header, as the loop header is reachable from any instruction inside
1948 // the loop.
1949 // TODO: ReturnCaptures=true shouldn't be necessary here.
1950 return !PointerMayBeCapturedBefore(V, /* ReturnCaptures */ true,
1951 /* StoreCaptures */ true,
1952 L->getHeader()->getTerminator(), DT);
1953}
1954
1955/// Return true if we can prove that a caller cannot inspect the object if an
1956/// unwind occurs inside the loop.
1957bool isNotVisibleOnUnwindInLoop(const Value *Object, const Loop *L,
1958 DominatorTree *DT) {
1959 bool RequiresNoCaptureBeforeUnwind;
1960 if (!isNotVisibleOnUnwind(Object, RequiresNoCaptureBeforeUnwind))
1961 return false;
1962
1963 return !RequiresNoCaptureBeforeUnwind ||
1964 isNotCapturedBeforeOrInLoop(Object, L, DT);
1965}
1966
1967} // namespace
1968
1969/// Try to promote memory values to scalars by sinking stores out of the
1970/// loop and moving loads to before the loop. We do this by looping over
1971/// the stores in the loop, looking for stores to Must pointers which are
1972/// loop invariant.
1973///
1974bool llvm::promoteLoopAccessesToScalars(
1975 const SmallSetVector<Value *, 8> &PointerMustAliases,
1976 SmallVectorImpl<BasicBlock *> &ExitBlocks,
1977 SmallVectorImpl<Instruction *> &InsertPts,
1978 SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
1979 LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
1980 Loop *CurLoop, MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo,
1981 OptimizationRemarkEmitter *ORE, bool AllowSpeculation) {
1982 // Verify inputs.
1983 assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
1984 SafetyInfo != nullptr &&
1985 "Unexpected Input to promoteLoopAccessesToScalars");
1986
1987 Value *SomePtr = *PointerMustAliases.begin();
1988 BasicBlock *Preheader = CurLoop->getLoopPreheader();
1989
1990 // It is not safe to promote a load/store from the loop if the load/store is
1991 // conditional. For example, turning:
1992 //
1993 // for () { if (c) *P += 1; }
1994 //
1995 // into:
1996 //
1997 // tmp = *P; for () { if (c) tmp +=1; } *P = tmp;
1998 //
1999 // is not safe, because *P may only be valid to access if 'c' is true.
2000 //
2001 // The safety property divides into two parts:
2002 // p1) The memory may not be dereferenceable on entry to the loop. In this
2003 // case, we can't insert the required load in the preheader.
2004 // p2) The memory model does not allow us to insert a store along any dynamic
2005 // path which did not originally have one.
2006 //
2007 // If at least one store is guaranteed to execute, both properties are
2008 // satisfied, and promotion is legal.
2009 //
2010 // This, however, is not a necessary condition. Even if no store/load is
2011 // guaranteed to execute, we can still establish these properties.
2012 // We can establish (p1) by proving that hoisting the load into the preheader
2013 // is safe (i.e. proving dereferenceability on all paths through the loop). We
2014 // can use any access within the alias set to prove dereferenceability,
2015 // since they're all must alias.
2016 //
2017 // There are two ways to establish (p2):
2018 // a) Prove the location is thread-local. In this case the memory model
2019 // requirement does not apply, and stores are safe to insert.
2020 // b) Prove a store dominates every exit block. In this case, if an exit
2021 // block is reached, the original dynamic path would have taken us through
2022 // the store, so inserting a store into the exit block is safe. Note that this
2023 // is different from the store being guaranteed to execute. For instance,
2024 // if an exception is thrown on the first iteration of the loop, the original
2025 // store is never executed, but the exit blocks are not executed either.
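// E.g., a sketch for (b), in C-like pseudocode:
//   for (;;) { *P = v; if (c) break; }
// Here the store dominates the sole exit block, so writing *P again in the
// exit block adds a store only along paths that already stored to P.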
2026
2027 bool DereferenceableInPH = false;
2028 bool SafeToInsertStore = false;
2029 bool FoundLoadToPromote = false;
2030
2031 SmallVector<Instruction *, 64> LoopUses;
2032
2033 // We start with an alignment of one and try to find instructions that allow
2034 // us to prove better alignment.
2035 Align Alignment;
2036 // Keep track of which types of access we see
2037 bool SawUnorderedAtomic = false;
2038 bool SawNotAtomic = false;
2039 AAMDNodes AATags;
2040
2041 const DataLayout &MDL = Preheader->getModule()->getDataLayout();
2042
2043 bool IsKnownThreadLocalObject = false;
2044 if (SafetyInfo->anyBlockMayThrow()) {
2045 // If a loop can throw, we have to insert a store along each unwind edge.
2046 // That said, we can't actually make the unwind edge explicit. Therefore,
2047 // we have to prove that the store is dead along the unwind edge. We do
2048 // this by proving that the caller can't have a reference to the object
2049 // after return and thus can't possibly load from the object.
2050 Value *Object = getUnderlyingObject(SomePtr);
2051 if (!isNotVisibleOnUnwindInLoop(Object, CurLoop, DT))
2052 return false;
2053 // Subtlety: Allocas aren't visible to callers, but *are* potentially
2054 // visible to other threads if captured and used during their lifetimes.
2055 IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
2056 }
2057
2058 // Check that all accesses to pointers in the alias set use the same type.
2059 // We cannot (yet) promote a memory location that is loaded and stored in
2060 // different sizes. While we are at it, collect alignment and AA info.
2061 Type *AccessTy = nullptr;
2062 for (Value *ASIV : PointerMustAliases) {
2063 for (Use &U : ASIV->uses()) {
2064 // Ignore instructions that are outside the loop.
2065 Instruction *UI = dyn_cast<Instruction>(U.getUser());
2066 if (!UI || !CurLoop->contains(UI))
2067 continue;
2068
2069 // If there is a non-load/store instruction in the loop, we can't promote
2070 // it.
2071 if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
2072 if (!Load->isUnordered())
2073 return false;
2074
2075 SawUnorderedAtomic |= Load->isAtomic();
2076 SawNotAtomic |= !Load->isAtomic();
2077 FoundLoadToPromote = true;
2078
2079 Align InstAlignment = Load->getAlign();
2080
2081 // Note that proving a load safe to speculate requires proving
2082 // sufficient alignment at the target location. Proving it guaranteed
2083 // to execute does as well. Thus we can increase our guaranteed
2084 // alignment as well.
2085 if (!DereferenceableInPH || (InstAlignment > Alignment))
2086 if (isSafeToExecuteUnconditionally(
2087 *Load, DT, TLI, CurLoop, SafetyInfo, ORE,
2088 Preheader->getTerminator(), AllowSpeculation)) {
2089 DereferenceableInPH = true;
2090 Alignment = std::max(Alignment, InstAlignment);
2091 }
2092 } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
2093 // Stores *of* the pointer are not interesting, only stores *to* the
2094 // pointer.
2095 if (U.getOperandNo() != StoreInst::getPointerOperandIndex())
2096 continue;
2097 if (!Store->isUnordered())
2098 return false;
2099
2100 SawUnorderedAtomic |= Store->isAtomic();
2101 SawNotAtomic |= !Store->isAtomic();
2102
2103 // If the store is guaranteed to execute, both properties are satisfied.
2104 // We may want to check if a store is guaranteed to execute even if we
2105 // already know that promotion is safe, since it may have higher
2106 // alignment than any other guaranteed stores, in which case we can
2107 // raise the alignment on the promoted store.
2108 Align InstAlignment = Store->getAlign();
2109
2110 if (!DereferenceableInPH || !SafeToInsertStore ||
2111 (InstAlignment > Alignment)) {
2112 if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) {
2113 DereferenceableInPH = true;
2114 SafeToInsertStore = true;
2115 Alignment = std::max(Alignment, InstAlignment);
2116 }
2117 }
2118
2119 // If a store dominates all exit blocks, it is safe to sink.
2120 // As explained above, if an exit block was executed, a dominating
2121 // store must have been executed at least once, so we are not
2122 // introducing stores on paths that did not have them.
2123 // Note that this only looks at explicit exit blocks. If we ever
2124 // start sinking stores into unwind edges (see above), this will break.
2125 if (!SafeToInsertStore)
2126 SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
2127 return DT->dominates(Store->getParent(), Exit);
2128 });
2129
2130 // If the store is not guaranteed to execute, we may still get
2131 // deref info through it.
2132 if (!DereferenceableInPH) {
2133 DereferenceableInPH = isDereferenceableAndAlignedPointer(
2134 Store->getPointerOperand(), Store->getValueOperand()->getType(),
2135 Store->getAlign(), MDL, Preheader->getTerminator(), DT, TLI);
2136 }
2137 } else
2138 return false; // Not a load or store.
2139
2140 if (!AccessTy)
2141 AccessTy = getLoadStoreType(UI);
2142 else if (AccessTy != getLoadStoreType(UI))
2143 return false;
2144
2145 // Merge the AA tags.
2146 if (LoopUses.empty()) {
2147 // On the first load/store, just take its AA tags.
2148 AATags = UI->getAAMetadata();
2149 } else if (AATags) {
2150 AATags = AATags.merge(UI->getAAMetadata());
2151 }
2152
2153 LoopUses.push_back(UI);
2154 }
2155 }
2156
2157 // If we found both an unordered atomic instruction and a non-atomic memory
2158 // access, bail. We can't blindly promote non-atomic to atomic since we
2159 // might not be able to lower the result. We can't downgrade since that
2160 // would violate the memory model. Also, align 0 is an error for atomics.
2161 if (SawUnorderedAtomic && SawNotAtomic)
2162 return false;
2163
2164 // If we're inserting an atomic load in the preheader, we must be able to
2165 // lower it. We're only guaranteed to be able to lower naturally aligned
2166 // atomics.
2167 if (SawUnorderedAtomic && Alignment < MDL.getTypeStoreSize(AccessTy))
2168 return false;
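
Taken together, the two bail-outs above say: promotion gives up if the loop mixes atomic and non-atomic accesses to the location, or if an atomic access would not end up naturally aligned. A minimal standalone sketch of that decision (plain C++; the Access record is a hypothetical stand-in for the loads/stores walked above, not an LLVM type):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    struct Access {          // hypothetical stand-in for a load/store in the loop
      bool IsAtomic;         // unordered atomic access?
      uint64_t Align;        // guaranteed alignment in bytes
    };

    // Mirrors the two checks above: no mixing of atomicities, and atomics must
    // be naturally aligned (alignment >= store size of the access type).
    bool atomicityAllowsPromotion(const std::vector<Access> &Accesses,
                                  uint64_t TypeStoreSize) {
      bool SawUnorderedAtomic = false, SawNotAtomic = false;
      uint64_t Alignment = 1;
      for (const Access &A : Accesses) {
        SawUnorderedAtomic |= A.IsAtomic;
        SawNotAtomic |= !A.IsAtomic;
        Alignment = std::max(Alignment, A.Align);
      }
      if (SawUnorderedAtomic && SawNotAtomic)
        return false;  // can't upgrade non-atomic or downgrade atomic accesses
      if (SawUnorderedAtomic && Alignment < TypeStoreSize)
        return false;  // only naturally aligned atomics are guaranteed lowerable
      return true;
    }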
2169
2170 // If we couldn't prove we can hoist the load, bail.
2171 if (!DereferenceableInPH)
2172 return false;
2173
2174 // We know we can hoist the load, but don't have a guaranteed store.
2175 // Check whether the location is thread-local. If it is, then we can insert
2176 // stores along paths which originally didn't have them without violating the
2177 // memory model.
2178 if (!SafeToInsertStore) {
2179 if (IsKnownThreadLocalObject)
2180 SafeToInsertStore = true;
2181 else {
2182 Value *Object = getUnderlyingObject(SomePtr);
2183 SafeToInsertStore =
2184 (isNoAliasCall(Object) || isa<AllocaInst>(Object)) &&
2185 isNotCapturedBeforeOrInLoop(Object, CurLoop, DT);
2186 }
2187 }
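
The fallback above leans on thread-locality: inserting a store on a path that originally had none is only unobservable when no other thread can see the location. A rough illustration of the hazard being avoided (plain C++, not LLVM API):

    int G;  // a potentially shared location

    void maybeWrite(bool P) {
      for (int i = 0; i < 8; ++i)
        if (P)
          G = i;  // the original program stores only when P is true
    }
    // Promoting G to a register and unconditionally writing it back after the
    // loop would store to G even when P is false, introducing a write (and a
    // potential data race with another thread) the original program never had.
    // If G were instead a non-escaping alloca or a noalias call result, no
    // other thread could observe the extra store -- exactly the check above.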
2188
2189 // If we've still failed to prove we can sink the store, hoist the load
2190 // only, if possible.
2191 if (!SafeToInsertStore && !FoundLoadToPromote)
2192 // If we cannot hoist the load either, give up.
2193 return false;
2194
2195 // Let's do the promotion!
2196 if (SafeToInsertStore)
2197 LLVM_DEBUG(dbgs() << "LICM: Promoting load/store of the value: " << *SomePtr
2198 << '\n');
2199 else
2200 LLVM_DEBUG(dbgs() << "LICM: Promoting load of the value: " << *SomePtr
2201 << '\n');
2202
2203 ORE->emit([&]() {
2204 return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
2205 LoopUses[0])
2206 << "Moving accesses to memory location out of the loop";
2207 });
2208 ++NumPromoted;
2209
2210 // Look at all the loop uses, and try to merge their locations.
2211 std::vector<const DILocation *> LoopUsesLocs;
2212 for (auto U : LoopUses)
2213 LoopUsesLocs.push_back(U->getDebugLoc().get());
2214 auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs));
2215
2216 // We use the SSAUpdater interface to insert phi nodes as required.
2217 SmallVector<PHINode *, 16> NewPHIs;
2218 SSAUpdater SSA(&NewPHIs);
2219 LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
2220 InsertPts, MSSAInsertPts, PIC, MSSAU, *LI, DL,
2221 Alignment, SawUnorderedAtomic, AATags, *SafetyInfo,
2222 SafeToInsertStore);
2223
2224 // Set up the preheader to have a definition of the value. It is the live-out
2225 // value from the preheader that the uses in the loop will use.
2226 LoadInst *PreheaderLoad = new LoadInst(
2227 AccessTy, SomePtr, SomePtr->getName() + ".promoted",
2228 Preheader->getTerminator());
2229 if (SawUnorderedAtomic)
2230 PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
2231 PreheaderLoad->setAlignment(Alignment);
2232 PreheaderLoad->setDebugLoc(DebugLoc());
2233 if (AATags)
2234 PreheaderLoad->setAAMetadata(AATags);
2235 SSA.AddAvailableValue(Preheader, PreheaderLoad);
2236
2237 MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB(
2238 PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
2239 MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
2240 MSSAU->insertUse(NewMemUse, /*RenameUses=*/true);
2241
2242 if (VerifyMemorySSA)
2243 MSSAU->getMemorySSA()->verifyMemorySSA();
2244 // Rewrite all the loads in the loop and remember all the definitions from
2245 // stores in the loop.
2246 Promoter.run(LoopUses);
2247
2248 if (VerifyMemorySSA)
2249 MSSAU->getMemorySSA()->verifyMemorySSA();
2250 // If the SSAUpdater didn't use the load in the preheader, just zap it now.
2251 if (PreheaderLoad->use_empty())
2252 eraseInstruction(*PreheaderLoad, *SafetyInfo, MSSAU);
2253
2254 return true;
2255}
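
At the source level, the overall rewrite this function performs looks roughly like the following (a hedged C++ illustration of the shape, not the pass's actual output):

    // Before: every iteration loads and stores through the invariant pointer P.
    void beforePromotion(int *P, int N) {
      for (int i = 0; i < N; ++i)
        *P += i;
    }

    // After: one load in the preheader, register updates in the loop, and one
    // store on the exit edge -- the PreheaderLoad / SSAUpdater / LoopPromoter
    // steps above.
    void afterPromotion(int *P, int N) {
      int Promoted = *P;      // hoisted preheader load
      for (int i = 0; i < N; ++i)
        Promoted += i;        // the loop now touches only a register
      *P = Promoted;          // store materialized at the loop exit
    }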
2256
2257static void foreachMemoryAccess(MemorySSA *MSSA, Loop *L,
2258 function_ref<void(Instruction *)> Fn) {
2259 for (const BasicBlock *BB : L->blocks())
2260 if (const auto *Accesses = MSSA->getBlockAccesses(BB))
2261 for (const auto &Access : *Accesses)
2262 if (const auto *MUD = dyn_cast<MemoryUseOrDef>(&Access))
2263 Fn(MUD->getMemoryInst());
2264}
2265
2266static SmallVector<SmallSetVector<Value *, 8>, 0>
2267collectPromotionCandidates(MemorySSA *MSSA, AliasAnalysis *AA, Loop *L) {
2268 AliasSetTracker AST(*AA);
2269
2270 auto IsPotentiallyPromotable = [L](const Instruction *I) {
2271 if (const auto *SI = dyn_cast<StoreInst>(I))
2272 return L->isLoopInvariant(SI->getPointerOperand());
2273 if (const auto *LI = dyn_cast<LoadInst>(I))
2274 return L->isLoopInvariant(LI->getPointerOperand());
2275 return false;
2276 };
2277
2278 // Populate AST with potentially promotable accesses.
2279 SmallPtrSet<Value *, 16> AttemptingPromotion;
2280 foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2281 if (IsPotentiallyPromotable(I)) {
2282 AttemptingPromotion.insert(I);
2283 AST.add(I);
2284 }
2285 });
2286
2287 // We're only interested in must-alias sets that contain a mod.
2288 SmallVector<const AliasSet *, 8> Sets;
2289 for (AliasSet &AS : AST)
2290 if (!AS.isForwardingAliasSet() && AS.isMod() && AS.isMustAlias())
2291 Sets.push_back(&AS);
2292
2293 if (Sets.empty())
2294 return {}; // Nothing to promote...
2295
2296 // Discard any sets for which there is an aliasing non-promotable access.
2297 foreachMemoryAccess(MSSA, L, [&](Instruction *I) {
2298 if (AttemptingPromotion.contains(I))
2299 return;
2300
2301 llvm::erase_if(Sets, [&](const AliasSet *AS) {
2302 return AS->aliasesUnknownInst(I, *AA);
2303 });
2304 });
2305
2306 SmallVector<SmallSetVector<Value *, 8>, 0> Result;
2307 for (const AliasSet *Set : Sets) {
2308 SmallSetVector<Value *, 8> PointerMustAliases;
2309 for (const auto &ASI : *Set)
2310 PointerMustAliases.insert(ASI.getValue());
2311 Result.push_back(std::move(PointerMustAliases));
2312 }
2313
2314 return Result;
2315}
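
collectPromotionCandidates thus keeps only must-alias sets that contain a store and are not touched by any aliasing non-promotable access. A simplified standalone analogue, with pointer identity standing in for LLVM's must-alias query (plain C++; MemOp is a hypothetical record):

    #include <map>
    #include <set>
    #include <vector>

    struct MemOp {
      const void *Ptr;          // the accessed location
      bool IsPlainLoadOrStore;  // promotable candidate?
      bool IsStore;             // does it modify the location?
    };

    // Group candidate ops by location; drop any group also touched by a
    // non-promotable op. Pointer equality stands in for must-alias here.
    std::vector<std::set<const void *>>
    collectCandidates(const std::vector<MemOp> &Ops) {
      std::map<const void *, bool> HasMod;  // location -> saw a store?
      std::set<const void *> Poisoned;      // aliased by a non-candidate op
      for (const MemOp &Op : Ops) {
        if (!Op.IsPlainLoadOrStore) { Poisoned.insert(Op.Ptr); continue; }
        HasMod[Op.Ptr] |= Op.IsStore;
      }
      std::vector<std::set<const void *>> Result;
      for (auto &[Ptr, Mod] : HasMod)
        if (Mod && !Poisoned.count(Ptr))    // only mod sets, unpoisoned
          Result.push_back({Ptr});
      return Result;
    }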
2316
2317static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
2318 AliasSetTracker *CurAST, Loop *CurLoop,
2319 AAResults *AA) {
2320 return CurAST->getAliasSetFor(MemLoc).isMod();
2321}
2322
2323bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
2324 Loop *CurLoop, Instruction &I,
2325 SinkAndHoistLICMFlags &Flags) {
2326 // For hoisting, use the walker to determine safety
2327 if (!Flags.getIsSink()) {
2328 MemoryAccess *Source;
2329 // See declaration of SetLicmMssaOptCap for usage details.
2330 if (Flags.tooManyClobberingCalls())
2331 Source = MU->getDefiningAccess();
2332 else {
2333 Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
2334 Flags.incrementClobberingCalls();
2335 }
2336 return !MSSA->isLiveOnEntryDef(Source) &&
2337 CurLoop->contains(Source->getBlock());
2338 }
2339
2340 // For sinking, we'd need to check all Defs below this use. The getClobbering
2341 // call will look on the backedge of the loop, but will check aliasing with
2342 // the instructions on the previous iteration.
2343 // For example:
2344 // for (i ... )
2345 // load a[i] ( Use (LoE)
2346 // store a[i] ( 1 = Def (2), with 2 = Phi for the loop.
2347 // i++;
2348 // The load sees no clobbering inside the loop, as the backedge alias check
2349 // does phi translation, and will check aliasing against store a[i-1].
2350 // However sinking the load outside the loop, below the store is incorrect.
2351
2352 // For now, only sink if all Defs in the loop precede the use and are in
2353 // the same block.
2354 // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
2355 // needs PostDominatorTreeAnalysis.
2356 // FIXME: More precise: no Defs that alias this Use.
2357 if (Flags.tooManyMemoryAccesses())
2358 return true;
2359 for (auto *BB : CurLoop->getBlocks())
2360 if (pointerInvalidatedByBlockWithMSSA(*BB, *MSSA, *MU))
2361 return true;
2362 // When sinking, the source block may not be part of the loop so check it.
2363 if (!CurLoop->contains(&I))
2364 return pointerInvalidatedByBlockWithMSSA(*I.getParent(), *MSSA, *MU);
2365
2366 return false;
2367}
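
The a[i] scenario in the comment above is worth making concrete. Because the clobber walker phi-translates across the backedge, the load is compared against the previous iteration's store a[i-1], which does not alias, so no clobber is reported inside the loop; moving the load below the loop is still a miscompile (plain C++ rendering):

    // Use before Def in each iteration: load a[i], then store a[i].
    int lastThenClear(int *a, int n) {
      int last = 0;
      for (int i = 0; i < n; ++i) {
        last = a[i];  // Use: sees no clobber in the loop (a[i-1] != a[i])
        a[i] = 0;     // Def: follows the Use in the same iteration
      }
      return last;    // a load sunk here would read 0, not the original a[n-1]
    }
    // Hence the conservative rule above: only sink when every Def in the loop
    // precedes the Use in the same block.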
2368
2369bool pointerInvalidatedByBlockWithMSSA(BasicBlock &BB, MemorySSA &MSSA,
2370 MemoryUse &MU) {
2371 if (const auto *Accesses = MSSA.getBlockDefs(&BB))
2372 for (const auto &MA : *Accesses)
2373 if (const auto *MD = dyn_cast<MemoryDef>(&MA))
2374 if (MU.getBlock() != MD->getBlock() || !MSSA.locallyDominates(MD, &MU))
2375 return true;
2376 return false;
2377}
2378
2379/// Little predicate that returns true if the specified basic block is in
2380/// a subloop of the current one, not the current one itself.
2381///
2382static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
2383 assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
2384 return LI->getLoopFor(BB) != CurLoop;
2385}

/build/llvm-toolchain-snapshot-15~++20220407100720+1c9415806ba6/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/CFG.h"
28#include "llvm/IR/Constant.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/InstrTypes.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/OperandTraits.h"
33#include "llvm/IR/Use.h"
34#include "llvm/IR/User.h"
35#include "llvm/Support/AtomicOrdering.h"
36#include "llvm/Support/ErrorHandling.h"
37#include <cassert>
38#include <cstddef>
39#include <cstdint>
40#include <iterator>
41
42namespace llvm {
43
44class APFloat;
45class APInt;
46class BasicBlock;
47class ConstantInt;
48class DataLayout;
49class StringRef;
50class Type;
51class Value;
52
53//===----------------------------------------------------------------------===//
54// AllocaInst Class
55//===----------------------------------------------------------------------===//
56
57/// an instruction to allocate memory on the stack
58class AllocaInst : public UnaryInstruction {
59 Type *AllocatedType;
60
61 using AlignmentField = AlignmentBitfieldElementT<0>;
62 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
63 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
64 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
65 SwiftErrorField>(),
66 "Bitfields must be contiguous");
67
68protected:
69 // Note: Instruction needs to be a friend here to call cloneImpl.
70 friend class Instruction;
71
72 AllocaInst *cloneImpl() const;
73
74public:
75 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
76 const Twine &Name, Instruction *InsertBefore);
77 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
78 const Twine &Name, BasicBlock *InsertAtEnd);
79
80 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
81 Instruction *InsertBefore);
82 AllocaInst(Type *Ty, unsigned AddrSpace,
83 const Twine &Name, BasicBlock *InsertAtEnd);
84
85 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
86 const Twine &Name = "", Instruction *InsertBefore = nullptr);
87 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
88 const Twine &Name, BasicBlock *InsertAtEnd);
89
90 /// Return true if there is an allocation size parameter to the allocation
91 /// instruction that is not 1.
92 bool isArrayAllocation() const;
93
94 /// Get the number of elements allocated. For a simple allocation of a single
95 /// element, this will return a constant 1 value.
96 const Value *getArraySize() const { return getOperand(0); }
97 Value *getArraySize() { return getOperand(0); }
98
99 /// Overload to return most specific pointer type.
100 PointerType *getType() const {
101 return cast<PointerType>(Instruction::getType());
102 }
103
104 /// Return the address space for the allocation.
105 unsigned getAddressSpace() const {
106 return getType()->getAddressSpace();
107 }
108
109 /// Get allocation size in bits. Returns None if size can't be determined,
110 /// e.g. in case of a VLA.
111 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
112
113 /// Return the type that is being allocated by the instruction.
114 Type *getAllocatedType() const { return AllocatedType; }
115 /// for use only in special circumstances that need to generically
116 /// transform a whole instruction (eg: IR linking and vectorization).
117 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
118
119 /// Return the alignment of the memory that is being allocated by the
120 /// instruction.
121 Align getAlign() const {
122 return Align(1ULL << getSubclassData<AlignmentField>());
123 }
124
125 void setAlignment(Align Align) {
126 setSubclassData<AlignmentField>(Log2(Align));
127 }
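
Like the other memory instructions in this header, AllocaInst packs its alignment into a few subclass-data bits by storing log2 of the value, which is why getAlign() reconstructs it with a shift. A standalone round-trip of that encoding (plain C++; encodeAlign/decodeAlign are illustrative names, not LLVM API):

    #include <cassert>
    #include <cstdint>

    // Alignments are powers of two, so a 6-bit log2 field covers 1 byte up
    // to 2^63 -- the same trick as AlignmentBitfieldElementT above.
    uint8_t encodeAlign(uint64_t Align) {
      assert(Align && (Align & (Align - 1)) == 0 && "must be a power of two");
      uint8_t Log2 = 0;
      while ((1ULL << Log2) < Align)
        ++Log2;
      return Log2;                  // what setAlignment() stores
    }

    uint64_t decodeAlign(uint8_t Log2) {
      return 1ULL << Log2;          // what getAlign() reconstructs
    }

    int main() {
      for (uint64_t A : {1ULL, 2ULL, 16ULL, 4096ULL})
        assert(decodeAlign(encodeAlign(A)) == A);
    }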
128
129 // FIXME: Remove this once the transition to Align is over.
130 uint64_t getAlignment() const { return getAlign().value(); }
131
132 /// Return true if this alloca is in the entry block of the function and is a
133 /// constant size. If so, the code generator will fold it into the
134 /// prolog/epilog code, so it is basically free.
135 bool isStaticAlloca() const;
136
137 /// Return true if this alloca is used as an inalloca argument to a call. Such
138 /// allocas are never considered static even if they are in the entry block.
139 bool isUsedWithInAlloca() const {
140 return getSubclassData<UsedWithInAllocaField>();
141 }
142
143 /// Specify whether this alloca is used to represent the arguments to a call.
144 void setUsedWithInAlloca(bool V) {
145 setSubclassData<UsedWithInAllocaField>(V);
146 }
147
148 /// Return true if this alloca is used as a swifterror argument to a call.
149 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
150 /// Specify whether this alloca is used to represent a swifterror.
151 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
152
153 // Methods for support type inquiry through isa, cast, and dyn_cast:
154 static bool classof(const Instruction *I) {
155 return (I->getOpcode() == Instruction::Alloca);
156 }
157 static bool classof(const Value *V) {
158 return isa<Instruction>(V) && classof(cast<Instruction>(V));
159 }
160
161private:
162 // Shadow Instruction::setInstructionSubclassData with a private forwarding
163 // method so that subclasses cannot accidentally use it.
164 template <typename Bitfield>
165 void setSubclassData(typename Bitfield::Type Value) {
166 Instruction::setSubclassData<Bitfield>(Value);
167 }
168};
169
170//===----------------------------------------------------------------------===//
171// LoadInst Class
172//===----------------------------------------------------------------------===//
173
174/// An instruction for reading from memory. This uses the SubclassData field in
175/// Value to store whether or not the load is volatile.
176class LoadInst : public UnaryInstruction {
177 using VolatileField = BoolBitfieldElementT<0>;
178 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
179 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
180 static_assert(
181 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
182 "Bitfields must be contiguous");
183
184 void AssertOK();
185
186protected:
187 // Note: Instruction needs to be a friend here to call cloneImpl.
188 friend class Instruction;
189
190 LoadInst *cloneImpl() const;
191
192public:
193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
194 Instruction *InsertBefore);
195 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
196 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
197 Instruction *InsertBefore);
198 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
199 BasicBlock *InsertAtEnd);
200 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
201 Align Align, Instruction *InsertBefore = nullptr);
202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
203 Align Align, BasicBlock *InsertAtEnd);
204 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
205 Align Align, AtomicOrdering Order,
206 SyncScope::ID SSID = SyncScope::System,
207 Instruction *InsertBefore = nullptr);
208 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
209 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
210 BasicBlock *InsertAtEnd);
211
212 /// Return true if this is a load from a volatile memory location.
213 bool isVolatile() const { return getSubclassData<VolatileField>(); }
214
215 /// Specify whether this is a volatile load or not.
216 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
217
218 /// Return the alignment of the access that is being performed.
219 /// FIXME: Remove this function once the transition to Align is over.
220 /// Use getAlign() instead.
221 uint64_t getAlignment() const { return getAlign().value(); }
222
223 /// Return the alignment of the access that is being performed.
224 Align getAlign() const {
225 return Align(1ULL << (getSubclassData<AlignmentField>()));
226 }
227
228 void setAlignment(Align Align) {
229 setSubclassData<AlignmentField>(Log2(Align));
230 }
231
232 /// Returns the ordering constraint of this load instruction.
233 AtomicOrdering getOrdering() const {
234 return getSubclassData<OrderingField>();
235 }
236 /// Sets the ordering constraint of this load instruction. May not be Release
237 /// or AcquireRelease.
238 void setOrdering(AtomicOrdering Ordering) {
239 setSubclassData<OrderingField>(Ordering);
240 }
241
242 /// Returns the synchronization scope ID of this load instruction.
243 SyncScope::ID getSyncScopeID() const {
244 return SSID;
245 }
246
247 /// Sets the synchronization scope ID of this load instruction.
248 void setSyncScopeID(SyncScope::ID SSID) {
249 this->SSID = SSID;
250 }
251
252 /// Sets the ordering constraint and the synchronization scope ID of this load
253 /// instruction.
254 void setAtomic(AtomicOrdering Ordering,
255 SyncScope::ID SSID = SyncScope::System) {
256 setOrdering(Ordering);
257 setSyncScopeID(SSID);
258 }
259
260 bool isSimple() const { return !isAtomic() && !isVolatile(); }
261
262 bool isUnordered() const {
263 return (getOrdering() == AtomicOrdering::NotAtomic ||
    16) Assuming the condition is true
    18) Returning the value 1, which participates in a condition later
264 getOrdering() == AtomicOrdering::Unordered) &&
265 !isVolatile();
    17) Assuming the condition is true
266 }
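
Steps 16-18 of the analyzer's path land in this predicate: a load is "unordered" when it is not volatile and its ordering is NotAtomic or Unordered, i.e. it carries no constraint the LICM promotion above would have to preserve. Restated standalone (plain C++ enum mirroring llvm::AtomicOrdering, not the LLVM type):

    enum class AtomicOrdering {
      NotAtomic, Unordered, Monotonic, Acquire,
      Release, AcquireRelease, SequentiallyConsistent
    };

    // Mirrors LoadInst::isUnordered(): no ordering constraint, not volatile.
    bool isUnorderedLoad(AtomicOrdering O, bool IsVolatile) {
      return (O == AtomicOrdering::NotAtomic ||
              O == AtomicOrdering::Unordered) &&
             !IsVolatile;
    }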
267
268 Value *getPointerOperand() { return getOperand(0); }
269 const Value *getPointerOperand() const { return getOperand(0); }
270 static unsigned getPointerOperandIndex() { return 0U; }
271 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
272
273 /// Returns the address space of the pointer operand.
274 unsigned getPointerAddressSpace() const {
275 return getPointerOperandType()->getPointerAddressSpace();
276 }
277
278 // Methods for support type inquiry through isa, cast, and dyn_cast:
279 static bool classof(const Instruction *I) {
280 return I->getOpcode() == Instruction::Load;
281 }
282 static bool classof(const Value *V) {
283 return isa<Instruction>(V) && classof(cast<Instruction>(V));
284 }
285
286private:
287 // Shadow Instruction::setInstructionSubclassData with a private forwarding
288 // method so that subclasses cannot accidentally use it.
289 template <typename Bitfield>
290 void setSubclassData(typename Bitfield::Type Value) {
291 Instruction::setSubclassData<Bitfield>(Value);
292 }
293
294 /// The synchronization scope ID of this load instruction. Not quite enough
295 /// room in SubClassData for everything, so synchronization scope ID gets its
296 /// own field.
297 SyncScope::ID SSID;
298};
299
300//===----------------------------------------------------------------------===//
301// StoreInst Class
302//===----------------------------------------------------------------------===//
303
304/// An instruction for storing to memory.
305class StoreInst : public Instruction {
306 using VolatileField = BoolBitfieldElementT<0>;
307 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
308 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
309 static_assert(
310 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
311 "Bitfields must be contiguous");
312
313 void AssertOK();
314
315protected:
316 // Note: Instruction needs to be a friend here to call cloneImpl.
317 friend class Instruction;
318
319 StoreInst *cloneImpl() const;
320
321public:
322 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
323 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
324 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
325 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
326 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
327 Instruction *InsertBefore = nullptr);
328 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
329 BasicBlock *InsertAtEnd);
330 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
331 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
332 Instruction *InsertBefore = nullptr);
333 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
334 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
335
336 // allocate space for exactly two operands
337 void *operator new(size_t S) { return User::operator new(S, 2); }
338 void operator delete(void *Ptr) { User::operator delete(Ptr); }
339
340 /// Return true if this is a store to a volatile memory location.
341 bool isVolatile() const { return getSubclassData<VolatileField>(); }
342
343 /// Specify whether this is a volatile store or not.
344 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
345
346 /// Transparently provide more efficient getOperand methods.
347 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
348
349 /// Return the alignment of the access that is being performed
350 /// FIXME: Remove this function once the transition to Align is over.
351 /// Use getAlign() instead.
352 uint64_t getAlignment() const { return getAlign().value(); }
353
354 Align getAlign() const {
355 return Align(1ULL << (getSubclassData<AlignmentField>()));
356 }
357
358 void setAlignment(Align Align) {
359 setSubclassData<AlignmentField>(Log2(Align));
360 }
361
362 /// Returns the ordering constraint of this store instruction.
363 AtomicOrdering getOrdering() const {
364 return getSubclassData<OrderingField>();
365 }
366
367 /// Sets the ordering constraint of this store instruction. May not be
368 /// Acquire or AcquireRelease.
369 void setOrdering(AtomicOrdering Ordering) {
370 setSubclassData<OrderingField>(Ordering);
371 }
372
373 /// Returns the synchronization scope ID of this store instruction.
374 SyncScope::ID getSyncScopeID() const {
375 return SSID;
376 }
377
378 /// Sets the synchronization scope ID of this store instruction.
379 void setSyncScopeID(SyncScope::ID SSID) {
380 this->SSID = SSID;
381 }
382
383 /// Sets the ordering constraint and the synchronization scope ID of this
384 /// store instruction.
385 void setAtomic(AtomicOrdering Ordering,
386 SyncScope::ID SSID = SyncScope::System) {
387 setOrdering(Ordering);
388 setSyncScopeID(SSID);
389 }
390
391 bool isSimple() const { return !isAtomic() && !isVolatile(); }
392
393 bool isUnordered() const {
394 return (getOrdering() == AtomicOrdering::NotAtomic ||
395 getOrdering() == AtomicOrdering::Unordered) &&
396 !isVolatile();
397 }
398
399 Value *getValueOperand() { return getOperand(0); }
400 const Value *getValueOperand() const { return getOperand(0); }
401
402 Value *getPointerOperand() { return getOperand(1); }
403 const Value *getPointerOperand() const { return getOperand(1); }
404 static unsigned getPointerOperandIndex() { return 1U; }
405 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
406
407 /// Returns the address space of the pointer operand.
408 unsigned getPointerAddressSpace() const {
409 return getPointerOperandType()->getPointerAddressSpace();
410 }
411
412 // Methods for support type inquiry through isa, cast, and dyn_cast:
413 static bool classof(const Instruction *I) {
414 return I->getOpcode() == Instruction::Store;
415 }
416 static bool classof(const Value *V) {
417 return isa<Instruction>(V) && classof(cast<Instruction>(V));
418 }
419
420private:
421 // Shadow Instruction::setInstructionSubclassData with a private forwarding
422 // method so that subclasses cannot accidentally use it.
423 template <typename Bitfield>
424 void setSubclassData(typename Bitfield::Type Value) {
425 Instruction::setSubclassData<Bitfield>(Value);
426 }
427
428 /// The synchronization scope ID of this store instruction. Not quite enough
429 /// room in SubClassData for everything, so synchronization scope ID gets its
430 /// own field.
431 SyncScope::ID SSID;
432};
433
434template <>
435struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
436};
437
438 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
439
440//===----------------------------------------------------------------------===//
441// FenceInst Class
442//===----------------------------------------------------------------------===//
443
444/// An instruction for ordering other memory operations.
445class FenceInst : public Instruction {
446 using OrderingField = AtomicOrderingBitfieldElementT<0>;
447
448 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
449
450protected:
451 // Note: Instruction needs to be a friend here to call cloneImpl.
452 friend class Instruction;
453
454 FenceInst *cloneImpl() const;
455
456public:
457 // Ordering may only be Acquire, Release, AcquireRelease, or
458 // SequentiallyConsistent.
459 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
460 SyncScope::ID SSID = SyncScope::System,
461 Instruction *InsertBefore = nullptr);
462 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
463 BasicBlock *InsertAtEnd);
464
465 // allocate space for exactly zero operands
466 void *operator new(size_t S) { return User::operator new(S, 0); }
467 void operator delete(void *Ptr) { User::operator delete(Ptr); }
468
469 /// Returns the ordering constraint of this fence instruction.
470 AtomicOrdering getOrdering() const {
471 return getSubclassData<OrderingField>();
472 }
473
474 /// Sets the ordering constraint of this fence instruction. May only be
475 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
476 void setOrdering(AtomicOrdering Ordering) {
477 setSubclassData<OrderingField>(Ordering);
478 }
479
480 /// Returns the synchronization scope ID of this fence instruction.
481 SyncScope::ID getSyncScopeID() const {
482 return SSID;
483 }
484
485 /// Sets the synchronization scope ID of this fence instruction.
486 void setSyncScopeID(SyncScope::ID SSID) {
487 this->SSID = SSID;
488 }
489
490 // Methods for support type inquiry through isa, cast, and dyn_cast:
491 static bool classof(const Instruction *I) {
492 return I->getOpcode() == Instruction::Fence;
493 }
494 static bool classof(const Value *V) {
495 return isa<Instruction>(V) && classof(cast<Instruction>(V));
496 }
497
498private:
499 // Shadow Instruction::setInstructionSubclassData with a private forwarding
500 // method so that subclasses cannot accidentally use it.
501 template <typename Bitfield>
502 void setSubclassData(typename Bitfield::Type Value) {
503 Instruction::setSubclassData<Bitfield>(Value);
504 }
505
506 /// The synchronization scope ID of this fence instruction. Not quite enough
507 /// room in SubClassData for everything, so synchronization scope ID gets its
508 /// own field.
509 SyncScope::ID SSID;
510};
511
512//===----------------------------------------------------------------------===//
513// AtomicCmpXchgInst Class
514//===----------------------------------------------------------------------===//
515
516/// An instruction that atomically checks whether a
517/// specified value is in a memory location, and, if it is, stores a new value
518/// there. The value returned by this instruction is a pair containing the
519/// original value as first element, and an i1 indicating success (true) or
520/// failure (false) as second element.
521///
522class AtomicCmpXchgInst : public Instruction {
523 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
524 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
525 SyncScope::ID SSID);
526
527 template <unsigned Offset>
528 using AtomicOrderingBitfieldElement =
529 typename Bitfield::Element<AtomicOrdering, Offset, 3,
530 AtomicOrdering::LAST>;
531
532protected:
533 // Note: Instruction needs to be a friend here to call cloneImpl.
534 friend class Instruction;
535
536 AtomicCmpXchgInst *cloneImpl() const;
537
538public:
539 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
540 AtomicOrdering SuccessOrdering,
541 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
542 Instruction *InsertBefore = nullptr);
543 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
544 AtomicOrdering SuccessOrdering,
545 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
546 BasicBlock *InsertAtEnd);
547
548 // allocate space for exactly three operands
549 void *operator new(size_t S) { return User::operator new(S, 3); }
550 void operator delete(void *Ptr) { User::operator delete(Ptr); }
551
552 using VolatileField = BoolBitfieldElementT<0>;
553 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
554 using SuccessOrderingField =
555 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
556 using FailureOrderingField =
557 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
558 using AlignmentField =
559 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
560 static_assert(
561 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
562 FailureOrderingField, AlignmentField>(),
563 "Bitfields must be contiguous");
564
565 /// Return the alignment of the memory that is being allocated by the
566 /// instruction.
567 Align getAlign() const {
568 return Align(1ULL << getSubclassData<AlignmentField>());
569 }
570
571 void setAlignment(Align Align) {
572 setSubclassData<AlignmentField>(Log2(Align));
573 }
574
575 /// Return true if this is a cmpxchg from a volatile memory
576 /// location.
577 ///
578 bool isVolatile() const { return getSubclassData<VolatileField>(); }
579
580 /// Specify whether this is a volatile cmpxchg.
581 ///
582 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
583
584 /// Return true if this cmpxchg may spuriously fail.
585 bool isWeak() const { return getSubclassData<WeakField>(); }
586
587 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
588
589 /// Transparently provide more efficient getOperand methods.
590 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
591
592 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
593 return Ordering != AtomicOrdering::NotAtomic &&
594 Ordering != AtomicOrdering::Unordered;
595 }
596
597 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
598 return Ordering != AtomicOrdering::NotAtomic &&
599 Ordering != AtomicOrdering::Unordered &&
600 Ordering != AtomicOrdering::AcquireRelease &&
601 Ordering != AtomicOrdering::Release;
602 }
603
604 /// Returns the success ordering constraint of this cmpxchg instruction.
605 AtomicOrdering getSuccessOrdering() const {
606 return getSubclassData<SuccessOrderingField>();
607 }
608
609 /// Sets the success ordering constraint of this cmpxchg instruction.
610 void setSuccessOrdering(AtomicOrdering Ordering) {
611 assert(isValidSuccessOrdering(Ordering) &&
612        "invalid CmpXchg success ordering");
613 setSubclassData<SuccessOrderingField>(Ordering);
614 }
615
616 /// Returns the failure ordering constraint of this cmpxchg instruction.
617 AtomicOrdering getFailureOrdering() const {
618 return getSubclassData<FailureOrderingField>();
619 }
620
621 /// Sets the failure ordering constraint of this cmpxchg instruction.
622 void setFailureOrdering(AtomicOrdering Ordering) {
623 assert(isValidFailureOrdering(Ordering) &&
624        "invalid CmpXchg failure ordering");
625 setSubclassData<FailureOrderingField>(Ordering);
626 }
627
628 /// Returns a single ordering which is at least as strong as both the
629 /// success and failure orderings for this cmpxchg.
630 AtomicOrdering getMergedOrdering() const {
631 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
632 return AtomicOrdering::SequentiallyConsistent;
633 if (getFailureOrdering() == AtomicOrdering::Acquire) {
634 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
635 return AtomicOrdering::Acquire;
636 if (getSuccessOrdering() == AtomicOrdering::Release)
637 return AtomicOrdering::AcquireRelease;
638 }
639 return getSuccessOrdering();
640 }
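
getMergedOrdering folds the success and failure orderings into one ordering at least as strong as both; note that only a failure ordering of Acquire can strengthen the success ordering. A standalone sketch with sanity checks (plain C++ enum, not the LLVM type):

    #include <cassert>

    enum class Ord { Monotonic, Acquire, Release, AcquireRelease,
                     SequentiallyConsistent };

    Ord merged(Ord Success, Ord Failure) {
      if (Failure == Ord::SequentiallyConsistent)
        return Ord::SequentiallyConsistent;
      if (Failure == Ord::Acquire) {
        if (Success == Ord::Monotonic)
          return Ord::Acquire;            // upgrade a monotonic success
        if (Success == Ord::Release)
          return Ord::AcquireRelease;     // combine release + acquire
      }
      return Success;                     // success already subsumes failure
    }

    int main() {
      assert(merged(Ord::Release, Ord::Acquire) == Ord::AcquireRelease);
      assert(merged(Ord::SequentiallyConsistent, Ord::Acquire) ==
             Ord::SequentiallyConsistent);
    }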
641
642 /// Returns the synchronization scope ID of this cmpxchg instruction.
643 SyncScope::ID getSyncScopeID() const {
644 return SSID;
645 }
646
647 /// Sets the synchronization scope ID of this cmpxchg instruction.
648 void setSyncScopeID(SyncScope::ID SSID) {
649 this->SSID = SSID;
650 }
651
652 Value *getPointerOperand() { return getOperand(0); }
653 const Value *getPointerOperand() const { return getOperand(0); }
654 static unsigned getPointerOperandIndex() { return 0U; }
655
656 Value *getCompareOperand() { return getOperand(1); }
657 const Value *getCompareOperand() const { return getOperand(1); }
658
659 Value *getNewValOperand() { return getOperand(2); }
660 const Value *getNewValOperand() const { return getOperand(2); }
661
662 /// Returns the address space of the pointer operand.
663 unsigned getPointerAddressSpace() const {
664 return getPointerOperand()->getType()->getPointerAddressSpace();
665 }
666
667 /// Returns the strongest permitted ordering on failure, given the
668 /// desired ordering on success.
669 ///
670 /// If the comparison in a cmpxchg operation fails, there is no atomic store
671 /// so release semantics cannot be provided. So this function drops explicit
672 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
673 /// operation would remain SequentiallyConsistent.
674 static AtomicOrdering
675 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
676 switch (SuccessOrdering) {
677 default:
678 llvm_unreachable("invalid cmpxchg success ordering");
679 case AtomicOrdering::Release:
680 case AtomicOrdering::Monotonic:
681 return AtomicOrdering::Monotonic;
682 case AtomicOrdering::AcquireRelease:
683 case AtomicOrdering::Acquire:
684 return AtomicOrdering::Acquire;
685 case AtomicOrdering::SequentiallyConsistent:
686 return AtomicOrdering::SequentiallyConsistent;
687 }
688 }
689
690 // Methods for support type inquiry through isa, cast, and dyn_cast:
691 static bool classof(const Instruction *I) {
692 return I->getOpcode() == Instruction::AtomicCmpXchg;
693 }
694 static bool classof(const Value *V) {
695 return isa<Instruction>(V) && classof(cast<Instruction>(V));
696 }
697
698private:
699 // Shadow Instruction::setInstructionSubclassData with a private forwarding
700 // method so that subclasses cannot accidentally use it.
701 template <typename Bitfield>
702 void setSubclassData(typename Bitfield::Type Value) {
703 Instruction::setSubclassData<Bitfield>(Value);
704 }
705
706 /// The synchronization scope ID of this cmpxchg instruction. Not quite
707 /// enough room in SubClassData for everything, so synchronization scope ID
708 /// gets its own field.
709 SyncScope::ID SSID;
710};
711
712template <>
713struct OperandTraits<AtomicCmpXchgInst> :
714 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
715};
716
717 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
718
719//===----------------------------------------------------------------------===//
720// AtomicRMWInst Class
721//===----------------------------------------------------------------------===//
722
723/// an instruction that atomically reads a memory location,
724/// combines it with another value, and then stores the result back. Returns
725/// the old value.
726///
727class AtomicRMWInst : public Instruction {
728protected:
729 // Note: Instruction needs to be a friend here to call cloneImpl.
730 friend class Instruction;
731
732 AtomicRMWInst *cloneImpl() const;
733
734public:
735 /// This enumeration lists the possible modifications atomicrmw can make. In
736 /// the descriptions, 'p' is the pointer to the instruction's memory location,
737 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
738 /// instruction. These instructions always return 'old'.
739 enum BinOp : unsigned {
740 /// *p = v
741 Xchg,
742 /// *p = old + v
743 Add,
744 /// *p = old - v
745 Sub,
746 /// *p = old & v
747 And,
748 /// *p = ~(old & v)
749 Nand,
750 /// *p = old | v
751 Or,
752 /// *p = old ^ v
753 Xor,
754 /// *p = old >signed v ? old : v
755 Max,
756 /// *p = old <signed v ? old : v
757 Min,
758 /// *p = old >unsigned v ? old : v
759 UMax,
760 /// *p = old <unsigned v ? old : v
761 UMin,
762
763 /// *p = old + v
764 FAdd,
765
766 /// *p = old - v
767 FSub,
768
769 FIRST_BINOP = Xchg,
770 LAST_BINOP = FSub,
771 BAD_BINOP
772 };
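
Several of these operations (Nand, the signed/unsigned min/max family, FAdd/FSub) have no single-instruction lowering on many targets and are expanded to a compare-exchange loop. A standalone sketch of Nand, *p = ~(old & v), using std::atomic (illustrative, not LLVM's actual expansion):

    #include <atomic>
    #include <cstdint>

    // atomicrmw nand: stores ~(old & v) and returns the old value.
    uint32_t atomicNand(std::atomic<uint32_t> &P, uint32_t V) {
      uint32_t Old = P.load(std::memory_order_relaxed);
      while (!P.compare_exchange_weak(Old, ~(Old & V),
                                      std::memory_order_seq_cst,
                                      std::memory_order_relaxed)) {
        // Old is refreshed by compare_exchange_weak on failure; retry.
      }
      return Old;
    }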
773
774private:
775 template <unsigned Offset>
776 using AtomicOrderingBitfieldElement =
777 typename Bitfield::Element<AtomicOrdering, Offset, 3,
778 AtomicOrdering::LAST>;
779
780 template <unsigned Offset>
781 using BinOpBitfieldElement =
782 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
783
784public:
785 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
786 AtomicOrdering Ordering, SyncScope::ID SSID,
787 Instruction *InsertBefore = nullptr);
788 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
789 AtomicOrdering Ordering, SyncScope::ID SSID,
790 BasicBlock *InsertAtEnd);
791
792 // allocate space for exactly two operands
793 void *operator new(size_t S) { return User::operator new(S, 2); }
794 void operator delete(void *Ptr) { User::operator delete(Ptr); }
795
796 using VolatileField = BoolBitfieldElementT<0>;
797 using AtomicOrderingField =
798 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
799 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
800 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
801 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
802 OperationField, AlignmentField>(),
803 "Bitfields must be contiguous");
804
805 BinOp getOperation() const { return getSubclassData<OperationField>(); }
806
807 static StringRef getOperationName(BinOp Op);
808
809 static bool isFPOperation(BinOp Op) {
810 switch (Op) {
811 case AtomicRMWInst::FAdd:
812 case AtomicRMWInst::FSub:
813 return true;
814 default:
815 return false;
816 }
817 }
818
819 void setOperation(BinOp Operation) {
820 setSubclassData<OperationField>(Operation);
821 }
822
823 /// Return the alignment of the memory that is being allocated by the
824 /// instruction.
825 Align getAlign() const {
826 return Align(1ULL << getSubclassData<AlignmentField>());
827 }
828
829 void setAlignment(Align Align) {
830 setSubclassData<AlignmentField>(Log2(Align));
831 }
832
833 /// Return true if this is a RMW on a volatile memory location.
834 ///
835 bool isVolatile() const { return getSubclassData<VolatileField>(); }
836
837 /// Specify whether this is a volatile RMW or not.
838 ///
839 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
840
841 /// Transparently provide more efficient getOperand methods.
842 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
843
844 /// Returns the ordering constraint of this rmw instruction.
845 AtomicOrdering getOrdering() const {
846 return getSubclassData<AtomicOrderingField>();
847 }
848
849 /// Sets the ordering constraint of this rmw instruction.
850 void setOrdering(AtomicOrdering Ordering) {
851 assert(Ordering != AtomicOrdering::NotAtomic &&
852        "atomicrmw instructions can only be atomic.");
853 setSubclassData<AtomicOrderingField>(Ordering);
854 }
855
856 /// Returns the synchronization scope ID of this rmw instruction.
857 SyncScope::ID getSyncScopeID() const {
858 return SSID;
859 }
860
861 /// Sets the synchronization scope ID of this rmw instruction.
862 void setSyncScopeID(SyncScope::ID SSID) {
863 this->SSID = SSID;
864 }
865
866 Value *getPointerOperand() { return getOperand(0); }
867 const Value *getPointerOperand() const { return getOperand(0); }
868 static unsigned getPointerOperandIndex() { return 0U; }
869
870 Value *getValOperand() { return getOperand(1); }
871 const Value *getValOperand() const { return getOperand(1); }
872
873 /// Returns the address space of the pointer operand.
874 unsigned getPointerAddressSpace() const {
875 return getPointerOperand()->getType()->getPointerAddressSpace();
876 }
877
878 bool isFloatingPointOperation() const {
879 return isFPOperation(getOperation());
880 }
881
882 // Methods for support type inquiry through isa, cast, and dyn_cast:
883 static bool classof(const Instruction *I) {
884 return I->getOpcode() == Instruction::AtomicRMW;
885 }
886 static bool classof(const Value *V) {
887 return isa<Instruction>(V) && classof(cast<Instruction>(V));
888 }
889
890private:
891 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
892 AtomicOrdering Ordering, SyncScope::ID SSID);
893
894 // Shadow Instruction::setInstructionSubclassData with a private forwarding
895 // method so that subclasses cannot accidentally use it.
896 template <typename Bitfield>
897 void setSubclassData(typename Bitfield::Type Value) {
898 Instruction::setSubclassData<Bitfield>(Value);
899 }
900
901 /// The synchronization scope ID of this rmw instruction. Not quite enough
902 /// room in SubClassData for everything, so synchronization scope ID gets its
903 /// own field.
904 SyncScope::ID SSID;
905};
906
907template <>
908struct OperandTraits<AtomicRMWInst>
909 : public FixedNumOperandTraits<AtomicRMWInst,2> {
910};
911
912 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
913
914//===----------------------------------------------------------------------===//
915// GetElementPtrInst Class
916//===----------------------------------------------------------------------===//
917
918// checkGEPType - Simple wrapper function to give a better assertion failure
919// message on bad indexes for a gep instruction.
920//
921inline Type *checkGEPType(Type *Ty) {
922 assert(Ty && "Invalid GetElementPtrInst indices for type!");
923 return Ty;
924}
925
926/// an instruction for type-safe pointer arithmetic to
927/// access elements of arrays and structs
928///
929class GetElementPtrInst : public Instruction {
930 Type *SourceElementType;
931 Type *ResultElementType;
932
933 GetElementPtrInst(const GetElementPtrInst &GEPI);
934
935 /// Constructors - Create a getelementptr instruction with a base pointer and
936 /// a list of indices. The first ctor can optionally insert before an existing
937 /// instruction, the second appends the new instruction to the specified
938 /// BasicBlock.
939 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
940 ArrayRef<Value *> IdxList, unsigned Values,
941 const Twine &NameStr, Instruction *InsertBefore);
942 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
943 ArrayRef<Value *> IdxList, unsigned Values,
944 const Twine &NameStr, BasicBlock *InsertAtEnd);
945
946 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
947
948protected:
949 // Note: Instruction needs to be a friend here to call cloneImpl.
950 friend class Instruction;
951
952 GetElementPtrInst *cloneImpl() const;
953
954public:
955 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
956 ArrayRef<Value *> IdxList,
957 const Twine &NameStr = "",
958 Instruction *InsertBefore = nullptr) {
959 unsigned Values = 1 + unsigned(IdxList.size());
960 assert(PointeeType && "Must specify element type");
961 assert(cast<PointerType>(Ptr->getType()->getScalarType())
962        ->isOpaqueOrPointeeTypeMatches(PointeeType));
963 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
964 NameStr, InsertBefore);
965 }
966
967 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
968 ArrayRef<Value *> IdxList,
969 const Twine &NameStr,
970 BasicBlock *InsertAtEnd) {
971 unsigned Values = 1 + unsigned(IdxList.size());
972 assert(PointeeType && "Must specify element type");
973 assert(cast<PointerType>(Ptr->getType()->getScalarType())
974        ->isOpaqueOrPointeeTypeMatches(PointeeType));
975 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
976 NameStr, InsertAtEnd);
977 }
978
979 /// Create an "inbounds" getelementptr. See the documentation for the
980 /// "inbounds" flag in LangRef.html for details.
981 static GetElementPtrInst *
982 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
983 const Twine &NameStr = "",
984 Instruction *InsertBefore = nullptr) {
985 GetElementPtrInst *GEP =
986 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
987 GEP->setIsInBounds(true);
988 return GEP;
989 }
990
991 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
992 ArrayRef<Value *> IdxList,
993 const Twine &NameStr,
994 BasicBlock *InsertAtEnd) {
995 GetElementPtrInst *GEP =
996 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
997 GEP->setIsInBounds(true);
998 return GEP;
999 }
1000
1001 /// Transparently provide more efficient getOperand methods.
1002 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1003
1004 Type *getSourceElementType() const { return SourceElementType; }
1005
1006 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1007 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1008
1009 Type *getResultElementType() const {
1010 assert(cast<PointerType>(getType()->getScalarType())
1011 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1012 return ResultElementType;
1013 }
1014
1015 /// Returns the address space of this instruction's pointer type.
1016 unsigned getAddressSpace() const {
1017 // Note that this is always the same as the pointer operand's address space
1018 // and that is cheaper to compute, so cheat here.
1019 return getPointerAddressSpace();
1020 }
1021
1022 /// Returns the result type of a getelementptr with the given source
1023 /// element type and indexes.
1024 ///
1025 /// Null is returned if the indices are invalid for the specified
1026 /// source element type.
1027 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1028 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1029 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1030
1031 /// Return the type of the element at the given index of an indexable
1032 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1033 ///
1034 /// Returns null if the type can't be indexed, or the given index is not
1035 /// legal for the given type.
1036 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1037 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1038
1039 inline op_iterator idx_begin() { return op_begin()+1; }
1040 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1041 inline op_iterator idx_end() { return op_end(); }
1042 inline const_op_iterator idx_end() const { return op_end(); }
1043
1044 inline iterator_range<op_iterator> indices() {
1045 return make_range(idx_begin(), idx_end());
1046 }
1047
1048 inline iterator_range<const_op_iterator> indices() const {
1049 return make_range(idx_begin(), idx_end());
1050 }
1051
1052 Value *getPointerOperand() {
1053 return getOperand(0);
1054 }
1055 const Value *getPointerOperand() const {
1056 return getOperand(0);
1057 }
1058 static unsigned getPointerOperandIndex() {
1059 return 0U; // index of the pointer operand.
1060 }
1061
1062 /// Method to return the type of the
1063 /// pointer operand.
1064 Type *getPointerOperandType() const {
1065 return getPointerOperand()->getType();
1066 }
1067
1068 /// Returns the address space of the pointer operand.
1069 unsigned getPointerAddressSpace() const {
1070 return getPointerOperandType()->getPointerAddressSpace();
1071 }
1072
1073 /// Returns the pointer type returned by the GEP
1074 /// instruction, which may be a vector of pointers.
1075 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1076 ArrayRef<Value *> IdxList) {
1077 PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1078 unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1079 Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1080 Type *PtrTy = OrigPtrTy->isOpaque()
1081 ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1082 : PointerType::get(ResultElemTy, AddrSpace);
1083 // Vector GEP
1084 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1085 ElementCount EltCount = PtrVTy->getElementCount();
1086 return VectorType::get(PtrTy, EltCount);
1087 }
1088 for (Value *Index : IdxList)
1089 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1090 ElementCount EltCount = IndexVTy->getElementCount();
1091 return VectorType::get(PtrTy, EltCount);
1092 }
1093 // Scalar GEP
1094 return PtrTy;
1095 }
1096
1097 unsigned getNumIndices() const { // Note: always non-negative
1098 return getNumOperands() - 1;
1099 }
1100
1101 bool hasIndices() const {
1102 return getNumOperands() > 1;
1103 }
1104
1105 /// Return true if all of the indices of this GEP are
1106 /// zeros. If so, the result pointer and the first operand have the same
1107 /// value, just potentially different types.
1108 bool hasAllZeroIndices() const;
1109
1110 /// Return true if all of the indices of this GEP are
1111 /// constant integers. If so, the result pointer and the first operand have
1112 /// a constant offset between them.
1113 bool hasAllConstantIndices() const;
1114
1115 /// Set or clear the inbounds flag on this GEP instruction.
1116 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1117 void setIsInBounds(bool b = true);
1118
1119 /// Determine whether the GEP has the inbounds flag.
1120 bool isInBounds() const;
1121
1122 /// Accumulate the constant address offset of this GEP if possible.
1123 ///
1124 /// This routine accepts an APInt into which it will accumulate the constant
1125 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1126 /// all-constant, it returns false and the value of the offset APInt is
1127 /// undefined (it is *not* preserved!). The APInt passed into this routine
1128 /// must be at least as wide as the IntPtr type for the address space of
1129 /// the base GEP pointer.
1130 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1131 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1132 MapVector<Value *, APInt> &VariableOffsets,
1133 APInt &ConstantOffset) const;
1134 // Methods for support type inquiry through isa, cast, and dyn_cast:
1135 static bool classof(const Instruction *I) {
1136 return (I->getOpcode() == Instruction::GetElementPtr);
1137 }
1138 static bool classof(const Value *V) {
1139 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1140 }
1141};
1142
1143template <>
1144struct OperandTraits<GetElementPtrInst> :
1145 public VariadicOperandTraits<GetElementPtrInst, 1> {
1146};
1147
1148GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1149 ArrayRef<Value *> IdxList, unsigned Values,
1150 const Twine &NameStr,
1151 Instruction *InsertBefore)
1152 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1153 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1154 Values, InsertBefore),
1155 SourceElementType(PointeeType),
1156 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1157 assert(cast<PointerType>(getType()->getScalarType())
1158 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1159 init(Ptr, IdxList, NameStr);
1160}
1161
1162GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1163 ArrayRef<Value *> IdxList, unsigned Values,
1164 const Twine &NameStr,
1165 BasicBlock *InsertAtEnd)
1166 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1167 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1168 Values, InsertAtEnd),
1169 SourceElementType(PointeeType),
1170 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1171 assert(cast<PointerType>(getType()->getScalarType())
1172 ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1173 init(Ptr, IdxList, NameStr);
1174}
1175
1176DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1177
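A minimal usage sketch for the factory methods above (not part of the analyzed source; helper names like buildFieldGEP are illustrative). It builds an inbounds GEP to field 1 of a struct and then folds the constant offset with accumulateConstantOffset, assuming the caller supplies a valid struct type, pointer, and insertion point:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

GetElementPtrInst *buildFieldGEP(StructType *STy, Value *StructPtr,
                                 Instruction *InsertBefore) {
  LLVMContext &Ctx = STy->getContext();
  // GEP indices: step through the pointer (0), then select field 1.
  Value *Idx[] = {ConstantInt::get(Type::getInt32Ty(Ctx), 0),
                  ConstantInt::get(Type::getInt32Ty(Ctx), 1)};
  // CreateInBounds forwards to Create and then sets the inbounds flag,
  // exactly as in the definition above.
  return GetElementPtrInst::CreateInBounds(STy, StructPtr, Idx, "field1",
                                           InsertBefore);
}

// Per the accumulateConstantOffset contract above, the APInt must be at
// least as wide as the index type of the GEP's address space.
bool constantOffsetOf(const GetElementPtrInst *GEP, const DataLayout &DL,
                      APInt &Offset) {
  Offset = APInt(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
  return GEP->accumulateConstantOffset(DL, Offset);
}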
1178//===----------------------------------------------------------------------===//
1179// ICmpInst Class
1180//===----------------------------------------------------------------------===//
1181
1182/// This instruction compares its operands according to the predicate given
1183/// to the constructor. It only operates on integers or pointers. The operands
1184/// must be identical types.
1185/// Represent an integer comparison operator.
1186class ICmpInst: public CmpInst {
1187 void AssertOK() {
1188 assert(isIntPredicate() &&
1189 "Invalid ICmp predicate value");
1190 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1191 "Both operands to ICmp instruction are not of the same type!");
1192 // Check that the operands are the right type
1193 assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1194 getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1195 "Invalid operand types for ICmp instruction");
1196 }
1197
1198protected:
1199 // Note: Instruction needs to be a friend here to call cloneImpl.
1200 friend class Instruction;
1201
1202 /// Clone an identical ICmpInst
1203 ICmpInst *cloneImpl() const;
1204
1205public:
1206 /// Constructor with insert-before-instruction semantics.
1207 ICmpInst(
1208 Instruction *InsertBefore, ///< Where to insert
1209 Predicate pred, ///< The predicate to use for the comparison
1210 Value *LHS, ///< The left-hand-side of the expression
1211 Value *RHS, ///< The right-hand-side of the expression
1212 const Twine &NameStr = "" ///< Name of the instruction
1213 ) : CmpInst(makeCmpResultType(LHS->getType()),
1214 Instruction::ICmp, pred, LHS, RHS, NameStr,
1215 InsertBefore) {
1216#ifndef NDEBUG
1217 AssertOK();
1218#endif
1219 }
1220
1221 /// Constructor with insert-at-end semantics.
1222 ICmpInst(
1223 BasicBlock &InsertAtEnd, ///< Block to insert into.
1224 Predicate pred, ///< The predicate to use for the comparison
1225 Value *LHS, ///< The left-hand-side of the expression
1226 Value *RHS, ///< The right-hand-side of the expression
1227 const Twine &NameStr = "" ///< Name of the instruction
1228 ) : CmpInst(makeCmpResultType(LHS->getType()),
1229 Instruction::ICmp, pred, LHS, RHS, NameStr,
1230 &InsertAtEnd) {
1231#ifndef NDEBUG
1232 AssertOK();
1233#endif
1234 }
1235
1236 /// Constructor with no-insertion semantics
1237 ICmpInst(
1238 Predicate pred, ///< The predicate to use for the comparison
1239 Value *LHS, ///< The left-hand-side of the expression
1240 Value *RHS, ///< The right-hand-side of the expression
1241 const Twine &NameStr = "" ///< Name of the instruction
1242 ) : CmpInst(makeCmpResultType(LHS->getType()),
1243 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1244#ifndef NDEBUG
1245 AssertOK();
1246#endif
1247 }
1248
1249 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1250 /// @returns the predicate that would be the result if the operand were
1251 /// regarded as signed.
1252 /// Return the signed version of the predicate
1253 Predicate getSignedPredicate() const {
1254 return getSignedPredicate(getPredicate());
1255 }
1256
1257 /// This is a static version that you can use without an instruction.
1258 /// Return the signed version of the predicate.
1259 static Predicate getSignedPredicate(Predicate pred);
1260
1261 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1262 /// @returns the predicate that would be the result if the operand were
1263 /// regarded as unsigned.
1264 /// Return the unsigned version of the predicate
1265 Predicate getUnsignedPredicate() const {
1266 return getUnsignedPredicate(getPredicate());
1267 }
1268
1269 /// This is a static version that you can use without an instruction.
1270 /// Return the unsigned version of the predicate.
1271 static Predicate getUnsignedPredicate(Predicate pred);
1272
1273 /// Return true if this predicate is either EQ or NE. This also
1274 /// tests for commutativity.
1275 static bool isEquality(Predicate P) {
1276 return P == ICMP_EQ || P == ICMP_NE;
1277 }
1278
1279 /// Return true if this predicate is either EQ or NE. This also
1280 /// tests for commutativity.
1281 bool isEquality() const {
1282 return isEquality(getPredicate());
1283 }
1284
1285 /// @returns true if the predicate of this ICmpInst is commutative
1286 /// Determine if this relation is commutative.
1287 bool isCommutative() const { return isEquality(); }
1288
1289 /// Return true if the predicate is relational (not EQ or NE).
1290 ///
1291 bool isRelational() const {
1292 return !isEquality();
1293 }
1294
1295 /// Return true if the predicate is relational (not EQ or NE).
1296 ///
1297 static bool isRelational(Predicate P) {
1298 return !isEquality(P);
1299 }
1300
1301 /// Return true if the predicate is SGT or UGT.
1302 ///
1303 static bool isGT(Predicate P) {
1304 return P == ICMP_SGT || P == ICMP_UGT;
1305 }
1306
1307 /// Return true if the predicate is SLT or ULT.
1308 ///
1309 static bool isLT(Predicate P) {
1310 return P == ICMP_SLT || P == ICMP_ULT;
1311 }
1312
1313 /// Return true if the predicate is SGE or UGE.
1314 ///
1315 static bool isGE(Predicate P) {
1316 return P == ICMP_SGE || P == ICMP_UGE;
1317 }
1318
1319 /// Return true if the predicate is SLE or ULE.
1320 ///
1321 static bool isLE(Predicate P) {
1322 return P == ICMP_SLE || P == ICMP_ULE;
1323 }
1324
1325 /// Returns the sequence of all ICmp predicates.
1326 ///
1327 static auto predicates() { return ICmpPredicates(); }
1328
1329 /// Exchange the two operands to this instruction in such a way that it does
1330 /// not modify the semantics of the instruction. The predicate value may be
1331 /// changed to retain the same result if the predicate is order dependent
1332 /// (e.g. ult).
1333 /// Swap operands and adjust predicate.
1334 void swapOperands() {
1335 setPredicate(getSwappedPredicate());
1336 Op<0>().swap(Op<1>());
1337 }
1338
1339 /// Return result of `LHS Pred RHS` comparison.
1340 static bool compare(const APInt &LHS, const APInt &RHS,
1341 ICmpInst::Predicate Pred);
1342
1343 // Methods for support type inquiry through isa, cast, and dyn_cast:
1344 static bool classof(const Instruction *I) {
1345 return I->getOpcode() == Instruction::ICmp;
1346 }
1347 static bool classof(const Value *V) {
1348 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1349 }
1350};
1351
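A short sketch of the ICmpInst helpers above (illustrative only; demoICmp is not an LLVM API). It constant-folds a comparison with the static compare() and maps an unsigned predicate to its signed twin:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

bool demoICmp() {
  // Fold `5 <s 7` without materializing an instruction.
  APInt A(/*numBits=*/32, 5), B(/*numBits=*/32, 7);
  bool SLt = ICmpInst::compare(A, B, ICmpInst::ICMP_SLT); // true

  // ICMP_ULT is relational (not EQ/NE); its signed version is ICMP_SLT.
  ICmpInst::Predicate P = ICmpInst::ICMP_ULT;
  return SLt && ICmpInst::isRelational(P) &&
         ICmpInst::getSignedPredicate(P) == ICmpInst::ICMP_SLT;
}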
1352//===----------------------------------------------------------------------===//
1353// FCmpInst Class
1354//===----------------------------------------------------------------------===//
1355
1356/// This instruction compares its operands according to the predicate given
1357/// to the constructor. It only operates on floating point values or packed
1358/// vectors of floating point values. The operands must be identical types.
1359/// Represents a floating point comparison operator.
1360class FCmpInst: public CmpInst {
1361 void AssertOK() {
1362 assert(isFPPredicate() && "Invalid FCmp predicate value");
1363 assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1364 "Both operands to FCmp instruction are not of the same type!");
1365 // Check that the operands are the right type
1366 assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1367 "Invalid operand types for FCmp instruction");
1368 }
1369
1370protected:
1371 // Note: Instruction needs to be a friend here to call cloneImpl.
1372 friend class Instruction;
1373
1374 /// Clone an identical FCmpInst
1375 FCmpInst *cloneImpl() const;
1376
1377public:
1378 /// Constructor with insert-before-instruction semantics.
1379 FCmpInst(
1380 Instruction *InsertBefore, ///< Where to insert
1381 Predicate pred, ///< The predicate to use for the comparison
1382 Value *LHS, ///< The left-hand-side of the expression
1383 Value *RHS, ///< The right-hand-side of the expression
1384 const Twine &NameStr = "" ///< Name of the instruction
1385 ) : CmpInst(makeCmpResultType(LHS->getType()),
1386 Instruction::FCmp, pred, LHS, RHS, NameStr,
1387 InsertBefore) {
1388 AssertOK();
1389 }
1390
1391 /// Constructor with insert-at-end semantics.
1392 FCmpInst(
1393 BasicBlock &InsertAtEnd, ///< Block to insert into.
1394 Predicate pred, ///< The predicate to use for the comparison
1395 Value *LHS, ///< The left-hand-side of the expression
1396 Value *RHS, ///< The right-hand-side of the expression
1397 const Twine &NameStr = "" ///< Name of the instruction
1398 ) : CmpInst(makeCmpResultType(LHS->getType()),
1399 Instruction::FCmp, pred, LHS, RHS, NameStr,
1400 &InsertAtEnd) {
1401 AssertOK();
1402 }
1403
1404 /// Constructor with no-insertion semantics
1405 FCmpInst(
1406 Predicate Pred, ///< The predicate to use for the comparison
1407 Value *LHS, ///< The left-hand-side of the expression
1408 Value *RHS, ///< The right-hand-side of the expression
1409 const Twine &NameStr = "", ///< Name of the instruction
1410 Instruction *FlagsSource = nullptr
1411 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1412 RHS, NameStr, nullptr, FlagsSource) {
1413 AssertOK();
1414 }
1415
1416 /// @returns true if the predicate of this instruction is EQ or NE.
1417 /// Determine if this is an equality predicate.
1418 static bool isEquality(Predicate Pred) {
1419 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1420 Pred == FCMP_UNE;
1421 }
1422
1423 /// @returns true if the predicate of this instruction is EQ or NE.
1424 /// Determine if this is an equality predicate.
1425 bool isEquality() const { return isEquality(getPredicate()); }
1426
1427 /// @returns true if the predicate of this instruction is commutative.
1428 /// Determine if this is a commutative predicate.
1429 bool isCommutative() const {
1430 return isEquality() ||
1431 getPredicate() == FCMP_FALSE ||
1432 getPredicate() == FCMP_TRUE ||
1433 getPredicate() == FCMP_ORD ||
1434 getPredicate() == FCMP_UNO;
1435 }
1436
1437 /// @returns true if the predicate is relational (not EQ or NE).
1438 /// Determine if this a relational predicate.
1439 bool isRelational() const { return !isEquality(); }
1440
1441 /// Exchange the two operands to this instruction in such a way that it does
1442 /// not modify the semantics of the instruction. The predicate value may be
1443 /// changed to retain the same result if the predicate is order dependent
1444 /// (e.g. ult).
1445 /// Swap operands and adjust predicate.
1446 void swapOperands() {
1447 setPredicate(getSwappedPredicate());
1448 Op<0>().swap(Op<1>());
1449 }
1450
1451 /// Returns the sequence of all FCmp predicates.
1452 ///
1453 static auto predicates() { return FCmpPredicates(); }
1454
1455 /// Return result of `LHS Pred RHS` comparison.
1456 static bool compare(const APFloat &LHS, const APFloat &RHS,
1457 FCmpInst::Predicate Pred);
1458
1459 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1460 static bool classof(const Instruction *I) {
1461 return I->getOpcode() == Instruction::FCmp;
1462 }
1463 static bool classof(const Value *V) {
1464 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1465 }
1466};
1467
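The same pattern works for FCmpInst; a hedged sketch (demoFCmp is an illustrative name) using the static compare() and the equality-predicate helper declared above:

#include "llvm/ADT/APFloat.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

bool demoFCmp() {
  APFloat X(1.0), Y(2.0);
  bool OLt = FCmpInst::compare(X, Y, FCmpInst::FCMP_OLT); // true (ordered <)
  // OEQ/ONE/UEQ/UNE are the equality predicates; equality commutes, as
  // isCommutative() above encodes.
  return OLt && FCmpInst::isEquality(FCmpInst::FCMP_OEQ);
}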
1468//===----------------------------------------------------------------------===//
1469/// This class represents a function call, abstracting a target
1470/// machine's calling convention. This class uses the low bit of the SubClassData
1471/// field to indicate whether or not this is a tail call. The rest of the bits
1472/// hold the calling convention of the call.
1473///
1474class CallInst : public CallBase {
1475 CallInst(const CallInst &CI);
1476
1477 /// Construct a CallInst given a range of arguments.
1478 /// Construct a CallInst from a range of arguments
1479 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1480 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1481 Instruction *InsertBefore);
1482
1483 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1484 const Twine &NameStr, Instruction *InsertBefore)
1485 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1486
1487 /// Construct a CallInst given a range of arguments.
1488 /// Construct a CallInst from a range of arguments
1489 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1490 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1491 BasicBlock *InsertAtEnd);
1492
1493 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1494 Instruction *InsertBefore);
1495
1496 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1497 BasicBlock *InsertAtEnd);
1498
1499 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1500 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1501 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1502
1503 /// Compute the number of operands to allocate.
1504 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1505 // We need one operand for the called function, plus the input operand
1506 // counts provided.
1507 return 1 + NumArgs + NumBundleInputs;
1508 }
1509
1510protected:
1511 // Note: Instruction needs to be a friend here to call cloneImpl.
1512 friend class Instruction;
1513
1514 CallInst *cloneImpl() const;
1515
1516public:
1517 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1518 Instruction *InsertBefore = nullptr) {
1519 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1520 }
1521
1522 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1523 const Twine &NameStr,
1524 Instruction *InsertBefore = nullptr) {
1525 return new (ComputeNumOperands(Args.size()))
1526 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1527 }
1528
1529 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1530 ArrayRef<OperandBundleDef> Bundles = None,
1531 const Twine &NameStr = "",
1532 Instruction *InsertBefore = nullptr) {
1533 const int NumOperands =
1534 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1535 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1536
1537 return new (NumOperands, DescriptorBytes)
1538 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1539 }
1540
1541 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1542 BasicBlock *InsertAtEnd) {
1543 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1544 }
1545
1546 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1547 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1548 return new (ComputeNumOperands(Args.size()))
1549 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1550 }
1551
1552 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1553 ArrayRef<OperandBundleDef> Bundles,
1554 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1555 const int NumOperands =
1556 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1557 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1558
1559 return new (NumOperands, DescriptorBytes)
1560 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1561 }
1562
1563 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1564 Instruction *InsertBefore = nullptr) {
1565 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1566 InsertBefore);
1567 }
1568
1569 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1570 ArrayRef<OperandBundleDef> Bundles = None,
1571 const Twine &NameStr = "",
1572 Instruction *InsertBefore = nullptr) {
1573 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1574 NameStr, InsertBefore);
1575 }
1576
1577 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1578 const Twine &NameStr,
1579 Instruction *InsertBefore = nullptr) {
1580 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1581 InsertBefore);
1582 }
1583
1584 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1585 BasicBlock *InsertAtEnd) {
1586 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1587 InsertAtEnd);
1588 }
1589
1590 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1591 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1592 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1593 InsertAtEnd);
1594 }
1595
1596 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1597 ArrayRef<OperandBundleDef> Bundles,
1598 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1599 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1600 NameStr, InsertAtEnd);
1601 }
1602
1603 /// Create a clone of \p CI with a different set of operand bundles and
1604 /// insert it before \p InsertPt.
1605 ///
1606 /// The returned call instruction is identical to \p CI in every way except that
1607 /// the operand bundles for the new instruction are set to the operand bundles
1608 /// in \p Bundles.
1609 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1610 Instruction *InsertPt = nullptr);
1611
1612 /// Generate the IR for a call to malloc:
1613 /// 1. Compute the malloc call's argument as the specified type's size,
1614 /// possibly multiplied by the array size if the array size is not
1615 /// constant 1.
1616 /// 2. Call malloc with that argument.
1617 /// 3. Bitcast the result of the malloc call to the specified type.
1618 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1619 Type *AllocTy, Value *AllocSize,
1620 Value *ArraySize = nullptr,
1621 Function *MallocF = nullptr,
1622 const Twine &Name = "");
1623 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1624 Type *AllocTy, Value *AllocSize,
1625 Value *ArraySize = nullptr,
1626 Function *MallocF = nullptr,
1627 const Twine &Name = "");
1628 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1629 Type *AllocTy, Value *AllocSize,
1630 Value *ArraySize = nullptr,
1631 ArrayRef<OperandBundleDef> Bundles = None,
1632 Function *MallocF = nullptr,
1633 const Twine &Name = "");
1634 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1635 Type *AllocTy, Value *AllocSize,
1636 Value *ArraySize = nullptr,
1637 ArrayRef<OperandBundleDef> Bundles = None,
1638 Function *MallocF = nullptr,
1639 const Twine &Name = "");
1640 /// Generate the IR for a call to the builtin free function.
1641 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1642 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1643 static Instruction *CreateFree(Value *Source,
1644 ArrayRef<OperandBundleDef> Bundles,
1645 Instruction *InsertBefore);
1646 static Instruction *CreateFree(Value *Source,
1647 ArrayRef<OperandBundleDef> Bundles,
1648 BasicBlock *InsertAtEnd);
1649
1650 // Note that 'musttail' implies 'tail'.
1651 enum TailCallKind : unsigned {
1652 TCK_None = 0,
1653 TCK_Tail = 1,
1654 TCK_MustTail = 2,
1655 TCK_NoTail = 3,
1656 TCK_LAST = TCK_NoTail
1657 };
1658
1659 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1660 static_assert(
1661 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1662 "Bitfields must be contiguous");
1663
1664 TailCallKind getTailCallKind() const {
1665 return getSubclassData<TailCallKindField>();
1666 }
1667
1668 bool isTailCall() const {
1669 TailCallKind Kind = getTailCallKind();
1670 return Kind == TCK_Tail || Kind == TCK_MustTail;
1671 }
1672
1673 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1674
1675 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1676
1677 void setTailCallKind(TailCallKind TCK) {
1678 setSubclassData<TailCallKindField>(TCK);
1679 }
1680
1681 void setTailCall(bool IsTc = true) {
1682 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1683 }
1684
1685 /// Return true if the call can return twice
1686 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1687 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1688
1689 // Methods for support type inquiry through isa, cast, and dyn_cast:
1690 static bool classof(const Instruction *I) {
1691 return I->getOpcode() == Instruction::Call;
1692 }
1693 static bool classof(const Value *V) {
1694 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1695 }
1696
1697 /// Updates profile metadata by scaling it by \p S / \p T.
1698 void updateProfWeight(uint64_t S, uint64_t T);
1699
1700private:
1701 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1702 // method so that subclasses cannot accidentally use it.
1703 template <typename Bitfield>
1704 void setSubclassData(typename Bitfield::Type Value) {
1705 Instruction::setSubclassData<Bitfield>(Value);
1706 }
1707};
1708
1709CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1710 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1711 BasicBlock *InsertAtEnd)
1712 : CallBase(Ty->getReturnType(), Instruction::Call,
1713 OperandTraits<CallBase>::op_end(this) -
1714 (Args.size() + CountBundleInputs(Bundles) + 1),
1715 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1716 InsertAtEnd) {
1717 init(Ty, Func, Args, Bundles, NameStr);
1718}
1719
1720CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1721 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1722 Instruction *InsertBefore)
1723 : CallBase(Ty->getReturnType(), Instruction::Call,
1724 OperandTraits<CallBase>::op_end(this) -
1725 (Args.size() + CountBundleInputs(Bundles) + 1),
1726 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1727 InsertBefore) {
1728 init(Ty, Func, Args, Bundles, NameStr);
1729}
1730
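A hedged sketch of the FunctionCallee overload of Create() and the tail-call bitfield above; the module is assumed to exist, and emitTailCall and "callee" are illustrative names:

#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
using namespace llvm;

CallInst *emitTailCall(Module &M, Value *Arg, Instruction *InsertBefore) {
  LLVMContext &Ctx = M.getContext();
  // Declare (or reuse) `i32 callee(i32)` in the module.
  FunctionCallee Callee = M.getOrInsertFunction(
      "callee", Type::getInt32Ty(Ctx), Type::getInt32Ty(Ctx));
  CallInst *CI = CallInst::Create(Callee, {Arg}, "ret", InsertBefore);
  // TCK_Tail is a hint; TCK_MustTail is a guarantee, and as noted above
  // 'musttail' implies 'tail'.
  CI->setTailCallKind(CallInst::TCK_Tail);
  return CI;
}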
1731//===----------------------------------------------------------------------===//
1732// SelectInst Class
1733//===----------------------------------------------------------------------===//
1734
1735/// This class represents the LLVM 'select' instruction.
1736///
1737class SelectInst : public Instruction {
1738 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1739 Instruction *InsertBefore)
1740 : Instruction(S1->getType(), Instruction::Select,
1741 &Op<0>(), 3, InsertBefore) {
1742 init(C, S1, S2);
1743 setName(NameStr);
1744 }
1745
1746 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1747 BasicBlock *InsertAtEnd)
1748 : Instruction(S1->getType(), Instruction::Select,
1749 &Op<0>(), 3, InsertAtEnd) {
1750 init(C, S1, S2);
1751 setName(NameStr);
1752 }
1753
1754 void init(Value *C, Value *S1, Value *S2) {
1755 assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1756 Op<0>() = C;
1757 Op<1>() = S1;
1758 Op<2>() = S2;
1759 }
1760
1761protected:
1762 // Note: Instruction needs to be a friend here to call cloneImpl.
1763 friend class Instruction;
1764
1765 SelectInst *cloneImpl() const;
1766
1767public:
1768 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1769 const Twine &NameStr = "",
1770 Instruction *InsertBefore = nullptr,
1771 Instruction *MDFrom = nullptr) {
1772 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1773 if (MDFrom)
1774 Sel->copyMetadata(*MDFrom);
1775 return Sel;
1776 }
1777
1778 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1779 const Twine &NameStr,
1780 BasicBlock *InsertAtEnd) {
1781 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1782 }
1783
1784 const Value *getCondition() const { return Op<0>(); }
1785 const Value *getTrueValue() const { return Op<1>(); }
1786 const Value *getFalseValue() const { return Op<2>(); }
1787 Value *getCondition() { return Op<0>(); }
1788 Value *getTrueValue() { return Op<1>(); }
1789 Value *getFalseValue() { return Op<2>(); }
1790
1791 void setCondition(Value *V) { Op<0>() = V; }
1792 void setTrueValue(Value *V) { Op<1>() = V; }
1793 void setFalseValue(Value *V) { Op<2>() = V; }
1794
1795 /// Swap the true and false values of the select instruction.
1796 /// This doesn't swap prof metadata.
1797 void swapValues() { Op<1>().swap(Op<2>()); }
1798
1799 /// Return a string if the specified operands are invalid
1800 /// for a select operation, otherwise return null.
1801 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1802
1803 /// Transparently provide more efficient getOperand methods.
1804 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1805
1806 OtherOps getOpcode() const {
1807 return static_cast<OtherOps>(Instruction::getOpcode());
1808 }
1809
1810 // Methods for support type inquiry through isa, cast, and dyn_cast:
1811 static bool classof(const Instruction *I) {
1812 return I->getOpcode() == Instruction::Select;
1813 }
1814 static bool classof(const Value *V) {
1815 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1816 }
1817};
1818
1819template <>
1820struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1821};
1822
1823DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1824
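A hedged sketch of SelectInst::Create above, paired with the ICmpInst constructor, to emit the canonical icmp+select form of a signed max (emitSMax is an illustrative name):

#include "llvm/IR/Instructions.h"
using namespace llvm;

Value *emitSMax(Value *A, Value *B, Instruction *InsertBefore) {
  ICmpInst *Cond =
      new ICmpInst(InsertBefore, ICmpInst::ICMP_SGT, A, B, "cmp");
  // Operand order matches init() above: condition, true value, false value.
  return SelectInst::Create(Cond, A, B, "smax", InsertBefore);
}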
1825//===----------------------------------------------------------------------===//
1826// VAArgInst Class
1827//===----------------------------------------------------------------------===//
1828
1829/// This class represents the va_arg llvm instruction, which returns
1830/// an argument of the specified type given a va_list and increments that list
1831///
1832class VAArgInst : public UnaryInstruction {
1833protected:
1834 // Note: Instruction needs to be a friend here to call cloneImpl.
1835 friend class Instruction;
1836
1837 VAArgInst *cloneImpl() const;
1838
1839public:
1840 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1841 Instruction *InsertBefore = nullptr)
1842 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1843 setName(NameStr);
1844 }
1845
1846 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1847 BasicBlock *InsertAtEnd)
1848 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1849 setName(NameStr);
1850 }
1851
1852 Value *getPointerOperand() { return getOperand(0); }
1853 const Value *getPointerOperand() const { return getOperand(0); }
1854 static unsigned getPointerOperandIndex() { return 0U; }
1855
1856 // Methods for support type inquiry through isa, cast, and dyn_cast:
1857 static bool classof(const Instruction *I) {
1858 return I->getOpcode() == VAArg;
1859 }
1860 static bool classof(const Value *V) {
1861 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1862 }
1863};
1864
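A hedged sketch of the insert-before VAArgInst constructor above; it reads the next i32 from a va_list pointer (emitNextI32VarArg is an illustrative name):

#include "llvm/IR/Instructions.h"
using namespace llvm;

Value *emitNextI32VarArg(Value *VAListPtr, Instruction *InsertBefore) {
  Type *I32 = Type::getInt32Ty(InsertBefore->getContext());
  // va_arg both yields the argument and advances the va_list it reads from.
  return new VAArgInst(VAListPtr, I32, "va.arg", InsertBefore);
}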
1865//===----------------------------------------------------------------------===//
1866// ExtractElementInst Class
1867//===----------------------------------------------------------------------===//
1868
1869/// This instruction extracts a single (scalar)
1870/// element from a VectorType value
1871///
1872class ExtractElementInst : public Instruction {
1873 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1874 Instruction *InsertBefore = nullptr);
1875 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1876 BasicBlock *InsertAtEnd);
1877
1878protected:
1879 // Note: Instruction needs to be a friend here to call cloneImpl.
1880 friend class Instruction;
1881
1882 ExtractElementInst *cloneImpl() const;
1883
1884public:
1885 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1886 const Twine &NameStr = "",
1887 Instruction *InsertBefore = nullptr) {
1888 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1889 }
1890
1891 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1892 const Twine &NameStr,
1893 BasicBlock *InsertAtEnd) {
1894 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1895 }
1896
1897 /// Return true if an extractelement instruction can be
1898 /// formed with the specified operands.
1899 static bool isValidOperands(const Value *Vec, const Value *Idx);
1900
1901 Value *getVectorOperand() { return Op<0>(); }
1902 Value *getIndexOperand() { return Op<1>(); }
1903 const Value *getVectorOperand() const { return Op<0>(); }
1904 const Value *getIndexOperand() const { return Op<1>(); }
1905
1906 VectorType *getVectorOperandType() const {
1907 return cast<VectorType>(getVectorOperand()->getType());
1908 }
1909
1910 /// Transparently provide more efficient getOperand methods.
1911 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1912
1913 // Methods for support type inquiry through isa, cast, and dyn_cast:
1914 static bool classof(const Instruction *I) {
1915 return I->getOpcode() == Instruction::ExtractElement;
1916 }
1917 static bool classof(const Value *V) {
1918 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1919 }
1920};
1921
1922template <>
1923struct OperandTraits<ExtractElementInst> :
1924 public FixedNumOperandTraits<ExtractElementInst, 2> {
1925};
1926
1927DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1928
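A hedged sketch pairing isValidOperands with Create, per the contract above (tryExtractLane is an illustrative name):

#include "llvm/IR/Instructions.h"
using namespace llvm;

Value *tryExtractLane(Value *Vec, Value *Idx, Instruction *InsertBefore) {
  // Reject e.g. a non-vector first operand or a non-integer index instead
  // of tripping the constructor's assertions.
  if (!ExtractElementInst::isValidOperands(Vec, Idx))
    return nullptr;
  return ExtractElementInst::Create(Vec, Idx, "elt", InsertBefore);
}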
1929//===----------------------------------------------------------------------===//
1930// InsertElementInst Class
1931//===----------------------------------------------------------------------===//
1932
1933/// This instruction inserts a single (scalar)
1934/// element into a VectorType value
1935///
1936class InsertElementInst : public Instruction {
1937 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1938 const Twine &NameStr = "",
1939 Instruction *InsertBefore = nullptr);
1940 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1941 BasicBlock *InsertAtEnd);
1942
1943protected:
1944 // Note: Instruction needs to be a friend here to call cloneImpl.
1945 friend class Instruction;
1946
1947 InsertElementInst *cloneImpl() const;
1948
1949public:
1950 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1951 const Twine &NameStr = "",
1952 Instruction *InsertBefore = nullptr) {
1953 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1954 }
1955
1956 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1957 const Twine &NameStr,
1958 BasicBlock *InsertAtEnd) {
1959 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1960 }
1961
1962 /// Return true if an insertelement instruction can be
1963 /// formed with the specified operands.
1964 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1965 const Value *Idx);
1966
1967 /// Overload to return most specific vector type.
1968 ///
1969 VectorType *getType() const {
1970 return cast<VectorType>(Instruction::getType());
1971 }
1972
1973 /// Transparently provide more efficient getOperand methods.
1974 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1975
1976 // Methods for support type inquiry through isa, cast, and dyn_cast:
1977 static bool classof(const Instruction *I) {
1978 return I->getOpcode() == Instruction::InsertElement;
1979 }
1980 static bool classof(const Value *V) {
1981 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1982 }
1983};
1984
1985template <>
1986struct OperandTraits<InsertElementInst> :
1987 public FixedNumOperandTraits<InsertElementInst, 3> {
1988};
1989
1990DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
1991
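A hedged sketch of InsertElementInst::Create above, overwriting lane 0 of a vector value (setLaneZero is an illustrative name):

#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

Value *setLaneZero(Value *Vec, Value *Elt, Instruction *InsertBefore) {
  LLVMContext &Ctx = Vec->getContext();
  // The three fixed operands are: vector, new element, index.
  Value *Zero = ConstantInt::get(Type::getInt64Ty(Ctx), 0);
  return InsertElementInst::Create(Vec, Elt, Zero, "ins", InsertBefore);
}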
1992//===----------------------------------------------------------------------===//
1993// ShuffleVectorInst Class
1994//===----------------------------------------------------------------------===//
1995
1996constexpr int UndefMaskElem = -1;
1997
1998/// This instruction constructs a fixed permutation of two
1999/// input vectors.
2000///
2001/// For each element of the result vector, the shuffle mask selects an element
2002/// from one of the input vectors to copy to the result. Non-negative elements
2003/// in the mask represent an index into the concatenated pair of input vectors.
2004/// UndefMaskElem (-1) specifies that the result element is undefined.
2005///
2006/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2007/// requirement may be relaxed in the future.
2008class ShuffleVectorInst : public Instruction {
2009 SmallVector<int, 4> ShuffleMask;
2010 Constant *ShuffleMaskForBitcode;
2011
2012protected:
2013 // Note: Instruction needs to be a friend here to call cloneImpl.
2014 friend class Instruction;
2015
2016 ShuffleVectorInst *cloneImpl() const;
2017
2018public:
2019 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2020 Instruction *InsertBefore = nullptr);
2021 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2022 BasicBlock *InsertAtEnd);
2023 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2024 Instruction *InsertBefore = nullptr);
2025 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2026 BasicBlock *InsertAtEnd);
2027 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2028 const Twine &NameStr = "",
2029 Instruction *InsertBefore = nullptr);
2030 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2031 const Twine &NameStr, BasicBlock *InsertAtEnd);
2032 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2033 const Twine &NameStr = "",
2034 Instruction *InsertBefore = nullptr);
2035 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2036 const Twine &NameStr, BasicBlock *InsertAtEnd);
2037
2038 void *operator new(size_t S) { return User::operator new(S, 2); }
2039 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2040
2041 /// Swap the operands and adjust the mask to preserve the semantics
2042 /// of the instruction.
2043 void commute();
2044
2045 /// Return true if a shufflevector instruction can be
2046 /// formed with the specified operands.
2047 static bool isValidOperands(const Value *V1, const Value *V2,
2048 const Value *Mask);
2049 static bool isValidOperands(const Value *V1, const Value *V2,
2050 ArrayRef<int> Mask);
2051
2052 /// Overload to return the most specific vector type.
2053 ///
2054 VectorType *getType() const {
2055 return cast<VectorType>(Instruction::getType());
2056 }
2057
2058 /// Transparently provide more efficient getOperand methods.
2059 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
2060
2061 /// Return the shuffle mask value of this instruction for the given element
2062 /// index. Return UndefMaskElem if the element is undef.
2063 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2064
2065 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2066 /// elements of the mask are returned as UndefMaskElem.
2067 static void getShuffleMask(const Constant *Mask,
2068 SmallVectorImpl<int> &Result);
2069
2070 /// Return the mask for this instruction as a vector of integers. Undefined
2071 /// elements of the mask are returned as UndefMaskElem.
2072 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2073 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2074 }
2075
2076 /// Return the mask for this instruction, for use in bitcode.
2077 ///
2078 /// TODO: This is temporary until we decide a new bitcode encoding for
2079 /// shufflevector.
2080 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2081
2082 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2083 Type *ResultTy);
2084
2085 void setShuffleMask(ArrayRef<int> Mask);
2086
2087 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2088
2089 /// Return true if this shuffle returns a vector with a different number of
2090 /// elements than its source vectors.
2091 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2092 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2093 bool changesLength() const {
2094 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2095 ->getElementCount()
2096 .getKnownMinValue();
2097 unsigned NumMaskElts = ShuffleMask.size();
2098 return NumSourceElts != NumMaskElts;
2099 }
2100
2101 /// Return true if this shuffle returns a vector with a greater number of
2102 /// elements than its source vectors.
2103 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2104 bool increasesLength() const {
2105 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2106 ->getElementCount()
2107 .getKnownMinValue();
2108 unsigned NumMaskElts = ShuffleMask.size();
2109 return NumSourceElts < NumMaskElts;
2110 }
2111
2112 /// Return true if this shuffle mask chooses elements from exactly one source
2113 /// vector.
2114 /// Example: <7,5,undef,7>
2115 /// This assumes that vector operands are the same length as the mask.
2116 static bool isSingleSourceMask(ArrayRef<int> Mask);
2117 static bool isSingleSourceMask(const Constant *Mask) {
2118 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2119 SmallVector<int, 16> MaskAsInts;
2120 getShuffleMask(Mask, MaskAsInts);
2121 return isSingleSourceMask(MaskAsInts);
2122 }
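  // Editor's sketch (not part of LLVM): the single-source shape written out
  // over plain indices, assuming the vector operands have the same length as
  // the mask. An all-undef mask is treated here as choosing no source.
  static bool isSingleSourceMaskSketch(ArrayRef<int> Mask) {
    bool UsesLHS = false, UsesRHS = false;
    int NumElts = (int)Mask.size();
    for (int Idx : Mask) {
      if (Idx == UndefMaskElem)
        continue;               // undef lane constrains nothing
      if (Idx < NumElts)
        UsesLHS = true;         // lane drawn from operand 0
      else
        UsesRHS = true;         // lane drawn from operand 1
    }
    return UsesLHS != UsesRHS;  // exactly one operand referenced
  }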
2123
2124 /// Return true if this shuffle chooses elements from exactly one source
2125 /// vector without changing the length of that vector.
2126 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2127 /// TODO: Optionally allow length-changing shuffles.
2128 bool isSingleSource() const {
2129 return !changesLength() && isSingleSourceMask(ShuffleMask);
2130 }
2131
2132 /// Return true if this shuffle mask chooses elements from exactly one source
2133 /// vector without lane crossings. A shuffle using this mask is not
2134 /// necessarily a no-op because it may change the number of elements from its
2135 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2136 /// Example: <undef,undef,2,3>
2137 static bool isIdentityMask(ArrayRef<int> Mask);
2138 static bool isIdentityMask(const Constant *Mask) {
2139 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2140 SmallVector<int, 16> MaskAsInts;
2141 getShuffleMask(Mask, MaskAsInts);
2142 return isIdentityMask(MaskAsInts);
2143 }
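  // Editor's example (not in LLVM): the mask from the comment above, spelled
  // as integers with UndefMaskElem standing in for 'undef'.
  static void isIdentityMaskExample() {
    assert(isIdentityMask({UndefMaskElem, UndefMaskElem, 2, 3}) &&
           "undef lanes are ignored; defined lanes stay in place");
  }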
2144
2145 /// Return true if this shuffle chooses elements from exactly one source
2146 /// vector without lane crossings and does not change the number of elements
2147 /// from its input vectors.
2148 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2149 bool isIdentity() const {
2150 return !changesLength() && isIdentityMask(ShuffleMask);
2151 }
2152
2153 /// Return true if this shuffle lengthens exactly one source vector with
2154 /// undefs in the high elements.
2155 bool isIdentityWithPadding() const;
2156
2157 /// Return true if this shuffle extracts the first N elements of exactly one
2158 /// source vector.
2159 bool isIdentityWithExtract() const;
2160
2161 /// Return true if this shuffle concatenates its 2 source vectors. This
2162 /// returns false if either input is undefined. In that case, the shuffle is
2163 /// better classified as an identity with padding operation.
2164 bool isConcat() const;
2165
2166 /// Return true if this shuffle mask chooses elements from its source vectors
2167 /// without lane crossings. A shuffle using this mask would be
2168 /// equivalent to a vector select with a constant condition operand.
2169 /// Example: <4,1,6,undef>
2170 /// This returns false if the mask does not choose from both input vectors.
2171 /// In that case, the shuffle is better classified as an identity shuffle.
2172 /// This assumes that vector operands are the same length as the mask
2173 /// (a length-changing shuffle can never be equivalent to a vector select).
2174 static bool isSelectMask(ArrayRef<int> Mask);
2175 static bool isSelectMask(const Constant *Mask) {
2176 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2177 SmallVector<int, 16> MaskAsInts;
2178 getShuffleMask(Mask, MaskAsInts);
2179 return isSelectMask(MaskAsInts);
2180 }
2181
2182 /// Return true if this shuffle chooses elements from its source vectors
2183 /// without lane crossings and all operands have the same number of elements.
2184 /// In other words, this shuffle is equivalent to a vector select with a
2185 /// constant condition operand.
2186 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2187 /// This returns false if the mask does not choose from both input vectors.
2188 /// In that case, the shuffle is better classified as an identity shuffle.
2189 /// TODO: Optionally allow length-changing shuffles.
2190 bool isSelect() const {
2191 return !changesLength() && isSelectMask(ShuffleMask);
2192 }
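  // Editor's sketch (not part of LLVM): the lane-wise view of a select mask.
  // With equal-length operands, lane i of the result must be lane i of
  // operand 0 (index i) or lane i of operand 1 (index i + NumElts); any other
  // index crosses lanes and cannot be a vector select.
  static bool isSelectMaskSketch(ArrayRef<int> Mask) {
    int NumElts = (int)Mask.size();
    bool FromLHS = false, FromRHS = false;
    for (int i = 0; i != NumElts; ++i) {
      if (Mask[i] == UndefMaskElem)
        continue;
      if (Mask[i] == i)
        FromLHS = true;            // lane i taken from operand 0
      else if (Mask[i] == i + NumElts)
        FromRHS = true;            // lane i taken from operand 1
      else
        return false;              // lane crossing: not a select
    }
    return FromLHS && FromRHS;     // must draw from both operands
  }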
2193
2194 /// Return true if this shuffle mask swaps the order of elements from exactly
2195 /// one source vector.
2196 /// Example: <7,6,undef,4>
2197 /// This assumes that vector operands are the same length as the mask.
2198 static bool isReverseMask(ArrayRef<int> Mask);
2199 static bool isReverseMask(const Constant *Mask) {
2200 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2201 SmallVector<int, 16> MaskAsInts;
2202 getShuffleMask(Mask, MaskAsInts);
2203 return isReverseMask(MaskAsInts);
2204 }
2205
2206 /// Return true if this shuffle swaps the order of elements from exactly
2207 /// one source vector.
2208 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2209 /// TODO: Optionally allow length-changing shuffles.
2210 bool isReverse() const {
2211 return !changesLength() && isReverseMask(ShuffleMask);
2212 }
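  // Editor's example (not in LLVM): the mask from the comment above;
  // <7,6,undef,4> reads operand 1 (indices 4..7) back to front.
  static void isReverseMaskExample() {
    assert(isReverseMask({7, 6, UndefMaskElem, 4}) && "reversal of operand 1");
  }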
2213
2214 /// Return true if this shuffle mask chooses all elements with the same value
2215 /// as the first element of exactly one source vector.
2216 /// Example: <4,undef,undef,4>
2217 /// This assumes that vector operands are the same length as the mask.
2218 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2219 static bool isZeroEltSplatMask(const Constant *Mask) {
2220 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2221 SmallVector<int, 16> MaskAsInts;
2222 getShuffleMask(Mask, MaskAsInts);
2223 return isZeroEltSplatMask(MaskAsInts);
2224 }
2225
2226 /// Return true if all elements of this shuffle are the same value as the
2227 /// first element of exactly one source vector without changing the length
2228 /// of that vector.
2229 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2230 /// TODO: Optionally allow length-changing shuffles.
2231 /// TODO: Optionally allow splats from other elements.
2232 bool isZeroEltSplat() const {
2233 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2234 }
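  // Editor's example (not in LLVM): the mask from the comment above; every
  // defined lane reads element 0 of operand 1, which is index 4 here.
  static void isZeroEltSplatMaskExample() {
    assert(isZeroEltSplatMask({4, UndefMaskElem, UndefMaskElem, 4}));
  }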
2235
2236 /// Return true if this shuffle mask is a transpose mask.
2237 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2238 /// even- or odd-numbered vector elements from two n-dimensional source
2239 /// vectors and write each result into consecutive elements of an
2240 /// n-dimensional destination vector. Two shuffles are necessary to complete
2241 /// the transpose, one for the even elements and another for the odd elements.
2242 /// This description closely follows how the TRN1 and TRN2 AArch64
2243 /// instructions operate.
2244 ///
2245 /// For example, a simple 2x2 matrix can be transposed with:
2246 ///
2247 /// ; Original matrix
2248 /// m0 = < a, b >
2249 /// m1 = < c, d >
2250 ///
2251 /// ; Transposed matrix
2252 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2253 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2254 ///
2255 /// For matrices with more than n columns, the resulting nx2 transposed
2256 /// matrix is stored in two result vectors such that one vector contains
2257 /// interleaved elements from all the even-numbered rows and the other vector
2258 /// contains interleaved elements from all the odd-numbered rows. For example,
2259 /// a 2x4 matrix can be transposed with:
2260 ///
2261 /// ; Original matrix
2262 /// m0 = < a, b, c, d >
2263 /// m1 = < e, f, g, h >
2264 ///
2265 /// ; Transposed matrix
2266 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2267 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2268 static bool isTransposeMask(ArrayRef<int> Mask);
2269 static bool isTransposeMask(const Constant *Mask) {
2270 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2271 SmallVector<int, 16> MaskAsInts;
2272 getShuffleMask(Mask, MaskAsInts);
2273 return isTransposeMask(MaskAsInts);
2274 }
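  // Editor's example (not in LLVM): the two 4-element transpose masks from
  // the 2x4 example above, one per TRN1/TRN2-style result vector.
  static void isTransposeMaskExample() {
    assert(isTransposeMask({0, 4, 2, 6}) && "even-numbered elements");
    assert(isTransposeMask({1, 5, 3, 7}) && "odd-numbered elements");
  }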
2275
2276 /// Return true if this shuffle transposes the elements of its inputs without
2277 /// changing the length of the vectors. This operation may also be known as a
2278 /// merge or interleave. See the description for isTransposeMask() for the
2279 /// exact specification.
2280 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2281 bool isTranspose() const {
2282 return !changesLength() && isTransposeMask(ShuffleMask);
2283 }
2284
2285 /// Return true if this shuffle mask is an extract subvector mask.
2286 /// A valid extract subvector mask returns a smaller vector from a single
2287 /// source operand. The base extraction index is returned as well.
2288 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2289 int &Index);
2290 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2291 int &Index) {
2292 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2293 // Not possible to express a shuffle mask for a scalable vector for this
2294 // case.
2295 if (isa<ScalableVectorType>(Mask->getType()))
2296 return false;
2297 SmallVector<int, 16> MaskAsInts;
2298 getShuffleMask(Mask, MaskAsInts);
2299 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2300 }
2301
2302 /// Return true if this shuffle mask is an extract subvector mask.
2303 bool isExtractSubvectorMask(int &Index) const {
2304 // Not possible to express a shuffle mask for a scalable vector for this
2305 // case.
2306 if (isa<ScalableVectorType>(getType()))
2307 return false;
2308
2309 int NumSrcElts =
2310 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2311 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2312 }
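  // Editor's example (not in LLVM): with 8-element sources, <4,5,6,7> takes
  // a 4-element subvector out of a single operand starting at index 4.
  static void isExtractSubvectorMaskExample() {
    int Index;
    assert(isExtractSubvectorMask({4, 5, 6, 7}, /*NumSrcElts=*/8, Index) &&
           Index == 4);
  }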
2313
2314 /// Return true if this shuffle mask is an insert subvector mask.
2315 /// A valid insert subvector mask inserts the lowest elements of a second
2316 /// source operand into an in-place first source operand.
2317 /// Both the subvector width and the insertion index are returned.
2318 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2319 int &NumSubElts, int &Index);
2320 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2321 int &NumSubElts, int &Index) {
2322 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2323 // Not possible to express a shuffle mask for a scalable vector for this
2324 // case.
2325 if (isa<ScalableVectorType>(Mask->getType()))
2326 return false;
2327 SmallVector<int, 16> MaskAsInts;
2328 getShuffleMask(Mask, MaskAsInts);
2329 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2330 }
2331
2332 /// Return true if this shuffle mask is an insert subvector mask.
2333 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2334 // Not possible to express a shuffle mask for a scalable vector for this
2335 // case.
2336 if (isa<ScalableVectorType>(getType()))
2337 return false;
2338
2339 int NumSrcElts =
2340 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2341 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2342 }
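  // Editor's example (not in LLVM, mask chosen for illustration): with
  // 8-element sources, <0,1,8,9,4,5,6,7> keeps operand 0 in place except
  // lanes 2-3, which take the two lowest lanes of operand 1, giving
  // NumSubElts == 2 and Index == 2.
  static void isInsertSubvectorMaskExample() {
    int NumSubElts, Index;
    assert(isInsertSubvectorMask({0, 1, 8, 9, 4, 5, 6, 7}, /*NumSrcElts=*/8,
                                 NumSubElts, Index) &&
           NumSubElts == 2 && Index == 2);
  }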
2343
2344 /// Return true if this shuffle mask replicates each of the \p VF elements
2345 /// in a vector \p ReplicationFactor times.
2346 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2347 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2348 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2349 int &VF);
2350 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2351 int &VF) {
2352 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2353 // Not possible to express a shuffle mask for a scalable vector for this
2354 // case.
2355 if (isa<ScalableVectorType>(Mask->getType()))
2356 return false;
2357 SmallVector<int, 16> MaskAsInts;
2358 getShuffleMask(Mask, MaskAsInts);
2359 return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2360 }
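  // Editor's example (not in LLVM): the documented mask for
  // ReplicationFactor == 3 and VF == 4.
  static void isReplicationMaskExample() {
    int ReplicationFactor, VF;
    assert(isReplicationMask({0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3},
                             ReplicationFactor, VF) &&
           ReplicationFactor == 3 && VF == 4);
  }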
2361
2362 /// Return true if this shuffle mask is a replication mask.
2363 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2364
2365 /// Change values in a shuffle permute mask assuming the two vector operands
2366 /// of length InVecNumElts have swapped position.
2367 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2368 unsigned InVecNumElts) {
2369 for (int &Idx : Mask) {
2370 if (Idx == -1)
2371 continue;
2372 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2373 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2374        "shufflevector mask index out of range");
2375 }
2376 }
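  // Editor's example (not in LLVM): commuting <0,5,2,7> with InVecNumElts
  // == 4 yields <4,1,6,3>; every index hops to the other half because the
  // two operands have swapped position.
  static void commuteShuffleMaskExample() {
    SmallVector<int, 4> Mask = {0, 5, 2, 7};
    commuteShuffleMask(Mask, /*InVecNumElts=*/4);
    assert(Mask[0] == 4 && Mask[1] == 1 && Mask[2] == 6 && Mask[3] == 3);
  }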
2377
2378 // Methods for support type inquiry through isa, cast, and dyn_cast:
2379 static bool classof(const Instruction *I) {
2380 return I->getOpcode() == Instruction::ShuffleVector;
2381 }
2382 static bool classof(const Value *V) {
2383 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2384 }
2385};
2386
2387template <>
2388struct OperandTraits<ShuffleVectorInst>
2389 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2390
2391 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2392
2393//===----------------------------------------------------------------------===//
2394// ExtractValueInst Class
2395//===----------------------------------------------------------------------===//
2396
2397/// This instruction extracts a struct member or array
2398/// element value from an aggregate value.
2399///
2400class ExtractValueInst : public UnaryInstruction {
2401 SmallVector<unsigned, 4> Indices;
2402
2403 ExtractValueInst(const ExtractValueInst &EVI);
2404
2405 /// Constructors - Create an extractvalue instruction with a base aggregate
2406 /// value and a list of indices. The first ctor can optionally insert before
2407 /// an existing instruction, the second appends the new instruction to the
2408 /// specified BasicBlock.
2409 inline ExtractValueInst(Value *Agg,
2410 ArrayRef<unsigned> Idxs,
2411 const Twine &NameStr,
2412 Instruction *InsertBefore);
2413 inline ExtractValueInst(Value *Agg,
2414 ArrayRef<unsigned> Idxs,
2415 const Twine &NameStr, BasicBlock *InsertAtEnd);
2416
2417 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2418
2419protected:
2420 // Note: Instruction needs to be a friend here to call cloneImpl.
2421 friend class Instruction;
2422
2423 ExtractValueInst *cloneImpl() const;
2424
2425public:
2426 static ExtractValueInst *Create(Value *Agg,
2427 ArrayRef<unsigned> Idxs,
2428 const Twine &NameStr = "",
2429 Instruction *InsertBefore = nullptr) {
2430 return new
2431 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2432 }
2433
2434 static ExtractValueInst *Create(Value *Agg,
2435 ArrayRef<unsigned> Idxs,
2436 const Twine &NameStr,
2437 BasicBlock *InsertAtEnd) {
2438 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2439 }
2440
2441 /// Returns the type of the element that would be extracted
2442 /// with an extractvalue instruction with the specified parameters.
2443 ///
2444 /// Null is returned if the indices are invalid for the specified type.
2445 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2446
2447 using idx_iterator = const unsigned*;
2448
2449 inline idx_iterator idx_begin() const { return Indices.begin(); }
2450 inline idx_iterator idx_end() const { return Indices.end(); }
2451 inline iterator_range<idx_iterator> indices() const {
2452 return make_range(idx_begin(), idx_end());
2453 }
2454
2455 Value *getAggregateOperand() {
2456 return getOperand(0);
2457 }
2458 const Value *getAggregateOperand() const {
2459 return getOperand(0);
2460 }
2461 static unsigned getAggregateOperandIndex() {
2462 return 0U; // get index for modifying correct operand
2463 }
2464
2465 ArrayRef<unsigned> getIndices() const {
2466 return Indices;
2467 }
2468
2469 unsigned getNumIndices() const {
2470 return (unsigned)Indices.size();
2471 }
2472
2473 bool hasIndices() const {
2474 return true;
2475 }
2476
2477 // Methods for support type inquiry through isa, cast, and dyn_cast:
2478 static bool classof(const Instruction *I) {
2479 return I->getOpcode() == Instruction::ExtractValue;
2480 }
2481 static bool classof(const Value *V) {
2482 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2483 }
2484};
2485
2486ExtractValueInst::ExtractValueInst(Value *Agg,
2487 ArrayRef<unsigned> Idxs,
2488 const Twine &NameStr,
2489 Instruction *InsertBefore)
2490 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2491 ExtractValue, Agg, InsertBefore) {
2492 init(Idxs, NameStr);
2493}
2494
2495ExtractValueInst::ExtractValueInst(Value *Agg,
2496 ArrayRef<unsigned> Idxs,
2497 const Twine &NameStr,
2498 BasicBlock *InsertAtEnd)
2499 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2500 ExtractValue, Agg, InsertAtEnd) {
2501 init(Idxs, NameStr);
2502}
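// --- Editor's usage sketch (not part of Instructions.h) ---
// Build a function that returns field 1 of a {i32, float} argument, going
// through IRBuilder::CreateExtractValue, which creates the ExtractValueInst
// described above. Module and function names here are illustrative only.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
using namespace llvm;

int main() {
  LLVMContext Ctx;
  Module M("demo", Ctx);
  auto *STy =
      StructType::get(Ctx, {Type::getInt32Ty(Ctx), Type::getFloatTy(Ctx)});
  auto *FTy =
      FunctionType::get(Type::getFloatTy(Ctx), {STy}, /*isVarArg=*/false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "getField", &M);
  IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));
  Value *Fld = B.CreateExtractValue(F->getArg(0), {1}, "fld"); // field index 1
  B.CreateRet(Fld);
  return verifyFunction(*F) ? 1 : 0; // verifier returns true on error
}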
2503
2504//===----------------------------------------------------------------------===//
2505// InsertValueInst Class
2506//===----------------------------------------------------------------------===//
2507
2508 /// This instruction inserts a struct field or array element
2509/// value into an aggregate value.
2510///
2511class InsertValueInst : public Instruction {
2512 SmallVector<unsigned, 4> Indices;
2513
2514 InsertValueInst(const InsertValueInst &IVI);
2515
2516 /// Constructors - Create an insertvalue instruction with a base aggregate
2517 /// value, a value to insert, and a list of indices. The first ctor can
2518 /// optionally insert before an existing instruction, the second appends
2519 /// the new instruction to the specified BasicBlock.
2520 inline InsertValueInst(Value *Agg, Value *Val,
2521 ArrayRef<unsigned> Idxs,
2522 const Twine &NameStr,
2523 Instruction *InsertBefore);
2524 inline InsertValueInst(Value *Agg, Value *Val,
2525 ArrayRef<unsigned> Idxs,
2526 const Twine &NameStr, BasicBlock *InsertAtEnd);
2527
2528 /// Constructors - These two constructors are convenience methods because one
2529 /// and two index insertvalue instructions are so common.
2530 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2531 const Twine &NameStr = "",
2532 Instruction *InsertBefore = nullptr);
2533 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2534 BasicBlock *InsertAtEnd);
2535
2536 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2537 const Twine &NameStr);
2538
2539protected:
2540 // Note: Instruction needs to be a friend here to call cloneImpl.
2541 friend class Instruction;
2542
2543 InsertValueInst *cloneImpl() const;
2544
2545public:
2546 // allocate space for exactly two operands
2547 void *operator new(size_t S) { return User::operator new(S, 2); }
2548 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2549
2550 static InsertValueInst *Create(Value *Agg, Value *Val,
2551 ArrayRef<unsigned> Idxs,
2552 const Twine &NameStr = "",
2553 Instruction *InsertBefore = nullptr) {
2554 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore);
2555 }
2556
2557 static InsertValueInst *Create(Value *Agg, Value *Val,
2558 ArrayRef<unsigned> Idxs,
2559 const Twine &NameStr,
2560 BasicBlock *InsertAtEnd) {
2561 return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd);
2562 }
2563
2564 /// Transparently provide more efficient getOperand methods.
2565 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
2566
2567 using idx_iterator = const unsigned*;
2568
2569 inline idx_iterator idx_begin() const { return Indices.begin(); }
2570 inline idx_iterator idx_end() const { return Indices.end(); }
2571 inline iterator_range<idx_iterator> indices() const {
2572 return make_range(idx_begin(), idx_end());
2573 }
2574
2575 Value *getAggregateOperand() {
2576 return getOperand(0);
2577 }
2578 const Value *getAggregateOperand() const {
2579 return getOperand(0);
2580 }
2581 static unsigned getAggregateOperandIndex() {
2582 return 0U; // get index for modifying correct operand
2583 }
2584
2585 Value *getInsertedValueOperand() {
2586 return getOperand(1);
2587 }
2588 const Value *getInsertedValueOperand() const {
2589 return getOperand(1);
2590 }
2591 static unsigned getInsertedValueOperandIndex() {
2592 return 1U; // get index for modifying correct operand
2593 }
2594
2595 ArrayRef<unsigned> getIndices() const {
2596 return Indices;
2597 }
2598
2599 unsigned getNumIndices() const {
2600 return (unsigned)Indices.size();
2601 }
2602
2603 bool hasIndices() const {
2604 return true;
2605 }
2606
2607 // Methods for support type inquiry through isa, cast, and dyn_cast:
2608 static bool classof(const Instruction *I) {
2609 return I->getOpcode() == Instruction::InsertValue;
2610 }
2611 static bool classof(const Value *V) {
2612 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2613 }
2614};
2615
2616template <>
2617struct OperandTraits<InsertValueInst> :
2618 public FixedNumOperandTraits<InsertValueInst, 2> {
2619};
2620
2621InsertValueInst::InsertValueInst(Value *Agg,
2622 Value *Val,
2623 ArrayRef<unsigned> Idxs,
2624 const Twine &NameStr,
2625 Instruction *InsertBefore)
2626 : Instruction(Agg->getType(), InsertValue,
2627 OperandTraits<InsertValueInst>::op_begin(this),
2628 2, InsertBefore) {
2629 init(Agg, Val, Idxs, NameStr);
2630}
2631
2632InsertValueInst::InsertValueInst(Value *Agg,
2633 Value *Val,
2634 ArrayRef<unsigned> Idxs,
2635 const Twine &NameStr,
2636 BasicBlock *InsertAtEnd)
2637 : Instruction(Agg->getType(), InsertValue,
2638 OperandTraits<InsertValueInst>::op_begin(this),
2639 2, InsertAtEnd) {
2640 init(Agg, Val, Idxs, NameStr);
2641}
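// --- Editor's usage sketch (not part of Instructions.h) ---
// The mirror image of the extractvalue sketch earlier: populate a two-field
// aggregate from scalars with chained IRBuilder::CreateInsertValue calls,
// which create the InsertValueInst described above. Assumes the includes
// from that sketch; names are illustrative only.
Value *buildPair(IRBuilder<> &B, StructType *STy, Value *IntVal,
                 Value *FltVal) {
  Value *Agg = UndefValue::get(STy);            // start from an undef aggregate
  Agg = B.CreateInsertValue(Agg, IntVal, {0});  // fill field 0
  return B.CreateInsertValue(Agg, FltVal, {1}); // fill field 1
}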
2642
2643 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
2644
2645//===----------------------------------------------------------------------===//
2646// PHINode Class
2647//===----------------------------------------------------------------------===//
2648
2649// PHINode - The PHINode class is used to represent the magical mystical PHI
2650// node, which cannot exist in nature, but can be synthesized in a computer
2651// scientist's overactive imagination.
2652//
2653class PHINode : public Instruction {
2654 /// The number of operands actually allocated. NumOperands is
2655 /// the number actually in use.
2656 unsigned ReservedSpace;
2657
2658 PHINode(const PHINode &PN);
2659
2660 explicit PHINode(Type *Ty, unsigned NumReservedValues,
2661 const Twine &NameStr = "",
2662 Instruction *InsertBefore = nullptr)
2663 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore),
2664 ReservedSpace(NumReservedValues) {
2665 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2666 setName(NameStr);
2667 allocHungoffUses(ReservedSpace);
2668 }
2669
2670 PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr,
2671 BasicBlock *InsertAtEnd)
2672 : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd),
2673 ReservedSpace(NumReservedValues) {
2674 assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!");
2675 setName(NameStr);
2676 allocHungoffUses(ReservedSpace);
2677 }
2678
2679protected:
2680 // Note: Instruction needs to be a friend here to call cloneImpl.
2681 friend class Instruction;
2682
2683 PHINode *cloneImpl() const;
2684
2685 // allocHungoffUses - this is more complicated than the generic
2686 // User::allocHungoffUses, because we have to allocate Uses for the incoming
2687 // values and pointers to the incoming blocks, all in one allocation.
2688 void allocHungoffUses(unsigned N) {
2689 User::allocHungoffUses(N, /* IsPhi */ true);
2690 }
2691
2692public:
2693 /// Constructors - NumReservedValues is a hint for the number of incoming
2694 /// edges that this phi node will have (use 0 if you really have no idea).
2695 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2696 const Twine &NameStr = "",
2697 Instruction *InsertBefore = nullptr) {
2698 return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore);
2699 }
2700
2701 static PHINode *Create(Type *Ty, unsigned NumReservedValues,
2702 const Twine &NameStr, BasicBlock *InsertAtEnd) {
2703 return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd);
2704 }
2705
2706 /// Provide fast operand accessors
2707 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
2708
2709 // Block iterator interface. This provides access to the list of incoming
2710 // basic blocks, which parallels the list of incoming values.
2711
2712 using block_iterator = BasicBlock **;
2713 using const_block_iterator = BasicBlock * const *;
2714
2715 block_iterator block_begin() {
2716 return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace);
2717 }
2718
2719 const_block_iterator block_begin() const {
2720 return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace);
2721 }
2722
2723 block_iterator block_end() {
2724 return block_begin() + getNumOperands();
2725 }
2726
2727 const_block_iterator block_end() const {
2728 return block_begin() + getNumOperands();
2729 }
2730
2731 iterator_range<block_iterator> blocks() {
2732 return make_range(block_begin(), block_end());
2733 }
2734
2735 iterator_range<const_block_iterator> blocks() const {
2736 return make_range(block_begin(), block_end());
2737 }
2738
2739 op_range incoming_values() { return operands(); }
2740
2741 const_op_range incoming_values() const { return operands(); }
2742
2743 /// Return the number of incoming edges
2744 ///
2745 unsigned getNumIncomingValues() const { return getNumOperands(); }
2746
2747 /// Return incoming value number x
2748 ///
2749 Value *getIncomingValue(unsigned i) const {
2750 return getOperand(i);
2751 }
2752 void setIncomingValue(unsigned i, Value *V) {
2753 assert(V && "PHI node got a null value!");
2754 assert(getType() == V->getType() &&
2755        "All operands to PHI node must be the same type as the PHI node!");
2756 setOperand(i, V);
2757 }
2758
2759 static unsigned getOperandNumForIncomingValue(unsigned i) {
2760 return i;
2761 }
2762
2763 static unsigned getIncomingValueNumForOperand(unsigned i) {
2764 return i;
2765 }
2766
2767 /// Return incoming basic block number @p i.
2768 ///
2769 BasicBlock *getIncomingBlock(unsigned i) const {
2770 return block_begin()[i];
2771 }
2772
2773 /// Return incoming basic block corresponding
2774 /// to an operand of the PHI.
2775 ///
2776 BasicBlock *getIncomingBlock(const Use &U) const {
2777 assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2778 return getIncomingBlock(unsigned(&U - op_begin()));
2779 }
2780
2781 /// Return incoming basic block corresponding
2782 /// to value use iterator.
2783 ///
2784 BasicBlock *getIncomingBlock(Value::const_user_iterator I) const {
2785 return getIncomingBlock(I.getUse());
2786 }
2787
2788 void setIncomingBlock(unsigned i, BasicBlock *BB) {
2789 assert(BB && "PHI node got a null basic block!");
2790 block_begin()[i] = BB;
2791 }
2792
2793 /// Replace every incoming basic block \p Old with basic block \p New.
2794 void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) {
2795 assert(New && Old && "PHI node got a null basic block!");
2796 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2797 if (getIncomingBlock(Op) == Old)
2798 setIncomingBlock(Op, New);
2799 }
2800
2801 /// Add an incoming value to the end of the PHI list
2802 /// (a usage sketch, added by the editor, follows this class definition)
2803 void addIncoming(Value *V, BasicBlock *BB) {
2804 if (getNumOperands() == ReservedSpace)
2805 growOperands(); // Get more space!
2806 // Initialize some new operands.
2807 setNumHungOffUseOperands(getNumOperands() + 1);
2808 setIncomingValue(getNumOperands() - 1, V);
2809 setIncomingBlock(getNumOperands() - 1, BB);
2810 }
2811
2812 /// Remove an incoming value. This is useful if a
2813 /// predecessor basic block is deleted. The value removed is returned.
2814 ///
2815 /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty
2816 /// is true), the PHI node is destroyed and any uses of it are replaced with
2817 /// dummy values. The only time there should be zero incoming values to a PHI
2818 /// node is when the block is dead, so this strategy is sound.
2819 ///
2820 Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true);
2821
2822 Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) {
2823 int Idx = getBasicBlockIndex(BB);
2824 assert(Idx >= 0 && "Invalid basic block argument to remove!");
2825 return removeIncomingValue(Idx, DeletePHIIfEmpty);
2826 }
2827
2828 /// Return the first index of the specified basic
2829 /// block in the value list for this PHI. Returns -1 if no instance.
2830 ///
2831 int getBasicBlockIndex(const BasicBlock *BB) const {
2832 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
2833 if (block_begin()[i] == BB)
2834 return i;
2835 return -1;
2836 }
2837
2838 Value *getIncomingValueForBlock(const BasicBlock *BB) const {
2839 int Idx = getBasicBlockIndex(BB);
2840 assert(Idx >= 0 && "Invalid basic block argument!");
2841 return getIncomingValue(Idx);
2842 }
2843
2844 /// Set every incoming value for block \p BB to \p V.
2845 void setIncomingValueForBlock(const BasicBlock *BB, Value *V) {
2846 assert(BB && "PHI node got a null basic block!");
2847 bool Found = false;
2848 for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op)
2849 if (getIncomingBlock(Op) == BB) {
2850 Found = true;
2851 setIncomingValue(Op, V);
2852 }
2853 (void)Found;
2854 assert(Found && "Invalid basic block argument to set!");
2855 }
2856
2857 /// If the specified PHI node always merges together the
2858 /// same value, return the value, otherwise return null.
2859 Value *hasConstantValue() const;
2860
2861 /// Whether the specified PHI node always merges
2862 /// together the same value, assuming undefs are equal to a unique
2863 /// non-undef value.
2864 bool hasConstantOrUndefValue() const;
2865
2866 /// Return true if this PHI node is complete, i.e. all of its parent's
2867 /// predecessors have an incoming value in this PHI; otherwise return false.
2868 bool isComplete() const {
2869 return llvm::all_of(predecessors(getParent()),
2870 [this](const BasicBlock *Pred) {
2871 return getBasicBlockIndex(Pred) >= 0;
2872 });
2873 }
2874
2875 /// Methods for support type inquiry through isa, cast, and dyn_cast:
2876 static bool classof(const Instruction *I) {
2877 return I->getOpcode() == Instruction::PHI;
2878 }
2879 static bool classof(const Value *V) {
2880 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2881 }
2882
2883private:
2884 void growOperands();
2885};
2886
2887template <>
2888struct OperandTraits<PHINode> : public HungoffOperandTraits<2> {
2889};
2890
2891 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
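// --- Editor's usage sketch (not part of Instructions.h) ---
// A classic diamond: the PHI in %merge yields one of two arguments depending
// on which predecessor ran, wired up with IRBuilder::CreatePHI (which calls
// PHINode::Create above) and PHINode::addIncoming. Assumes the includes from
// the earlier sketches; names are illustrative only.
Function *buildPick(Module &M) {
  LLVMContext &Ctx = M.getContext();
  auto *I32 = Type::getInt32Ty(Ctx);
  auto *FTy = FunctionType::get(I32, {Type::getInt1Ty(Ctx), I32, I32}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "pick", &M);
  auto *Entry = BasicBlock::Create(Ctx, "entry", F);
  auto *Then = BasicBlock::Create(Ctx, "then", F);
  auto *Else = BasicBlock::Create(Ctx, "else", F);
  auto *Merge = BasicBlock::Create(Ctx, "merge", F);
  IRBuilder<> B(Entry);
  B.CreateCondBr(F->getArg(0), Then, Else);
  B.SetInsertPoint(Then);
  B.CreateBr(Merge);
  B.SetInsertPoint(Else);
  B.CreateBr(Merge);
  B.SetInsertPoint(Merge);
  PHINode *PN = B.CreatePHI(I32, /*NumReservedValues=*/2, "val");
  PN->addIncoming(F->getArg(1), Then); // value when control came from %then
  PN->addIncoming(F->getArg(2), Else); // value when control came from %else
  B.CreateRet(PN);
  return F;
}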
2892
2893//===----------------------------------------------------------------------===//
2894// LandingPadInst Class
2895//===----------------------------------------------------------------------===//
2896
2897//===---------------------------------------------------------------------------
2898/// The landingpad instruction holds all of the information
2899/// necessary to generate correct exception handling. The landingpad instruction
2900/// cannot be moved from the top of a landing pad block, which itself is
2901/// accessible only from the 'unwind' edge of an invoke. This uses the
2902/// SubclassData field in Value to store whether or not the landingpad is a
2903/// cleanup.
2904///
2905class LandingPadInst : public Instruction {
2906 using CleanupField = BoolBitfieldElementT<0>;
2907
2908 /// The number of operands actually allocated. NumOperands is
2909 /// the number actually in use.
2910 unsigned ReservedSpace;
2911
2912 LandingPadInst(const LandingPadInst &LP);
2913
2914public:
2915 enum ClauseType { Catch, Filter };
2916
2917private:
2918 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2919 const Twine &NameStr, Instruction *InsertBefore);
2920 explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
2921 const Twine &NameStr, BasicBlock *InsertAtEnd);
2922
2923 // Allocate space for exactly zero operands.
2924 void *operator new(size_t S) { return User::operator new(S); }
2925
2926 void growOperands(unsigned Size);
2927 void init(unsigned NumReservedValues, const Twine &NameStr);
2928
2929protected:
2930 // Note: Instruction needs to be a friend here to call cloneImpl.
2931 friend class Instruction;
2932
2933 LandingPadInst *cloneImpl() const;
2934
2935public:
2936 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2937
2938 /// Constructors - NumReservedClauses is a hint for the number of incoming
2939 /// clauses that this landingpad will have (use 0 if you really have no idea).
2940 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2941 const Twine &NameStr = "",
2942 Instruction *InsertBefore = nullptr);
2943 static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
2944 const Twine &NameStr, BasicBlock *InsertAtEnd);
2945
2946 /// Provide fast operand accessors
2947 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
2948
2949 /// Return 'true' if this landingpad instruction is a
2950 /// cleanup. I.e., it should be run when unwinding even if its landing pad
2951 /// doesn't catch the exception.
2952 bool isCleanup() const { return getSubclassData<CleanupField>(); }
2953
2954 /// Indicate that this landingpad instruction is a cleanup.
2955 void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
2956
2957 /// Add a catch or filter clause to the landing pad.
2958 void addClause(Constant *ClauseVal);
2959
2960 /// Get the value of the clause at index Idx. Use isCatch/isFilter to
2961 /// determine what type of clause this is.
2962 Constant *getClause(unsigned Idx) const {
2963 return cast<Constant>(getOperandList()[Idx]);
2964 }
2965
2966 /// Return 'true' if the clause at index Idx is a catch clause.
2967 bool isCatch(unsigned Idx) const {
2968 return !isa<ArrayType>(getOperandList()[Idx]->getType());
2969 }
2970
2971 /// Return 'true' if the clause at index Idx is a filter clause.
2972 bool isFilter(unsigned Idx) const {
2973 return isa<ArrayType>(getOperandList()[Idx]->getType());
2974 }
2975
2976 /// Get the number of clauses for this landing pad.
2977 unsigned getNumClauses() const { return getNumOperands(); }
2978
2979 /// Grow the size of the operand list to accommodate the new
2980 /// number of clauses.
2981 void reserveClauses(unsigned Size) { growOperands(Size); }
2982
2983 // Methods for support type inquiry through isa, cast, and dyn_cast:
2984 static bool classof(const Instruction *I) {
2985 return I->getOpcode() == Instruction::LandingPad;
2986 }
2987 static bool classof(const Value *V) {
2988 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2989 }
2990};
2991
2992template <>
2993struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
2994};
2995
2996 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
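// --- Editor's usage sketch (not part of Instructions.h) ---
// Catch clauses are non-array constants (typically a type-info pointer) and
// filter clauses are ArrayType constants, which is exactly the distinction
// isCatch/isFilter draw above. Assumes LP's function already has a
// personality set; the clause values are illustrative only.
void addDemoClauses(LandingPadInst *LP, LLVMContext &Ctx) {
  auto *I8Ptr = PointerType::get(Type::getInt8Ty(Ctx), 0);
  LP->addClause(ConstantPointerNull::get(I8Ptr)); // catch i8* null
  auto *FilterTy = ArrayType::get(I8Ptr, 1);
  LP->addClause(ConstantArray::get(               // filter [1 x i8*]
      FilterTy, {ConstantPointerNull::get(I8Ptr)}));
  assert(LP->isCatch(0) && LP->isFilter(1));
}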
2997
2998//===----------------------------------------------------------------------===//
2999// ReturnInst Class
3000//===----------------------------------------------------------------------===//
3001
3002//===---------------------------------------------------------------------------
3003 /// Return a value (possibly void) from a function. Execution
3004/// does not continue in this function any longer.
3005///
3006class ReturnInst : public Instruction {
3007 ReturnInst(const ReturnInst &RI);
3008
3009private:
3010 // ReturnInst constructors:
3011 // ReturnInst() - 'ret void' instruction
3012 // ReturnInst( null) - 'ret void' instruction
3013 // ReturnInst(Value* X) - 'ret X' instruction
3014 // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
3015 // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
3016 // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
3017 // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
3018 //
3019 // NOTE: If the Value* passed is of type void then the constructor behaves as
3020 // if it was passed NULL.
3021 explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
3022 Instruction *InsertBefore = nullptr);
3023 ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
3024 explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);
3025
3026protected:
3027 // Note: Instruction needs to be a friend here to call cloneImpl.
3028 friend class Instruction;
3029
3030 ReturnInst *cloneImpl() const;
3031
3032public:
3033 static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
3034 Instruction *InsertBefore = nullptr) {
3035 return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
3036 }
3037
3038 static ReturnInst* Create(LLVMContext &C, Value *retVal,
3039 BasicBlock *InsertAtEnd) {
3040 return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
3041 }
3042
3043 static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
3044 return new(0) ReturnInst(C, InsertAtEnd);
3045 }
3046
3047 /// Provide fast operand accessors
3048 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
3049
3050 /// Convenience accessor. Returns null if there is no return value.
3051 Value *getReturnValue() const {
3052 return getNumOperands() != 0 ? getOperand(0) : nullptr;
3053 }
3054
3055 unsigned getNumSuccessors() const { return 0; }
3056
3057 // Methods for support type inquiry through isa, cast, and dyn_cast:
3058 static bool classof(const Instruction *I) {
3059 return (I->getOpcode() == Instruction::Ret);
3060 }
3061 static bool classof(const Value *V) {
3062 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3063 }
3064
3065private:
3066 BasicBlock *getSuccessor(unsigned idx) const {
3067 llvm_unreachable("ReturnInst has no successors!");
3068 }
3069
3070 void setSuccessor(unsigned idx, BasicBlock *B) {
3071 llvm_unreachable("ReturnInst has no successors!");
3072 }
3073};
3074
3075template <>
3076struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
3077};
3078
3079 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
3080
3081//===----------------------------------------------------------------------===//
3082// BranchInst Class
3083//===----------------------------------------------------------------------===//
3084
3085//===---------------------------------------------------------------------------
3086/// Conditional or Unconditional Branch instruction.
3087///
3088class BranchInst : public Instruction {
3089 /// Ops list - Branches are strange. The operands are ordered:
3090 /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
3091 /// they don't have to check for cond/uncond branchness. These are mostly
3092 /// accessed relative from op_end().
3093 BranchInst(const BranchInst &BI);
3094 // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
3095 // BranchInst(BB *B) - 'br B'
3096 // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
3097 // BranchInst(BB* B, Inst *I) - 'br B' insert before I
3098 // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
3099 // BranchInst(BB* B, BB *I) - 'br B' insert at end
3100 // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
3101 explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
3102 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3103 Instruction *InsertBefore = nullptr);
3104 BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
3105 BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
3106 BasicBlock *InsertAtEnd);
3107
3108 void AssertOK();
3109
3110protected:
3111 // Note: Instruction needs to be a friend here to call cloneImpl.
3112 friend class Instruction;
3113
3114 BranchInst *cloneImpl() const;
3115
3116public:
3117 /// Iterator type that casts an operand to a basic block.
3118 ///
3119 /// This only makes sense because the successors are stored as adjacent
3120 /// operands for branch instructions.
3121 struct succ_op_iterator
3122 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3123 std::random_access_iterator_tag, BasicBlock *,
3124 ptrdiff_t, BasicBlock *, BasicBlock *> {
3125 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3126
3127 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3128 BasicBlock *operator->() const { return operator*(); }
3129 };
3130
3131 /// The const version of `succ_op_iterator`.
3132 struct const_succ_op_iterator
3133 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3134 std::random_access_iterator_tag,
3135 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3136 const BasicBlock *> {
3137 explicit const_succ_op_iterator(const_value_op_iterator I)
3138 : iterator_adaptor_base(I) {}
3139
3140 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3141 const BasicBlock *operator->() const { return operator*(); }
3142 };
3143
3144 static BranchInst *Create(BasicBlock *IfTrue,
3145 Instruction *InsertBefore = nullptr) {
3146 return new(1) BranchInst(IfTrue, InsertBefore);
3147 }
3148
3149 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3150 Value *Cond, Instruction *InsertBefore = nullptr) {
3151 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
3152 }
3153
3154 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
3155 return new(1) BranchInst(IfTrue, InsertAtEnd);
3156 }
3157
3158 static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
3159 Value *Cond, BasicBlock *InsertAtEnd) {
3160 return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
3161 }
3162
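// Usage sketch (illustrative, not part of this header), assuming existing
// blocks ThenBB, ElseBB and CurBB plus an i1 Value *Cond:
//
//   BranchInst::Create(ThenBB, CurBB);               // 'br ThenBB'
//   BranchInst::Create(ThenBB, ElseBB, Cond, CurBB); // 'br Cond, ThenBB, ElseBB'
//
// The placement-new counts new(1) and new(3) above match the operand counts
// tested by isUnconditional() and isConditional() below.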
3163 /// Transparently provide more efficient getOperand methods.
3164 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3165
3166 bool isUnconditional() const { return getNumOperands() == 1; }
3167 bool isConditional() const { return getNumOperands() == 3; }
3168
3169 Value *getCondition() const {
3170 assert(isConditional() && "Cannot get condition of an uncond branch!");
3171 return Op<-3>();
3172 }
3173
3174 void setCondition(Value *V) {
3175 assert(isConditional() && "Cannot set condition of unconditional branch!");
3176 Op<-3>() = V;
3177 }
3178
3179 unsigned getNumSuccessors() const { return 1+isConditional(); }
3180
3181 BasicBlock *getSuccessor(unsigned i) const {
3182 assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3183 return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
3184 }
3185
3186 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3187 assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3188 *(&Op<-1>() - idx) = NewSucc;
3189 }
3190
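// Worked example of the [Cond, FalseDest,] TrueDest layout described at the
// top of the class: for a conditional branch the operands are
// [Cond, IfFalse, IfTrue], so getSuccessor(0) reads &Op<-1>() - 0 (IfTrue),
// getSuccessor(1) reads &Op<-1>() - 1, i.e. Op<-2> (IfFalse), and
// getCondition() reads Op<-3>. An unconditional branch has only Op<-1>, its
// sole destination.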
3191 /// Swap the successors of this branch instruction.
3192 ///
3193 /// Swaps the successors of the branch instruction. This also swaps any
3194 /// branch weight metadata associated with the instruction so that it
3195 /// continues to map correctly to each operand.
3196 void swapSuccessors();
3197
3198 iterator_range<succ_op_iterator> successors() {
3199 return make_range(
3200 succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
3201 succ_op_iterator(value_op_end()));
3202 }
3203
3204 iterator_range<const_succ_op_iterator> successors() const {
3205 return make_range(const_succ_op_iterator(
3206 std::next(value_op_begin(), isConditional() ? 1 : 0)),
3207 const_succ_op_iterator(value_op_end()));
3208 }
3209
3210 // Methods for support type inquiry through isa, cast, and dyn_cast:
3211 static bool classof(const Instruction *I) {
3212 return (I->getOpcode() == Instruction::Br);
3213 }
3214 static bool classof(const Value *V) {
3215 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3216 }
3217};
3218
3219template <>
3220struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
3221};
3222
3223DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
3224
3225//===----------------------------------------------------------------------===//
3226// SwitchInst Class
3227//===----------------------------------------------------------------------===//
3228
3229//===---------------------------------------------------------------------------
3230/// Multiway switch
3231///
3232class SwitchInst : public Instruction {
3233 unsigned ReservedSpace;
3234
3235 // Operand[0] = Value to switch on
3236 // Operand[1] = Default basic block destination
3237 // Operand[2n ] = Value to match
3238 // Operand[2n+1] = BasicBlock to go to on match
3239 SwitchInst(const SwitchInst &SI);
3240
3241 /// Create a new switch instruction, specifying a value to switch on and a
3242 /// default destination. The number of additional cases can be specified here
3243 /// to make memory allocation more efficient. This constructor can also
3244 /// auto-insert before another instruction.
3245 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3246 Instruction *InsertBefore);
3247
3248 /// Create a new switch instruction, specifying a value to switch on and a
3249 /// default destination. The number of additional cases can be specified here
3250 /// to make memory allocation more efficient. This constructor also
3251 /// auto-inserts at the end of the specified BasicBlock.
3252 SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases,
3253 BasicBlock *InsertAtEnd);
3254
3255 // allocate space for exactly zero operands
3256 void *operator new(size_t S) { return User::operator new(S); }
3257
3258 void init(Value *Value, BasicBlock *Default, unsigned NumReserved);
3259 void growOperands();
3260
3261protected:
3262 // Note: Instruction needs to be a friend here to call cloneImpl.
3263 friend class Instruction;
3264
3265 SwitchInst *cloneImpl() const;
3266
3267public:
3268 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3269
3270 // DefaultPseudoIndex is static_cast<unsigned>(-2); it denotes the default case.
3271 static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
3272
3273 template <typename CaseHandleT> class CaseIteratorImpl;
3274
3275 /// A handle to a particular switch case. It exposes a convenient interface
3276 /// to both the case value and the successor block.
3277 ///
3278 /// We define this as a template and instantiate it to form both a const and
3279 /// non-const handle.
3280 template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
3281 class CaseHandleImpl {
3282 // Directly befriend both const and non-const iterators.
3283 friend class SwitchInst::CaseIteratorImpl<
3284 CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;
3285
3286 protected:
3287 // Expose the switch type we're parameterized with to the iterator.
3288 using SwitchInstType = SwitchInstT;
3289
3290 SwitchInstT *SI;
3291 ptrdiff_t Index;
3292
3293 CaseHandleImpl() = default;
3294 CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}
3295
3296 public:
3297 /// Resolves case value for current case.
3298 ConstantIntT *getCaseValue() const {
3299 assert((unsigned)Index < SI->getNumCases() &&
3300 "Index out the number of cases.");
3301 return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
3302 }
3303
3304 /// Resolves successor for current case.
3305 BasicBlockT *getCaseSuccessor() const {
3306 assert(((unsigned)Index < SI->getNumCases() ||
3307 (unsigned)Index == DefaultPseudoIndex) &&
3308 "Index out the number of cases.");
3309 return SI->getSuccessor(getSuccessorIndex());
3310 }
3311
3312 /// Returns number of current case.
3313 unsigned getCaseIndex() const { return Index; }
3314
3315 /// Returns successor index for current case successor.
3316 unsigned getSuccessorIndex() const {
3317 assert(((unsigned)Index == DefaultPseudoIndex ||
3318 (unsigned)Index < SI->getNumCases()) &&
3319 "Index out the number of cases.");
3320 return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
3321 }
3322
3323 bool operator==(const CaseHandleImpl &RHS) const {
3324 assert(SI == RHS.SI && "Incompatible operators.");
3325 return Index == RHS.Index;
3326 }
3327 };
3328
3329 using ConstCaseHandle =
3330 CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3331
3332 class CaseHandle
3333 : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
3334 friend class SwitchInst::CaseIteratorImpl<CaseHandle>;
3335
3336 public:
3337 CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}
3338
3339 /// Sets the new value for current case.
3340 void setValue(ConstantInt *V) const {
3341 assert((unsigned)Index < SI->getNumCases() &&
3342 "Index out the number of cases.");
3343 SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
3344 }
3345
3346 /// Sets the new successor for current case.
3347 void setSuccessor(BasicBlock *S) const {
3348 SI->setSuccessor(getSuccessorIndex(), S);
3349 }
3350 };
3351
3352 template <typename CaseHandleT>
3353 class CaseIteratorImpl
3354 : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
3355 std::random_access_iterator_tag,
3356 const CaseHandleT> {
3357 using SwitchInstT = typename CaseHandleT::SwitchInstType;
3358
3359 CaseHandleT Case;
3360
3361 public:
3362 /// Default constructed iterator is in an invalid state until assigned to
3363 /// a case for a particular switch.
3364 CaseIteratorImpl() = default;
3365
3366 /// Initializes case iterator for given SwitchInst and for given
3367 /// case number.
3368 CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3369
3370 /// Initializes case iterator for given SwitchInst and for given
3371 /// successor index.
3372 static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
3373 unsigned SuccessorIndex) {
3374 assert(SuccessorIndex < SI->getNumSuccessors() &&
3375 "Successor index # out of range!");
3376 return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
3377 : CaseIteratorImpl(SI, DefaultPseudoIndex);
3378 }
3379
3380 /// Support converting to the const variant. This will be a no-op for const
3381 /// variant.
3382 operator CaseIteratorImpl<ConstCaseHandle>() const {
3383 return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
3384 }
3385
3386 CaseIteratorImpl &operator+=(ptrdiff_t N) {
3387 // Check index correctness after addition.
3388 // Note: Index == getNumCases() means end().
3389 assert(Case.Index + N >= 0 &&
3390 (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3391 "Case.Index out the number of cases.");
3392 Case.Index += N;
3393 return *this;
3394 }
3395 CaseIteratorImpl &operator-=(ptrdiff_t N) {
3396 // Check index correctness after subtraction.
3397 // Note: Case.Index == getNumCases() means end().
3398 assert(Case.Index - N >= 0 &&
3399 (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3400 "Case.Index out the number of cases.");
3401 Case.Index -= N;
3402 return *this;
3403 }
3404 ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
3405 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3406 return Case.Index - RHS.Case.Index;
3407 }
3408 bool operator==(const CaseIteratorImpl &RHS) const {
3409 return Case == RHS.Case;
3410 }
3411 bool operator<(const CaseIteratorImpl &RHS) const {
3412 assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3413 return Case.Index < RHS.Case.Index;
3414 }
3415 const CaseHandleT &operator*() const { return Case; }
3416 };
3417
3418 using CaseIt = CaseIteratorImpl<CaseHandle>;
3419 using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>;
3420
3421 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3422 unsigned NumCases,
3423 Instruction *InsertBefore = nullptr) {
3424 return new SwitchInst(Value, Default, NumCases, InsertBefore);
3425 }
3426
3427 static SwitchInst *Create(Value *Value, BasicBlock *Default,
3428 unsigned NumCases, BasicBlock *InsertAtEnd) {
3429 return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
3430 }
3431
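// Usage sketch (illustrative, not part of this header), assuming an existing
// i32 Value *V, blocks DefaultBB, OneBB, TwoBB and CurBB, and an
// IntegerType *Int32Ty:
//
//   SwitchInst *SI = SwitchInst::Create(V, DefaultBB, /*NumCases=*/2, CurBB);
//   SI->addCase(ConstantInt::get(Int32Ty, 1), OneBB);
//   SI->addCase(ConstantInt::get(Int32Ty, 2), TwoBB);
//
// NumCases is only an allocation hint; addCase() (declared below) can still
// grow the operand list past it via growOperands().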
3432 /// Provide fast operand accessors
3433 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3434
3435 // Accessor methods for the switch instruction.
3436 Value *getCondition() const { return getOperand(0); }
3437 void setCondition(Value *V) { setOperand(0, V); }
3438
3439 BasicBlock *getDefaultDest() const {
3440 return cast<BasicBlock>(getOperand(1));
3441 }
3442
3443 void setDefaultDest(BasicBlock *DefaultCase) {
3444 setOperand(1, reinterpret_cast<Value*>(DefaultCase));
3445 }
3446
3447 /// Return the number of 'cases' in this switch instruction, excluding the
3448 /// default case.
3449 unsigned getNumCases() const {
3450 return getNumOperands()/2 - 1;
3451 }
3452
3453 /// Returns a read/write iterator that points to the first case in the
3454 /// SwitchInst.
3455 CaseIt case_begin() {
3456 return CaseIt(this, 0);
3457 }
3458
3459 /// Returns a read-only iterator that points to the first case in the
3460 /// SwitchInst.
3461 ConstCaseIt case_begin() const {
3462 return ConstCaseIt(this, 0);
3463 }
3464
3465 /// Returns a read/write iterator that points one past the last in the
3466 /// SwitchInst.
3467 CaseIt case_end() {
3468 return CaseIt(this, getNumCases());
3469 }
3470
3471 /// Returns a read-only iterator that points one past the last in the
3472 /// SwitchInst.
3473 ConstCaseIt case_end() const {
3474 return ConstCaseIt(this, getNumCases());
3475 }
3476
3477 /// Iteration adapter for range-for loops.
3478 iterator_range<CaseIt> cases() {
3479 return make_range(case_begin(), case_end());
3480 }
3481
3482 /// Constant iteration adapter for range-for loops.
3483 iterator_range<ConstCaseIt> cases() const {
3484 return make_range(case_begin(), case_end());
3485 }
3486
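// Iteration sketch (illustrative, not part of this header): visiting every
// non-default case of an existing SwitchInst *SI through the adapters above:
//
//   for (const auto &Case : SI->cases()) {
//     ConstantInt *OnVal = Case.getCaseValue();
//     BasicBlock *Dest = Case.getCaseSuccessor();
//     // ... inspect OnVal / Dest ...
//   }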
3487 /// Returns an iterator that points to the default case.
3488 /// Note: this iterator only allows resolving the successor; attempting
3489 /// to resolve the case value triggers an assertion.
3490 /// Also note that incrementing or decrementing this iterator also triggers
3491 /// an assertion and leaves the iterator invalid.
3492 CaseIt case_default() {
3493 return CaseIt(this, DefaultPseudoIndex);
3494 }
3495 ConstCaseIt case_default() const {
3496 return ConstCaseIt(this, DefaultPseudoIndex);
3497 }
3498
3499 /// Search all of the case values for the specified constant. If it is
3500 /// explicitly handled, return the case iterator of it, otherwise return
3501 /// default case iterator to indicate that it is handled by the default
3502 /// handler.
3503 CaseIt findCaseValue(const ConstantInt *C) {
3504 return CaseIt(
3505 this,
3506 const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
3507 }
3508 ConstCaseIt findCaseValue(const ConstantInt *C) const {
3509 ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) {
3510 return Case.getCaseValue() == C;
3511 });
3512 if (I != case_end())
3513 return I;
3514
3515 return case_default();
3516 }
3517
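// Lookup sketch (illustrative, not part of this header), assuming an existing
// SwitchInst *SI and ConstantInt *C:
//
//   SwitchInst::CaseIt It = SI->findCaseValue(C);
//   if (It == SI->case_default()) {
//     // C is not listed explicitly; it is handled by the default dest.
//   } else {
//     BasicBlock *Target = It->getCaseSuccessor();
//   }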
3518 /// Finds the unique case value for a given successor. Returns null if the
3519 /// successor is not found, not unique, or is the default case.
3520 ConstantInt *findCaseDest(BasicBlock *BB) {
3521 if (BB == getDefaultDest())
3522 return nullptr;
3523
3524 ConstantInt *CI = nullptr;
3525 for (auto Case : cases()) {
3526 if (Case.getCaseSuccessor() != BB)
3527 continue;
3528
3529 if (CI)
3530 return nullptr; // Multiple cases lead to BB.
3531
3532 CI = Case.getCaseValue();
3533 }
3534
3535 return CI;
3536 }
3537
3538 /// Add an entry to the switch instruction.
3539 /// Note:
3540 /// This action invalidates case_end(). The old case_end() iterator will
3541 /// point to the added case.
3542 void addCase(ConstantInt *OnVal, BasicBlock *Dest);
3543
3544 /// This method removes the specified case and its successor from the switch
3545 /// instruction. Note that this operation may reorder the remaining cases at
3546 /// index idx and above.
3547 /// Note:
3548 /// This action invalidates iterators for all cases following the one removed,
3549 /// including the case_end() iterator. It returns an iterator for the next
3550 /// case.
3551 CaseIt removeCase(CaseIt I);
3552
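// Removal sketch (illustrative, not part of this header): erasing every case
// that targets a given block DeadBB, advancing via the iterator returned by
// removeCase() because removal invalidates all later iterators:
//
//   for (auto It = SI->case_begin(); It != SI->case_end();) {
//     if (It->getCaseSuccessor() == DeadBB)
//       It = SI->removeCase(It);
//     else
//       ++It;
//   }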
3553 unsigned getNumSuccessors() const { return getNumOperands()/2; }
3554 BasicBlock *getSuccessor(unsigned idx) const {
3555 assert(idx < getNumSuccessors() && "Successor idx out of range for switch!");
3556 return cast<BasicBlock>(getOperand(idx*2+1));
3557 }
3558 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
3559 assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3560 setOperand(idx * 2 + 1, NewSucc);
3561 }
3562
3563 // Methods for support type inquiry through isa, cast, and dyn_cast:
3564 static bool classof(const Instruction *I) {
3565 return I->getOpcode() == Instruction::Switch;
3566 }
3567 static bool classof(const Value *V) {
3568 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3569 }
3570};
3571
3572/// A wrapper class to simplify modification of SwitchInst cases along with
3573/// their prof branch_weights metadata.
3574class SwitchInstProfUpdateWrapper {
3575 SwitchInst &SI;
3576 Optional<SmallVector<uint32_t, 8> > Weights = None;
3577 bool Changed = false;
3578
3579protected:
3580 static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);
3581
3582 MDNode *buildProfBranchWeightsMD();
3583
3584 void init();
3585
3586public:
3587 using CaseWeightOpt = Optional<uint32_t>;
3588 SwitchInst *operator->() { return &SI; }
3589 SwitchInst &operator*() { return SI; }
3590 operator SwitchInst *() { return &SI; }
3591
3592 SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }
3593
3594 ~SwitchInstProfUpdateWrapper() {
3595 if (Changed)
3596 SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
3597 }
3598
3599 /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3600 /// correspondent branch weight.
3601 SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);
3602
3603 /// Delegate the call to the underlying SwitchInst::addCase() and set the
3604 /// specified branch weight for the added case.
3605 void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);
3606
3607 /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
3608 /// this object to not touch the underlying SwitchInst in destructor.
3609 SymbolTableList<Instruction>::iterator eraseFromParent();
3610
3611 void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
3612 CaseWeightOpt getSuccessorWeight(unsigned idx);
3613
3614 static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
3615};
3616
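// Usage sketch (illustrative, not part of this header): editing cases through
// the wrapper so any prof branch_weights metadata stays in sync, assuming an
// existing SwitchInst &SI, ConstantInt *OnVal and BasicBlock *Dest:
//
//   {
//     SwitchInstProfUpdateWrapper SIW(SI);
//     SIW.addCase(OnVal, Dest, /*W=*/10); // weight 10 for the new case
//     SIW->setDefaultDest(Dest);          // operator-> forwards to SI
//   } // the destructor rewrites MD_prof only if something changed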
3617template <>
3618struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
3619};
3620
3621DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3622
3623//===----------------------------------------------------------------------===//
3624// IndirectBrInst Class
3625//===----------------------------------------------------------------------===//
3626
3627//===---------------------------------------------------------------------------
3628/// Indirect Branch Instruction.
3629///
3630class IndirectBrInst : public Instruction {
3631 unsigned ReservedSpace;
3632
3633 // Operand[0] = Address to jump to
3634 // Operand[n+1] = n-th destination
3635 IndirectBrInst(const IndirectBrInst &IBI);
3636
3637 /// Create a new indirectbr instruction, specifying an
3638 /// Address to jump to. The number of expected destinations can be specified
3639 /// here to make memory allocation more efficient. This constructor can also
3640 /// autoinsert before another instruction.
3641 IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);
3642
3643 /// Create a new indirectbr instruction, specifying an
3644 /// Address to jump to. The number of expected destinations can be specified
3645 /// here to make memory allocation more efficient. This constructor also
3646 /// autoinserts at the end of the specified BasicBlock.
3647 IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);
3648
3649 // allocate space for exactly zero operands
3650 void *operator new(size_t S) { return User::operator new(S); }
3651
3652 void init(Value *Address, unsigned NumDests);
3653 void growOperands();
3654
3655protected:
3656 // Note: Instruction needs to be a friend here to call cloneImpl.
3657 friend class Instruction;
3658
3659 IndirectBrInst *cloneImpl() const;
3660
3661public:
3662 void operator delete(void *Ptr) { User::operator delete(Ptr); }
3663
3664 /// Iterator type that casts an operand to a basic block.
3665 ///
3666 /// This only makes sense because the successors are stored as adjacent
3667 /// operands for indirectbr instructions.
3668 struct succ_op_iterator
3669 : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
3670 std::random_access_iterator_tag, BasicBlock *,
3671 ptrdiff_t, BasicBlock *, BasicBlock *> {
3672 explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}
3673
3674 BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3675 BasicBlock *operator->() const { return operator*(); }
3676 };
3677
3678 /// The const version of `succ_op_iterator`.
3679 struct const_succ_op_iterator
3680 : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
3681 std::random_access_iterator_tag,
3682 const BasicBlock *, ptrdiff_t, const BasicBlock *,
3683 const BasicBlock *> {
3684 explicit const_succ_op_iterator(const_value_op_iterator I)
3685 : iterator_adaptor_base(I) {}
3686
3687 const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
3688 const BasicBlock *operator->() const { return operator*(); }
3689 };
3690
3691 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3692 Instruction *InsertBefore = nullptr) {
3693 return new IndirectBrInst(Address, NumDests, InsertBefore);
3694 }
3695
3696 static IndirectBrInst *Create(Value *Address, unsigned NumDests,
3697 BasicBlock *InsertAtEnd) {
3698 return new IndirectBrInst(Address, NumDests, InsertAtEnd);
3699 }
3700
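// Usage sketch (illustrative, not part of this header), assuming an existing
// Value *Addr (typically a BlockAddress) and blocks Dest0, Dest1 and CurBB:
//
//   IndirectBrInst *IBI = IndirectBrInst::Create(Addr, /*NumDests=*/2, CurBB);
//   IBI->addDestination(Dest0);
//   IBI->addDestination(Dest1);
//
// As with SwitchInst, NumDests only pre-reserves operand space; the actual
// destinations come from addDestination().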
3701 /// Provide fast operand accessors.
3702 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3703
3704 // Accessor Methods for IndirectBrInst instruction.
3705 Value *getAddress() { return getOperand(0); }
3706 const Value *getAddress() const { return getOperand(0); }
3707 void setAddress(Value *V) { setOperand(0, V); }
3708
3709 /// Return the number of possible destinations in this
3710 /// indirectbr instruction.
3711 unsigned getNumDestinations() const { return getNumOperands()-1; }
3712
3713 /// Return the specified destination.
3714 BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
3715 const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }
3716
3717 /// Add a destination.
3718 ///
3719 void addDestination(BasicBlock *Dest);
3720
3721 /// This method removes the specified successor from the
3722 /// indirectbr instruction.
3723 void removeDestination(unsigned i);
3724
3725 unsigned getNumSuccessors() const { return getNumOperands()-1; }
3726 BasicBlock *getSuccessor(unsigned i) const {
3727 return cast<BasicBlock>(getOperand(i+1));
3728 }
3729 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3730 setOperand(i + 1, NewSucc);
3731 }
3732
3733 iterator_range<succ_op_iterator> successors() {
3734 return make_range(succ_op_iterator(std::next(value_op_begin())),
3735 succ_op_iterator(value_op_end()));
3736 }
3737
3738 iterator_range<const_succ_op_iterator> successors() const {
3739 return make_range(const_succ_op_iterator(std::next(value_op_begin())),
3740 const_succ_op_iterator(value_op_end()));
3741 }
3742
3743 // Methods for support type inquiry through isa, cast, and dyn_cast:
3744 static bool classof(const Instruction *I) {
3745 return I->getOpcode() == Instruction::IndirectBr;
3746 }
3747 static bool classof(const Value *V) {
3748 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3749 }
3750};
3751
3752template <>
3753struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
3754};
3755
3756DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
3757
3758//===----------------------------------------------------------------------===//
3759// InvokeInst Class
3760//===----------------------------------------------------------------------===//
3761
3762/// Invoke instruction. The SubclassData field is used to hold the
3763/// calling convention of the call.
3764///
3765class InvokeInst : public CallBase {
3766 /// The number of operands for this call beyond the called function,
3767 /// arguments, and operand bundles.
3768 static constexpr int NumExtraOperands = 2;
3769
3770 /// The index from the end of the operand array to the normal destination.
3771 static constexpr int NormalDestOpEndIdx = -3;
3772
3773 /// The index from the end of the operand array to the unwind destination.
3774 static constexpr int UnwindDestOpEndIdx = -2;
3775
3776 InvokeInst(const InvokeInst &BI);
3777
3778 /// Construct an InvokeInst given a range of arguments.
3779 ///
3780 /// Construct an InvokeInst from a range of arguments
3781 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3782 BasicBlock *IfException, ArrayRef<Value *> Args,
3783 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3784 const Twine &NameStr, Instruction *InsertBefore);
3785
3786 inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3787 BasicBlock *IfException, ArrayRef<Value *> Args,
3788 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3789 const Twine &NameStr, BasicBlock *InsertAtEnd);
3790
3791 void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3792 BasicBlock *IfException, ArrayRef<Value *> Args,
3793 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3794
3795 /// Compute the number of operands to allocate.
3796 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
3797 // We need one operand for the called function, plus our extra operands and
3798 // the input operand counts provided.
3799 return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
3800 }
3801
3802protected:
3803 // Note: Instruction needs to be a friend here to call cloneImpl.
3804 friend class Instruction;
3805
3806 InvokeInst *cloneImpl() const;
3807
3808public:
3809 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3810 BasicBlock *IfException, ArrayRef<Value *> Args,
3811 const Twine &NameStr,
3812 Instruction *InsertBefore = nullptr) {
3813 int NumOperands = ComputeNumOperands(Args.size());
3814 return new (NumOperands)
3815 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3816 NameStr, InsertBefore);
3817 }
3818
3819 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3820 BasicBlock *IfException, ArrayRef<Value *> Args,
3821 ArrayRef<OperandBundleDef> Bundles = None,
3822 const Twine &NameStr = "",
3823 Instruction *InsertBefore = nullptr) {
3824 int NumOperands =
3825 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3826 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3827
3828 return new (NumOperands, DescriptorBytes)
3829 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3830 NameStr, InsertBefore);
3831 }
3832
3833 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3834 BasicBlock *IfException, ArrayRef<Value *> Args,
3835 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3836 int NumOperands = ComputeNumOperands(Args.size());
3837 return new (NumOperands)
3838 InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
3839 NameStr, InsertAtEnd);
3840 }
3841
3842 static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3843 BasicBlock *IfException, ArrayRef<Value *> Args,
3844 ArrayRef<OperandBundleDef> Bundles,
3845 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3846 int NumOperands =
3847 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
3848 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
3849
3850 return new (NumOperands, DescriptorBytes)
3851 InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
3852 NameStr, InsertAtEnd);
3853 }
3854
3855 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3856 BasicBlock *IfException, ArrayRef<Value *> Args,
3857 const Twine &NameStr,
3858 Instruction *InsertBefore = nullptr) {
3859 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3860 IfException, Args, None, NameStr, InsertBefore);
3861 }
3862
3863 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3864 BasicBlock *IfException, ArrayRef<Value *> Args,
3865 ArrayRef<OperandBundleDef> Bundles = None,
3866 const Twine &NameStr = "",
3867 Instruction *InsertBefore = nullptr) {
3868 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3869 IfException, Args, Bundles, NameStr, InsertBefore);
3870 }
3871
3872 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3873 BasicBlock *IfException, ArrayRef<Value *> Args,
3874 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3875 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3876 IfException, Args, NameStr, InsertAtEnd);
3877 }
3878
3879 static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
3880 BasicBlock *IfException, ArrayRef<Value *> Args,
3881 ArrayRef<OperandBundleDef> Bundles,
3882 const Twine &NameStr, BasicBlock *InsertAtEnd) {
3883 return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
3884 IfException, Args, Bundles, NameStr, InsertAtEnd);
3885 }
3886
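// Usage sketch (illustrative, not part of this header), assuming an existing
// FunctionCallee Callee with a non-void result, argument values Args, a
// normal-continuation block ContBB, a landing-pad block UnwindBB, and an
// insertion block CurBB:
//
//   InvokeInst *II = InvokeInst::Create(Callee, ContBB, UnwindBB, Args,
//                                       "call", CurBB);
//
// Control resumes in ContBB on normal return and in UnwindBB on unwind;
// UnwindBB must begin with a landingpad, reachable via getLandingPadInst().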
3887 /// Create a clone of \p II with a different set of operand bundles and
3888 /// insert it before \p InsertPt.
3889 ///
3890 /// The returned invoke instruction is identical to \p II in every way except
3891 /// that the operand bundles for the new instruction are set to the operand
3892 /// bundles in \p Bundles.
3893 static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles,
3894 Instruction *InsertPt = nullptr);
3895
3896 // get*Dest - Return the destination basic blocks...
3897 BasicBlock *getNormalDest() const {
3898 return cast<BasicBlock>(Op<NormalDestOpEndIdx>());
3899 }
3900 BasicBlock *getUnwindDest() const {
3901 return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
3902 }
3903 void setNormalDest(BasicBlock *B) {
3904 Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3905 }
3906 void setUnwindDest(BasicBlock *B) {
3907 Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
3908 }
3909
3910 /// Get the landingpad instruction from the landing pad
3911 /// block (the unwind destination).
3912 LandingPadInst *getLandingPadInst() const;
3913
3914 BasicBlock *getSuccessor(unsigned i) const {
3915 assert(i < 2 && "Successor # out of range for invoke!");
3916 return i == 0 ? getNormalDest() : getUnwindDest();
3917 }
3918
3919 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
3920 assert(i < 2 && "Successor # out of range for invoke!");
3921 if (i == 0)
3922 setNormalDest(NewSucc);
3923 else
3924 setUnwindDest(NewSucc);
3925 }
3926
3927 unsigned getNumSuccessors() const { return 2; }
3928
3929 // Methods for support type inquiry through isa, cast, and dyn_cast:
3930 static bool classof(const Instruction *I) {
3931 return (I->getOpcode() == Instruction::Invoke);
3932 }
3933 static bool classof(const Value *V) {
3934 return isa<Instruction>(V) && classof(cast<Instruction>(V));
3935 }
3936
3937private:
3938 // Shadow Instruction::setInstructionSubclassData with a private forwarding
3939 // method so that subclasses cannot accidentally use it.
3940 template <typename Bitfield>
3941 void setSubclassData(typename Bitfield::Type Value) {
3942 Instruction::setSubclassData<Bitfield>(Value);
3943 }
3944};
3945
3946InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3947 BasicBlock *IfException, ArrayRef<Value *> Args,
3948 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3949 const Twine &NameStr, Instruction *InsertBefore)
3950 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3951 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3952 InsertBefore) {
3953 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3954}
3955
3956InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
3957 BasicBlock *IfException, ArrayRef<Value *> Args,
3958 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3959 const Twine &NameStr, BasicBlock *InsertAtEnd)
3960 : CallBase(Ty->getReturnType(), Instruction::Invoke,
3961 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
3962 InsertAtEnd) {
3963 init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
3964}
3965
3966//===----------------------------------------------------------------------===//
3967// CallBrInst Class
3968//===----------------------------------------------------------------------===//
3969
3970/// CallBr instruction, tracking function calls that may not return control but
3971/// instead transfer it to a third location. The SubclassData field is used to
3972/// hold the calling convention of the call.
3973///
3974class CallBrInst : public CallBase {
3975
3976 unsigned NumIndirectDests;
3977
3978 CallBrInst(const CallBrInst &BI);
3979
3980 /// Construct a CallBrInst given a range of arguments.
3981 ///
3982 /// Construct a CallBrInst from a range of arguments
3983 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3984 ArrayRef<BasicBlock *> IndirectDests,
3985 ArrayRef<Value *> Args,
3986 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3987 const Twine &NameStr, Instruction *InsertBefore);
3988
3989 inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
3990 ArrayRef<BasicBlock *> IndirectDests,
3991 ArrayRef<Value *> Args,
3992 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
3993 const Twine &NameStr, BasicBlock *InsertAtEnd);
3994
3995 void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest,
3996 ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args,
3997 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
3998
3999 /// Should the Indirect Destinations change, scan + update the Arg list.
4000 void updateArgBlockAddresses(unsigned i, BasicBlock *B);
4001
4002 /// Compute the number of operands to allocate.
4003 static int ComputeNumOperands(int NumArgs, int NumIndirectDests,
4004 int NumBundleInputs = 0) {
4005 // We need one operand for the called function, plus our extra operands and
4006 // the input operand counts provided.
4007 return 2 + NumIndirectDests + NumArgs + NumBundleInputs;
4008 }
4009
4010protected:
4011 // Note: Instruction needs to be a friend here to call cloneImpl.
4012 friend class Instruction;
4013
4014 CallBrInst *cloneImpl() const;
4015
4016public:
4017 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4018 BasicBlock *DefaultDest,
4019 ArrayRef<BasicBlock *> IndirectDests,
4020 ArrayRef<Value *> Args, const Twine &NameStr,
4021 Instruction *InsertBefore = nullptr) {
4022 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4023 return new (NumOperands)
4024 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4025 NumOperands, NameStr, InsertBefore);
4026 }
4027
4028 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4029 BasicBlock *DefaultDest,
4030 ArrayRef<BasicBlock *> IndirectDests,
4031 ArrayRef<Value *> Args,
4032 ArrayRef<OperandBundleDef> Bundles = None,
4033 const Twine &NameStr = "",
4034 Instruction *InsertBefore = nullptr) {
4035 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4036 CountBundleInputs(Bundles));
4037 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4038
4039 return new (NumOperands, DescriptorBytes)
4040 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4041 NumOperands, NameStr, InsertBefore);
4042 }
4043
4044 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4045 BasicBlock *DefaultDest,
4046 ArrayRef<BasicBlock *> IndirectDests,
4047 ArrayRef<Value *> Args, const Twine &NameStr,
4048 BasicBlock *InsertAtEnd) {
4049 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size());
4050 return new (NumOperands)
4051 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None,
4052 NumOperands, NameStr, InsertAtEnd);
4053 }
4054
4055 static CallBrInst *Create(FunctionType *Ty, Value *Func,
4056 BasicBlock *DefaultDest,
4057 ArrayRef<BasicBlock *> IndirectDests,
4058 ArrayRef<Value *> Args,
4059 ArrayRef<OperandBundleDef> Bundles,
4060 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4061 int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(),
4062 CountBundleInputs(Bundles));
4063 unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
4064
4065 return new (NumOperands, DescriptorBytes)
4066 CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles,
4067 NumOperands, NameStr, InsertAtEnd);
4068 }
4069
4070 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4071 ArrayRef<BasicBlock *> IndirectDests,
4072 ArrayRef<Value *> Args, const Twine &NameStr,
4073 Instruction *InsertBefore = nullptr) {
4074 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4075 IndirectDests, Args, NameStr, InsertBefore);
4076 }
4077
4078 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4079 ArrayRef<BasicBlock *> IndirectDests,
4080 ArrayRef<Value *> Args,
4081 ArrayRef<OperandBundleDef> Bundles = None,
4082 const Twine &NameStr = "",
4083 Instruction *InsertBefore = nullptr) {
4084 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4085 IndirectDests, Args, Bundles, NameStr, InsertBefore);
4086 }
4087
4088 static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest,
4089 ArrayRef<BasicBlock *> IndirectDests,
4090 ArrayRef<Value *> Args, const Twine &NameStr,
4091 BasicBlock *InsertAtEnd) {
4092 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4093 IndirectDests, Args, NameStr, InsertAtEnd);
4094 }
4095
4096 static CallBrInst *Create(FunctionCallee Func,
4097 BasicBlock *DefaultDest,
4098 ArrayRef<BasicBlock *> IndirectDests,
4099 ArrayRef<Value *> Args,
4100 ArrayRef<OperandBundleDef> Bundles,
4101 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4102 return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest,
4103 IndirectDests, Args, Bundles, NameStr, InsertAtEnd);
4104 }
4105
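// Usage sketch (illustrative, not part of this header), assuming an existing
// FunctionCallee Callee (callbr is chiefly used for 'asm goto'), argument
// values Args, a fallthrough block DefaultBB, one indirect target IndirectBB,
// and an insertion block CurBB:
//
//   CallBrInst *CBI = CallBrInst::Create(Callee, DefaultBB, {IndirectBB},
//                                        Args, "", CurBB);
//
// Successor 0 is the default destination and successors 1..N are the indirect
// destinations, matching getSuccessor() below.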
4106 /// Create a clone of \p CBI with a different set of operand bundles and
4107 /// insert it before \p InsertPt.
4108 ///
4109 /// The returned callbr instruction is identical to \p CBI in every way
4110 /// except that the operand bundles for the new instruction are set to the
4111 /// operand bundles in \p Bundles.
4112 static CallBrInst *Create(CallBrInst *CBI,
4113 ArrayRef<OperandBundleDef> Bundles,
4114 Instruction *InsertPt = nullptr);
4115
4116 /// Return the number of callbr indirect dest labels.
4117 ///
4118 unsigned getNumIndirectDests() const { return NumIndirectDests; }
4119
4120 /// getIndirectDestLabel - Return the i-th indirect dest label.
4121 ///
4122 Value *getIndirectDestLabel(unsigned i) const {
4123 assert(i < getNumIndirectDests() && "Out of bounds!");
4124 return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1);
4125 }
4126
4127 Value *getIndirectDestLabelUse(unsigned i) const {
4128 assert(i < getNumIndirectDests() && "Out of bounds!");
4129 return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1);
4130 }
4131
4132 // Return the destination basic blocks...
4133 BasicBlock *getDefaultDest() const {
4134 return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1));
4135 }
4136 BasicBlock *getIndirectDest(unsigned i) const {
4137 return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i));
4138 }
4139 SmallVector<BasicBlock *, 16> getIndirectDests() const {
4140 SmallVector<BasicBlock *, 16> IndirectDests;
4141 for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i)
4142 IndirectDests.push_back(getIndirectDest(i));
4143 return IndirectDests;
4144 }
4145 void setDefaultDest(BasicBlock *B) {
4146 *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B);
4147 }
4148 void setIndirectDest(unsigned i, BasicBlock *B) {
4149 updateArgBlockAddresses(i, B);
4150 *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B);
4151 }
4152
4153 BasicBlock *getSuccessor(unsigned i) const {
4154 assert(i < getNumSuccessors() + 1 &&
4155        "Successor # out of range for callbr!");
4156 return i == 0 ? getDefaultDest() : getIndirectDest(i - 1);
4157 }
4158
4159 void setSuccessor(unsigned i, BasicBlock *NewSucc) {
4160 assert(i < getNumIndirectDests() + 1 &&
4161        "Successor # out of range for callbr!");
4162 return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc);
4163 }
4164
4165 unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; }
4166
4167 // Methods for support type inquiry through isa, cast, and dyn_cast:
4168 static bool classof(const Instruction *I) {
4169 return (I->getOpcode() == Instruction::CallBr);
4170 }
4171 static bool classof(const Value *V) {
4172 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4173 }
4174
4175private:
4176 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4177 // method so that subclasses cannot accidentally use it.
4178 template <typename Bitfield>
4179 void setSubclassData(typename Bitfield::Type Value) {
4180 Instruction::setSubclassData<Bitfield>(Value);
4181 }
4182};
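
// A minimal illustrative sketch of the Create overloads and successor
// numbering above; all names are hypothetical and the surrounding setup
// (callee, argument, blocks) is assumed to exist in caller code.
static CallBrInst *exampleCreateCallBr(FunctionCallee Callee, Value *Arg0,
                                       BasicBlock *DefaultBB,
                                       BasicBlock *IndirectBB,
                                       Instruction *InsertBefore) {
  // One indirect destination; control falls through to DefaultBB otherwise.
  return CallBrInst::Create(Callee, DefaultBB, {IndirectBB}, {Arg0}, "",
                            InsertBefore);
}

static void exampleVisitCallBrDests(const CallBrInst &CBI,
                                    function_ref<void(BasicBlock *)> Visit) {
  // Successor 0 is the default (fall-through) destination; successors
  // 1..N map to indirect destinations 0..N-1.
  for (unsigned I = 0, E = CBI.getNumSuccessors(); I != E; ++I)
    Visit(CBI.getSuccessor(I));
}
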
4183
4184CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4185 ArrayRef<BasicBlock *> IndirectDests,
4186 ArrayRef<Value *> Args,
4187 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4188 const Twine &NameStr, Instruction *InsertBefore)
4189 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4190 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4191 InsertBefore) {
4192 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4193}
4194
4195CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
4196 ArrayRef<BasicBlock *> IndirectDests,
4197 ArrayRef<Value *> Args,
4198 ArrayRef<OperandBundleDef> Bundles, int NumOperands,
4199 const Twine &NameStr, BasicBlock *InsertAtEnd)
4200 : CallBase(Ty->getReturnType(), Instruction::CallBr,
4201 OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
4202 InsertAtEnd) {
4203 init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
4204}
4205
4206//===----------------------------------------------------------------------===//
4207// ResumeInst Class
4208//===----------------------------------------------------------------------===//
4209
4210//===---------------------------------------------------------------------------
4211/// Resume the propagation of an exception.
4212///
4213class ResumeInst : public Instruction {
4214 ResumeInst(const ResumeInst &RI);
4215
4216 explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
4217 ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);
4218
4219protected:
4220 // Note: Instruction needs to be a friend here to call cloneImpl.
4221 friend class Instruction;
4222
4223 ResumeInst *cloneImpl() const;
4224
4225public:
4226 static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
4227 return new(1) ResumeInst(Exn, InsertBefore);
4228 }
4229
4230 static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
4231 return new(1) ResumeInst(Exn, InsertAtEnd);
4232 }
4233
4234 /// Provide fast operand accessors
4235 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4236
4237 /// Convenience accessor.
4238 Value *getValue() const { return Op<0>(); }
4239
4240 unsigned getNumSuccessors() const { return 0; }
4241
4242 // Methods for support type inquiry through isa, cast, and dyn_cast:
4243 static bool classof(const Instruction *I) {
4244 return I->getOpcode() == Instruction::Resume;
4245 }
4246 static bool classof(const Value *V) {
4247 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4248 }
4249
4250private:
4251 BasicBlock *getSuccessor(unsigned idx) const {
4252 llvm_unreachable("ResumeInst has no successors!");
4253 }
4254
4255 void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
4256 llvm_unreachable("ResumeInst has no successors!");
4257 }
4258};
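
// A minimal illustrative sketch of ResumeInst::Create: re-raising an
// in-flight exception. Exn is assumed to be the aggregate produced by a
// landingpad elsewhere; the names here are hypothetical.
static ResumeInst *exampleResume(Value *Exn, BasicBlock *InsertAtEnd) {
  // resume takes exactly one operand and terminates the block with no
  // successors (getNumSuccessors() == 0).
  return ResumeInst::Create(Exn, InsertAtEnd);
}
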
4259
4260template <>
4261struct OperandTraits<ResumeInst> :
4262 public FixedNumOperandTraits<ResumeInst, 1> {
4263};
4264
4265 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
4266
4267//===----------------------------------------------------------------------===//
4268// CatchSwitchInst Class
4269//===----------------------------------------------------------------------===//
4270class CatchSwitchInst : public Instruction {
4271 using UnwindDestField = BoolBitfieldElementT<0>;
4272
4273 /// The number of operands actually allocated. NumOperands is
4274 /// the number actually in use.
4275 unsigned ReservedSpace;
4276
4277 // Operand[0] = Outer scope
4278 // Operand[1] = Unwind block destination
4279 // Operand[n] = BasicBlock to go to on match
4280 CatchSwitchInst(const CatchSwitchInst &CSI);
4281
4282 /// Create a new catchswitch instruction, specifying an
4283 /// optional unwind destination. The number of handlers can be specified
4284 /// here to make memory allocation more efficient.
4285 /// This constructor can also autoinsert before another instruction.
4286 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4287 unsigned NumHandlers, const Twine &NameStr,
4288 Instruction *InsertBefore);
4289
4290 /// Create a new catchswitch instruction, specifying an
4291 /// optional unwind destination. The number of handlers can be specified
4292 /// here to make memory allocation more efficient.
4293 /// This constructor also autoinserts at the end of the specified BasicBlock.
4294 CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
4295 unsigned NumHandlers, const Twine &NameStr,
4296 BasicBlock *InsertAtEnd);
4297
4298 // allocate space for exactly zero operands
4299 void *operator new(size_t S) { return User::operator new(S); }
4300
4301 void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
4302 void growOperands(unsigned Size);
4303
4304protected:
4305 // Note: Instruction needs to be a friend here to call cloneImpl.
4306 friend class Instruction;
4307
4308 CatchSwitchInst *cloneImpl() const;
4309
4310public:
4311 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
4312
4313 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4314 unsigned NumHandlers,
4315 const Twine &NameStr = "",
4316 Instruction *InsertBefore = nullptr) {
4317 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4318 InsertBefore);
4319 }
4320
4321 static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
4322 unsigned NumHandlers, const Twine &NameStr,
4323 BasicBlock *InsertAtEnd) {
4324 return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
4325 InsertAtEnd);
4326 }
4327
4328 /// Provide fast operand accessors
4329 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4330
4331 // Accessor methods for the catchswitch parent pad.
4332 Value *getParentPad() const { return getOperand(0); }
4333 void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }
4334
4335 // Accessor methods for the catchswitch unwind destination.
4336 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4337 bool unwindsToCaller() const { return !hasUnwindDest(); }
4338 BasicBlock *getUnwindDest() const {
4339 if (hasUnwindDest())
4340 return cast<BasicBlock>(getOperand(1));
4341 return nullptr;
4342 }
4343 void setUnwindDest(BasicBlock *UnwindDest) {
4344 assert(UnwindDest);
4345 assert(hasUnwindDest());
4346 setOperand(1, UnwindDest);
4347 }
4348
4349 /// Return the number of 'handlers' in this catchswitch
4350 /// instruction, excluding the unwind destination (if any).
4351 unsigned getNumHandlers() const {
4352 if (hasUnwindDest())
4353 return getNumOperands() - 2;
4354 return getNumOperands() - 1;
4355 }
4356
4357private:
4358 static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
4359 static const BasicBlock *handler_helper(const Value *V) {
4360 return cast<BasicBlock>(V);
4361 }
4362
4363public:
4364 using DerefFnTy = BasicBlock *(*)(Value *);
4365 using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
4366 using handler_range = iterator_range<handler_iterator>;
4367 using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
4368 using const_handler_iterator =
4369 mapped_iterator<const_op_iterator, ConstDerefFnTy>;
4370 using const_handler_range = iterator_range<const_handler_iterator>;
4371
4372 /// Returns an iterator that points to the first handler in CatchSwitchInst.
4373 handler_iterator handler_begin() {
4374 op_iterator It = op_begin() + 1;
4375 if (hasUnwindDest())
4376 ++It;
4377 return handler_iterator(It, DerefFnTy(handler_helper));
4378 }
4379
4380 /// Returns an iterator that points to the first handler in the
4381 /// CatchSwitchInst.
4382 const_handler_iterator handler_begin() const {
4383 const_op_iterator It = op_begin() + 1;
4384 if (hasUnwindDest())
4385 ++It;
4386 return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
4387 }
4388
4389 /// Returns a read-only iterator that points one past the last
4390 /// handler in the CatchSwitchInst.
4391 handler_iterator handler_end() {
4392 return handler_iterator(op_end(), DerefFnTy(handler_helper));
4393 }
4394
4395 /// Returns an iterator that points one past the last handler in the
4396 /// CatchSwitchInst.
4397 const_handler_iterator handler_end() const {
4398 return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
4399 }
4400
4401 /// iteration adapter for range-for loops.
4402 handler_range handlers() {
4403 return make_range(handler_begin(), handler_end());
4404 }
4405
4406 /// iteration adapter for range-for loops.
4407 const_handler_range handlers() const {
4408 return make_range(handler_begin(), handler_end());
4409 }
4410
4411 /// Add a handler destination to this catchswitch instruction.
4412 /// Note:
4413 /// This action invalidates handler_end(). The old handler_end() iterator
4414 /// will point to the added handler.
4415 void addHandler(BasicBlock *Dest);
4416
4417 void removeHandler(handler_iterator HI);
4418
4419 unsigned getNumSuccessors() const { return getNumOperands() - 1; }
4420 BasicBlock *getSuccessor(unsigned Idx) const {
4421 assert(Idx < getNumSuccessors() &&
4422        "Successor # out of range for catchswitch!");
4423 return cast<BasicBlock>(getOperand(Idx + 1));
4424 }
4425 void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
4426 assert(Idx < getNumSuccessors() &&
4427        "Successor # out of range for catchswitch!");
4428 setOperand(Idx + 1, NewSucc);
4429 }
4430
4431 // Methods for support type inquiry through isa, cast, and dyn_cast:
4432 static bool classof(const Instruction *I) {
4433 return I->getOpcode() == Instruction::CatchSwitch;
4434 }
4435 static bool classof(const Value *V) {
4436 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4437 }
4438};
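
// A minimal illustrative sketch of the CatchSwitchInst API above:
// construction, handler registration, and handler iteration. ParentPad,
// UnwindBB, and the blocks are assumed to come from surrounding EH
// lowering code; all names are hypothetical.
static CatchSwitchInst *exampleBuildCatchSwitch(Value *ParentPad,
                                                BasicBlock *UnwindBB,
                                                BasicBlock *HandlerBB,
                                                BasicBlock *InsertAtEnd) {
  CatchSwitchInst *CSI = CatchSwitchInst::Create(ParentPad, UnwindBB,
                                                 /*NumHandlers=*/1, "cs",
                                                 InsertAtEnd);
  // Note: addHandler invalidates any saved handler_end() iterator.
  CSI->addHandler(HandlerBB);
  for (BasicBlock *Handler : CSI->handlers())
    (void)Handler; // each element is already mapped to a BasicBlock *
  return CSI;
}
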
4439
4440template <>
4441struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4442
4443 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
4444
4445//===----------------------------------------------------------------------===//
4446// CleanupPadInst Class
4447//===----------------------------------------------------------------------===//
4448class CleanupPadInst : public FuncletPadInst {
4449private:
4450 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4451 unsigned Values, const Twine &NameStr,
4452 Instruction *InsertBefore)
4453 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4454 NameStr, InsertBefore) {}
4455 explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args,
4456 unsigned Values, const Twine &NameStr,
4457 BasicBlock *InsertAtEnd)
4458 : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values,
4459 NameStr, InsertAtEnd) {}
4460
4461public:
4462 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None,
4463 const Twine &NameStr = "",
4464 Instruction *InsertBefore = nullptr) {
4465 unsigned Values = 1 + Args.size();
4466 return new (Values)
4467 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore);
4468 }
4469
4470 static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args,
4471 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4472 unsigned Values = 1 + Args.size();
4473 return new (Values)
4474 CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd);
4475 }
4476
4477 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4478 static bool classof(const Instruction *I) {
4479 return I->getOpcode() == Instruction::CleanupPad;
4480 }
4481 static bool classof(const Value *V) {
4482 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4483 }
4484};
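
// A minimal illustrative sketch of CleanupPadInst::Create with no extra
// arguments. For a top-level cleanup the parent pad is conventionally a
// 'none' token; the names here are hypothetical.
static CleanupPadInst *exampleCreateCleanupPad(Value *ParentPad,
                                               BasicBlock *PadBB) {
  return CleanupPadInst::Create(ParentPad, /*Args=*/None, "cleanup", PadBB);
}
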
4485
4486//===----------------------------------------------------------------------===//
4487// CatchPadInst Class
4488//===----------------------------------------------------------------------===//
4489class CatchPadInst : public FuncletPadInst {
4490private:
4491 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4492 unsigned Values, const Twine &NameStr,
4493 Instruction *InsertBefore)
4494 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4495 NameStr, InsertBefore) {}
4496 explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args,
4497 unsigned Values, const Twine &NameStr,
4498 BasicBlock *InsertAtEnd)
4499 : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values,
4500 NameStr, InsertAtEnd) {}
4501
4502public:
4503 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4504 const Twine &NameStr = "",
4505 Instruction *InsertBefore = nullptr) {
4506 unsigned Values = 1 + Args.size();
4507 return new (Values)
4508 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore);
4509 }
4510
4511 static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args,
4512 const Twine &NameStr, BasicBlock *InsertAtEnd) {
4513 unsigned Values = 1 + Args.size();
4514 return new (Values)
4515 CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd);
4516 }
4517
4518 /// Convenience accessors
4519 CatchSwitchInst *getCatchSwitch() const {
4520 return cast<CatchSwitchInst>(Op<-1>());
4521 }
4522 void setCatchSwitch(Value *CatchSwitch) {
4523 assert(CatchSwitch);
4524 Op<-1>() = CatchSwitch;
4525 }
4526
4527 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4528 static bool classof(const Instruction *I) {
4529 return I->getOpcode() == Instruction::CatchPad;
4530 }
4531 static bool classof(const Value *V) {
4532 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4533 }
4534};
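
// A minimal illustrative sketch of CatchPadInst::Create and the
// catchswitch linkage accessors above. The empty Args list is for
// illustration only; real personalities typically require clause operands.
static CatchPadInst *exampleCreateCatchPad(CatchSwitchInst *CSI,
                                           BasicBlock *PadBB) {
  CatchPadInst *CPI = CatchPadInst::Create(CSI, /*Args=*/None, "pad", PadBB);
  assert(CPI->getCatchSwitch() == CSI && "Op<-1> holds the catchswitch");
  return CPI;
}
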
4535
4536//===----------------------------------------------------------------------===//
4537// CatchReturnInst Class
4538//===----------------------------------------------------------------------===//
4539
4540class CatchReturnInst : public Instruction {
4541 CatchReturnInst(const CatchReturnInst &RI);
4542 CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
4543 CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);
4544
4545 void init(Value *CatchPad, BasicBlock *BB);
4546
4547protected:
4548 // Note: Instruction needs to be a friend here to call cloneImpl.
4549 friend class Instruction;
4550
4551 CatchReturnInst *cloneImpl() const;
4552
4553public:
4554 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4555 Instruction *InsertBefore = nullptr) {
4556 assert(CatchPad);
4557 assert(BB);
4558 return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
4559 }
4560
4561 static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
4562 BasicBlock *InsertAtEnd) {
4563 assert(CatchPad);
4564 assert(BB);
4565 return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
4566 }
4567
4568 /// Provide fast operand accessors
4569 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4570
4571 /// Convenience accessors.
4572 CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
4573 void setCatchPad(CatchPadInst *CatchPad) {
4574 assert(CatchPad);
4575 Op<0>() = CatchPad;
4576 }
4577
4578 BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
4579 void setSuccessor(BasicBlock *NewSucc) {
4580 assert(NewSucc);
4581 Op<1>() = NewSucc;
4582 }
4583 unsigned getNumSuccessors() const { return 1; }
4584
4585 /// Get the parentPad of this catchret's catchpad's catchswitch.
4586 /// The successor block is implicitly a member of this funclet.
4587 Value *getCatchSwitchParentPad() const {
4588 return getCatchPad()->getCatchSwitch()->getParentPad();
4589 }
4590
4591 // Methods for support type inquiry through isa, cast, and dyn_cast:
4592 static bool classof(const Instruction *I) {
4593 return (I->getOpcode() == Instruction::CatchRet);
4594 }
4595 static bool classof(const Value *V) {
4596 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4597 }
4598
4599private:
4600 BasicBlock *getSuccessor(unsigned Idx) const {
4601 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4602 return getSuccessor();
4603 }
4604
4605 void setSuccessor(unsigned Idx, BasicBlock *B) {
4606 assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4607 setSuccessor(B);
4608 }
4609};
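
// A minimal illustrative sketch of the accessor chain behind
// getCatchSwitchParentPad(): catchret -> catchpad -> catchswitch ->
// parent pad, exactly as documented above.
static Value *exampleParentPadOf(const CatchReturnInst &CRI) {
  return CRI.getCatchPad()->getCatchSwitch()->getParentPad();
}
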
4610
4611template <>
4612struct OperandTraits<CatchReturnInst>
4613 : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4614
4615 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
4616
4617//===----------------------------------------------------------------------===//
4618// CleanupReturnInst Class
4619//===----------------------------------------------------------------------===//
4620
4621class CleanupReturnInst : public Instruction {
4622 using UnwindDestField = BoolBitfieldElementT<0>;
4623
4624private:
4625 CleanupReturnInst(const CleanupReturnInst &RI);
4626 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4627 Instruction *InsertBefore = nullptr);
4628 CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
4629 BasicBlock *InsertAtEnd);
4630
4631 void init(Value *CleanupPad, BasicBlock *UnwindBB);
4632
4633protected:
4634 // Note: Instruction needs to be a friend here to call cloneImpl.
4635 friend class Instruction;
4636
4637 CleanupReturnInst *cloneImpl() const;
4638
4639public:
4640 static CleanupReturnInst *Create(Value *CleanupPad,
4641 BasicBlock *UnwindBB = nullptr,
4642 Instruction *InsertBefore = nullptr) {
4643 assert(CleanupPad);
4644 unsigned Values = 1;
4645 if (UnwindBB)
4646 ++Values;
4647 return new (Values)
4648 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
4649 }
4650
4651 static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
4652 BasicBlock *InsertAtEnd) {
4653 assert(CleanupPad);
4654 unsigned Values = 1;
4655 if (UnwindBB)
4656 ++Values;
4657 return new (Values)
4658 CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
4659 }
4660
4661 /// Provide fast operand accessors
4662 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
4663
4664 bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
4665 bool unwindsToCaller() const { return !hasUnwindDest(); }
4666
4667 /// Convenience accessor.
4668 CleanupPadInst *getCleanupPad() const {
4669 return cast<CleanupPadInst>(Op<0>());
4670 }
4671 void setCleanupPad(CleanupPadInst *CleanupPad) {
4672 assert(CleanupPad);
4673 Op<0>() = CleanupPad;
4674 }
4675
4676 unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }
4677
4678 BasicBlock *getUnwindDest() const {
4679 return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
4680 }
4681 void setUnwindDest(BasicBlock *NewDest) {
4682 assert(NewDest);
4683 assert(hasUnwindDest());
4684 Op<1>() = NewDest;
4685 }
4686
4687 // Methods for support type inquiry through isa, cast, and dyn_cast:
4688 static bool classof(const Instruction *I) {
4689 return (I->getOpcode() == Instruction::CleanupRet);
4690 }
4691 static bool classof(const Value *V) {
4692 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4693 }
4694
4695private:
4696 BasicBlock *getSuccessor(unsigned Idx) const {
4697 assert(Idx == 0);
4698 return getUnwindDest();
4699 }
4700
4701 void setSuccessor(unsigned Idx, BasicBlock *B) {
4702 assert(Idx == 0);
4703 setUnwindDest(B);
4704 }
4705
4706 // Shadow Instruction::setInstructionSubclassData with a private forwarding
4707 // method so that subclasses cannot accidentally use it.
4708 template <typename Bitfield>
4709 void setSubclassData(typename Bitfield::Type Value) {
4710 Instruction::setSubclassData<Bitfield>(Value);
4711 }
4712};
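
// A minimal illustrative sketch of CleanupReturnInst::Create. UnwindBB may
// be null, in which case the cleanupret unwinds to the caller and only one
// operand (the cleanuppad) is allocated.
static CleanupReturnInst *exampleCleanupRet(CleanupPadInst *Pad,
                                            BasicBlock *UnwindBB,
                                            BasicBlock *InsertAtEnd) {
  return CleanupReturnInst::Create(Pad, UnwindBB, InsertAtEnd);
}
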
4713
4714template <>
4715struct OperandTraits<CleanupReturnInst>
4716 : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4717
4718 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
4719
4720//===----------------------------------------------------------------------===//
4721// UnreachableInst Class
4722//===----------------------------------------------------------------------===//
4723
4724//===---------------------------------------------------------------------------
4725/// This instruction has undefined behavior. In particular, its presence
4726/// indicates some higher level knowledge that the end of the block
4727/// cannot be reached.
4728///
4729class UnreachableInst : public Instruction {
4730protected:
4731 // Note: Instruction needs to be a friend here to call cloneImpl.
4732 friend class Instruction;
4733
4734 UnreachableInst *cloneImpl() const;
4735
4736public:
4737 explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
4738 explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);
4739
4740 // allocate space for exactly zero operands
4741 void *operator new(size_t S) { return User::operator new(S, 0); }
4742 void operator delete(void *Ptr) { User::operator delete(Ptr); }
4743
4744 unsigned getNumSuccessors() const { return 0; }
4745
4746 // Methods for support type inquiry through isa, cast, and dyn_cast:
4747 static bool classof(const Instruction *I) {
4748 return I->getOpcode() == Instruction::Unreachable;
4749 }
4750 static bool classof(const Value *V) {
4751 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4752 }
4753
4754private:
4755 BasicBlock *getSuccessor(unsigned idx) const {
4756 llvm_unreachable("UnreachableInst has no successors!");
4757 }
4758
4759 void setSuccessor(unsigned idx, BasicBlock *B) {
4760 llvm_unreachable("UnreachableInst has no successors!");
4761 }
4762};
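
// A minimal illustrative sketch: terminating a block whose end is known to
// be unreachable. BB is assumed to currently lack a terminator.
static UnreachableInst *exampleMarkUnreachable(BasicBlock *BB) {
  return new UnreachableInst(BB->getContext(), BB);
}
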
4763
4764//===----------------------------------------------------------------------===//
4765// TruncInst Class
4766//===----------------------------------------------------------------------===//
4767
4768/// This class represents a truncation of integer types.
4769class TruncInst : public CastInst {
4770protected:
4771 // Note: Instruction needs to be a friend here to call cloneImpl.
4772 friend class Instruction;
4773
4774 /// Clone an identical TruncInst
4775 TruncInst *cloneImpl() const;
4776
4777public:
4778 /// Constructor with insert-before-instruction semantics
4779 TruncInst(
4780 Value *S, ///< The value to be truncated
4781 Type *Ty, ///< The (smaller) type to truncate to
4782 const Twine &NameStr = "", ///< A name for the new instruction
4783 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4784 );
4785
4786 /// Constructor with insert-at-end-of-block semantics
4787 TruncInst(
4788 Value *S, ///< The value to be truncated
4789 Type *Ty, ///< The (smaller) type to truncate to
4790 const Twine &NameStr, ///< A name for the new instruction
4791 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4792 );
4793
4794 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4795 static bool classof(const Instruction *I) {
4796 return I->getOpcode() == Trunc;
4797 }
4798 static bool classof(const Value *V) {
4799 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4800 }
4801};
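
// A minimal illustrative sketch of the construction pattern shared by
// TruncInst and the sibling CastInst subclasses below. Assumes V has an
// integer type wider than i8; the name "lo8" is arbitrary.
static TruncInst *exampleTruncToI8(Value *V, Instruction *InsertBefore) {
  return new TruncInst(V, Type::getInt8Ty(V->getContext()), "lo8",
                       InsertBefore);
}
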
4802
4803//===----------------------------------------------------------------------===//
4804// ZExtInst Class
4805//===----------------------------------------------------------------------===//
4806
4807/// This class represents zero extension of integer types.
4808class ZExtInst : public CastInst {
4809protected:
4810 // Note: Instruction needs to be a friend here to call cloneImpl.
4811 friend class Instruction;
4812
4813 /// Clone an identical ZExtInst
4814 ZExtInst *cloneImpl() const;
4815
4816public:
4817 /// Constructor with insert-before-instruction semantics
4818 ZExtInst(
4819 Value *S, ///< The value to be zero extended
4820 Type *Ty, ///< The type to zero extend to
4821 const Twine &NameStr = "", ///< A name for the new instruction
4822 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4823 );
4824
4825 /// Constructor with insert-at-end-of-block semantics
4826 ZExtInst(
4827 Value *S, ///< The value to be zero extended
4828 Type *Ty, ///< The type to zero extend to
4829 const Twine &NameStr, ///< A name for the new instruction
4830 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4831 );
4832
4833 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4834 static bool classof(const Instruction *I) {
4835 return I->getOpcode() == ZExt;
4836 }
4837 static bool classof(const Value *V) {
4838 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4839 }
4840};
4841
4842//===----------------------------------------------------------------------===//
4843// SExtInst Class
4844//===----------------------------------------------------------------------===//
4845
4846/// This class represents a sign extension of integer types.
4847class SExtInst : public CastInst {
4848protected:
4849 // Note: Instruction needs to be a friend here to call cloneImpl.
4850 friend class Instruction;
4851
4852 /// Clone an identical SExtInst
4853 SExtInst *cloneImpl() const;
4854
4855public:
4856 /// Constructor with insert-before-instruction semantics
4857 SExtInst(
4858 Value *S, ///< The value to be sign extended
4859 Type *Ty, ///< The type to sign extend to
4860 const Twine &NameStr = "", ///< A name for the new instruction
4861 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4862 );
4863
4864 /// Constructor with insert-at-end-of-block semantics
4865 SExtInst(
4866 Value *S, ///< The value to be sign extended
4867 Type *Ty, ///< The type to sign extend to
4868 const Twine &NameStr, ///< A name for the new instruction
4869 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4870 );
4871
4872 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4873 static bool classof(const Instruction *I) {
4874 return I->getOpcode() == SExt;
4875 }
4876 static bool classof(const Value *V) {
4877 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4878 }
4879};
4880
4881//===----------------------------------------------------------------------===//
4882// FPTruncInst Class
4883//===----------------------------------------------------------------------===//
4884
4885/// This class represents a truncation of floating point types.
4886class FPTruncInst : public CastInst {
4887protected:
4888 // Note: Instruction needs to be a friend here to call cloneImpl.
4889 friend class Instruction;
4890
4891 /// Clone an identical FPTruncInst
4892 FPTruncInst *cloneImpl() const;
4893
4894public:
4895 /// Constructor with insert-before-instruction semantics
4896 FPTruncInst(
4897 Value *S, ///< The value to be truncated
4898 Type *Ty, ///< The type to truncate to
4899 const Twine &NameStr = "", ///< A name for the new instruction
4900 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4901 );
4902
4903 /// Constructor with insert-at-end-of-block semantics
4904 FPTruncInst(
4905 Value *S, ///< The value to be truncated
4906 Type *Ty, ///< The type to truncate to
4907 const Twine &NameStr, ///< A name for the new instruction
4908 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4909 );
4910
4911 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4912 static bool classof(const Instruction *I) {
4913 return I->getOpcode() == FPTrunc;
4914 }
4915 static bool classof(const Value *V) {
4916 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4917 }
4918};
4919
4920//===----------------------------------------------------------------------===//
4921// FPExtInst Class
4922//===----------------------------------------------------------------------===//
4923
4924/// This class represents an extension of floating point types.
4925class FPExtInst : public CastInst {
4926protected:
4927 // Note: Instruction needs to be a friend here to call cloneImpl.
4928 friend class Instruction;
4929
4930 /// Clone an identical FPExtInst
4931 FPExtInst *cloneImpl() const;
4932
4933public:
4934 /// Constructor with insert-before-instruction semantics
4935 FPExtInst(
4936 Value *S, ///< The value to be extended
4937 Type *Ty, ///< The type to extend to
4938 const Twine &NameStr = "", ///< A name for the new instruction
4939 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4940 );
4941
4942 /// Constructor with insert-at-end-of-block semantics
4943 FPExtInst(
4944 Value *S, ///< The value to be extended
4945 Type *Ty, ///< The type to extend to
4946 const Twine &NameStr, ///< A name for the new instruction
4947 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4948 );
4949
4950 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4951 static bool classof(const Instruction *I) {
4952 return I->getOpcode() == FPExt;
4953 }
4954 static bool classof(const Value *V) {
4955 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4956 }
4957};
4958
4959//===----------------------------------------------------------------------===//
4960// UIToFPInst Class
4961//===----------------------------------------------------------------------===//
4962
4963/// This class represents a cast from unsigned integer to floating point.
4964class UIToFPInst : public CastInst {
4965protected:
4966 // Note: Instruction needs to be a friend here to call cloneImpl.
4967 friend class Instruction;
4968
4969 /// Clone an identical UIToFPInst
4970 UIToFPInst *cloneImpl() const;
4971
4972public:
4973 /// Constructor with insert-before-instruction semantics
4974 UIToFPInst(
4975 Value *S, ///< The value to be converted
4976 Type *Ty, ///< The type to convert to
4977 const Twine &NameStr = "", ///< A name for the new instruction
4978 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
4979 );
4980
4981 /// Constructor with insert-at-end-of-block semantics
4982 UIToFPInst(
4983 Value *S, ///< The value to be converted
4984 Type *Ty, ///< The type to convert to
4985 const Twine &NameStr, ///< A name for the new instruction
4986 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4987 );
4988
4989 /// Methods for support type inquiry through isa, cast, and dyn_cast:
4990 static bool classof(const Instruction *I) {
4991 return I->getOpcode() == UIToFP;
4992 }
4993 static bool classof(const Value *V) {
4994 return isa<Instruction>(V) && classof(cast<Instruction>(V));
4995 }
4996};
4997
4998//===----------------------------------------------------------------------===//
4999// SIToFPInst Class
5000//===----------------------------------------------------------------------===//
5001
5002/// This class represents a cast from signed integer to floating point.
5003class SIToFPInst : public CastInst {
5004protected:
5005 // Note: Instruction needs to be a friend here to call cloneImpl.
5006 friend class Instruction;
5007
5008 /// Clone an identical SIToFPInst
5009 SIToFPInst *cloneImpl() const;
5010
5011public:
5012 /// Constructor with insert-before-instruction semantics
5013 SIToFPInst(
5014 Value *S, ///< The value to be converted
5015 Type *Ty, ///< The type to convert to
5016 const Twine &NameStr = "", ///< A name for the new instruction
5017 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5018 );
5019
5020 /// Constructor with insert-at-end-of-block semantics
5021 SIToFPInst(
5022 Value *S, ///< The value to be converted
5023 Type *Ty, ///< The type to convert to
5024 const Twine &NameStr, ///< A name for the new instruction
5025 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5026 );
5027
5028 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5029 static bool classof(const Instruction *I) {
5030 return I->getOpcode() == SIToFP;
5031 }
5032 static bool classof(const Value *V) {
5033 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5034 }
5035};
5036
5037//===----------------------------------------------------------------------===//
5038// FPToUIInst Class
5039//===----------------------------------------------------------------------===//
5040
5041/// This class represents a cast from floating point to unsigned integer.
5042class FPToUIInst : public CastInst {
5043protected:
5044 // Note: Instruction needs to be a friend here to call cloneImpl.
5045 friend class Instruction;
5046
5047 /// Clone an identical FPToUIInst
5048 FPToUIInst *cloneImpl() const;
5049
5050public:
5051 /// Constructor with insert-before-instruction semantics
5052 FPToUIInst(
5053 Value *S, ///< The value to be converted
5054 Type *Ty, ///< The type to convert to
5055 const Twine &NameStr = "", ///< A name for the new instruction
5056 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5057 );
5058
5059 /// Constructor with insert-at-end-of-block semantics
5060 FPToUIInst(
5061 Value *S, ///< The value to be converted
5062 Type *Ty, ///< The type to convert to
5063 const Twine &NameStr, ///< A name for the new instruction
5064 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5065 );
5066
5067 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5068 static bool classof(const Instruction *I) {
5069 return I->getOpcode() == FPToUI;
5070 }
5071 static bool classof(const Value *V) {
5072 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5073 }
5074};
5075
5076//===----------------------------------------------------------------------===//
5077// FPToSIInst Class
5078//===----------------------------------------------------------------------===//
5079
5080/// This class represents a cast from floating point to signed integer.
5081class FPToSIInst : public CastInst {
5082protected:
5083 // Note: Instruction needs to be a friend here to call cloneImpl.
5084 friend class Instruction;
5085
5086 /// Clone an identical FPToSIInst
5087 FPToSIInst *cloneImpl() const;
5088
5089public:
5090 /// Constructor with insert-before-instruction semantics
5091 FPToSIInst(
5092 Value *S, ///< The value to be converted
5093 Type *Ty, ///< The type to convert to
5094 const Twine &NameStr = "", ///< A name for the new instruction
5095 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5096 );
5097
5098 /// Constructor with insert-at-end-of-block semantics
5099 FPToSIInst(
5100 Value *S, ///< The value to be converted
5101 Type *Ty, ///< The type to convert to
5102 const Twine &NameStr, ///< A name for the new instruction
5103 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5104 );
5105
5106 /// Methods for support type inquiry through isa, cast, and dyn_cast:
5107 static bool classof(const Instruction *I) {
5108 return I->getOpcode() == FPToSI;
5109 }
5110 static bool classof(const Value *V) {
5111 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5112 }
5113};
5114
5115//===----------------------------------------------------------------------===//
5116// IntToPtrInst Class
5117//===----------------------------------------------------------------------===//
5118
5119/// This class represents a cast from an integer to a pointer.
5120class IntToPtrInst : public CastInst {
5121public:
5122 // Note: Instruction needs to be a friend here to call cloneImpl.
5123 friend class Instruction;
5124
5125 /// Constructor with insert-before-instruction semantics
5126 IntToPtrInst(
5127 Value *S, ///< The value to be converted
5128 Type *Ty, ///< The type to convert to
5129 const Twine &NameStr = "", ///< A name for the new instruction
5130 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5131 );
5132
5133 /// Constructor with insert-at-end-of-block semantics
5134 IntToPtrInst(
5135 Value *S, ///< The value to be converted
5136 Type *Ty, ///< The type to convert to
5137 const Twine &NameStr, ///< A name for the new instruction
5138 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5139 );
5140
5141 /// Clone an identical IntToPtrInst.
5142 IntToPtrInst *cloneImpl() const;
5143
5144 /// Returns the address space of this instruction's pointer type.
5145 unsigned getAddressSpace() const {
5146 return getType()->getPointerAddressSpace();
5147 }
5148
5149 // Methods for support type inquiry through isa, cast, and dyn_cast:
5150 static bool classof(const Instruction *I) {
5151 return I->getOpcode() == IntToPtr;
5152 }
5153 static bool classof(const Value *V) {
5154 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5155 }
5156};
5157
5158//===----------------------------------------------------------------------===//
5159// PtrToIntInst Class
5160//===----------------------------------------------------------------------===//
5161
5162/// This class represents a cast from a pointer to an integer.
5163class PtrToIntInst : public CastInst {
5164protected:
5165 // Note: Instruction needs to be a friend here to call cloneImpl.
5166 friend class Instruction;
5167
5168 /// Clone an identical PtrToIntInst.
5169 PtrToIntInst *cloneImpl() const;
5170
5171public:
5172 /// Constructor with insert-before-instruction semantics
5173 PtrToIntInst(
5174 Value *S, ///< The value to be converted
5175 Type *Ty, ///< The type to convert to
5176 const Twine &NameStr = "", ///< A name for the new instruction
5177 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5178 );
5179
5180 /// Constructor with insert-at-end-of-block semantics
5181 PtrToIntInst(
5182 Value *S, ///< The value to be converted
5183 Type *Ty, ///< The type to convert to
5184 const Twine &NameStr, ///< A name for the new instruction
5185 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5186 );
5187
5188 /// Gets the pointer operand.
5189 Value *getPointerOperand() { return getOperand(0); }
5190 /// Gets the pointer operand.
5191 const Value *getPointerOperand() const { return getOperand(0); }
5192 /// Gets the operand index of the pointer operand.
5193 static unsigned getPointerOperandIndex() { return 0U; }
5194
5195 /// Returns the address space of the pointer operand.
5196 unsigned getPointerAddressSpace() const {
5197 return getPointerOperand()->getType()->getPointerAddressSpace();
5198 }
5199
5200 // Methods for support type inquiry through isa, cast, and dyn_cast:
5201 static bool classof(const Instruction *I) {
5202 return I->getOpcode() == PtrToInt;
5203 }
5204 static bool classof(const Value *V) {
5205 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5206 }
5207};
5208
5209//===----------------------------------------------------------------------===//
5210// BitCastInst Class
5211//===----------------------------------------------------------------------===//
5212
5213/// This class represents a no-op cast from one type to another.
5214class BitCastInst : public CastInst {
5215protected:
5216 // Note: Instruction needs to be a friend here to call cloneImpl.
5217 friend class Instruction;
5218
5219 /// Clone an identical BitCastInst.
5220 BitCastInst *cloneImpl() const;
5221
5222public:
5223 /// Constructor with insert-before-instruction semantics
5224 BitCastInst(
5225 Value *S, ///< The value to be cast
5226 Type *Ty, ///< The type to cast to
5227 const Twine &NameStr = "", ///< A name for the new instruction
5228 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5229 );
5230
5231 /// Constructor with insert-at-end-of-block semantics
5232 BitCastInst(
5233    Value *S,                     ///< The value to be cast
5234    Type *Ty,                     ///< The type to cast to
5235 const Twine &NameStr, ///< A name for the new instruction
5236 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5237 );
5238
5239 // Methods for support type inquiry through isa, cast, and dyn_cast:
5240 static bool classof(const Instruction *I) {
5241 return I->getOpcode() == BitCast;
5242 }
5243 static bool classof(const Value *V) {
5244 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5245 }
5246};
5247
5248//===----------------------------------------------------------------------===//
5249// AddrSpaceCastInst Class
5250//===----------------------------------------------------------------------===//
5251
5252/// This class represents a conversion between pointers from one address space
5253/// to another.
5254class AddrSpaceCastInst : public CastInst {
5255protected:
5256 // Note: Instruction needs to be a friend here to call cloneImpl.
5257 friend class Instruction;
5258
5259 /// Clone an identical AddrSpaceCastInst.
5260 AddrSpaceCastInst *cloneImpl() const;
5261
5262public:
5263 /// Constructor with insert-before-instruction semantics
5264 AddrSpaceCastInst(
5265    Value *S,                     ///< The value to be cast
5266    Type *Ty,                     ///< The type to cast to
5267 const Twine &NameStr = "", ///< A name for the new instruction
5268 Instruction *InsertBefore = nullptr ///< Where to insert the new instruction
5269 );
5270
5271 /// Constructor with insert-at-end-of-block semantics
5272 AddrSpaceCastInst(
5273    Value *S,                     ///< The value to be cast
5274    Type *Ty,                     ///< The type to cast to
5275 const Twine &NameStr, ///< A name for the new instruction
5276 BasicBlock *InsertAtEnd ///< The block to insert the instruction into
5277 );
5278
5279 // Methods for support type inquiry through isa, cast, and dyn_cast:
5280 static bool classof(const Instruction *I) {
5281 return I->getOpcode() == AddrSpaceCast;
5282 }
5283 static bool classof(const Value *V) {
5284 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5285 }
5286
5287 /// Gets the pointer operand.
5288 Value *getPointerOperand() {
5289 return getOperand(0);
5290 }
5291
5292 /// Gets the pointer operand.
5293 const Value *getPointerOperand() const {
5294 return getOperand(0);
5295 }
5296
5297 /// Gets the operand index of the pointer operand.
5298 static unsigned getPointerOperandIndex() {
5299 return 0U;
5300 }
5301
5302 /// Returns the address space of the pointer operand.
5303 unsigned getSrcAddressSpace() const {
5304 return getPointerOperand()->getType()->getPointerAddressSpace();
5305 }
5306
5307 /// Returns the address space of the result.
5308 unsigned getDestAddressSpace() const {
5309 return getType()->getPointerAddressSpace();
5310 }
5311};
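The classof hooks above are what drive isa<>, cast<>, and dyn_cast<> for this family of cast instructions. A minimal sketch of dispatching on the cast kind (describeCast is an illustrative name, not an LLVM API):

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative: classify a cast via the classof-based RTTI shown above.
static const char *describeCast(const Instruction &I) {
  if (isa<IntToPtrInst>(I))
    return "integer -> pointer";
  if (auto *PTI = dyn_cast<PtrToIntInst>(&I))
    return PTI->getPointerAddressSpace() == 0 ? "pointer (AS0) -> integer"
                                              : "pointer -> integer";
  if (auto *ASC = dyn_cast<AddrSpaceCastInst>(&I))
    return ASC->getSrcAddressSpace() == ASC->getDestAddressSpace()
               ? "addrspacecast within one address space"
               : "addrspacecast";
  if (isa<BitCastInst>(I))
    return "no-op bitcast";
  return "not one of the casts above";
}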
5312
5313//===----------------------------------------------------------------------===//
5314// Helper functions
5315//===----------------------------------------------------------------------===//
5316
5317/// A helper function that returns the pointer operand of a load or store
5318/// instruction. Returns nullptr if not load or store.
5319inline const Value *getLoadStorePointerOperand(const Value *V) {
5320 if (auto *Load = dyn_cast<LoadInst>(V))
5321 return Load->getPointerOperand();
5322 if (auto *Store = dyn_cast<StoreInst>(V))
5323 return Store->getPointerOperand();
5324 return nullptr;
5325}
5326inline Value *getLoadStorePointerOperand(Value *V) {
5327 return const_cast<Value *>(
5328 getLoadStorePointerOperand(static_cast<const Value *>(V)));
5329}
5330
5331/// A helper function that returns the pointer operand of a load, store
5332/// or GEP instruction. Returns nullptr if not load, store, or GEP.
5333inline const Value *getPointerOperand(const Value *V) {
5334 if (auto *Ptr = getLoadStorePointerOperand(V))
5335 return Ptr;
5336 if (auto *Gep = dyn_cast<GetElementPtrInst>(V))
5337 return Gep->getPointerOperand();
5338 return nullptr;
5339}
5340inline Value *getPointerOperand(Value *V) {
5341 return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V)));
5342}
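As a usage sketch, these helpers compose naturally when scanning a pointer's users for direct memory accesses (collectDirectAccesses is an illustrative name, not an LLVM API):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative: gather loads and stores whose address operand is exactly Ptr.
static void collectDirectAccesses(Value *Ptr,
                                  SmallVectorImpl<Instruction *> &Out) {
  for (User *U : Ptr->users())
    if (auto *I = dyn_cast<Instruction>(U))
      if (getLoadStorePointerOperand(I) == Ptr)
        Out.push_back(I);
}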
5343
5344/// A helper function that returns the alignment of a load or store instruction.
5345inline Align getLoadStoreAlignment(Value *I) {
5346  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5347         "Expected Load or Store instruction");
5348 if (auto *LI = dyn_cast<LoadInst>(I))
5349 return LI->getAlign();
5350 return cast<StoreInst>(I)->getAlign();
5351}
5352
5353/// A helper function that returns the address space of the pointer operand of
5354/// a load or store instruction.
5355inline unsigned getLoadStoreAddressSpace(Value *I) {
5356  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5357         "Expected Load or Store instruction");
5358 if (auto *LI = dyn_cast<LoadInst>(I))
5359 return LI->getPointerAddressSpace();
5360 return cast<StoreInst>(I)->getPointerAddressSpace();
5361}
5362
5363/// A helper function that returns the type of a load or store instruction.
5364inline Type *getLoadStoreType(Value *I) {
5365  assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5366         "Expected Load or Store instruction");
5367 if (auto *LI = dyn_cast<LoadInst>(I))
5368 return LI->getType();
5369 return cast<StoreInst>(I)->getValueOperand()->getType();
5370}
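Taken together, these three helpers let a pass summarize a memory access without branching on load vs. store itself. A small sketch (describeAccess is illustrative; the caller must pre-check the load/store requirement that the asserts enforce):

#include "llvm/IR/Instructions.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Illustrative: print type, alignment, and address space of an access.
static void describeAccess(Instruction *I, raw_ostream &OS) {
  if (!isa<LoadInst>(I) && !isa<StoreInst>(I))
    return; // the helpers below assert on anything else
  OS << (isa<LoadInst>(I) ? "load" : "store") << " of "
     << *getLoadStoreType(I) << ", align "
     << getLoadStoreAlignment(I).value() << ", addrspace "
     << getLoadStoreAddressSpace(I) << "\n";
}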
5371
5372/// A helper function that returns an atomic operation's sync scope; returns
5373/// None if it is not an atomic operation.
5374inline Optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) {
5375 if (!I->isAtomic())
5376 return None;
5377 if (auto *AI = dyn_cast<LoadInst>(I))
5378 return AI->getSyncScopeID();
5379 if (auto *AI = dyn_cast<StoreInst>(I))
5380 return AI->getSyncScopeID();
5381 if (auto *AI = dyn_cast<FenceInst>(I))
5382 return AI->getSyncScopeID();
5383 if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I))
5384 return AI->getSyncScopeID();
5385 if (auto *AI = dyn_cast<AtomicRMWInst>(I))
5386 return AI->getSyncScopeID();
5387  llvm_unreachable("unhandled atomic operation");
5388}
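A brief sketch of consuming the Optional result (sameSyncScope is an illustrative name): two operations share a sync scope only when both are atomic and the IDs agree.

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative: None (from a non-atomic instruction) never matches.
static bool sameSyncScope(const Instruction *A, const Instruction *B) {
  Optional<SyncScope::ID> SA = getAtomicSyncScopeID(A);
  Optional<SyncScope::ID> SB = getAtomicSyncScopeID(B);
  return SA && SB && *SA == *SB;
}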
5389
5390//===----------------------------------------------------------------------===//
5391// FreezeInst Class
5392//===----------------------------------------------------------------------===//
5393
5394/// This class represents a freeze operation that returns an arbitrary
5395/// concrete value if its operand is either a poison value or an undef value.
5396class FreezeInst : public UnaryInstruction {
5397protected:
5398 // Note: Instruction needs to be a friend here to call cloneImpl.
5399 friend class Instruction;
5400
5401 /// Clone an identical FreezeInst
5402 FreezeInst *cloneImpl() const;
5403
5404public:
5405 explicit FreezeInst(Value *S,
5406 const Twine &NameStr = "",
5407 Instruction *InsertBefore = nullptr);
5408 FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);
5409
5410 // Methods for support type inquiry through isa, cast, and dyn_cast:
5411 static inline bool classof(const Instruction *I) {
5412 return I->getOpcode() == Freeze;
5413 }
5414 static inline bool classof(const Value *V) {
5415 return isa<Instruction>(V) && classof(cast<Instruction>(V));
5416 }
5417};
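A small usage sketch: freezing a possibly-poison condition pins it to some concrete value before it is branched on. freezeCondition, Cond, and InsertPt are illustrative names; passes such as SimplifyCFG insert freezes in a similar way.

#include "llvm/IR/Instructions.h"
using namespace llvm;

// Illustrative: reuse an existing freeze, otherwise insert one.
static Value *freezeCondition(Value *Cond, Instruction *InsertPt) {
  if (isa<FreezeInst>(Cond))
    return Cond;
  return new FreezeInst(Cond, Cond->getName() + ".fr", InsertPt);
}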
5418
5419} // end namespace llvm
5420
5421#endif // LLVM_IR_INSTRUCTIONS_H

/build/llvm-toolchain-snapshot-15~++20220407100720+1c9415806ba6/llvm/include/llvm/IR/Instruction.h

1//===-- llvm/Instruction.h - Instruction class definition -------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the declaration of the Instruction class, which is the
10// base class for all of the LLVM instructions.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_INSTRUCTION_H
15#define LLVM_IR_INSTRUCTION_H
16
17#include "llvm/ADT/ArrayRef.h"
18#include "llvm/ADT/Bitfields.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/StringRef.h"
21#include "llvm/ADT/ilist_node.h"
22#include "llvm/IR/DebugLoc.h"
23#include "llvm/IR/SymbolTableListTraits.h"
24#include "llvm/IR/User.h"
25#include "llvm/IR/Value.h"
26#include "llvm/Support/AtomicOrdering.h"
27#include <cstdint>
28#include <utility>
29
30namespace llvm {
31
32class BasicBlock;
33class FastMathFlags;
34class MDNode;
35class Module;
36struct AAMDNodes;
37
38template <> struct ilist_alloc_traits<Instruction> {
39 static inline void deleteNode(Instruction *V);
40};
41
42class Instruction : public User,
43 public ilist_node_with_parent<Instruction, BasicBlock> {
44 BasicBlock *Parent;
45 DebugLoc DbgLoc; // 'dbg' Metadata cache.
46
47 /// Relative order of this instruction in its parent basic block. Used for
48 /// O(1) local dominance checks between instructions.
49 mutable unsigned Order = 0;
50
51protected:
52  // The first 15 bits of `Value::SubclassData` are available for subclasses of
53 // `Instruction` to use.
54 using OpaqueField = Bitfield::Element<uint16_t, 0, 15>;
55
56  // Template alias so that all Instruction subclasses storing an alignment use
57  // the same definition.
58 // Valid alignments are powers of two from 2^0 to 2^MaxAlignmentExponent =
59 // 2^32. We store them as Log2(Alignment), so we need 6 bits to encode the 33
60 // possible values.
61 template <unsigned Offset>
62 using AlignmentBitfieldElementT =
63 typename Bitfield::Element<unsigned, Offset, 6,
64 Value::MaxAlignmentExponent>;
65
66 template <unsigned Offset>
67 using BoolBitfieldElementT = typename Bitfield::Element<bool, Offset, 1>;
68
69 template <unsigned Offset>
70 using AtomicOrderingBitfieldElementT =
71 typename Bitfield::Element<AtomicOrdering, Offset, 3,
72 AtomicOrdering::LAST>;
73
74private:
75 // The last bit is used to store whether the instruction has metadata attached
76 // or not.
77 using HasMetadataField = Bitfield::Element<bool, 15, 1>;
78
79protected:
80 ~Instruction(); // Use deleteValue() to delete a generic Instruction.
81
82public:
83 Instruction(const Instruction &) = delete;
84 Instruction &operator=(const Instruction &) = delete;
85
86 /// Specialize the methods defined in Value, as we know that an instruction
87 /// can only be used by other instructions.
88 Instruction *user_back() { return cast<Instruction>(*user_begin());}
89 const Instruction *user_back() const { return cast<Instruction>(*user_begin());}
90
91 inline const BasicBlock *getParent() const { return Parent; }
92 inline BasicBlock *getParent() { return Parent; }
93
94 /// Return the module owning the function this instruction belongs to
95  /// or nullptr if the function does not have a module.
96 ///
97 /// Note: this is undefined behavior if the instruction does not have a
98 /// parent, or the parent basic block does not have a parent function.
99 const Module *getModule() const;
100 Module *getModule() {
101 return const_cast<Module *>(
102 static_cast<const Instruction *>(this)->getModule());
103 }
104
105 /// Return the function this instruction belongs to.
106 ///
107 /// Note: it is undefined behavior to call this on an instruction not
108 /// currently inserted into a function.
109 const Function *getFunction() const;
110 Function *getFunction() {
111 return const_cast<Function *>(
112 static_cast<const Instruction *>(this)->getFunction());
113 }
114
115 /// This method unlinks 'this' from the containing basic block, but does not
116 /// delete it.
117 void removeFromParent();
118
119 /// This method unlinks 'this' from the containing basic block and deletes it.
120 ///
121 /// \returns an iterator pointing to the element after the erased one
122 SymbolTableList<Instruction>::iterator eraseFromParent();
123
124 /// Insert an unlinked instruction into a basic block immediately before
125 /// the specified instruction.
126 void insertBefore(Instruction *InsertPos);
127
128 /// Insert an unlinked instruction into a basic block immediately after the
129 /// specified instruction.
130 void insertAfter(Instruction *InsertPos);
131
132 /// Unlink this instruction from its current basic block and insert it into
133 /// the basic block that MovePos lives in, right before MovePos.
134 void moveBefore(Instruction *MovePos);
135
136 /// Unlink this instruction and insert into BB before I.
137 ///
138 /// \pre I is a valid iterator into BB.
139 void moveBefore(BasicBlock &BB, SymbolTableList<Instruction>::iterator I);
140
141 /// Unlink this instruction from its current basic block and insert it into
142 /// the basic block that MovePos lives in, right after MovePos.
143 void moveAfter(Instruction *MovePos);
144
145 /// Given an instruction Other in the same basic block as this instruction,
146  /// return true if this instruction comes before Other. In the worst case,
147 /// this takes linear time in the number of instructions in the block. The
148 /// results are cached, so in common cases when the block remains unmodified,
149 /// it takes constant time.
150 bool comesBefore(const Instruction *Other) const;
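A quick sketch of how the cached ordering might be used (firstInBlock is an illustrative helper, not an LLVM API):

#include "llvm/IR/Instruction.h"
using namespace llvm;

// Illustrative: return whichever of two same-block instructions executes
// first; repeated queries on an unmodified block stay constant-time.
static Instruction *firstInBlock(Instruction *A, Instruction *B) {
  assert(A->getParent() == B->getParent() && "expected a common block");
  return A->comesBefore(B) ? A : B;
}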
151
152 //===--------------------------------------------------------------------===//
153 // Subclass classification.
154 //===--------------------------------------------------------------------===//
155
156 /// Returns a member of one of the enums like Instruction::Add.
157 unsigned getOpcode() const { return getValueID() - InstructionVal; }
158
159 const char *getOpcodeName() const { return getOpcodeName(getOpcode()); }
160 bool isTerminator() const { return isTerminator(getOpcode()); }
161 bool isUnaryOp() const { return isUnaryOp(getOpcode()); }
162 bool isBinaryOp() const { return isBinaryOp(getOpcode()); }
163 bool isIntDivRem() const { return isIntDivRem(getOpcode()); }
164 bool isShift() const { return isShift(getOpcode()); }
165 bool isCast() const { return isCast(getOpcode()); }
166 bool isFuncletPad() const { return isFuncletPad(getOpcode()); }
167 bool isExceptionalTerminator() const {
168 return isExceptionalTerminator(getOpcode());
169 }
170
171 /// It checks if this instruction is the only user of at least one of
172 /// its operands.
173 bool isOnlyUserOfAnyOperand();
174
175 bool isIndirectTerminator() const {
176 return isIndirectTerminator(getOpcode());
177 }
178
179 static const char* getOpcodeName(unsigned OpCode);
180
181 static inline bool isTerminator(unsigned OpCode) {
182 return OpCode >= TermOpsBegin && OpCode < TermOpsEnd;
183 }
184
185 static inline bool isUnaryOp(unsigned Opcode) {
186 return Opcode >= UnaryOpsBegin && Opcode < UnaryOpsEnd;
187 }
188 static inline bool isBinaryOp(unsigned Opcode) {
189 return Opcode >= BinaryOpsBegin && Opcode < BinaryOpsEnd;
190 }
191
192 static inline bool isIntDivRem(unsigned Opcode) {
193 return Opcode == UDiv || Opcode == SDiv || Opcode == URem || Opcode == SRem;
194 }
195
196 /// Determine if the Opcode is one of the shift instructions.
197 static inline bool isShift(unsigned Opcode) {
198 return Opcode >= Shl && Opcode <= AShr;
199 }
200
201 /// Return true if this is a logical shift left or a logical shift right.
202 inline bool isLogicalShift() const {
203 return getOpcode() == Shl || getOpcode() == LShr;
204 }
205
206 /// Return true if this is an arithmetic shift right.
207 inline bool isArithmeticShift() const {
208 return getOpcode() == AShr;
209 }
210
211 /// Determine if the Opcode is and/or/xor.
212 static inline bool isBitwiseLogicOp(unsigned Opcode) {
213 return Opcode == And || Opcode == Or || Opcode == Xor;
214 }
215
216 /// Return true if this is and/or/xor.
217 inline bool isBitwiseLogicOp() const {
218 return isBitwiseLogicOp(getOpcode());
219 }
220
221 /// Determine if the OpCode is one of the CastInst instructions.
222 static inline bool isCast(unsigned OpCode) {
223 return OpCode >= CastOpsBegin && OpCode < CastOpsEnd;
224 }
225
226 /// Determine if the OpCode is one of the FuncletPadInst instructions.
227 static inline bool isFuncletPad(unsigned OpCode) {
228 return OpCode >= FuncletPadOpsBegin && OpCode < FuncletPadOpsEnd;
229 }
230
231 /// Returns true if the OpCode is a terminator related to exception handling.
232 static inline bool isExceptionalTerminator(unsigned OpCode) {
233 switch (OpCode) {
234 case Instruction::CatchSwitch:
235 case Instruction::CatchRet:
236 case Instruction::CleanupRet:
237 case Instruction::Invoke:
238 case Instruction::Resume:
239 return true;
240 default:
241 return false;
242 }
243 }
244
245 /// Returns true if the OpCode is a terminator with indirect targets.
246 static inline bool isIndirectTerminator(unsigned OpCode) {
247 switch (OpCode) {
248 case Instruction::IndirectBr:
249 case Instruction::CallBr:
250 return true;
251 default:
252 return false;
253 }
254 }
255
256 //===--------------------------------------------------------------------===//
257 // Metadata manipulation.
258 //===--------------------------------------------------------------------===//
259
260 /// Return true if this instruction has any metadata attached to it.
261 bool hasMetadata() const { return DbgLoc || Value::hasMetadata(); }
262
263 /// Return true if this instruction has metadata attached to it other than a
264 /// debug location.
265 bool hasMetadataOtherThanDebugLoc() const { return Value::hasMetadata(); }
266
267 /// Return true if this instruction has the given type of metadata attached.
268 bool hasMetadata(unsigned KindID) const {
269 return getMetadata(KindID) != nullptr;
24: Assuming the condition is false
25: Returning zero, which participates in a condition later
270 }
271
272 /// Return true if this instruction has the given type of metadata attached.
273 bool hasMetadata(StringRef Kind) const {
274 return getMetadata(Kind) != nullptr;
275 }
276
277 /// Get the metadata of given kind attached to this Instruction.
278 /// If the metadata is not found then return null.
279 MDNode *getMetadata(unsigned KindID) const {
280 if (!hasMetadata()) return nullptr;
281 return getMetadataImpl(KindID);
282 }
283
284 /// Get the metadata of given kind attached to this Instruction.
285 /// If the metadata is not found then return null.
286 MDNode *getMetadata(StringRef Kind) const {
287 if (!hasMetadata()) return nullptr;
288 return getMetadataImpl(Kind);
289 }
290
291 /// Get all metadata attached to this Instruction. The first element of each
292 /// pair returned is the KindID, the second element is the metadata value.
293 /// This list is returned sorted by the KindID.
294 void
295 getAllMetadata(SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const {
296 if (hasMetadata())
297 getAllMetadataImpl(MDs);
298 }
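As a usage sketch, the KindID-based getters pair with LLVMContext::getMDKindID for named kinds; here copying !nontemporal metadata if present (copyNontemporal is an illustrative name):

#include "llvm/IR/Instruction.h"
#include "llvm/IR/LLVMContext.h"
using namespace llvm;

// Illustrative: propagate one metadata kind from From to To, if attached.
static void copyNontemporal(const Instruction &From, Instruction &To) {
  unsigned KindID = From.getContext().getMDKindID("nontemporal");
  if (MDNode *MD = From.getMetadata(KindID))
    To.setMetadata(KindID, MD);
}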
299
300 /// This does the same thing as getAllMetadata, except that it filters out the
301 /// debug location.
302 void getAllMetadataOtherThanDebugLoc(
303 SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const {
304 Value::getAllMetadata(MDs);
305 }
306
307 /// Set the metadata of the specified kind to the specified node. This updates
308 /// or replaces metadata if already present, or removes it if Node is null.
309 void setMetadata(unsigned KindID, MDNode *Node);
310 void setMetadata(StringRef Kind, MDNode *Node);
311
312 /// Copy metadata from \p SrcInst to this instruction. \p WL, if not empty,
313 /// specifies the list of meta data that needs to be copied. If \p WL is
314 /// empty, all meta data will be copied.
315 void copyMetadata(const Instruction &SrcInst,
316 ArrayRef<unsigned> WL = ArrayRef<unsigned>());
317
318 /// If the instruction has "branch_weights" MD_prof metadata and the MDNode
319 /// has three operands (including name string), swap the order of the
320 /// metadata.
321 void swapProfMetadata();
322
323 /// Drop all unknown metadata except for debug locations.
324 /// @{
325 /// Passes are required to drop metadata they don't understand. This is a
326 /// convenience method for passes to do so.
327 /// dropUndefImplyingAttrsAndUnknownMetadata should be used instead of
328 /// this API if the Instruction being modified is a call.
329 void dropUnknownNonDebugMetadata(ArrayRef<unsigned> KnownIDs);
330 void dropUnknownNonDebugMetadata() {
331 return dropUnknownNonDebugMetadata(None);
332 }
333 void dropUnknownNonDebugMetadata(unsigned ID1) {
334 return dropUnknownNonDebugMetadata(makeArrayRef(ID1));
335 }
336 void dropUnknownNonDebugMetadata(unsigned ID1, unsigned ID2) {
337 unsigned IDs[] = {ID1, ID2};
338 return dropUnknownNonDebugMetadata(IDs);
339 }
340 /// @}
341
342 /// Adds an !annotation metadata node with \p Annotation to this instruction.
343 /// If this instruction already has !annotation metadata, append \p Annotation
344 /// to the existing node.
345 void addAnnotationMetadata(StringRef Annotation);
346
347 /// Returns the AA metadata for this instruction.
348 AAMDNodes getAAMetadata() const;
349
350 /// Sets the AA metadata on this instruction from the AAMDNodes structure.
351 void setAAMetadata(const AAMDNodes &N);
352
353 /// Retrieve the raw weight values of a conditional branch or select.
354 /// Returns true on success with profile weights filled in.
355 /// Returns false if no metadata or invalid metadata was found.
356 bool extractProfMetadata(uint64_t &TrueVal, uint64_t &FalseVal) const;
357
358 /// Retrieve total raw weight values of a branch.
359 /// Returns true on success with profile total weights filled in.
360 /// Returns false if no metadata was found.
361 bool extractProfTotalWeight(uint64_t &TotalVal) const;
362
363 /// Set the debug location information for this instruction.
364 void setDebugLoc(DebugLoc Loc) { DbgLoc = std::move(Loc); }
365
366 /// Return the debug location for this node as a DebugLoc.
367 const DebugLoc &getDebugLoc() const { return DbgLoc; }
368
369 /// Set or clear the nuw flag on this instruction, which must be an operator
370 /// which supports this flag. See LangRef.html for the meaning of this flag.
371 void setHasNoUnsignedWrap(bool b = true);
372
373 /// Set or clear the nsw flag on this instruction, which must be an operator
374 /// which supports this flag. See LangRef.html for the meaning of this flag.
375 void setHasNoSignedWrap(bool b = true);
376
377 /// Set or clear the exact flag on this instruction, which must be an operator
378 /// which supports this flag. See LangRef.html for the meaning of this flag.
379 void setIsExact(bool b = true);
380
381 /// Determine whether the no unsigned wrap flag is set.
382 bool hasNoUnsignedWrap() const;
383
384 /// Determine whether the no signed wrap flag is set.
385 bool hasNoSignedWrap() const;
386
387 /// Return true if this operator has flags which may cause this instruction
388 /// to evaluate to poison despite having non-poison inputs.
389 bool hasPoisonGeneratingFlags() const;
390
391 /// Drops flags that may cause this instruction to evaluate to poison despite
392 /// having non-poison inputs.
393 void dropPoisonGeneratingFlags();
394
395 /// This function drops non-debug unknown metadata (through
396 /// dropUnknownNonDebugMetadata). For calls, it also drops parameter and
397 /// return attributes that can cause undefined behaviour. Both of these should
398 /// be done by passes which move instructions in IR.
399 void
400 dropUndefImplyingAttrsAndUnknownMetadata(ArrayRef<unsigned> KnownIDs = {});
401
402 /// Determine whether the exact flag is set.
403 bool isExact() const;
404
405 /// Set or clear all fast-math-flags on this instruction, which must be an
406 /// operator which supports this flag. See LangRef.html for the meaning of
407 /// this flag.
408 void setFast(bool B);
409
410 /// Set or clear the reassociation flag on this instruction, which must be
411 /// an operator which supports this flag. See LangRef.html for the meaning of
412 /// this flag.
413 void setHasAllowReassoc(bool B);
414
415 /// Set or clear the no-nans flag on this instruction, which must be an
416 /// operator which supports this flag. See LangRef.html for the meaning of
417 /// this flag.
418 void setHasNoNaNs(bool B);
419
420 /// Set or clear the no-infs flag on this instruction, which must be an
421 /// operator which supports this flag. See LangRef.html for the meaning of
422 /// this flag.
423 void setHasNoInfs(bool B);
424
425 /// Set or clear the no-signed-zeros flag on this instruction, which must be
426 /// an operator which supports this flag. See LangRef.html for the meaning of
427 /// this flag.
428 void setHasNoSignedZeros(bool B);
429
430 /// Set or clear the allow-reciprocal flag on this instruction, which must be
431 /// an operator which supports this flag. See LangRef.html for the meaning of
432 /// this flag.
433 void setHasAllowReciprocal(bool B);
434
435 /// Set or clear the allow-contract flag on this instruction, which must be
436 /// an operator which supports this flag. See LangRef.html for the meaning of
437 /// this flag.
438 void setHasAllowContract(bool B);
439
440 /// Set or clear the approximate-math-functions flag on this instruction,
441 /// which must be an operator which supports this flag. See LangRef.html for
442 /// the meaning of this flag.
443 void setHasApproxFunc(bool B);
444
445 /// Convenience function for setting multiple fast-math flags on this
446 /// instruction, which must be an operator which supports these flags. See
447 /// LangRef.html for the meaning of these flags.
448 void setFastMathFlags(FastMathFlags FMF);
449
450 /// Convenience function for transferring all fast-math flag values to this
451 /// instruction, which must be an operator which supports these flags. See
452 /// LangRef.html for the meaning of these flags.
453 void copyFastMathFlags(FastMathFlags FMF);
454
455 /// Determine whether all fast-math-flags are set.
456 bool isFast() const;
457
458 /// Determine whether the allow-reassociation flag is set.
459 bool hasAllowReassoc() const;
460
461 /// Determine whether the no-NaNs flag is set.
462 bool hasNoNaNs() const;
463
464 /// Determine whether the no-infs flag is set.
465 bool hasNoInfs() const;
466
467 /// Determine whether the no-signed-zeros flag is set.
468 bool hasNoSignedZeros() const;
469
470 /// Determine whether the allow-reciprocal flag is set.
471 bool hasAllowReciprocal() const;
472
473 /// Determine whether the allow-contract flag is set.
474 bool hasAllowContract() const;
475
476 /// Determine whether the approximate-math-functions flag is set.
477 bool hasApproxFunc() const;
478
479 /// Convenience function for getting all the fast-math flags, which must be an
480 /// operator which supports these flags. See LangRef.html for the meaning of
481 /// these flags.
482 FastMathFlags getFastMathFlags() const;
483
484 /// Copy I's fast-math flags
485 void copyFastMathFlags(const Instruction *I);
486
487 /// Convenience method to copy supported exact, fast-math, and (optionally)
488 /// wrapping flags from V to this instruction.
489 void copyIRFlags(const Value *V, bool IncludeWrapFlags = true);
490
491 /// Logical 'and' of any supported wrapping, exact, and fast-math flags of
492 /// V and this instruction.
493 void andIRFlags(const Value *V);
494
495 /// Merge 2 debug locations and apply it to the Instruction. If the
496  /// instruction is a CallInst, we need to traverse the inline chain to find
497 /// the common scope. This is not efficient for N-way merging as each time
498 /// you merge 2 iterations, you need to rebuild the hashmap to find the
499 /// common scope. However, we still choose this API because:
500 /// 1) Simplicity: it takes 2 locations instead of a list of locations.
501 /// 2) In worst case, it increases the complexity from O(N*I) to
502 /// O(2*N*I), where N is # of Instructions to merge, and I is the
503 /// maximum level of inline stack. So it is still linear.
504 /// 3) Merging of call instructions should be extremely rare in real
505  ///    applications, thus the N-way merging should rarely be on a hot code path.
506 /// The DebugLoc attached to this instruction will be overwritten by the
507 /// merged DebugLoc.
508 void applyMergedLocation(const DILocation *LocA, const DILocation *LocB);
509
510 /// Updates the debug location given that the instruction has been hoisted
511 /// from a block to a predecessor of that block.
512 /// Note: it is undefined behavior to call this on an instruction not
513 /// currently inserted into a function.
514 void updateLocationAfterHoist();
515
516 /// Drop the instruction's debug location. This does not guarantee removal
517 /// of the !dbg source location attachment, as it must set a line 0 location
518 /// with scope information attached on call instructions. To guarantee
519 /// removal of the !dbg attachment, use the \ref setDebugLoc() API.
520 /// Note: it is undefined behavior to call this on an instruction not
521 /// currently inserted into a function.
522 void dropLocation();
523
524private:
525 // These are all implemented in Metadata.cpp.
526 MDNode *getMetadataImpl(unsigned KindID) const;
527 MDNode *getMetadataImpl(StringRef Kind) const;
528 void
529 getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned, MDNode *>> &) const;
530
531public:
532 //===--------------------------------------------------------------------===//
533 // Predicates and helper methods.
534 //===--------------------------------------------------------------------===//
535
536 /// Return true if the instruction is associative:
537 ///
538 /// Associative operators satisfy: x op (y op z) === (x op y) op z
539 ///
540 /// In LLVM, the Add, Mul, And, Or, and Xor operators are associative.
541 ///
542  bool isAssociative() const LLVM_READONLY;
543 static bool isAssociative(unsigned Opcode) {
544 return Opcode == And || Opcode == Or || Opcode == Xor ||
545 Opcode == Add || Opcode == Mul;
546 }
547
548 /// Return true if the instruction is commutative:
549 ///
550 /// Commutative operators satisfy: (x op y) === (y op x)
551 ///
552 /// In LLVM, these are the commutative operators, plus SetEQ and SetNE, when
553 /// applied to any type.
554 ///
555  bool isCommutative() const LLVM_READONLY;
556 static bool isCommutative(unsigned Opcode) {
557 switch (Opcode) {
558 case Add: case FAdd:
559 case Mul: case FMul:
560 case And: case Or: case Xor:
561 return true;
562 default:
563 return false;
564 }
565 }
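A small sketch of the kind of canonicalization these predicates enable (canonicalizeConstantToRHS is an illustrative name; InstCombine performs a more thorough version of this):

#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// Illustrative: for a commutative op, move a lone constant to operand 1.
static void canonicalizeConstantToRHS(BinaryOperator *BO) {
  if (BO->isCommutative() && isa<Constant>(BO->getOperand(0)) &&
      !isa<Constant>(BO->getOperand(1)))
    BO->swapOperands();
}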
566
567 /// Return true if the instruction is idempotent:
568 ///
569 /// Idempotent operators satisfy: x op x === x
570 ///
571 /// In LLVM, the And and Or operators are idempotent.
572 ///
573 bool isIdempotent() const { return isIdempotent(getOpcode()); }
574 static bool isIdempotent(unsigned Opcode) {
575 return Opcode == And || Opcode == Or;
576 }
577
578 /// Return true if the instruction is nilpotent:
579 ///
580 /// Nilpotent operators satisfy: x op x === Id,
581 ///
582 /// where Id is the identity for the operator, i.e. a constant such that
583 /// x op Id === x and Id op x === x for all x.
584 ///
585 /// In LLVM, the Xor operator is nilpotent.
586 ///
587 bool isNilpotent() const { return isNilpotent(getOpcode()); }
588 static bool isNilpotent(unsigned Opcode) {
589 return Opcode == Xor;
590 }
591
592 /// Return true if this instruction may modify memory.
593 bool mayWriteToMemory() const;
594
595 /// Return true if this instruction may read memory.
596 bool mayReadFromMemory() const;
597
598 /// Return true if this instruction may read or write memory.
599 bool mayReadOrWriteMemory() const {
600 return mayReadFromMemory() || mayWriteToMemory();
601 }
602
603 /// Return true if this instruction has an AtomicOrdering of unordered or
604 /// higher.
605 bool isAtomic() const;
606
607 /// Return true if this atomic instruction loads from memory.
608 bool hasAtomicLoad() const;
609
610 /// Return true if this atomic instruction stores to memory.
611 bool hasAtomicStore() const;
612
613 /// Return true if this instruction has a volatile memory access.
614 bool isVolatile() const;
615
616 /// Return true if this instruction may throw an exception.
617 bool mayThrow() const;
618
619 /// Return true if this instruction behaves like a memory fence: it can load
620  /// or store to a memory location without being given a memory location.
621 bool isFenceLike() const {
622 switch (getOpcode()) {
623 default:
624 return false;
625 // This list should be kept in sync with the list in mayWriteToMemory for
626 // all opcodes which don't have a memory location.
627 case Instruction::Fence:
628 case Instruction::CatchPad:
629 case Instruction::CatchRet:
630 case Instruction::Call:
631 case Instruction::Invoke:
632 return true;
633 }
634 }
635
636 /// Return true if the instruction may have side effects.
637 ///
638 /// Side effects are:
639 /// * Writing to memory.
640 /// * Unwinding.
641 /// * Not returning (e.g. an infinite loop).
642 ///
643 /// Note that this does not consider malloc and alloca to have side
644 /// effects because the newly allocated memory is completely invisible to
645 /// instructions which don't use the returned value. For cases where this
646 /// matters, isSafeToSpeculativelyExecute may be more appropriate.
647 bool mayHaveSideEffects() const;
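These predicates are the raw material for speculation checks like the ones in the LICM code this report analyzes. An over-conservative sketch (isTriviallyHoistable is an illustrative name; real passes use isSafeToSpeculativelyExecute and alias analysis):

#include "llvm/IR/Instruction.h"
using namespace llvm;

// Illustrative and deliberately conservative: a hoisting candidate only if
// it cannot touch memory, throw, or otherwise have side effects.
static bool isTriviallyHoistable(const Instruction &I) {
  return !I.mayReadOrWriteMemory() && !I.mayHaveSideEffects() &&
         !I.isTerminator();
}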
648
649 /// Return true if the instruction can be removed if the result is unused.
650 ///
651  /// When constant folding, some instructions cannot be removed even if their
652 /// results are unused. Specifically terminator instructions and calls that
653 /// may have side effects cannot be removed without semantically changing the
654 /// generated program.
655 bool isSafeToRemove() const;
656
657 /// Return true if the instruction will return (unwinding is considered as
658 /// a form of returning control flow here).
659 bool willReturn() const;
660
661 /// Return true if the instruction is a variety of EH-block.
662 bool isEHPad() const {
663 switch (getOpcode()) {
664 case Instruction::CatchSwitch:
665 case Instruction::CatchPad:
666 case Instruction::CleanupPad:
667 case Instruction::LandingPad:
668 return true;
669 default:
670 return false;
671 }
672 }
673
674 /// Return true if the instruction is a llvm.lifetime.start or
675 /// llvm.lifetime.end marker.
676 bool isLifetimeStartOrEnd() const;
677
678 /// Return true if the instruction is a llvm.launder.invariant.group or
679 /// llvm.strip.invariant.group.
680 bool isLaunderOrStripInvariantGroup() const;
681
682 /// Return true if the instruction is a DbgInfoIntrinsic or PseudoProbeInst.
683 bool isDebugOrPseudoInst() const;
684
685 /// Return a pointer to the next non-debug instruction in the same basic
686 /// block as 'this', or nullptr if no such instruction exists. Skip any pseudo
687 /// operations if \c SkipPseudoOp is true.
688 const Instruction *
689 getNextNonDebugInstruction(bool SkipPseudoOp = false) const;
690 Instruction *getNextNonDebugInstruction(bool SkipPseudoOp = false) {
691 return const_cast<Instruction *>(
692 static_cast<const Instruction *>(this)->getNextNonDebugInstruction(
693 SkipPseudoOp));
694 }
695
696 /// Return a pointer to the previous non-debug instruction in the same basic
697 /// block as 'this', or nullptr if no such instruction exists. Skip any pseudo
698 /// operations if \c SkipPseudoOp is true.
699 const Instruction *
700 getPrevNonDebugInstruction(bool SkipPseudoOp = false) const;
701 Instruction *getPrevNonDebugInstruction(bool SkipPseudoOp = false) {
702 return const_cast<Instruction *>(
703 static_cast<const Instruction *>(this)->getPrevNonDebugInstruction(
704 SkipPseudoOp));
705 }
706
707 /// Create a copy of 'this' instruction that is identical in all ways except
708 /// the following:
709 /// * The instruction has no parent
710 /// * The instruction has no name
711 ///
712 Instruction *clone() const;
713
714 /// Return true if the specified instruction is exactly identical to the
715 /// current one. This means that all operands match and any extra information
716 /// (e.g. load is volatile) agree.
717 bool isIdenticalTo(const Instruction *I) const;
718
719 /// This is like isIdenticalTo, except that it ignores the
720 /// SubclassOptionalData flags, which may specify conditions under which the
721 /// instruction's result is undefined.
722 bool isIdenticalToWhenDefined(const Instruction *I) const;
723
724 /// When checking for operation equivalence (using isSameOperationAs) it is
725 /// sometimes useful to ignore certain attributes.
726 enum OperationEquivalenceFlags {
727 /// Check for equivalence ignoring load/store alignment.
728 CompareIgnoringAlignment = 1<<0,
729 /// Check for equivalence treating a type and a vector of that type
730 /// as equivalent.
731 CompareUsingScalarTypes = 1<<1
732 };
733
734 /// This function determines if the specified instruction executes the same
735 /// operation as the current one. This means that the opcodes, type, operand
736 /// types and any other factors affecting the operation must be the same. This
737 /// is similar to isIdenticalTo except the operands themselves don't have to
738 /// be identical.
739 /// @returns true if the specified instruction is the same operation as
740 /// the current one.
741 /// Determine if one instruction is the same operation as another.
742 bool isSameOperationAs(const Instruction *I, unsigned flags = 0) const;
743
744 /// Return true if there are any uses of this instruction in blocks other than
745 /// the specified block. Note that PHI nodes are considered to evaluate their
746 /// operands in the corresponding predecessor block.
747 bool isUsedOutsideOfBlock(const BasicBlock *BB) const;
748
749 /// Return the number of successors that this instruction has. The instruction
750 /// must be a terminator.
751 unsigned getNumSuccessors() const;
752
753 /// Return the specified successor. This instruction must be a terminator.
754 BasicBlock *getSuccessor(unsigned Idx) const;
755
756 /// Update the specified successor to point at the provided block. This
757 /// instruction must be a terminator.
758 void setSuccessor(unsigned Idx, BasicBlock *BB);
759
760 /// Replace specified successor OldBB to point at the provided block.
761 /// This instruction must be a terminator.
762 void replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB);
763
764 /// Methods for support type inquiry through isa, cast, and dyn_cast:
765 static bool classof(const Value *V) {
766 return V->getValueID() >= Value::InstructionVal;
767 }
768
769 //----------------------------------------------------------------------
770 // Exported enumerations.
771 //
772 enum TermOps { // These terminate basic blocks
773#define FIRST_TERM_INST(N) TermOpsBegin = N,
774#define HANDLE_TERM_INST(N, OPC, CLASS) OPC = N,
775#define LAST_TERM_INST(N) TermOpsEnd = N+1
776#include "llvm/IR/Instruction.def"
777 };
778
779 enum UnaryOps {
780#define FIRST_UNARY_INST(N) UnaryOpsBegin = N,
781#define HANDLE_UNARY_INST(N, OPC, CLASS) OPC = N,
782#define LAST_UNARY_INST(N) UnaryOpsEnd = N+1
783#include "llvm/IR/Instruction.def"
784 };
785
786 enum BinaryOps {
787#define FIRST_BINARY_INST(N) BinaryOpsBegin = N,
788#define HANDLE_BINARY_INST(N, OPC, CLASS) OPC = N,
789#define LAST_BINARY_INST(N) BinaryOpsEnd = N+1
790#include "llvm/IR/Instruction.def"
791 };
792
793 enum MemoryOps {
794#define FIRST_MEMORY_INST(N) MemoryOpsBegin = N,
795#define HANDLE_MEMORY_INST(N, OPC, CLASS) OPC = N,
796#define LAST_MEMORY_INST(N) MemoryOpsEnd = N+1
797#include "llvm/IR/Instruction.def"
798 };
799
800 enum CastOps {
801#define FIRST_CAST_INST(N) CastOpsBegin = N,
802#define HANDLE_CAST_INST(N, OPC, CLASS) OPC = N,
803#define LAST_CAST_INST(N) CastOpsEnd = N+1
804#include "llvm/IR/Instruction.def"
805 };
806
807 enum FuncletPadOps {
808#define FIRST_FUNCLETPAD_INST(N) FuncletPadOpsBegin = N,
809#define HANDLE_FUNCLETPAD_INST(N, OPC, CLASS) OPC = N,
810#define LAST_FUNCLETPAD_INST(N) FuncletPadOpsEnd = N+1
811#include "llvm/IR/Instruction.def"
812 };
813
814 enum OtherOps {
815#define FIRST_OTHER_INST(N) OtherOpsBegin = N,
816#define HANDLE_OTHER_INST(N, OPC, CLASS) OPC = N,
817#define LAST_OTHER_INST(N) OtherOpsEnd = N+1
818#include "llvm/IR/Instruction.def"
819 };
820
821private:
822 friend class SymbolTableListTraits<Instruction>;
823 friend class BasicBlock; // For renumbering.
824
825 // Shadow Value::setValueSubclassData with a private forwarding method so that
826 // subclasses cannot accidentally use it.
827 void setValueSubclassData(unsigned short D) {
828 Value::setValueSubclassData(D);
829 }
830
831 unsigned short getSubclassDataFromValue() const {
832 return Value::getSubclassDataFromValue();
833 }
834
835 void setParent(BasicBlock *P);
836
837protected:
838 // Instruction subclasses can stick up to 15 bits of stuff into the
839 // SubclassData field of instruction with these members.
840
841 template <typename BitfieldElement>
842 typename BitfieldElement::Type getSubclassData() const {
843 static_assert(
844 std::is_same<BitfieldElement, HasMetadataField>::value ||
845 !Bitfield::isOverlapping<BitfieldElement, HasMetadataField>(),
846 "Must not overlap with the metadata bit");
847 return Bitfield::get<BitfieldElement>(getSubclassDataFromValue());
848 }
849
850 template <typename BitfieldElement>
851 void setSubclassData(typename BitfieldElement::Type Value) {
852 static_assert(
853 std::is_same<BitfieldElement, HasMetadataField>::value ||
854 !Bitfield::isOverlapping<BitfieldElement, HasMetadataField>(),
855 "Must not overlap with the metadata bit");
856 auto Storage = getSubclassDataFromValue();
857 Bitfield::set<BitfieldElement>(Storage, Value);
858 setValueSubclassData(Storage);
859 }
860
861 Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
862 Instruction *InsertBefore = nullptr);
863 Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
864 BasicBlock *InsertAtEnd);
865
866private:
867 /// Create a copy of this instruction.
868 Instruction *cloneImpl() const;
869};
870
871inline void ilist_alloc_traits<Instruction>::deleteNode(Instruction *V) {
872 V->deleteValue();
873}
874
875} // end namespace llvm
876
877#endif // LLVM_IR_INSTRUCTION_H

/build/llvm-toolchain-snapshot-15~++20220407100720+1c9415806ba6/llvm/include/llvm/Support/TypeSize.h

1//===- TypeSize.h - Wrapper around type sizes -------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file provides a struct that can be used to query the size of IR types
10// which may be scalable vectors. It provides convenience operators so that
11// it can be used in much the same way as a single scalar value.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_SUPPORT_TYPESIZE_H
16#define LLVM_SUPPORT_TYPESIZE_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/Support/MathExtras.h"
20#include "llvm/Support/raw_ostream.h"
21
22#include <algorithm>
23#include <array>
24#include <cassert>
25#include <cstdint>
26#include <type_traits>
27
28namespace llvm {
29
30/// Reports a diagnostic message to indicate an invalid size request has been
31/// made on a scalable vector. This function may not return.
32void reportInvalidSizeRequest(const char *Msg);
33
34template <typename LeafTy> struct LinearPolyBaseTypeTraits {};
35
36//===----------------------------------------------------------------------===//
37// LinearPolyBase - a base class for linear polynomials with multiple
38// dimensions. This can e.g. be used to describe offsets that have both a
39// fixed and scalable component.
40//===----------------------------------------------------------------------===//
41
42/// LinearPolyBase describes a linear polynomial:
43/// c0 * scale0 + c1 * scale1 + ... + cK * scaleK
44/// where the scale is implicit, so only the coefficients are encoded.
45template <typename LeafTy>
46class LinearPolyBase {
47public:
48 using ScalarTy = typename LinearPolyBaseTypeTraits<LeafTy>::ScalarTy;
49 static constexpr auto Dimensions = LinearPolyBaseTypeTraits<LeafTy>::Dimensions;
50 static_assert(Dimensions != std::numeric_limits<unsigned>::max(),
51 "Dimensions out of range");
52
53private:
54 std::array<ScalarTy, Dimensions> Coefficients;
55
56protected:
57 LinearPolyBase(ArrayRef<ScalarTy> Values) {
58 std::copy(Values.begin(), Values.end(), Coefficients.begin());
59 }
60
61public:
62 friend LeafTy &operator+=(LeafTy &LHS, const LeafTy &RHS) {
63 for (unsigned I=0; I<Dimensions; ++I)
64 LHS.Coefficients[I] += RHS.Coefficients[I];
65 return LHS;
66 }
67
68 friend LeafTy &operator-=(LeafTy &LHS, const LeafTy &RHS) {
69 for (unsigned I=0; I<Dimensions; ++I)
70 LHS.Coefficients[I] -= RHS.Coefficients[I];
71 return LHS;
72 }
73
74 friend LeafTy &operator*=(LeafTy &LHS, ScalarTy RHS) {
75 for (auto &C : LHS.Coefficients)
76 C *= RHS;
77 return LHS;
78 }
79
80 friend LeafTy operator+(const LeafTy &LHS, const LeafTy &RHS) {
81 LeafTy Copy = LHS;
82 return Copy += RHS;
83 }
84
85 friend LeafTy operator-(const LeafTy &LHS, const LeafTy &RHS) {
86 LeafTy Copy = LHS;
87 return Copy -= RHS;
88 }
89
90 friend LeafTy operator*(const LeafTy &LHS, ScalarTy RHS) {
91 LeafTy Copy = LHS;
92 return Copy *= RHS;
93 }
94
95 template <typename U = ScalarTy>
96 friend typename std::enable_if_t<std::is_signed<U>::value, LeafTy>
97 operator-(const LeafTy &LHS) {
98 LeafTy Copy = LHS;
99 return Copy *= -1;
100 }
101
102 bool operator==(const LinearPolyBase &RHS) const {
103 return std::equal(Coefficients.begin(), Coefficients.end(),
104 RHS.Coefficients.begin());
105 }
106
107 bool operator!=(const LinearPolyBase &RHS) const {
108 return !(*this == RHS);
109 }
110
111 bool isZero() const {
112 return all_of(Coefficients, [](const ScalarTy &C) { return C == 0; });
113 }
114 bool isNonZero() const { return !isZero(); }
115 explicit operator bool() const { return isNonZero(); }
116
117 ScalarTy getValue(unsigned Dim) const { return Coefficients[Dim]; }
118};
119
120//===----------------------------------------------------------------------===//
121// StackOffset - Represent an offset with named fixed and scalable components.
122//===----------------------------------------------------------------------===//
123
124class StackOffset;
125template <> struct LinearPolyBaseTypeTraits<StackOffset> {
126 using ScalarTy = int64_t;
127 static constexpr unsigned Dimensions = 2;
128};
129
130/// StackOffset is a class to represent an offset with 2 dimensions,
131/// named fixed and scalable, respectively. This class allows a value for both
132/// dimensions to depict e.g. "8 bytes and 16 scalable bytes", which is needed
133/// to represent stack offsets.
134class StackOffset : public LinearPolyBase<StackOffset> {
135protected:
136 StackOffset(ScalarTy Fixed, ScalarTy Scalable)
137 : LinearPolyBase<StackOffset>({Fixed, Scalable}) {}
138
139public:
140 StackOffset() : StackOffset({0, 0}) {}
141 StackOffset(const LinearPolyBase<StackOffset> &Other)
142 : LinearPolyBase<StackOffset>(Other) {}
143 static StackOffset getFixed(ScalarTy Fixed) { return {Fixed, 0}; }
144 static StackOffset getScalable(ScalarTy Scalable) { return {0, Scalable}; }
145 static StackOffset get(ScalarTy Fixed, ScalarTy Scalable) {
146 return {Fixed, Scalable};
147 }
148
149 ScalarTy getFixed() const { return this->getValue(0); }
150 ScalarTy getScalable() const { return this->getValue(1); }
151};
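A short sketch of the intended arithmetic: fixed and scalable components accumulate independently, matching the "8 bytes and 16 scalable bytes" example in the class comment (exampleFrameOffset is an illustrative name):

#include "llvm/Support/TypeSize.h"
#include <cassert>
using namespace llvm;

// Illustrative: compose and then read back the two components.
static StackOffset exampleFrameOffset() {
  StackOffset Off = StackOffset::getFixed(8) + StackOffset::getScalable(16);
  assert(Off.getFixed() == 8 && Off.getScalable() == 16);
  return Off;
}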
152
153//===----------------------------------------------------------------------===//
154// UnivariateLinearPolyBase - a base class for linear polynomials with multiple
155// dimensions, but where only one dimension can be set at any time.
156// This can e.g. be used to describe sizes that are either fixed or scalable.
157//===----------------------------------------------------------------------===//
158
159/// UnivariateLinearPolyBase is a base class for ElementCount and TypeSize.
160/// Like LinearPolyBase it tries to represent a linear polynomial
161/// where only one dimension can be set at any time, e.g.
162/// 0 * scale0 + 0 * scale1 + ... + cJ * scaleJ + ... + 0 * scaleK
163/// The dimension that is set is the univariate dimension.
164template <typename LeafTy>
165class UnivariateLinearPolyBase {
166public:
167 using ScalarTy = typename LinearPolyBaseTypeTraits<LeafTy>::ScalarTy;
168 static constexpr auto Dimensions = LinearPolyBaseTypeTraits<LeafTy>::Dimensions;
169 static_assert(Dimensions != std::numeric_limits<unsigned>::max(),
170 "Dimensions out of range");
171
172protected:
173  ScalarTy Value;         // The value at the univariate dimension.
174  unsigned UnivariateDim; // The univariate dimension.
175
176 UnivariateLinearPolyBase(ScalarTy Val, unsigned UnivariateDim)
177 : Value(Val), UnivariateDim(UnivariateDim) {
178    assert(UnivariateDim < Dimensions && "Dimension out of range");
179 }
180
181 friend LeafTy &operator+=(LeafTy &LHS, const LeafTy &RHS) {
182    assert(LHS.UnivariateDim == RHS.UnivariateDim && "Invalid dimensions");
183 LHS.Value += RHS.Value;
184 return LHS;
185 }
186
187 friend LeafTy &operator-=(LeafTy &LHS, const LeafTy &RHS) {
188    assert(LHS.UnivariateDim == RHS.UnivariateDim && "Invalid dimensions");
189 LHS.Value -= RHS.Value;
190 return LHS;
191 }
192
193 friend LeafTy &operator*=(LeafTy &LHS, ScalarTy RHS) {
194 LHS.Value *= RHS;
195 return LHS;
196 }
197
198 friend LeafTy operator+(const LeafTy &LHS, const LeafTy &RHS) {
199 LeafTy Copy = LHS;
200 return Copy += RHS;
201 }
202
203 friend LeafTy operator-(const LeafTy &LHS, const LeafTy &RHS) {
204 LeafTy Copy = LHS;
205 return Copy -= RHS;
206 }
207
208 friend LeafTy operator*(const LeafTy &LHS, ScalarTy RHS) {
209 LeafTy Copy = LHS;
210 return Copy *= RHS;
211 }
212
213 template <typename U = ScalarTy>
214 friend typename std::enable_if<std::is_signed<U>::value, LeafTy>::type
215 operator-(const LeafTy &LHS) {
216 LeafTy Copy = LHS;
217 return Copy *= -1;
218 }
219
220public:
221 bool operator==(const UnivariateLinearPolyBase &RHS) const {
222 return Value == RHS.Value && UnivariateDim == RHS.UnivariateDim;
223 }
224
225 bool operator!=(const UnivariateLinearPolyBase &RHS) const {
226 return !(*this == RHS);
227 }
228
229 bool isZero() const { return !Value; }
230 bool isNonZero() const { return !isZero(); }
231 explicit operator bool() const { return isNonZero(); }
232 ScalarTy getValue(unsigned Dim) const {
233 return Dim == UnivariateDim ? Value : 0;
234 }
235
236 /// Add \p RHS to the value at the univariate dimension.
237 LeafTy getWithIncrement(ScalarTy RHS) const {
238 return static_cast<LeafTy>(
239 UnivariateLinearPolyBase(Value + RHS, UnivariateDim));
240 }
241
242 /// Subtract \p RHS from the value at the univariate dimension.
243 LeafTy getWithDecrement(ScalarTy RHS) const {
244 return static_cast<LeafTy>(
245 UnivariateLinearPolyBase(Value - RHS, UnivariateDim));
246 }
247};
248
249
250//===----------------------------------------------------------------------===//
251// LinearPolySize - base class for fixed- or scalable sizes.
252// ^ ^
253// | |
254// | +----- ElementCount - Leaf class to represent an element count
255// | (vscale x unsigned)
256// |
257// +-------- TypeSize - Leaf class to represent a type size
258// (vscale x uint64_t)
259//===----------------------------------------------------------------------===//
260
261/// LinearPolySize is a base class to represent sizes. It is either
262/// fixed-sized or it is scalable-sized, but it cannot be both.
263template <typename LeafTy>
264class LinearPolySize : public UnivariateLinearPolyBase<LeafTy> {
265 // Make the parent class a friend, so that it can access the protected
266 // conversion/copy-constructor for UnivariatePolyBase<LeafTy> ->
267 // LinearPolySize<LeafTy>.
268 friend class UnivariateLinearPolyBase<LeafTy>;
269
270public:
271 using ScalarTy = typename UnivariateLinearPolyBase<LeafTy>::ScalarTy;
272 enum Dims : unsigned { FixedDim = 0, ScalableDim = 1 };
273
274protected:
275 LinearPolySize(ScalarTy MinVal, Dims D)
276 : UnivariateLinearPolyBase<LeafTy>(MinVal, D) {}
277
278 LinearPolySize(const UnivariateLinearPolyBase<LeafTy> &V)
279 : UnivariateLinearPolyBase<LeafTy>(V) {}
280
281public:
282
283 static LeafTy getFixed(ScalarTy MinVal) {
284 return static_cast<LeafTy>(LinearPolySize(MinVal, FixedDim));
285 }
286 static LeafTy getScalable(ScalarTy MinVal) {
287 return static_cast<LeafTy>(LinearPolySize(MinVal, ScalableDim));
288 }
289 static LeafTy get(ScalarTy MinVal, bool Scalable) {
290 return static_cast<LeafTy>(
291 LinearPolySize(MinVal, Scalable ? ScalableDim : FixedDim));
292 }
293 static LeafTy getNull() { return get(0, false); }
294
295 /// Returns the minimum value this size can represent.
296 ScalarTy getKnownMinValue() const { return this->Value; }
297 /// Returns whether the size is scaled by a runtime quantity (vscale).
298 bool isScalable() const { return this->UnivariateDim == ScalableDim; }
30: Assuming field 'UnivariateDim' is equal to ScalableDim
31: Returning the value 1, which participates in a condition later
299 /// A return value of true indicates we know at compile time that the number
300 /// of elements (vscale * Min) is definitely even. However, returning false
301 /// does not guarantee that the total number of elements is odd.
302 bool isKnownEven() const { return (getKnownMinValue() & 0x1) == 0; }
303 /// This function tells the caller whether the element count is known at
304 /// compile time to be a multiple of the scalar value RHS.
305 bool isKnownMultipleOf(ScalarTy RHS) const {
306 return getKnownMinValue() % RHS == 0;
307 }
308
309 // Return the minimum value with the assumption that the count is exact.
310 // Use in places where a scalable count doesn't make sense (e.g. non-vector
311 // types, or vectors in backends which don't support scalable vectors).
312 ScalarTy getFixedValue() const {
313    assert(!isScalable() &&
314           "Request for a fixed element count on a scalable object");
315 return getKnownMinValue();
316 }
317
318  // In some cases the size ordering between scalable and fixed-size types
319  // cannot be determined at compile time, so such comparisons aren't allowed.
320 //
321 // e.g. <vscale x 2 x i16> could be bigger than <4 x i32> with a runtime
322 // vscale >= 5, equal sized with a vscale of 4, and smaller with
323 // a vscale <= 3.
324 //
325  // All the functions below rely on the fact that vscale is always >= 1, which
326 // means that <vscale x 4 x i32> is guaranteed to be >= <4 x i32>, etc.
327
328 static bool isKnownLT(const LinearPolySize &LHS, const LinearPolySize &RHS) {
329 if (!LHS.isScalable() || RHS.isScalable())
330 return LHS.getKnownMinValue() < RHS.getKnownMinValue();
331 return false;
332 }
333
334 static bool isKnownGT(const LinearPolySize &LHS, const LinearPolySize &RHS) {
335 if (LHS.isScalable() || !RHS.isScalable())
336 return LHS.getKnownMinValue() > RHS.getKnownMinValue();
337 return false;
338 }
339
340 static bool isKnownLE(const LinearPolySize &LHS, const LinearPolySize &RHS) {
341 if (!LHS.isScalable() || RHS.isScalable())
342 return LHS.getKnownMinValue() <= RHS.getKnownMinValue();
343 return false;
344 }
345
346 static bool isKnownGE(const LinearPolySize &LHS, const LinearPolySize &RHS) {
347 if (LHS.isScalable() || !RHS.isScalable())
348 return LHS.getKnownMinValue() >= RHS.getKnownMinValue();
349 return false;
350 }
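A sketch of the comparison helpers with hypothetical sizes: an ordering is only reported when it holds for every vscale >= 1; otherwise the helpers conservatively return false:

    TypeSize F16 = TypeSize::Fixed(16);
    TypeSize S16 = TypeSize::Scalable(16);
    bool A = TypeSize::isKnownLE(F16, S16); // true: 16 <= vscale * 16 always
    bool B = TypeSize::isKnownLT(S16, TypeSize::Fixed(64)); // false: vscale
                                                            // may be >= 4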
351
352 /// We do not provide the '/' operator here because division for polynomial
353 /// types does not work in the same way as for normal integer types. We can
354 /// only divide the minimum value (or coefficient) by RHS, which is not the
355 /// same as
356  ///   (Min * Vscale) / RHS.
357  /// Callers should use this function in combination with
358  /// isKnownMultipleOf(RHS), which tells them whether it is possible to
359  /// perform a lossless divide by RHS.
360 LeafTy divideCoefficientBy(ScalarTy RHS) const {
361 return static_cast<LeafTy>(
362 LinearPolySize::get(getKnownMinValue() / RHS, isScalable()));
363 }
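The recommended pattern, sketched with a hypothetical count (halving an element count only when the divide is known to be lossless):

    ElementCount EC = ElementCount::getScalable(8);  // vscale x 8
    if (EC.isKnownMultipleOf(2)) {
      ElementCount Half = EC.divideCoefficientBy(2); // vscale x 4
      (void)Half;
    }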
364
365 LeafTy coefficientNextPowerOf2() const {
366 return static_cast<LeafTy>(LinearPolySize::get(
367 static_cast<ScalarTy>(llvm::NextPowerOf2(getKnownMinValue())),
368 isScalable()));
369 }
370
371 /// Printing function.
372 void print(raw_ostream &OS) const {
373 if (isScalable())
374 OS << "vscale x ";
375 OS << getKnownMinValue();
376 }
377};
378
379class ElementCount;
380template <> struct LinearPolyBaseTypeTraits<ElementCount> {
381 using ScalarTy = unsigned;
382 static constexpr unsigned Dimensions = 2;
383};
384
385class ElementCount : public LinearPolySize<ElementCount> {
386public:
387 ElementCount() : LinearPolySize(LinearPolySize::getNull()) {}
388
389 ElementCount(const LinearPolySize<ElementCount> &V) : LinearPolySize(V) {}
390
391 /// Counting predicates.
392 ///
393  ///@{ Number of elements.
394 /// Exactly one element.
395 bool isScalar() const { return !isScalable() && getKnownMinValue() == 1; }
396 /// One or more elements.
397 bool isVector() const {
398 return (isScalable() && getKnownMinValue() != 0) || getKnownMinValue() > 1;
399 }
400 ///@}
401};
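Illustrative uses of the counting predicates (not part of the header):

    ElementCount::getFixed(1).isScalar();    // true: exactly one element
    ElementCount::getFixed(4).isVector();    // true: more than one element
    ElementCount::getScalable(1).isVector(); // true: vscale x 1 is a vector
                                             // count even when vscale == 1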
402
403
404class TypeSize;
405template <> struct LinearPolyBaseTypeTraits<TypeSize> {
406 using ScalarTy = uint64_t;
407 static constexpr unsigned Dimensions = 2;
408};
409
410// TODO: Most functionality in this class will gradually be phased out
411// so it will resemble LinearPolySize as much as possible.
412//
413// TypeSize is used to represent the size of types. If the type is of fixed
414// size, it will represent the exact size. If the type is a scalable vector,
415// it will represent the known minimum size.
416class TypeSize : public LinearPolySize<TypeSize> {
417public:
418 TypeSize(const LinearPolySize<TypeSize> &V) : LinearPolySize(V) {}
419 TypeSize(ScalarTy MinVal, bool IsScalable)
420 : LinearPolySize(LinearPolySize::get(MinVal, IsScalable)) {}
421
422 static TypeSize Fixed(ScalarTy MinVal) { return TypeSize(MinVal, false); }
423 static TypeSize Scalable(ScalarTy MinVal) { return TypeSize(MinVal, true); }
424
425 ScalarTy getFixedSize() const { return getFixedValue(); }
426 ScalarTy getKnownMinSize() const { return getKnownMinValue(); }
427
428 // All code for this class below this point is needed because of the
429 // temporary implicit conversion to uint64_t. The operator overloads are
430 // needed because otherwise the conversion of the parent class
431 // UnivariateLinearPolyBase -> TypeSize is ambiguous.
432 // TODO: Remove the implicit conversion.
433
434 // Casts to a uint64_t if this is a fixed-width size.
435 //
436 // This interface is deprecated and will be removed in a future version
437 // of LLVM in favour of upgrading uses that rely on this implicit conversion
438 // to uint64_t. Calls to functions that return a TypeSize should use the
439 // proper interfaces to TypeSize.
440 // In practice this is mostly calls to MVT/EVT::getSizeInBits().
441 //
442 // To determine how to upgrade the code:
443 //
444 // if (<algorithm works for both scalable and fixed-width vectors>)
445 // use getKnownMinValue()
446 // else if (<algorithm works only for fixed-width vectors>) {
447 // if <algorithm can be adapted for both scalable and fixed-width vectors>
448 // update the algorithm and use getKnownMinValue()
449 // else
450 // bail out early for scalable vectors and use getFixedValue()
451 // }
452 operator ScalarTy() const;
453
454 // Additional operators needed to avoid ambiguous parses
455 // because of the implicit conversion hack.
456 friend TypeSize operator*(const TypeSize &LHS, const int RHS) {
457 return LHS * (ScalarTy)RHS;
458 }
459 friend TypeSize operator*(const TypeSize &LHS, const unsigned RHS) {
460 return LHS * (ScalarTy)RHS;
461 }
462 friend TypeSize operator*(const TypeSize &LHS, const int64_t RHS) {
463 return LHS * (ScalarTy)RHS;
464 }
465 friend TypeSize operator*(const int LHS, const TypeSize &RHS) {
466 return RHS * LHS;
467 }
468 friend TypeSize operator*(const unsigned LHS, const TypeSize &RHS) {
469 return RHS * LHS;
470 }
471 friend TypeSize operator*(const int64_t LHS, const TypeSize &RHS) {
472 return RHS * LHS;
473 }
474 friend TypeSize operator*(const uint64_t LHS, const TypeSize &RHS) {
475 return RHS * LHS;
476 }
477};
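A sketch of the upgrade recipe from the comment above, using a hypothetical helper that bails out early for scalable vectors (the helper name is illustrative, not an LLVM API):

    // Returns the exact size in the fixed-width case, or 0 to signal
    // "unknown at compile time" for scalable types.
    uint64_t getExactSizeOrZero(TypeSize TS) {
      if (TS.isScalable())
        return 0;                // bail out early for scalable vectors
      return TS.getFixedValue(); // safe: asserts !isScalable() internally
    }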
478
479//===----------------------------------------------------------------------===//
480// Utilities
481//===----------------------------------------------------------------------===//
482
483/// Returns a TypeSize with a known minimum size that is the next integer
484/// (mod 2**64) that is greater than or equal to \p Size and is a multiple
485/// of \p Align. \p Align must be non-zero.
486///
487/// Similar to the alignTo functions in MathExtras.h
488inline TypeSize alignTo(TypeSize Size, uint64_t Align) {
489  assert(Align != 0u && "Align must be non-zero");
490 return {(Size.getKnownMinValue() + Align - 1) / Align * Align,
491 Size.isScalable()};
492}
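For example (hypothetical sizes), alignment applies to the coefficient and preserves scalability:

    TypeSize A = alignTo(TypeSize::Fixed(10), 8);    // fixed 16
    TypeSize B = alignTo(TypeSize::Scalable(10), 8); // vscale x 16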
493
494/// Stream operator function for `LinearPolySize`.
495template <typename LeafTy>
496inline raw_ostream &operator<<(raw_ostream &OS,
497 const LinearPolySize<LeafTy> &PS) {
498 PS.print(OS);
499 return OS;
500}
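A small sketch of the printing path (illustrative):

    ElementCount EC = ElementCount::getScalable(4);
    llvm::errs() << EC; // prints "vscale x 4"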
501
502template <> struct DenseMapInfo<ElementCount, void> {
503 static inline ElementCount getEmptyKey() {
504 return ElementCount::getScalable(~0U);
505 }
506 static inline ElementCount getTombstoneKey() {
507 return ElementCount::getFixed(~0U - 1);
508 }
509 static unsigned getHashValue(const ElementCount &EltCnt) {
510 unsigned HashVal = EltCnt.getKnownMinValue() * 37U;
511 if (EltCnt.isScalable())
512 return (HashVal - 1U);
513
514 return HashVal;
515 }
516
517 static bool isEqual(const ElementCount &LHS, const ElementCount &RHS) {
518 return LHS == RHS;
519 }
520};
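This specialization is what allows ElementCount to key a DenseMap; a hypothetical cost table, for illustration:

    DenseMap<ElementCount, unsigned> CostTable;
    CostTable[ElementCount::getFixed(4)] = 1;
    CostTable[ElementCount::getScalable(4)] = 2; // distinct key from the
                                                 // fixed one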
521
522} // end namespace llvm
523
524#endif // LLVM_SUPPORT_TYPESIZE_H