File: llvm/lib/Transforms/Scalar/LICM.cpp
Warning: line 1142, column 33: Called C++ object pointer is null
//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe. This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops. If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop. This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer. There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable. We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
//
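// As a purely illustrative sketch of scalar promotion, a loop that
// repeatedly updates memory through a loop-invariant pointer p (with no
// other may-aliasing accesses in the loop):
//
//     for (i = 0; i < n; ++i)
//       *p += i;
//
// is conceptually rewritten so the value lives in a register for the
// duration of the loop:
//
//     tmp = *p;
//     for (i = 0; i < n; ++i)
//       tmp += i;
//     *p = tmp;
//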
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/LazyBlockFrequencyInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "licm"

STATISTIC(NumCreatedBlocks, "Number of blocks created");
STATISTIC(NumClonedBranches, "Number of branches cloned");
STATISTIC(NumSunk, "Number of instructions sunk out of loop");
STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromoted, "Number of memory locations promoted to registers");

/// Memory promotion is enabled by default.
static cl::opt<bool>
    DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
                     cl::desc("Disable memory promotion in LICM pass"));

static cl::opt<bool> ControlFlowHoisting(
    "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
    cl::desc("Enable control flow (and PHI) hoisting in LICM"));

static cl::opt<unsigned> HoistSinkColdnessThreshold(
    "licm-coldness-threshold", cl::Hidden, cl::init(4),
    cl::desc("Relative coldness threshold of the hoisting/sinking destination "
             "block for LICM to be considered beneficial"));

static cl::opt<uint32_t> MaxNumUsesTraversed(
    "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
    cl::desc("Max num uses visited for identifying load "
             "invariance in loop using invariant start (default = 8)"));

// A default value of zero implies that we use the regular alias set tracker
// mechanism instead of the cross product using AA to identify aliasing of the
// memory location we are interested in.
static cl::opt<int>
    LICMN2Theshold("licm-n2-threshold", cl::Hidden, cl::init(0),
                   cl::desc("How many instructions to cross product using AA"));

// Experimental option to allow imprecision in LICM in pathological cases, in
// exchange for faster compile. This is to be removed if MemorySSA starts to
// address the same issue. This flag applies only when LICM uses MemorySSA
// instead of AliasSetTracker. LICM calls MemorySSAWalker's
// getClobberingMemoryAccess, up to the value of the Cap, getting perfect
// accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
// which may not be precise, since optimizeUses is capped. The result is
// correct, but we may not get as "far up" as possible to get which access is
// clobbering the one queried.
cl::opt<unsigned> llvm::SetLicmMssaOptCap(
    "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
    cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));

// Experimentally, memory promotion carries less importance than sinking and
// hoisting. Limit when we do promotion when using MemorySSA, in order to save
// compile time.
cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
    "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
    cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
             "effect. When MSSA in LICM is enabled, then this is the maximum "
             "number of accesses allowed to be present in a loop in order to "
             "enable memory promotion."));

static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop);
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE);
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 BlockFrequencyInfo *BFI, const Loop *CurLoop,
                 ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU,
                 OptimizationRemarkEmitter *ORE);
static bool isSafeToExecuteUnconditionally(Instruction &Inst,
                                           const DominatorTree *DT,
                                           const Loop *CurLoop,
                                           const LoopSafetyInfo *SafetyInfo,
                                           OptimizationRemarkEmitter *ORE,
                                           const Instruction *CtxI = nullptr);
static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
                                     AliasSetTracker *CurAST, Loop *CurLoop,
                                     AAResults *AA);
static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
                                             Loop *CurLoop,
                                             SinkAndHoistLICMFlags &Flags);
static Instruction *cloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             AliasSetTracker *AST, MemorySSAUpdater *MSSAU);

static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE);

namespace {
struct LoopInvariantCodeMotion {
  bool runOnLoop(Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
                 BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI,
                 TargetTransformInfo *TTI, ScalarEvolution *SE, MemorySSA *MSSA,
                 OptimizationRemarkEmitter *ORE);

  LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
                          unsigned LicmMssaNoAccForPromotionCap)
      : LicmMssaOptCap(LicmMssaOptCap),
        LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap) {}

private:
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;

  std::unique_ptr<AliasSetTracker>
  collectAliasInfoForLoop(Loop *L, LoopInfo *LI, AAResults *AA);
  std::unique_ptr<AliasSetTracker>
  collectAliasInfoForLoopWithMSSA(Loop *L, AAResults *AA,
                                  MemorySSAUpdater *MSSAU);
};

struct LegacyLICMPass : public LoopPass {
  static char ID; // Pass identification, replacement for typeid
  LegacyLICMPass(
      unsigned LicmMssaOptCap = SetLicmMssaOptCap,
      unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap)
      : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap) {
    initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L))
      return false;

    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
    MemorySSA *MSSA = EnableMSSALoopDependency
                          ? (&getAnalysis<MemorySSAWrapperPass>().getMSSA())
                          : nullptr;
    bool hasProfileData = L->getHeader()->getParent()->hasProfileData();
    BlockFrequencyInfo *BFI =
        hasProfileData ? &getAnalysis<LazyBlockFrequencyInfoPass>().getBFI()
                       : nullptr;
    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass. Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
    return LICM.runOnLoop(
        L, &getAnalysis<AAResultsWrapperPass>().getAAResults(),
        &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
        &getAnalysis<DominatorTreeWrapperPass>().getDomTree(), BFI,
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
            *L->getHeader()->getParent()),
        &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
            *L->getHeader()->getParent()),
        SE ? &SE->getSE() : nullptr, MSSA, &ORE);
  }

  /// This transformation requires natural loop information and requires that
  /// loop preheaders be inserted into the CFG.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    if (EnableMSSALoopDependency) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
    LazyBlockFrequencyInfoPass::getLazyBFIAnalysisUsage(AU);
    AU.addPreserved<LazyBlockFrequencyInfoPass>();
    AU.addPreserved<LazyBranchProbabilityInfoPass>();
  }

private:
  LoopInvariantCodeMotion LICM;
};
} // namespace

PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
  // For the new PM, we also can't use OptimizationRemarkEmitter as an analysis
  // pass. Function analyses need to be preserved across loop transformations
  // but ORE cannot be preserved (see comment before the pass definition).
  OptimizationRemarkEmitter ORE(L.getHeader()->getParent());

  LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
  if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, AR.BFI, &AR.TLI, &AR.TTI,
                      &AR.SE, AR.MSSA, &ORE))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();

  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();

  return PA;
}

char LegacyLICMPass::ID = 0;
INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LazyBFIPass)
INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
                    false)

Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
                           unsigned LicmMssaNoAccForPromotionCap) {
  return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
}
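
// As a usage sketch (assuming a legacy PassManager named PM, which is not
// part of this file), the factory above lets clients override both caps:
//   PM.add(createLICMPass(/*LicmMssaOptCap=*/200,
//                         /*LicmMssaNoAccForPromotionCap=*/500));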

/// Hoist expressions out of the specified loop. Note that alias info for the
/// inner loop is not preserved, so it is not a good idea to run LICM multiple
/// times on one loop.
bool LoopInvariantCodeMotion::runOnLoop(
    Loop *L, AAResults *AA, LoopInfo *LI, DominatorTree *DT,
    BlockFrequencyInfo *BFI, TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
    ScalarEvolution *SE, MemorySSA *MSSA, OptimizationRemarkEmitter *ORE) {
  bool Changed = false;

  assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");

  // If this loop has metadata indicating that LICM is not to be performed,
  // just exit.
  if (hasDisableLICMTransformsHint(L)) {
    return false;
  }

  std::unique_ptr<AliasSetTracker> CurAST;
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  bool NoOfMemAccTooLarge = false;
  unsigned LicmMssaOptCounter = 0;

  if (!MSSA) {
    LLVM_DEBUG(dbgs() << "LICM: Using Alias Set Tracker.\n");
    CurAST = collectAliasInfoForLoop(L, LI, AA);
  } else {
    LLVM_DEBUG(dbgs() << "LICM: Using MemorySSA.\n");
    MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);

    unsigned AccessCapCount = 0;
    for (auto *BB : L->getBlocks()) {
      if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
        for (const auto &MA : *Accesses) {
          (void)MA;
          AccessCapCount++;
          if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
            NoOfMemAccTooLarge = true;
            break;
          }
        }
      }
      if (NoOfMemAccTooLarge)
        break;
    }
  }

  // Get the preheader block to move instructions into.
  BasicBlock *Preheader = L->getLoopPreheader();

  // Compute loop safety information.
  ICFLoopSafetyInfo SafetyInfo;
  SafetyInfo.computeLoopSafetyInfo(L);

  // We want to visit all of the instructions in this loop that are not part
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree
  // so that we are guaranteed to see definitions before we see uses. This
  // allows us to sink instructions in one pass, without iteration. After
  // sinking instructions, we perform another pass to hoist them out of the
  // loop.
  SinkAndHoistLICMFlags Flags = {NoOfMemAccTooLarge, LicmMssaOptCounter,
                                 LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                                 /*IsSink=*/true};
  if (L->hasDedicatedExits())
    Changed |=
        sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, TTI, L,
                   CurAST.get(), MSSAU.get(), &SafetyInfo, Flags, ORE);
  Flags.IsSink = false;
  if (Preheader)
    Changed |=
        hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, BFI, TLI, L,
                    CurAST.get(), MSSAU.get(), SE, &SafetyInfo, Flags, ORE);

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  // Don't sink stores from loops without dedicated block exits. Exits
  // containing indirect branches are not transformed by loop simplify, so
  // make sure we catch that. An additional load may be generated in the
  // preheader for the SSA updater, so also avoid sinking when no preheader
  // is available.
  if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
      !NoOfMemAccTooLarge) {
    // Figure out the loop exits and their insertion points.
    SmallVector<BasicBlock *, 8> ExitBlocks;
    L->getUniqueExitBlocks(ExitBlocks);

    // We can't insert into a catchswitch.
    bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
      return isa<CatchSwitchInst>(Exit->getTerminator());
    });

    if (!HasCatchSwitch) {
      SmallVector<Instruction *, 8> InsertPts;
      SmallVector<MemoryAccess *, 8> MSSAInsertPts;
      InsertPts.reserve(ExitBlocks.size());
      if (MSSAU)
        MSSAInsertPts.reserve(ExitBlocks.size());
      for (BasicBlock *ExitBlock : ExitBlocks) {
        InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
        if (MSSAU)
          MSSAInsertPts.push_back(nullptr);
      }

      PredIteratorCache PIC;

      bool Promoted = false;

      // Build an AST using MSSA.
      if (!CurAST.get())
        CurAST = collectAliasInfoForLoopWithMSSA(L, AA, MSSAU.get());

      // Loop over all of the alias sets in the tracker object.
      for (AliasSet &AS : *CurAST) {
        // We can promote this alias set if it has a store, if it is a "Must"
        // alias set, if the pointer is loop invariant, and if we are not
        // eliminating any volatile loads or stores.
        if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
            !L->isLoopInvariant(AS.begin()->getValue()))
          continue;

        assert(
            !AS.empty() &&
            "Must alias set should have at least one pointer element in it!");

        SmallSetVector<Value *, 8> PointerMustAliases;
        for (const auto &ASI : AS)
          PointerMustAliases.insert(ASI.getValue());

        Promoted |= promoteLoopAccessesToScalars(
            PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
            DT, TLI, L, CurAST.get(), MSSAU.get(), &SafetyInfo, ORE);
      }

      // Once we have promoted values across the loop body, we have to
      // recursively reform LCSSA as any nested loop may now have values
      // defined within the loop used in the outer loop.
      // FIXME: This is really heavy-handed. It would be a bit better to use an
      // SSAUpdater strategy during promotion that was LCSSA aware and reformed
      // it as it went.
      if (Promoted)
        formLCSSARecursively(*L, *DT, LI, SE);

      Changed |= Promoted;
    }
  }

  // Check that neither this loop nor its parent have had LCSSA broken. LICM is
  // specifically moving instructions across the loop boundary and so it is
  // especially in need of sanity checking here.
  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
  assert((L->isOutermost() || L->getParentLoop()->isLCSSAForm(*DT)) &&
         "Parent loop not left in LCSSA form after LICM!");

  if (MSSAU.get() && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  if (Changed && SE)
    SE->forgetLoopDispositions(L);
  return Changed;
}

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree. This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
///
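/// As an illustrative sketch, given a loop body containing
///   %a = add i64 %inv, 1   ; only used by %b
///   %b = mul i64 %a, 2     ; only used outside the loop
/// the bottom-up walk visits %b before %a: once %b has been sunk, %a no
/// longer has any users inside the loop, so it can be sunk in the same pass.
///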
bool llvm::sinkRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                      DominatorTree *DT, BlockFrequencyInfo *BFI,
                      TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
                      Loop *CurLoop, AliasSetTracker *CurAST,
                      MemorySSAUpdater *MSSAU, ICFLoopSafetyInfo *SafetyInfo,
                      SinkAndHoistLICMFlags &Flags,
                      OptimizationRemarkEmitter *ORE) {

  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to sinkRegion.");
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");

  // We want to visit children before parents. We will enqueue all the parents
  // before their children in the worklist and process the worklist in reverse
  // order.
  SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);

  bool Changed = false;
  for (DomTreeNode *DTN : reverse(Worklist)) {
    BasicBlock *BB = DTN->getBlock();
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
      Instruction &I = *--II;

      // If the instruction is dead, we would try to sink it because it isn't
      // used in the loop; instead, just delete it.
      if (isInstructionTriviallyDead(&I, TLI)) {
        LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
        salvageKnowledge(&I);
        salvageDebugInfo(I);
        ++II;
        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
        Changed = true;
        continue;
      }

      // Check to see if we can sink this instruction to the exit blocks
      // of the loop. We can do this if all of the users of the instruction are
      // outside of the loop. In this case, it doesn't even matter if the
      // operands of the instruction are loop invariant.
      //
      bool FreeInLoop = false;
      if (!I.mayHaveSideEffects() &&
          isNotUsedOrFreeInLoop(I, CurLoop, SafetyInfo, TTI, FreeInLoop) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
                             ORE)) {
        if (sink(I, LI, DT, BFI, CurLoop, SafetyInfo, MSSAU, ORE)) {
          if (!FreeInLoop) {
            ++II;
            salvageDebugInfo(I);
            eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
          }
          Changed = true;
        }
      }
    }
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  return Changed;
}

namespace {
// This is a helper class for hoistRegion to make it able to hoist control flow
// in order to be able to hoist phis. The way this works is that we initially
// start hoisting to the loop preheader, and when we see a loop invariant branch
// we make note of it. When we then come to hoist an instruction that is
// conditional on such a branch, we duplicate the branch and the relevant
// control flow, then hoist the instruction into the block corresponding to its
// original block in the duplicated control flow.
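//
// As an illustrative sketch: for a loop-invariant branch
//   br i1 %inv, label %T, label %F
// where %T and %F rejoin at %J, the %T/%F/%J diamond is replicated above the
// loop, and an instruction hoisted out of %T lands in the cloned copy of %T
// rather than directly in the preheader, so it still only executes when %inv
// is true.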
class ControlFlowHoister {
private:
  // Information about the loop we are hoisting from.
  LoopInfo *LI;
  DominatorTree *DT;
  Loop *CurLoop;
  MemorySSAUpdater *MSSAU;

  // A map of blocks in the loop to the block their instructions will be
  // hoisted to.
  DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;

  // The branches that we can hoist, mapped to the block that marks a
  // convergence point of their control flow.
  DenseMap<BranchInst *, BasicBlock *> HoistableBranches;

public:
  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
                     MemorySSAUpdater *MSSAU)
      : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}

  void registerPossiblyHoistableBranch(BranchInst *BI) {
    // We can only hoist conditional branches with loop invariant operands.
    if (!ControlFlowHoisting || !BI->isConditional() ||
        !CurLoop->hasLoopInvariantOperands(BI))
      return;

    // The branch destinations need to be in the loop, and we don't gain
    // anything by duplicating conditional branches with duplicate successors,
    // as that's essentially the same as an unconditional branch.
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
        TrueDest == FalseDest)
      return;

    // We can hoist BI if one branch destination is the successor of the other,
    // or if both have a common successor, which we check by seeing if the
    // intersection of their successors is non-empty.
    // TODO: This could be expanded to allowing branches where both ends
    // eventually converge to a single block.
    SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
    TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
    FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
    BasicBlock *CommonSucc = nullptr;
    if (TrueDestSucc.count(FalseDest)) {
      CommonSucc = FalseDest;
    } else if (FalseDestSucc.count(TrueDest)) {
      CommonSucc = TrueDest;
    } else {
      set_intersect(TrueDestSucc, FalseDestSucc);
      // If there's one common successor, use that.
      if (TrueDestSucc.size() == 1)
        CommonSucc = *TrueDestSucc.begin();
      // If there's more than one, pick whichever appears first in the block
      // list (we can't use the value returned by TrueDestSucc.begin() as it's
      // unpredictable which element gets returned).
      else if (!TrueDestSucc.empty()) {
        Function *F = TrueDest->getParent();
        auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
        auto It = std::find_if(F->begin(), F->end(), IsSucc);
        assert(It != F->end() && "Could not find successor in function");
        CommonSucc = &*It;
      }
    }
    // The common successor has to be dominated by the branch, as otherwise
    // there will be some other path to the successor that will not be
    // controlled by this branch, so any phi we hoist would be controlled by
    // the wrong condition. This also takes care of avoiding hoisting of loop
    // back edges.
    // TODO: In some cases this could be relaxed if the successor is dominated
    // by another block that's been hoisted and we can guarantee that the
    // control flow has been replicated exactly.
    if (CommonSucc && DT->dominates(BI, CommonSucc))
      HoistableBranches[BI] = CommonSucc;
  }

  bool canHoistPHI(PHINode *PN) {
    // The phi must have loop invariant operands.
    if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
      return false;
    // We can hoist phis if the block they are in is the target of hoistable
    // branches which cover all of the predecessors of the block.
    SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
    BasicBlock *BB = PN->getParent();
    for (BasicBlock *PredBB : predecessors(BB))
      PredecessorBlocks.insert(PredBB);
    // If we have fewer predecessor blocks than predecessors then the phi will
    // have more than one incoming value for the same block, which we can't
    // handle.
    // TODO: This could be handled by erasing some of the duplicate incoming
    // values.
    if (PredecessorBlocks.size() != pred_size(BB))
      return false;
    for (auto &Pair : HoistableBranches) {
      if (Pair.second == BB) {
        // Which blocks are predecessors via this branch depends on whether the
        // branch is triangle-like or diamond-like.
        if (Pair.first->getSuccessor(0) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        } else if (Pair.first->getSuccessor(1) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
        } else {
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        }
      }
    }
    // PredecessorBlocks will now be empty if for every predecessor of BB we
    // found a hoistable branch source.
    return PredecessorBlocks.empty();
  }
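
  // As an illustrative sketch of the diamond case above: if a hoistable
  // branch in %P targets %T and %F, which both rejoin at %BB, then the phi in
  // %BB has incoming blocks {%T, %F}. Both are successors of that branch, so
  // both get erased, PredecessorBlocks ends up empty, and the phi is
  // hoistable.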

  BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
    if (!ControlFlowHoisting)
      return CurLoop->getLoopPreheader();
    // If BB has already been hoisted, return that.
    if (HoistDestinationMap.count(BB))
      return HoistDestinationMap[BB];

    // Check if this block is conditional based on a pending branch.
    auto HasBBAsSuccessor =
        [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
          return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
                                       Pair.first->getSuccessor(1) == BB);
        };
    auto It = std::find_if(HoistableBranches.begin(), HoistableBranches.end(),
                           HasBBAsSuccessor);

    // If not involved in a pending branch, hoist to preheader.
    BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
    if (It == HoistableBranches.end()) {
      LLVM_DEBUG(dbgs() << "LICM using " << InitialPreheader->getName()
                        << " as hoist destination for " << BB->getName()
                        << "\n");
      HoistDestinationMap[BB] = InitialPreheader;
      return InitialPreheader;
    }
    BranchInst *BI = It->first;
    assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
               HoistableBranches.end() &&
           "BB is expected to be the target of at most one branch");

    LLVMContext &C = BB->getContext();
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    BasicBlock *CommonSucc = HoistableBranches[BI];
    BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());

    // Create hoisted versions of blocks that currently don't have them.
    auto CreateHoistedBlock = [&](BasicBlock *Orig) {
      if (HoistDestinationMap.count(Orig))
        return HoistDestinationMap[Orig];
      BasicBlock *New =
          BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
      HoistDestinationMap[Orig] = New;
      DT->addNewBlock(New, HoistTarget);
      if (CurLoop->getParentLoop())
        CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
      ++NumCreatedBlocks;
      LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
                        << " as hoist destination for " << Orig->getName()
                        << "\n");
      return New;
    };
    BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
    BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
    BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);

    // Link up these blocks with branches.
    if (!HoistCommonSucc->getTerminator()) {
      // The new common successor we've generated will branch to whatever the
      // hoist target branched to.
      BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
      assert(TargetSucc && "Expected hoist target to have a single successor");
      HoistCommonSucc->moveBefore(TargetSucc);
      BranchInst::Create(TargetSucc, HoistCommonSucc);
    }
    if (!HoistTrueDest->getTerminator()) {
      HoistTrueDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistTrueDest);
    }
    if (!HoistFalseDest->getTerminator()) {
      HoistFalseDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistFalseDest);
    }

    // If BI is being cloned to what was originally the preheader, then
    // HoistCommonSucc will now be the new preheader.
    if (HoistTarget == InitialPreheader) {
      // Phis in the loop header now need to use the new preheader.
      InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
      if (MSSAU)
        MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
            HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
      // The new preheader dominates the loop header.
      DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
      DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
      DT->changeImmediateDominator(HeaderNode, PreheaderNode);
      // The preheader hoist destination is now the new preheader, with the
      // exception of the hoist destination of this branch.
      for (auto &Pair : HoistDestinationMap)
        if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
          Pair.second = HoistCommonSucc;
    }

    // Now finally clone BI.
    ReplaceInstWithInst(
        HoistTarget->getTerminator(),
        BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
    ++NumClonedBranches;

    assert(CurLoop->getLoopPreheader() &&
           "Hoisting blocks should not have destroyed preheader");
    return HoistDestinationMap[BB];
  }
};
} // namespace

// Hoisting/sinking an instruction out of a loop isn't always beneficial. It's
// only worthwhile if the destination block is actually colder than the current
// block.
static bool worthSinkOrHoistInst(Instruction &I, BasicBlock *DstBlock,
                                 OptimizationRemarkEmitter *ORE,
                                 BlockFrequencyInfo *BFI) {
  // Check block frequency only when a runtime profile is available,
  // to avoid pathological cases. With a static profile, lean towards
  // hoisting because it helps canonicalize the loop for the vectorizer.
  if (!DstBlock->getParent()->hasProfileData())
    return true;

  if (!HoistSinkColdnessThreshold || !BFI)
    return true;

  BasicBlock *SrcBlock = I.getParent();
  if (BFI->getBlockFreq(DstBlock).getFrequency() / HoistSinkColdnessThreshold >
      BFI->getBlockFreq(SrcBlock).getFrequency()) {
    ORE->emit([&]() {
      return OptimizationRemarkMissed(DEBUG_TYPE, "SinkHoistInst", &I)
             << "failed to sink or hoist instruction because containing block "
                "has lower frequency than destination block";
    });
    return false;
  }

  return true;
}
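
// As a worked example of the check above (using the default
// licm-coldness-threshold of 4): moving an instruction from a block with
// frequency 100 into a destination with frequency 500 is rejected, since
// 500 / 4 = 125 > 100, while a destination with frequency 300 is accepted,
// since 300 / 4 = 75 <= 100.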

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree. This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
///
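/// As an illustrative sketch, given a loop body containing
///   %a = add i64 %inv, 1
///   %b = mul i64 %a, 2
/// the top-down walk visits %a before %b: once %a has been hoisted, all of
/// %b's operands are loop invariant, so %b can be hoisted in the same pass.
///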
bool llvm::hoistRegion(DomTreeNode *N, AAResults *AA, LoopInfo *LI,
                       DominatorTree *DT, BlockFrequencyInfo *BFI,
                       TargetLibraryInfo *TLI, Loop *CurLoop,
                       AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
                       ScalarEvolution *SE, ICFLoopSafetyInfo *SafetyInfo,
                       SinkAndHoistLICMFlags &Flags,
                       OptimizationRemarkEmitter *ORE) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to hoistRegion.");
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");
813 | |||||||
814 | ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU); | ||||||
815 | |||||||
816 | // Keep track of instructions that have been hoisted, as they may need to be | ||||||
817 | // re-hoisted if they end up not dominating all of their uses. | ||||||
818 | SmallVector<Instruction *, 16> HoistedInstructions; | ||||||
819 | |||||||
820 | // For PHI hoisting to work we need to hoist blocks before their successors. | ||||||
821 | // We can do this by iterating through the blocks in the loop in reverse | ||||||
822 | // post-order. | ||||||
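| // Illustrative sketch (editorial note, not part of the source): for a | ||||||
| // diamond CFG 'header -> {left, right} -> latch', reverse post-order visits | ||||||
| // header, then left and right, then latch, so a PHI in latch is processed | ||||||
| // only after the blocks that define its incoming values. | ||||||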
823 | LoopBlocksRPO Worklist(CurLoop); | ||||||
824 | Worklist.perform(LI); | ||||||
825 | bool Changed = false; | ||||||
826 | for (BasicBlock *BB : Worklist) { | ||||||
827 | // Only need to process the contents of this block if it is not part of a | ||||||
828 | // subloop (which would already have been processed). | ||||||
829 | if (inSubLoop(BB, CurLoop, LI)) | ||||||
830 | continue; | ||||||
831 | |||||||
832 | for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) { | ||||||
833 | Instruction &I = *II++; | ||||||
834 | // Try constant folding this instruction. If all the operands are | ||||||
835 | // constants, it is technically hoistable, but it would be better to | ||||||
836 | // just fold it. | ||||||
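| // For instance (editorial example), an in-loop 'add i32 2, 3' whose | ||||||
| // operands are already constant is replaced by the constant 5 here rather | ||||||
| // than being hoisted as a live instruction. | ||||||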
837 | if (Constant *C = ConstantFoldInstruction( | ||||||
838 | &I, I.getModule()->getDataLayout(), TLI)) { | ||||||
839 | LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C | ||||||
840 | << '\n'); | ||||||
841 | if (CurAST) | ||||||
842 | CurAST->copyValue(&I, C); | ||||||
843 | // FIXME MSSA: Such replacements may make accesses unoptimized (D51960). | ||||||
844 | I.replaceAllUsesWith(C); | ||||||
845 | if (isInstructionTriviallyDead(&I, TLI)) | ||||||
846 | eraseInstruction(I, *SafetyInfo, CurAST, MSSAU); | ||||||
847 | Changed = true; | ||||||
848 | continue; | ||||||
849 | } | ||||||
850 | |||||||
851 | // Try hoisting the instruction out to the preheader. We can only do | ||||||
852 | // this if all of the operands of the instruction are loop invariant and | ||||||
853 | // if it is safe to hoist the instruction. We also check block frequency | ||||||
854 | // to make sure the instruction only gets hoisted into colder blocks. | ||||||
855 | // TODO: It may be safe to hoist if we are hoisting to a conditional block | ||||||
856 | // and we have accurately duplicated the control flow from the loop header | ||||||
857 | // to that block. | ||||||
858 | if (CurLoop->hasLoopInvariantOperands(&I) && | ||||||
859 | canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags, | ||||||
860 | ORE) && | ||||||
861 | worthSinkOrHoistInst(I, CurLoop->getLoopPreheader(), ORE, BFI) && | ||||||
862 | isSafeToExecuteUnconditionally( | ||||||
863 | I, DT, CurLoop, SafetyInfo, ORE, | ||||||
864 | CurLoop->getLoopPreheader()->getTerminator())) { | ||||||
865 | hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo, | ||||||
866 | MSSAU, SE, ORE); | ||||||
867 | HoistedInstructions.push_back(&I); | ||||||
868 | Changed = true; | ||||||
869 | continue; | ||||||
870 | } | ||||||
871 | |||||||
872 | // Attempt to move floating-point division out of the loop by | ||||||
873 | // converting it to a reciprocal multiplication. | ||||||
874 | if (I.getOpcode() == Instruction::FDiv && I.hasAllowReciprocal() && | ||||||
875 | CurLoop->isLoopInvariant(I.getOperand(1))) { | ||||||
876 | auto Divisor = I.getOperand(1); | ||||||
877 | auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0); | ||||||
878 | auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor); | ||||||
879 | ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags()); | ||||||
880 | SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent()); | ||||||
881 | ReciprocalDivisor->insertBefore(&I); | ||||||
882 | |||||||
883 | auto Product = | ||||||
884 | BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor); | ||||||
885 | Product->setFastMathFlags(I.getFastMathFlags()); | ||||||
886 | SafetyInfo->insertInstructionTo(Product, I.getParent()); | ||||||
887 | Product->insertAfter(&I); | ||||||
888 | I.replaceAllUsesWith(Product); | ||||||
889 | eraseInstruction(I, *SafetyInfo, CurAST, MSSAU); | ||||||
890 | |||||||
891 | hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), | ||||||
892 | SafetyInfo, MSSAU, SE, ORE); | ||||||
893 | HoistedInstructions.push_back(ReciprocalDivisor); | ||||||
894 | Changed = true; | ||||||
895 | continue; | ||||||
896 | } | ||||||
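| // Illustrative IR for the transform above (editorial sketch; assumes the | ||||||
| // 'arcp' fast-math flag and a loop-invariant divisor %d): | ||||||
| //   loop:                                preheader: | ||||||
| //     %q = fdiv arcp float %x, %d  =>     %r = fdiv arcp float 1.0, %d | ||||||
| //                                        loop: | ||||||
| //                                          %q = fmul arcp float %x, %r | ||||||
| // so the division executes once per loop entry instead of once per | ||||||
| // iteration. | ||||||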
897 | |||||||
898 | auto IsInvariantStart = [&](Instruction &I) { | ||||||
899 | using namespace PatternMatch; | ||||||
900 | return I.use_empty() && | ||||||
901 | match(&I, m_Intrinsic<Intrinsic::invariant_start>()); | ||||||
902 | }; | ||||||
903 | auto MustExecuteWithoutWritesBefore = [&](Instruction &I) { | ||||||
904 | return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) && | ||||||
905 | SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop); | ||||||
906 | }; | ||||||
907 | if ((IsInvariantStart(I) || isGuard(&I)) && | ||||||
908 | CurLoop->hasLoopInvariantOperands(&I) && | ||||||
909 | MustExecuteWithoutWritesBefore(I)) { | ||||||
910 | hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo, | ||||||
911 | MSSAU, SE, ORE); | ||||||
912 | HoistedInstructions.push_back(&I); | ||||||
913 | Changed = true; | ||||||
914 | continue; | ||||||
915 | } | ||||||
916 | |||||||
917 | if (PHINode *PN = dyn_cast<PHINode>(&I)) { | ||||||
918 | if (CFH.canHoistPHI(PN)) { | ||||||
919 | // Redirect incoming blocks first to ensure that we create hoisted | ||||||
920 | // versions of those blocks before we hoist the phi. | ||||||
921 | for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i) | ||||||
922 | PN->setIncomingBlock( | ||||||
923 | i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i))); | ||||||
924 | hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo, | ||||||
925 | MSSAU, SE, ORE); | ||||||
926 | assert(DT->dominates(PN, BB) && "Conditional PHIs not expected"); | ||||||
927 | Changed = true; | ||||||
928 | continue; | ||||||
929 | } | ||||||
930 | } | ||||||
931 | |||||||
932 | // Remember possibly hoistable branches so we can actually hoist them | ||||||
933 | // later if needed. | ||||||
934 | if (BranchInst *BI = dyn_cast<BranchInst>(&I)) | ||||||
935 | CFH.registerPossiblyHoistableBranch(BI); | ||||||
936 | } | ||||||
937 | } | ||||||
938 | |||||||
939 | // If we hoisted instructions to a conditional block they may not dominate | ||||||
940 | // their uses that weren't hoisted (such as phis where some operands are not | ||||||
941 | // loop invariant). If so, make them unconditional by moving them to their | ||||||
942 | // immediate dominator. We iterate through the instructions in reverse order, | ||||||
943 | // which ensures that when we rehoist an instruction we rehoist its operands, | ||||||
944 | // and we also keep track of where in the block we are rehoisting to, to make | ||||||
945 | // sure that we rehoist instructions before the instructions that use them. | ||||||
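| // Editorial example: if %add was hoisted into a conditionally executed | ||||||
| // hoisted block, but a phi left in the loop still uses it, %add no longer | ||||||
| // dominates that use; moving %add up to its block's immediate dominator | ||||||
| // restores dominance. | ||||||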
946 | Instruction *HoistPoint = nullptr; | ||||||
947 | if (ControlFlowHoisting) { | ||||||
948 | for (Instruction *I : reverse(HoistedInstructions)) { | ||||||
949 | if (!llvm::all_of(I->uses(), | ||||||
950 | [&](Use &U) { return DT->dominates(I, U); })) { | ||||||
951 | BasicBlock *Dominator = | ||||||
952 | DT->getNode(I->getParent())->getIDom()->getBlock(); | ||||||
953 | if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) { | ||||||
954 | if (HoistPoint) | ||||||
955 | assert(DT->dominates(Dominator, HoistPoint->getParent()) && | ||||||
956 | "New hoist point expected to dominate old hoist point"); | ||||||
957 | HoistPoint = Dominator->getTerminator(); | ||||||
958 | } | ||||||
959 | LLVM_DEBUG(dbgs() << "LICM rehoisting to " | ||||||
960 | << HoistPoint->getParent()->getName() | ||||||
961 | << ": " << *I << "\n"); | ||||||
962 | moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE); | ||||||
963 | HoistPoint = I; | ||||||
964 | Changed = true; | ||||||
965 | } | ||||||
966 | } | ||||||
967 | } | ||||||
968 | if (MSSAU && VerifyMemorySSA) | ||||||
969 | MSSAU->getMemorySSA()->verifyMemorySSA(); | ||||||
970 | |||||||
971 | // Now that we've finished hoisting make sure that LI and DT are still | ||||||
972 | // valid. | ||||||
973 | #ifdef EXPENSIVE_CHECKS | ||||||
974 | if (Changed) { | ||||||
975 | assert(DT->verify(DominatorTree::VerificationLevel::Fast) && | ||||||
976 | "Dominator tree verification failed"); | ||||||
977 | LI->verify(*DT); | ||||||
978 | } | ||||||
979 | #endif | ||||||
980 | |||||||
981 | return Changed; | ||||||
982 | } | ||||||
983 | |||||||
984 | // Return true if LI is invariant within the scope of the loop. LI is invariant if | ||||||
985 | // CurLoop is dominated by an invariant.start representing the same memory | ||||||
986 | // location and size as the memory location LI loads from, and also the | ||||||
987 | // invariant.start has no uses. | ||||||
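| // Illustrative IR (editorial sketch; %p and %i are hypothetical values): | ||||||
| //   %i = call {}* @llvm.invariant.start.p0i8(i64 4, i8* %p) | ||||||
| //   br label %loop | ||||||
| // loop: | ||||||
| //   %v = load i32, i32* %p ; invariant: the 4-byte region covers the i32 | ||||||
| //                          ; load, dominates the loop, and %i has no uses | ||||||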
988 | static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT, | ||||||
989 | Loop *CurLoop) { | ||||||
990 | Value *Addr = LI->getOperand(0); | ||||||
991 | const DataLayout &DL = LI->getModule()->getDataLayout(); | ||||||
992 | const TypeSize LocSizeInBits = DL.getTypeSizeInBits(LI->getType()); | ||||||
993 | |||||||
994 | // It is not currently possible for clang to generate an invariant.start | ||||||
995 | // intrinsic with scalable vector types because we don't support thread local | ||||||
996 | // sizeless types and we don't permit sizeless types in structs or classes. | ||||||
997 | // Furthermore, even if support is added for this in the future, the intrinsic | ||||||
998 | // itself is defined to have a size of -1 for variable sized objects. This | ||||||
999 | // makes it impossible to verify if the intrinsic envelops our region of | ||||||
1000 | // interest. For example, both <vscale x 32 x i8> and <vscale x 16 x i8> | ||||||
1001 | // types would have a -1 parameter, but the former is clearly double the size | ||||||
1002 | // of the latter. | ||||||
1003 | if (LocSizeInBits.isScalable()) | ||||||
1004 | return false; | ||||||
1005 | |||||||
1006 | // If the type is i8 addrspace(x)*, we know this is the type of the | ||||||
1007 | // llvm.invariant.start operand. | ||||||
1008 | auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()), | ||||||
1009 | LI->getPointerAddressSpace()); | ||||||
1010 | unsigned BitcastsVisited = 0; | ||||||
1011 | // Look through bitcasts until we reach the i8* type (this is the | ||||||
1012 | // invariant.start operand type). | ||||||
1013 | while (Addr->getType() != PtrInt8Ty) { | ||||||
1014 | auto *BC = dyn_cast<BitCastInst>(Addr); | ||||||
1015 | // Avoid traversing a high number of bitcasts. | ||||||
1016 | if (++BitcastsVisited > MaxNumUsesTraversed || !BC) | ||||||
1017 | return false; | ||||||
1018 | Addr = BC->getOperand(0); | ||||||
1019 | } | ||||||
1020 | |||||||
1021 | unsigned UsesVisited = 0; | ||||||
1022 | // Traverse all uses of the load operand value, to see if invariant.start is | ||||||
1023 | // one of the uses, and whether it dominates the load instruction. | ||||||
1024 | for (auto *U : Addr->users()) { | ||||||
1025 | // Avoid traversing when the load operand has a high number of users. | ||||||
1026 | if (++UsesVisited > MaxNumUsesTraversed) | ||||||
1027 | return false; | ||||||
1028 | IntrinsicInst *II = dyn_cast<IntrinsicInst>(U); | ||||||
1029 | // If there are escaping uses of the invariant.start instruction, the load | ||||||
1030 | // may be non-invariant. | ||||||
1031 | if (!II || II->getIntrinsicID() != Intrinsic::invariant_start || | ||||||
1032 | !II->use_empty()) | ||||||
1033 | continue; | ||||||
1034 | ConstantInt *InvariantSize = cast<ConstantInt>(II->getArgOperand(0)); | ||||||
1035 | // The intrinsic supports having a -1 argument for variable sized objects | ||||||
1036 | // so we should check for that here. | ||||||
1037 | if (InvariantSize->isNegative()) | ||||||
1038 | continue; | ||||||
1039 | uint64_t InvariantSizeInBits = InvariantSize->getSExtValue() * 8; | ||||||
1040 | // Confirm the invariant.start location size contains the load operand size | ||||||
1041 | // in bits. Also, the invariant.start should dominate the load, and we | ||||||
1042 | // should not hoist the load out of a loop that contains this dominating | ||||||
1043 | // invariant.start. | ||||||
1044 | if (LocSizeInBits.getFixedSize() <= InvariantSizeInBits && | ||||||
1045 | DT->properlyDominates(II->getParent(), CurLoop->getHeader())) | ||||||
1046 | return true; | ||||||
1047 | } | ||||||
1048 | |||||||
1049 | return false; | ||||||
1050 | } | ||||||
1051 | |||||||
1052 | namespace { | ||||||
1053 | /// Return true if-and-only-if we know how to (mechanically) both hoist and | ||||||
1054 | /// sink a given instruction out of a loop. Does not address legality | ||||||
1055 | /// concerns such as aliasing or speculation safety. | ||||||
1056 | bool isHoistableAndSinkableInst(Instruction &I) { | ||||||
1057 | // Only these instructions are hoistable/sinkable. | ||||||
1058 | return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) || | ||||||
1059 | isa<FenceInst>(I) || isa<CastInst>(I) || isa<UnaryOperator>(I) || | ||||||
1060 | isa<BinaryOperator>(I) || isa<SelectInst>(I) || | ||||||
1061 | isa<GetElementPtrInst>(I) || isa<CmpInst>(I) || | ||||||
1062 | isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) || | ||||||
1063 | isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) || | ||||||
1064 | isa<InsertValueInst>(I) || isa<FreezeInst>(I)); | ||||||
1065 | } | ||||||
1066 | /// Return true if all of the alias sets within this AST are known not to | ||||||
1067 | /// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop. | ||||||
1068 | bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU, | ||||||
1069 | const Loop *L) { | ||||||
1070 | if (CurAST) { | ||||||
1071 | for (AliasSet &AS : *CurAST) { | ||||||
1072 | if (!AS.isForwardingAliasSet() && AS.isMod()) { | ||||||
1073 | return false; | ||||||
1074 | } | ||||||
1075 | } | ||||||
1076 | return true; | ||||||
1077 | } else { /*MSSAU*/ | ||||||
1078 | for (auto *BB : L->getBlocks()) | ||||||
1079 | if (MSSAU->getMemorySSA()->getBlockDefs(BB)) | ||||||
1080 | return false; | ||||||
1081 | return true; | ||||||
1082 | } | ||||||
1083 | } | ||||||
1084 | |||||||
1085 | /// Return true if I is the only Instruction with a MemoryAccess in L. | ||||||
1086 | bool isOnlyMemoryAccess(const Instruction *I, const Loop *L, | ||||||
1087 | const MemorySSAUpdater *MSSAU) { | ||||||
1088 | for (auto *BB : L->getBlocks()) | ||||||
1089 | if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) { | ||||||
1090 | int NotAPhi = 0; | ||||||
1091 | for (const auto &Acc : *Accs) { | ||||||
1092 | if (isa<MemoryPhi>(&Acc)) | ||||||
1093 | continue; | ||||||
1094 | const auto *MUD = cast<MemoryUseOrDef>(&Acc); | ||||||
1095 | if (MUD->getMemoryInst() != I || NotAPhi++ == 1) | ||||||
1096 | return false; | ||||||
1097 | } | ||||||
1098 | } | ||||||
1099 | return true; | ||||||
1100 | } | ||||||
1101 | } | ||||||
1102 | |||||||
1103 | bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT, | ||||||
1104 | Loop *CurLoop, AliasSetTracker *CurAST, | ||||||
1105 | MemorySSAUpdater *MSSAU, | ||||||
1106 | bool TargetExecutesOncePerLoop, | ||||||
1107 | SinkAndHoistLICMFlags *Flags, | ||||||
1108 | OptimizationRemarkEmitter *ORE) { | ||||||
1109 | // If we don't understand the instruction, bail early. | ||||||
1110 | if (!isHoistableAndSinkableInst(I)) | ||||||
| |||||||
1111 | return false; | ||||||
1112 | |||||||
1113 | MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr; | ||||||
1114 | if (MSSA) | ||||||
1115 | assert(Flags != nullptr && "Flags cannot be null."); | ||||||
1116 | |||||||
1117 | // Loads have extra constraints we have to verify before we can hoist them. | ||||||
1118 | if (LoadInst *LI = dyn_cast<LoadInst>(&I)) { | ||||||
1119 | if (!LI->isUnordered()) | ||||||
1120 | return false; // Don't sink/hoist volatile or ordered atomic loads! | ||||||
1121 | |||||||
1122 | // Loads from constant memory are always safe to move, even if they end up | ||||||
1123 | // in the same alias set as something that ends up being modified. | ||||||
1124 | if (AA->pointsToConstantMemory(LI->getOperand(0))) | ||||||
1125 | return true; | ||||||
1126 | if (LI->hasMetadata(LLVMContext::MD_invariant_load)) | ||||||
1127 | return true; | ||||||
1128 | |||||||
1129 | if (LI->isAtomic() && !TargetExecutesOncePerLoop) | ||||||
1130 | return false; // Don't risk duplicating unordered loads | ||||||
1131 | |||||||
1132 | // This checks for an invariant.start dominating the load. | ||||||
1133 | if (isLoadInvariantInLoop(LI, DT, CurLoop)) | ||||||
1134 | return true; | ||||||
1135 | |||||||
1136 | bool Invalidated; | ||||||
1137 | if (CurAST) | ||||||
1138 | Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST, | ||||||
1139 | CurLoop, AA); | ||||||
1140 | else | ||||||
1141 | Invalidated = pointerInvalidatedByLoopWithMSSA( | ||||||
1142 | MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, *Flags); | ||||||
| |||||||
1143 | // Check loop-invariant address because this may also be a sinkable load | ||||||
1144 | // whose address is not necessarily loop-invariant. | ||||||
1145 | if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand())) | ||||||
1146 | ORE->emit([&]() { | ||||||
1147 | return OptimizationRemarkMissed( | ||||||
1148 | DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI) | ||||||
1149 | << "failed to move load with loop-invariant address " | ||||||
1150 | "because the loop may invalidate its value"; | ||||||
1151 | }); | ||||||
1152 | |||||||
1153 | return !Invalidated; | ||||||
1154 | } else if (CallInst *CI = dyn_cast<CallInst>(&I)) { | ||||||
1155 | // Don't sink or hoist dbg info; it's legal, but not useful. | ||||||
1156 | if (isa<DbgInfoIntrinsic>(I)) | ||||||
1157 | return false; | ||||||
1158 | |||||||
1159 | // Don't sink calls which can throw. | ||||||
1160 | if (CI->mayThrow()) | ||||||
1161 | return false; | ||||||
1162 | |||||||
1163 | using namespace PatternMatch; | ||||||
1164 | if (match(CI, m_Intrinsic<Intrinsic::assume>())) | ||||||
1165 | // Assumes don't actually alias anything or throw | ||||||
1166 | return true; | ||||||
1167 | |||||||
1168 | if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>())) | ||||||
1169 | // Widenable conditions don't actually alias anything or throw | ||||||
1170 | return true; | ||||||
1171 | |||||||
1172 | // Handle simple cases by querying alias analysis. | ||||||
1173 | FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI); | ||||||
1174 | if (Behavior == FMRB_DoesNotAccessMemory) | ||||||
1175 | return true; | ||||||
1176 | if (AAResults::onlyReadsMemory(Behavior)) { | ||||||
1177 | // A readonly argmemonly function only reads from memory pointed to by | ||||||
1178 | // its arguments with arbitrary offsets. If we can prove there are no | ||||||
1179 | // writes to this memory in the loop, we can hoist or sink. | ||||||
1180 | if (AAResults::onlyAccessesArgPointees(Behavior)) { | ||||||
1181 | // TODO: expand to writeable arguments | ||||||
1182 | for (Value *Op : CI->arg_operands()) | ||||||
1183 | if (Op->getType()->isPointerTy()) { | ||||||
1184 | bool Invalidated; | ||||||
1185 | if (CurAST) | ||||||
1186 | Invalidated = pointerInvalidatedByLoop( | ||||||
1187 | MemoryLocation(Op, LocationSize::unknown(), AAMDNodes()), | ||||||
1188 | CurAST, CurLoop, AA); | ||||||
1189 | else | ||||||
1190 | Invalidated = pointerInvalidatedByLoopWithMSSA( | ||||||
1191 | MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop, | ||||||
1192 | *Flags); | ||||||
1193 | if (Invalidated) | ||||||
1194 | return false; | ||||||
1195 | } | ||||||
1196 | return true; | ||||||
1197 | } | ||||||
1198 | |||||||
1199 | // If this call only reads from memory and there are no writes to memory | ||||||
1200 | // in the loop, we can hoist or sink the call as appropriate. | ||||||
1201 | if (isReadOnly(CurAST, MSSAU, CurLoop)) | ||||||
1202 | return true; | ||||||
1203 | } | ||||||
1204 | |||||||
1205 | // FIXME: This should use mod/ref information to see if we can hoist or | ||||||
1206 | // sink the call. | ||||||
1207 | |||||||
1208 | return false; | ||||||
1209 | } else if (auto *FI = dyn_cast<FenceInst>(&I)) { | ||||||
1210 | // Fences alias (most) everything to provide ordering. For the moment, | ||||||
1211 | // just give up if there are any other memory operations in the loop. | ||||||
1212 | if (CurAST) { | ||||||
1213 | auto Begin = CurAST->begin(); | ||||||
1214 | assert(Begin != CurAST->end() && "must contain FI"); | ||||||
1215 | if (std::next(Begin) != CurAST->end()) | ||||||
1216 | // constant memory for instance, TODO: handle better | ||||||
1217 | return false; | ||||||
1218 | auto *UniqueI = Begin->getUniqueInstruction(); | ||||||
1219 | if (!UniqueI) | ||||||
1220 | // other memory op, give up | ||||||
1221 | return false; | ||||||
1222 | (void)FI; // suppress unused variable warning | ||||||
1223 | assert(UniqueI == FI && "AS must contain FI"); | ||||||
1224 | return true; | ||||||
1225 | } else // MSSAU | ||||||
1226 | return isOnlyMemoryAccess(FI, CurLoop, MSSAU); | ||||||
1227 | } else if (auto *SI = dyn_cast<StoreInst>(&I)) { | ||||||
1228 | if (!SI->isUnordered()) | ||||||
1229 | return false; // Don't sink/hoist volatile or ordered atomic store! | ||||||
1230 | |||||||
1231 | // We can only hoist a store that we can prove writes a value which is not | ||||||
1232 | // read or overwritten within the loop. For those cases, we fall back to | ||||||
1233 | // load/store promotion instead. TODO: We can extend this to cases where | ||||||
1234 | // there is exactly one write to the location and that write dominates an | ||||||
1235 | // arbitrary number of reads in the loop. | ||||||
1236 | if (CurAST) { | ||||||
1237 | auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI)); | ||||||
1238 | |||||||
1239 | if (AS.isRef() || !AS.isMustAlias()) | ||||||
1240 | // Quick exit test, handled by the full path below as well. | ||||||
1241 | return false; | ||||||
1242 | auto *UniqueI = AS.getUniqueInstruction(); | ||||||
1243 | if (!UniqueI) | ||||||
1244 | // other memory op, give up | ||||||
1245 | return false; | ||||||
1246 | assert(UniqueI == SI && "AS must contain SI"); | ||||||
1247 | return true; | ||||||
1248 | } else { // MSSAU | ||||||
1249 | if (isOnlyMemoryAccess(SI, CurLoop, MSSAU)) | ||||||
1250 | return true; | ||||||
1251 | // If there are more accesses than the Promotion cap, give up, we're not | ||||||
1252 | // walking a list that long. | ||||||
1253 | if (Flags->NoOfMemAccTooLarge) | ||||||
1254 | return false; | ||||||
1255 | // Check store only if there's still "quota" to check clobber. | ||||||
1256 | if (Flags->LicmMssaOptCounter >= Flags->LicmMssaOptCap) | ||||||
1257 | return false; | ||||||
1258 | // If there are interfering Uses (i.e. their defining access is in the | ||||||
1259 | // loop), or ordered loads (stored as Defs!), don't move this store. | ||||||
1260 | // Could do better here, but this is conservatively correct. | ||||||
1261 | // TODO: Cache set of Uses on the first walk in runOnLoop, update when | ||||||
1262 | // moving accesses. Can also extend to dominating uses. | ||||||
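| // Editorial illustration: for 'store i32 %v, i32* %p' in the loop, a | ||||||
| // MemoryUse such as 'load i32, i32* %q' whose defining access is a | ||||||
| // MemoryDef inside the loop indicates a loop-carried memory dependence, so | ||||||
| // the store is conservatively kept in place. | ||||||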
1263 | auto *SIMD = MSSA->getMemoryAccess(SI); | ||||||
1264 | for (auto *BB : CurLoop->getBlocks()) | ||||||
1265 | if (auto *Accesses = MSSA->getBlockAccesses(BB)) { | ||||||
1266 | for (const auto &MA : *Accesses) | ||||||
1267 | if (const auto *MU = dyn_cast<MemoryUse>(&MA)) { | ||||||
1268 | auto *MD = MU->getDefiningAccess(); | ||||||
1269 | if (!MSSA->isLiveOnEntryDef(MD) && | ||||||
1270 | CurLoop->contains(MD->getBlock())) | ||||||
1271 | return false; | ||||||
1272 | // Disable hoisting past potentially interfering loads. Optimized | ||||||
1273 | // Uses may point to an access outside the loop, as getClobbering | ||||||
1274 | // checks the previous iteration when walking the backedge. | ||||||
1275 | // FIXME: More precise: no Uses that alias SI. | ||||||
1276 | if (!Flags->IsSink && !MSSA->dominates(SIMD, MU)) | ||||||
1277 | return false; | ||||||
1278 | } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) { | ||||||
1279 | if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) { | ||||||
1280 | (void)LI; // Silence warning. | ||||||
1281 | assert(!LI->isUnordered() && "Expected ordered load"); | ||||||
1282 | return false; | ||||||
1283 | } | ||||||
1284 | // Any call, while it may not be clobbering SI, may be a use. | ||||||
1285 | if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) { | ||||||
1286 | // Check if the call may read from the memory location written | ||||||
1287 | // to by SI. Check CI's attributes and arguments; the number of | ||||||
1288 | // such checks performed is limited above by NoOfMemAccTooLarge. | ||||||
1289 | ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI)); | ||||||
1290 | if (isModOrRefSet(MRI)) | ||||||
1291 | return false; | ||||||
1292 | } | ||||||
1293 | } | ||||||
1294 | } | ||||||
1295 | |||||||
1296 | auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI); | ||||||
1297 | Flags->LicmMssaOptCounter++; | ||||||
1298 | // If there are no clobbering Defs in the loop, store is safe to hoist. | ||||||
1299 | return MSSA->isLiveOnEntryDef(Source) || | ||||||
1300 | !CurLoop->contains(Source->getBlock()); | ||||||
1301 | } | ||||||
1302 | } | ||||||
1303 | |||||||
1304 | assert(!I.mayReadOrWriteMemory() && "unhandled aliasing"); | ||||||
1305 | |||||||
1306 | // We've established mechanical ability and aliasing; it's up to the caller | ||||||
1307 | // to check fault safety. | ||||||
1308 | return true; | ||||||
1309 | } | ||||||
1310 | |||||||
1311 | /// Returns true if a PHINode is trivially replaceable with an | ||||||
1312 | /// Instruction. | ||||||
1313 | /// This is true when all incoming values are that instruction. | ||||||
1314 | /// This pattern occurs most often with LCSSA PHI nodes. | ||||||
1315 | /// | ||||||
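| /// Editorial example of such an LCSSA phi, where every incoming value is | ||||||
| /// the same in-loop instruction %inst: | ||||||
| ///   %lcssa = phi i32 [ %inst, %exiting1 ], [ %inst, %exiting2 ] | ||||||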
1316 | static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) { | ||||||
1317 | for (const Value *IncValue : PN.incoming_values()) | ||||||
1318 | if (IncValue != &I) | ||||||
1319 | return false; | ||||||
1320 | |||||||
1321 | return true; | ||||||
1322 | } | ||||||
1323 | |||||||
1324 | /// Return true if the instruction is free in the loop. | ||||||
1325 | static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop, | ||||||
1326 | const TargetTransformInfo *TTI) { | ||||||
1327 | |||||||
1328 | if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) { | ||||||
1329 | if (TTI->getUserCost(GEP, TargetTransformInfo::TCK_SizeAndLatency) != | ||||||
1330 | TargetTransformInfo::TCC_Free) | ||||||
1331 | return false; | ||||||
1332 | // For a GEP, we cannot simply use getUserCost because currently it | ||||||
1333 | // optimistically assumes that a GEP will fold into the addressing mode | ||||||
1334 | // regardless of its users. | ||||||
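| // Editorial note: for example, a GEP whose only users are loads or stores | ||||||
| // in its own block typically folds into their addressing modes (e.g. a | ||||||
| // [base + offset] operand) and costs nothing to leave in the loop. | ||||||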
1335 | const BasicBlock *BB = GEP->getParent(); | ||||||
1336 | for (const User *U : GEP->users()) { | ||||||
1337 | const Instruction *UI = cast<Instruction>(U); | ||||||
1338 | if (CurLoop->contains(UI) && | ||||||
1339 | (BB != UI->getParent() || | ||||||
1340 | (!isa<StoreInst>(UI) && !isa<LoadInst>(UI)))) | ||||||
1341 | return false; | ||||||
1342 | } | ||||||
1343 | return true; | ||||||
1344 | } else | ||||||
1345 | return TTI->getUserCost(&I, TargetTransformInfo::TCK_SizeAndLatency) == | ||||||
1346 | TargetTransformInfo::TCC_Free; | ||||||
1347 | } | ||||||
1348 | |||||||
1349 | /// Return true if the only users of this instruction are outside of | ||||||
1350 | /// the loop. If this is true, we can sink the instruction to the exit | ||||||
1351 | /// blocks of the loop. | ||||||
1352 | /// | ||||||
1353 | /// We also return true if the instruction could be folded away in lowering. | ||||||
1354 | /// (e.g., a GEP can be folded into a load as an addressing mode in the loop). | ||||||
1355 | static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop, | ||||||
1356 | const LoopSafetyInfo *SafetyInfo, | ||||||
1357 | TargetTransformInfo *TTI, bool &FreeInLoop) { | ||||||
1358 | const auto &BlockColors = SafetyInfo->getBlockColors(); | ||||||
1359 | bool IsFree = isFreeInLoop(I, CurLoop, TTI); | ||||||
1360 | for (const User *U : I.users()) { | ||||||
1361 | const Instruction *UI = cast<Instruction>(U); | ||||||
1362 | if (const PHINode *PN = dyn_cast<PHINode>(UI)) { | ||||||
1363 | const BasicBlock *BB = PN->getParent(); | ||||||
1364 | // We cannot sink uses in catchswitches. | ||||||
1365 | if (isa<CatchSwitchInst>(BB->getTerminator())) | ||||||
1366 | return false; | ||||||
1367 | |||||||
1368 | // We need to sink a callsite to a unique funclet. Avoid sinking if the | ||||||
1369 | // phi use is too muddled. | ||||||
1370 | if (isa<CallInst>(I)) | ||||||
1371 | if (!BlockColors.empty() && | ||||||
1372 | BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1) | ||||||
1373 | return false; | ||||||
1374 | } | ||||||
1375 | |||||||
1376 | if (CurLoop->contains(UI)) { | ||||||
1377 | if (IsFree) { | ||||||
1378 | FreeInLoop = true; | ||||||
1379 | continue; | ||||||
1380 | } | ||||||
1381 | return false; | ||||||
1382 | } | ||||||
1383 | } | ||||||
1384 | return true; | ||||||
1385 | } | ||||||
1386 | |||||||
1387 | static Instruction *cloneInstructionInExitBlock( | ||||||
1388 | Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI, | ||||||
1389 | const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) { | ||||||
1390 | Instruction *New; | ||||||
1391 | if (auto *CI = dyn_cast<CallInst>(&I)) { | ||||||
1392 | const auto &BlockColors = SafetyInfo->getBlockColors(); | ||||||
1393 | |||||||
1394 | // Sinking call-sites need to be handled differently from other | ||||||
1395 | // instructions. The cloned call-site needs a funclet bundle operand | ||||||
1396 | // appropriate for its location in the CFG. | ||||||
1397 | SmallVector<OperandBundleDef, 1> OpBundles; | ||||||
1398 | for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles(); | ||||||
1399 | BundleIdx != BundleEnd; ++BundleIdx) { | ||||||
1400 | OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx); | ||||||
1401 | if (Bundle.getTagID() == LLVMContext::OB_funclet) | ||||||
1402 | continue; | ||||||
1403 | |||||||
1404 | OpBundles.emplace_back(Bundle); | ||||||
1405 | } | ||||||
1406 | |||||||
1407 | if (!BlockColors.empty()) { | ||||||
1408 | const ColorVector &CV = BlockColors.find(&ExitBlock)->second; | ||||||
1409 | assert(CV.size() == 1 && "non-unique color for exit block!"); | ||||||
1410 | BasicBlock *BBColor = CV.front(); | ||||||
1411 | Instruction *EHPad = BBColor->getFirstNonPHI(); | ||||||
1412 | if (EHPad->isEHPad()) | ||||||
1413 | OpBundles.emplace_back("funclet", EHPad); | ||||||
1414 | } | ||||||
1415 | |||||||
1416 | New = CallInst::Create(CI, OpBundles); | ||||||
1417 | } else { | ||||||
1418 | New = I.clone(); | ||||||
1419 | } | ||||||
1420 | |||||||
1421 | ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New); | ||||||
1422 | if (!I.getName().empty()) | ||||||
1423 | New->setName(I.getName() + ".le"); | ||||||
1424 | |||||||
1425 | if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) { | ||||||
1426 | // Create a new MemoryAccess and let MemorySSA set its defining access. | ||||||
1427 | MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB( | ||||||
1428 | New, nullptr, New->getParent(), MemorySSA::Beginning); | ||||||
1429 | if (NewMemAcc) { | ||||||
1430 | if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc)) | ||||||
1431 | MSSAU->insertDef(MemDef, /*RenameUses=*/true); | ||||||
1432 | else { | ||||||
1433 | auto *MemUse = cast<MemoryUse>(NewMemAcc); | ||||||
1434 | MSSAU->insertUse(MemUse, /*RenameUses=*/true); | ||||||
1435 | } | ||||||
1436 | } | ||||||
1437 | } | ||||||
1438 | |||||||
1439 | // Build LCSSA PHI nodes for any in-loop operands. Note that this is | ||||||
1440 | // particularly cheap because we can rip off the PHI node that we're | ||||||
1441 | // replacing for the number and blocks of the predecessors. | ||||||
1442 | // OPT: If this shows up in a profile, we can instead finish sinking all | ||||||
1443 | // invariant instructions, and then walk their operands to re-establish | ||||||
1444 | // LCSSA. That will eliminate creating PHI nodes just to nuke them when | ||||||
1445 | // sinking bottom-up. | ||||||
1446 | for (User::op_iterator OI = New->op_begin(), OE = New->op_end(); OI != OE; | ||||||
1447 | ++OI) | ||||||
1448 | if (Instruction *OInst = dyn_cast<Instruction>(*OI)) | ||||||
1449 | if (Loop *OLoop = LI->getLoopFor(OInst->getParent())) | ||||||
1450 | if (!OLoop->contains(&PN)) { | ||||||
1451 | PHINode *OpPN = | ||||||
1452 | PHINode::Create(OInst->getType(), PN.getNumIncomingValues(), | ||||||
1453 | OInst->getName() + ".lcssa", &ExitBlock.front()); | ||||||
1454 | for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i) | ||||||
1455 | OpPN->addIncoming(OInst, PN.getIncomingBlock(i)); | ||||||
1456 | *OI = OpPN; | ||||||
1457 | } | ||||||
1458 | return New; | ||||||
1459 | } | ||||||
1460 | |||||||
1461 | static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo, | ||||||
1462 | AliasSetTracker *AST, MemorySSAUpdater *MSSAU) { | ||||||
1463 | if (AST) | ||||||
1464 | AST->deleteValue(&I); | ||||||
1465 | if (MSSAU) | ||||||
1466 | MSSAU->removeMemoryAccess(&I); | ||||||
1467 | SafetyInfo.removeInstruction(&I); | ||||||
1468 | I.eraseFromParent(); | ||||||
1469 | } | ||||||
1470 | |||||||
1471 | static void moveInstructionBefore(Instruction &I, Instruction &Dest, | ||||||
1472 | ICFLoopSafetyInfo &SafetyInfo, | ||||||
1473 | MemorySSAUpdater *MSSAU, | ||||||
1474 | ScalarEvolution *SE) { | ||||||
1475 | SafetyInfo.removeInstruction(&I); | ||||||
1476 | SafetyInfo.insertInstructionTo(&I, Dest.getParent()); | ||||||
1477 | I.moveBefore(&Dest); | ||||||
1478 | if (MSSAU) | ||||||
1479 | if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>( | ||||||
1480 | MSSAU->getMemorySSA()->getMemoryAccess(&I))) | ||||||
1481 | MSSAU->moveToPlace(OldMemAcc, Dest.getParent(), | ||||||
1482 | MemorySSA::BeforeTerminator); | ||||||
1483 | if (SE) | ||||||
1484 | SE->forgetValue(&I); | ||||||
1485 | } | ||||||
1486 | |||||||
1487 | static Instruction *sinkThroughTriviallyReplaceablePHI( | ||||||
1488 | PHINode *TPN, Instruction *I, LoopInfo *LI, | ||||||
1489 | SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies, | ||||||
1490 | const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop, | ||||||
1491 | MemorySSAUpdater *MSSAU) { | ||||||
1492 | assert(isTriviallyReplaceablePHI(*TPN, *I) && | ||||||
1493 | "Expect only trivially replaceable PHI"); | ||||||
1494 | BasicBlock *ExitBlock = TPN->getParent(); | ||||||
1495 | Instruction *New; | ||||||
1496 | auto It = SunkCopies.find(ExitBlock); | ||||||
1497 | if (It != SunkCopies.end()) | ||||||
1498 | New = It->second; | ||||||
1499 | else | ||||||
1500 | New = SunkCopies[ExitBlock] = cloneInstructionInExitBlock( | ||||||
1501 | *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU); | ||||||
1502 | return New; | ||||||
1503 | } | ||||||
1504 | |||||||
1505 | static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) { | ||||||
1506 | BasicBlock *BB = PN->getParent(); | ||||||
1507 | if (!BB->canSplitPredecessors()) | ||||||
1508 | return false; | ||||||
1509 | // It's not impossible to split EHPad blocks, but if BlockColors already exists | ||||||
1510 | // it requires updating BlockColors for all offspring blocks accordingly. By | ||||||
1511 | // skipping such a corner case, we can keep updating BlockColors after splitting | ||||||
1512 | // a predecessor fairly simple. | ||||||
1513 | if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad()) | ||||||
1514 | return false; | ||||||
1515 | for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) { | ||||||
1516 | BasicBlock *BBPred = *PI; | ||||||
1517 | if (isa<IndirectBrInst>(BBPred->getTerminator()) || | ||||||
1518 | isa<CallBrInst>(BBPred->getTerminator())) | ||||||
1519 | return false; | ||||||
1520 | } | ||||||
1521 | return true; | ||||||
1522 | } | ||||||
1523 | |||||||
1524 | static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT, | ||||||
1525 | LoopInfo *LI, const Loop *CurLoop, | ||||||
1526 | LoopSafetyInfo *SafetyInfo, | ||||||
1527 | MemorySSAUpdater *MSSAU) { | ||||||
1528 | #ifndef NDEBUG | ||||||
1529 | SmallVector<BasicBlock *, 32> ExitBlocks; | ||||||
1530 | CurLoop->getUniqueExitBlocks(ExitBlocks); | ||||||
1531 | SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(), | ||||||
1532 | ExitBlocks.end()); | ||||||
1533 | #endif | ||||||
1534 | BasicBlock *ExitBB = PN->getParent(); | ||||||
1535 | assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block."); | ||||||
1536 | |||||||
1537 | // Split predecessors of the loop exit so that instructions in the loop are | ||||||
1538 | // exposed to exit blocks through trivially replaceable PHIs while keeping the | ||||||
1539 | // loop in the canonical form where each predecessor of each exit block is | ||||||
1540 | // contained within the loop. For example, this will convert the loop below | ||||||
1541 | // from | ||||||
1542 | // | ||||||
1543 | // LB1: | ||||||
1544 | // %v1 = | ||||||
1545 | // br %LE, %LB2 | ||||||
1546 | // LB2: | ||||||
1547 | // %v2 = | ||||||
1548 | // br %LE, %LB1 | ||||||
1549 | // LE: | ||||||
1550 | // %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable | ||||||
1551 | // | ||||||
1552 | // to | ||||||
1553 | // | ||||||
1554 | // LB1: | ||||||
1555 | // %v1 = | ||||||
1556 | // br %LE.split, %LB2 | ||||||
1557 | // LB2: | ||||||
1558 | // %v2 = | ||||||
1559 | // br %LE.split2, %LB1 | ||||||
1560 | // LE.split: | ||||||
1561 | // %p1 = phi [%v1, %LB1] <-- trivially replaceable | ||||||
1562 | // br %LE | ||||||
1563 | // LE.split2: | ||||||
1564 | // %p2 = phi [%v2, %LB2] <-- trivially replaceable | ||||||
1565 | // br %LE | ||||||
1566 | // LE: | ||||||
1567 | // %p = phi [%p1, %LE.split], [%p2, %LE.split2] | ||||||
1568 | // | ||||||
1569 | const auto &BlockColors = SafetyInfo->getBlockColors(); | ||||||
1570 | SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB)); | ||||||
1571 | while (!PredBBs.empty()) { | ||||||
1572 | BasicBlock *PredBB = *PredBBs.begin(); | ||||||
1573 | assert(CurLoop->contains(PredBB) && | ||||||
1574 | "Expect all predecessors are in the loop"); | ||||||
1575 | if (PN->getBasicBlockIndex(PredBB) >= 0) { | ||||||
1576 | BasicBlock *NewPred = SplitBlockPredecessors( | ||||||
1577 | ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true); | ||||||
1578 | // Since we do not allow splitting EH-block with BlockColors in | ||||||
1579 | // canSplitPredecessors(), we can simply assign predecessor's color to | ||||||
1580 | // the new block. | ||||||
1581 | if (!BlockColors.empty()) | ||||||
1582 | // Grab a reference to the ColorVector to be inserted before getting the | ||||||
1583 | // reference to the vector we are copying because inserting the new | ||||||
1584 | // element in BlockColors might cause the map to be reallocated. | ||||||
1585 | SafetyInfo->copyColors(NewPred, PredBB); | ||||||
1586 | } | ||||||
1587 | PredBBs.remove(PredBB); | ||||||
1588 | } | ||||||
1589 | } | ||||||
1590 | |||||||
1591 | /// When an instruction is found to only be used outside of the loop, this | ||||||
1592 | /// function moves it to the exit blocks and patches up SSA form as needed. | ||||||
1593 | /// This method is guaranteed to remove the original instruction from its | ||||||
1594 | /// position, and may either delete it or move it outside of the loop. | ||||||
1595 | /// | ||||||
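| /// Editorial sketch of the common case: under LCSSA, every out-of-loop use | ||||||
| /// is an exit-block phi, so a clone named e.g. '%inst.le' is placed in each | ||||||
| /// exit block and the corresponding phi is RAUW'd with that clone. | ||||||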
1596 | static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT, | ||||||
1597 | BlockFrequencyInfo *BFI, const Loop *CurLoop, | ||||||
1598 | ICFLoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU, | ||||||
1599 | OptimizationRemarkEmitter *ORE) { | ||||||
1600 | LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n"); | ||||||
1601 | ORE->emit([&]() { | ||||||
1602 | return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I) | ||||||
1603 | << "sinking " << ore::NV("Inst", &I); | ||||||
1604 | }); | ||||||
1605 | bool Changed = false; | ||||||
1606 | if (isa<LoadInst>(I)) | ||||||
1607 | ++NumMovedLoads; | ||||||
1608 | else if (isa<CallInst>(I)) | ||||||
1609 | ++NumMovedCalls; | ||||||
1610 | ++NumSunk; | ||||||
1611 | |||||||
1612 | // Iterate over users to be ready for actual sinking. Replace uses reached via | ||||||
1613 | // unreachable blocks with undef and make all user PHIs trivially replaceable. | ||||||
1614 | SmallPtrSet<Instruction *, 8> VisitedUsers; | ||||||
1615 | for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) { | ||||||
1616 | auto *User = cast<Instruction>(*UI); | ||||||
1617 | Use &U = UI.getUse(); | ||||||
1618 | ++UI; | ||||||
1619 | |||||||
1620 | if (VisitedUsers.count(User) || CurLoop->contains(User)) | ||||||
1621 | continue; | ||||||
1622 | |||||||
1623 | if (!DT->isReachableFromEntry(User->getParent())) { | ||||||
1624 | U = UndefValue::get(I.getType()); | ||||||
1625 | Changed = true; | ||||||
1626 | continue; | ||||||
1627 | } | ||||||
1628 | |||||||
1629 | // The user must be a PHI node. | ||||||
1630 | PHINode *PN = cast<PHINode>(User); | ||||||
1631 | |||||||
1632 | // Surprisingly, instructions can be used outside of loops without any | ||||||
1633 | // exits. This can only happen in PHI nodes if the incoming block is | ||||||
1634 | // unreachable. | ||||||
1635 | BasicBlock *BB = PN->getIncomingBlock(U); | ||||||
1636 | if (!DT->isReachableFromEntry(BB)) { | ||||||
1637 | U = UndefValue::get(I.getType()); | ||||||
1638 | Changed = true; | ||||||
1639 | continue; | ||||||
1640 | } | ||||||
1641 | |||||||
1642 | VisitedUsers.insert(PN); | ||||||
1643 | if (isTriviallyReplaceablePHI(*PN, I)) | ||||||
1644 | continue; | ||||||
1645 | |||||||
1646 | if (!canSplitPredecessors(PN, SafetyInfo)) | ||||||
1647 | return Changed; | ||||||
1648 | |||||||
1649 | // Split predecessors of the PHI so that we can make users trivially | ||||||
1650 | // replaceable. | ||||||
1651 | splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU); | ||||||
1652 | |||||||
1653 | // Should rebuild the iterators, as they may be invalidated by | ||||||
1654 | // splitPredecessorsOfLoopExit(). | ||||||
1655 | UI = I.user_begin(); | ||||||
1656 | UE = I.user_end(); | ||||||
1657 | } | ||||||
1658 | |||||||
1659 | if (VisitedUsers.empty()) | ||||||
1660 | return Changed; | ||||||
1661 | |||||||
1662 | #ifndef NDEBUG | ||||||
1663 | SmallVector<BasicBlock *, 32> ExitBlocks; | ||||||
1664 | CurLoop->getUniqueExitBlocks(ExitBlocks); | ||||||
1665 | SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(), | ||||||
1666 | ExitBlocks.end()); | ||||||
1667 | #endif | ||||||
1668 | |||||||
1669 | // Clones of this instruction. Don't create more than one per exit block! | ||||||
1670 | SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies; | ||||||
1671 | |||||||
1672 | // If this instruction is only used outside of the loop, then all users are | ||||||
1673 | // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of | ||||||
1674 | // the instruction. | ||||||
1675 | // First check if I is worth sinking for all uses. Sink only when it is | ||||||
1676 | // worthwhile across all uses. | ||||||
1677 | SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end()); | ||||||
1678 | SmallVector<PHINode *, 8> ExitPNs; | ||||||
1679 | for (auto *UI : Users) { | ||||||
1680 | auto *User = cast<Instruction>(UI); | ||||||
1681 | |||||||
1682 | if (CurLoop->contains(User)) | ||||||
1683 | continue; | ||||||
1684 | |||||||
1685 | PHINode *PN = cast<PHINode>(User); | ||||||
1686 | assert(ExitBlockSet.count(PN->getParent()) && | ||||||
1687 | "The LCSSA PHI is not in an exit block!"); | ||||||
1688 | if (!worthSinkOrHoistInst(I, PN->getParent(), ORE, BFI)) { | ||||||
1689 | return Changed; | ||||||
1690 | } | ||||||
1691 | |||||||
1692 | ExitPNs.push_back(PN); | ||||||
1693 | } | ||||||
1694 | |||||||
1695 | for (auto *PN : ExitPNs) { | ||||||
1696 | |||||||
1697 | // The PHI must be trivially replaceable. | ||||||
1698 | Instruction *New = sinkThroughTriviallyReplaceablePHI( | ||||||
1699 | PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU); | ||||||
1700 | PN->replaceAllUsesWith(New); | ||||||
1701 | eraseInstruction(*PN, *SafetyInfo, nullptr, nullptr); | ||||||
1702 | Changed = true; | ||||||
1703 | } | ||||||
1704 | return Changed; | ||||||
1705 | } | ||||||
1706 | |||||||
1707 | /// When an instruction is found to use only loop-invariant operands and it | ||||||
1708 | /// is safe to hoist, this function is called to do the dirty work. | ||||||
1709 | /// | ||||||
1710 | static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop, | ||||||
1711 | BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo, | ||||||
1712 | MemorySSAUpdater *MSSAU, ScalarEvolution *SE, | ||||||
1713 | OptimizationRemarkEmitter *ORE) { | ||||||
1714 | LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getName() << ": " << I | ||||||
1715 | << "\n"); | ||||||
1716 | ORE->emit([&]() { | ||||||
1717 | return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting " | ||||||
1718 | << ore::NV("Inst", &I); | ||||||
1719 | }); | ||||||
1720 | |||||||
1721 | // Metadata can be dependent on conditions we are hoisting above. | ||||||
1722 | // Conservatively strip all metadata on the instruction unless we were | ||||||
1723 | // guaranteed to execute I if we entered the loop, in which case the metadata | ||||||
1724 | // is valid in the loop preheader. | ||||||
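| // Editorial example: a load's !nonnull or !range metadata may only hold | ||||||
| // under a branch condition inside the loop; hoisting the load past that | ||||||
| // branch with the metadata intact could be unsound, hence the conservative | ||||||
| // strip below. | ||||||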
1725 | if (I.hasMetadataOtherThanDebugLoc() && | ||||||
1726 | // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning | ||||||
1727 | // time in isGuaranteedToExecute if we don't actually have anything to | ||||||
1728 | // drop. It is a compile time optimization, not required for correctness. | ||||||
1729 | !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop)) | ||||||
1730 | I.dropUnknownNonDebugMetadata(); | ||||||
1731 | |||||||
1732 | if (isa<PHINode>(I)) | ||||||
1733 | // Move the new node to the end of the phi list in the destination block. | ||||||
1734 | moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE); | ||||||
1735 | else | ||||||
1736 | // Move the new node to the destination block, before its terminator. | ||||||
1737 | moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE); | ||||||
1738 | |||||||
1739 | I.updateLocationAfterHoist(); | ||||||
1740 | |||||||
1741 | if (isa<LoadInst>(I)) | ||||||
1742 | ++NumMovedLoads; | ||||||
1743 | else if (isa<CallInst>(I)) | ||||||
1744 | ++NumMovedCalls; | ||||||
1745 | ++NumHoisted; | ||||||
1746 | } | ||||||
1747 | |||||||
1748 | /// Only sink or hoist an instruction if it is not a trapping instruction, | ||||||
1749 | /// if the instruction is known not to trap when moved to the preheader, | ||||||
1750 | /// or if it is a trapping instruction that is guaranteed to execute. | ||||||
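| /// Editorial example: a 'udiv' whose loop-invariant divisor might be zero | ||||||
| /// can trap, so it may only be hoisted when guaranteed to execute on entry | ||||||
| /// to the loop; isSafeToSpeculativelyExecute covers the non-trapping cases. | ||||||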
1751 | static bool isSafeToExecuteUnconditionally(Instruction &Inst, | ||||||
1752 | const DominatorTree *DT, | ||||||
1753 | const Loop *CurLoop, | ||||||
1754 | const LoopSafetyInfo *SafetyInfo, | ||||||
1755 | OptimizationRemarkEmitter *ORE, | ||||||
1756 | const Instruction *CtxI) { | ||||||
1757 | if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT)) | ||||||
1758 | return true; | ||||||
1759 | |||||||
1760 | bool GuaranteedToExecute = | ||||||
1761 | SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop); | ||||||
1762 | |||||||
1763 | if (!GuaranteedToExecute) { | ||||||
1764 | auto *LI = dyn_cast<LoadInst>(&Inst); | ||||||
1765 | if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand())) | ||||||
1766 | ORE->emit([&]() { | ||||||
1767 | return OptimizationRemarkMissed( | ||||||
1768 | DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI) | ||||||
1769 | << "failed to hoist load with loop-invariant address " | ||||||
1770 | "because load is conditionally executed"; | ||||||
1771 | }); | ||||||
1772 | } | ||||||
1773 | |||||||
1774 | return GuaranteedToExecute; | ||||||
1775 | } | ||||||
1776 | |||||||
1777 | namespace { | ||||||
1778 | class LoopPromoter : public LoadAndStorePromoter { | ||||||
1779 | Value *SomePtr; // Designated pointer to store to. | ||||||
1780 | const SmallSetVector<Value *, 8> &PointerMustAliases; | ||||||
1781 | SmallVectorImpl<BasicBlock *> &LoopExitBlocks; | ||||||
1782 | SmallVectorImpl<Instruction *> &LoopInsertPts; | ||||||
1783 | SmallVectorImpl<MemoryAccess *> &MSSAInsertPts; | ||||||
1784 | PredIteratorCache &PredCache; | ||||||
1785 | AliasSetTracker *AST; | ||||||
1786 | MemorySSAUpdater *MSSAU; | ||||||
1787 | LoopInfo &LI; | ||||||
1788 | DebugLoc DL; | ||||||
1789 | int Alignment; | ||||||
1790 | bool UnorderedAtomic; | ||||||
1791 | AAMDNodes AATags; | ||||||
1792 | ICFLoopSafetyInfo &SafetyInfo; | ||||||
1793 | |||||||
1794 | Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const { | ||||||
1795 | if (Instruction *I = dyn_cast<Instruction>(V)) | ||||||
1796 | if (Loop *L = LI.getLoopFor(I->getParent())) | ||||||
1797 | if (!L->contains(BB)) { | ||||||
1798 | // We need to create an LCSSA PHI node for the incoming value and | ||||||
1799 | // store that. | ||||||
1800 | PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB), | ||||||
1801 | I->getName() + ".lcssa", &BB->front()); | ||||||
1802 | for (BasicBlock *Pred : PredCache.get(BB)) | ||||||
1803 | PN->addIncoming(I, Pred); | ||||||
1804 | return PN; | ||||||
1805 | } | ||||||
1806 | return V; | ||||||
1807 | } | ||||||
1808 | |||||||
1809 | public: | ||||||
1810 | LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S, | ||||||
1811 | const SmallSetVector<Value *, 8> &PMA, | ||||||
1812 | SmallVectorImpl<BasicBlock *> &LEB, | ||||||
1813 | SmallVectorImpl<Instruction *> &LIP, | ||||||
1814 | SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC, | ||||||
1815 | AliasSetTracker *ast, MemorySSAUpdater *MSSAU, LoopInfo &li, | ||||||
1816 | DebugLoc dl, int alignment, bool UnorderedAtomic, | ||||||
1817 | const AAMDNodes &AATags, ICFLoopSafetyInfo &SafetyInfo) | ||||||
1818 | : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA), | ||||||
1819 | LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP), | ||||||
1820 | PredCache(PIC), AST(ast), MSSAU(MSSAU), LI(li), DL(std::move(dl)), | ||||||
1821 | Alignment(alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags), | ||||||
1822 | SafetyInfo(SafetyInfo) {} | ||||||
1823 | |||||||
1824 | bool isInstInList(Instruction *I, | ||||||
1825 | const SmallVectorImpl<Instruction *> &) const override { | ||||||
1826 | Value *Ptr; | ||||||
1827 | if (LoadInst *LI = dyn_cast<LoadInst>(I)) | ||||||
1828 | Ptr = LI->getOperand(0); | ||||||
1829 | else | ||||||
1830 | Ptr = cast<StoreInst>(I)->getPointerOperand(); | ||||||
1831 | return PointerMustAliases.count(Ptr); | ||||||
1832 | } | ||||||
1833 | |||||||
1834 | void doExtraRewritesBeforeFinalDeletion() override { | ||||||
1835 | // Insert stores in the loop exit blocks. Each exit block gets a | ||||||
1836 | // store of the live-out values that feed them. Since we've already told | ||||||
1837 | // the SSA updater about the defs in the loop and the preheader | ||||||
1838 | // definition, it is all set and we can start using it. | ||||||
1839 | for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) { | ||||||
1840 | BasicBlock *ExitBlock = LoopExitBlocks[i]; | ||||||
1841 | Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock); | ||||||
1842 | LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock); | ||||||
1843 | Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock); | ||||||
1844 | Instruction *InsertPos = LoopInsertPts[i]; | ||||||
1845 | StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos); | ||||||
1846 | if (UnorderedAtomic) | ||||||
1847 | NewSI->setOrdering(AtomicOrdering::Unordered); | ||||||
1848 | NewSI->setAlignment(Align(Alignment)); | ||||||
1849 | NewSI->setDebugLoc(DL); | ||||||
1850 | if (AATags) | ||||||
1851 | NewSI->setAAMetadata(AATags); | ||||||
1852 | |||||||
1853 | if (MSSAU) { | ||||||
1854 | MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i]; | ||||||
1855 | MemoryAccess *NewMemAcc; | ||||||
1856 | if (!MSSAInsertPoint) { | ||||||
1857 | NewMemAcc = MSSAU->createMemoryAccessInBB( | ||||||
1858 | NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning); | ||||||
1859 | } else { | ||||||
1860 | NewMemAcc = | ||||||
1861 | MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint); | ||||||
1862 | } | ||||||
1863 | MSSAInsertPts[i] = NewMemAcc; | ||||||
1864 | MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true); | ||||||
1865 | // FIXME: true for safety, false may still be correct. | ||||||
1866 | } | ||||||
1867 | } | ||||||
1868 | } | ||||||
1869 | |||||||
1870 | void replaceLoadWithValue(LoadInst *LI, Value *V) const override { | ||||||
1871 | // Update alias analysis. | ||||||
1872 | if (AST) | ||||||
1873 | AST->copyValue(LI, V); | ||||||
1874 | } | ||||||
1875 | void instructionDeleted(Instruction *I) const override { | ||||||
1876 | SafetyInfo.removeInstruction(I); | ||||||
1877 | if (AST) | ||||||
1878 | AST->deleteValue(I); | ||||||
1879 | if (MSSAU) | ||||||
1880 | MSSAU->removeMemoryAccess(I); | ||||||
1881 | } | ||||||
1882 | }; | ||||||
1883 | |||||||
1884 | |||||||
1885 | /// Return true iff we can prove that a caller of this function can not inspect | ||||||
1886 | /// the contents of the provided object in a well defined program. | ||||||
1887 | bool isKnownNonEscaping(Value *Object, const TargetLibraryInfo *TLI) { | ||||||
1888 | if (isa<AllocaInst>(Object)) | ||||||
1889 | // Since the alloca goes out of scope, we know the caller can't retain a | ||||||
1890 | // reference to it and be well defined. Thus, we don't need to check for | ||||||
1891 | // capture. | ||||||
1892 | return true; | ||||||
1893 | |||||||
1894 | // For all other objects we need to know that the caller can't possibly | ||||||
1895 | // have gotten a reference to the object. There are two components of | ||||||
1896 | // that: | ||||||
1897 | // 1) Object can't be escaped by this function. This is what | ||||||
1898 | // PointerMayBeCaptured checks. | ||||||
1899 | // 2) Object can't have been captured at definition site. For this, we | ||||||
1900 | // need to know the return value is noalias. At the moment, we use a | ||||||
1901 | // weaker condition and handle only AllocLikeFunctions (which are | ||||||
1902 | // known to be noalias). TODO | ||||||
1903 | return isAllocLikeFn(Object, TLI) && | ||||||
1904 | !PointerMayBeCaptured(Object, true, true); | ||||||
1905 | } | ||||||
1906 | |||||||
1907 | } // namespace | ||||||
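As a hedged illustration of the two escape routes the comment above distinguishes (all names are hypothetical, not from LICM.cpp):

    #include <cstdlib>
    void capture(int *);          // unknown external callee
    void example() {
      int Local[4];               // alloca: dies at return, so a caller can
                                  // never inspect it -> trivially non-escaping
      int *H = (int *)malloc(16); // alloc-like fn: noalias return value...
      capture(H);                 // ...but this call may capture H, so
                                  // PointerMayBeCaptured fails and H may escape
      std::free(H);
    }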
1908 | |||||||
1909 | /// Try to promote memory values to scalars by sinking stores out of the | ||||||
1910 | /// loop and moving loads to before the loop. We do this by looping over | ||||||
1911 | /// the stores in the loop, looking for stores to Must pointers which are | ||||||
1912 | /// loop invariant. | ||||||
1913 | /// | ||||||
1914 | bool llvm::promoteLoopAccessesToScalars( | ||||||
1915 | const SmallSetVector<Value *, 8> &PointerMustAliases, | ||||||
1916 | SmallVectorImpl<BasicBlock *> &ExitBlocks, | ||||||
1917 | SmallVectorImpl<Instruction *> &InsertPts, | ||||||
1918 | SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC, | ||||||
1919 | LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI, | ||||||
1920 | Loop *CurLoop, AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU, | ||||||
1921 | ICFLoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE) { | ||||||
1922 | // Verify inputs. | ||||||
1923 | assert(LI != nullptr && DT != nullptr && CurLoop != nullptr && | ||||||
1924 |        SafetyInfo != nullptr && | ||||||
1925 |        "Unexpected Input to promoteLoopAccessesToScalars"); | ||||||
1926 | |||||||
1927 | Value *SomePtr = *PointerMustAliases.begin(); | ||||||
1928 | BasicBlock *Preheader = CurLoop->getLoopPreheader(); | ||||||
1929 | |||||||
1930 | // It is not safe to promote a load/store from the loop if the load/store is | ||||||
1931 | // conditional. For example, turning: | ||||||
1932 | // | ||||||
1933 | // for () { if (c) *P += 1; } | ||||||
1934 | // | ||||||
1935 | // into: | ||||||
1936 | // | ||||||
1937 | // tmp = *P; for () { if (c) tmp +=1; } *P = tmp; | ||||||
1938 | // | ||||||
1939 | // is not safe, because *P may only be valid to access if 'c' is true. | ||||||
1940 | // | ||||||
1941 | // The safety property divides into two parts: | ||||||
1942 | // p1) The memory may not be dereferenceable on entry to the loop. In this | ||||||
1943 | // case, we can't insert the required load in the preheader. | ||||||
1944 | // p2) The memory model does not allow us to insert a store along any dynamic | ||||||
1945 | // path which did not originally have one. | ||||||
1946 | // | ||||||
1947 | // If at least one store is guaranteed to execute, both properties are | ||||||
1948 | // satisfied, and promotion is legal. | ||||||
1949 | // | ||||||
1950 | // This, however, is not a necessary condition. Even if no store/load is | ||||||
1951 | // guaranteed to execute, we can still establish these properties. | ||||||
1952 | // We can establish (p1) by proving that hoisting the load into the preheader | ||||||
1953 | // is safe (i.e. proving dereferenceability on all paths through the loop). We | ||||||
1954 | // can use any access within the alias set to prove dereferenceability, | ||||||
1955 | // since they're all must alias. | ||||||
1956 | // | ||||||
1957 | // There are two ways to establish (p2): | ||||||
1958 | // a) Prove the location is thread-local. In this case the memory model | ||||||
1959 | // requirement does not apply, and stores are safe to insert. | ||||||
1960 | // b) Prove a store dominates every exit block. In this case, if an exit | ||||||
1961 | // block is reached, the original dynamic path would have taken us through | ||||||
1962 | // the store, so inserting a store into the exit block is safe. Note that this | ||||||
1963 | // is different from the store being guaranteed to execute. For instance, | ||||||
1964 | // if an exception is thrown on the first iteration of the loop, the original | ||||||
1965 | // store is never executed, but the exit blocks are not executed either. | ||||||
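// For example (illustrative):
//
//   for (...) { *P = v; if (c) break; }
//
// Here the store to *P dominates the loop's exit block: any execution that
// reaches the exit has already stored to *P at least once, so materializing
// a store of the promoted value in the exit block only writes memory the
// original program already wrote.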
1966 | |||||||
1967 | bool DereferenceableInPH = false; | ||||||
1968 | bool SafeToInsertStore = false; | ||||||
1969 | |||||||
1970 | SmallVector<Instruction *, 64> LoopUses; | ||||||
1971 | |||||||
1972 | // We start with an alignment of one and try to find instructions that allow | ||||||
1973 | // us to prove better alignment. | ||||||
1974 | Align Alignment; | ||||||
1975 | // Keep track of which types of access we see | ||||||
1976 | bool SawUnorderedAtomic = false; | ||||||
1977 | bool SawNotAtomic = false; | ||||||
1978 | AAMDNodes AATags; | ||||||
1979 | |||||||
1980 | const DataLayout &MDL = Preheader->getModule()->getDataLayout(); | ||||||
1981 | |||||||
1982 | bool IsKnownThreadLocalObject = false; | ||||||
1983 | if (SafetyInfo->anyBlockMayThrow()) { | ||||||
1984 | // If a loop can throw, we have to insert a store along each unwind edge. | ||||||
1985 | // That said, we can't actually make the unwind edge explicit. Therefore, | ||||||
1986 | // we have to prove that the store is dead along the unwind edge. We do | ||||||
1987 | // this by proving that the caller can't have a reference to the object | ||||||
1988 | // after return and thus can't possibly load from the object. | ||||||
1989 | Value *Object = getUnderlyingObject(SomePtr); | ||||||
1990 | if (!isKnownNonEscaping(Object, TLI)) | ||||||
1991 | return false; | ||||||
1992 | // Subtlety: Alloca's aren't visible to callers, but *are* potentially | ||||||
1993 | // visible to other threads if captured and used during their lifetimes. | ||||||
1994 | IsKnownThreadLocalObject = !isa<AllocaInst>(Object); | ||||||
1995 | } | ||||||
1996 | |||||||
1997 | // Check that all of the pointers in the alias set have the same type. We | ||||||
1998 | // cannot (yet) promote a memory location that is loaded and stored in | ||||||
1999 | // different sizes. While we are at it, collect alignment and AA info. | ||||||
2000 | for (Value *ASIV : PointerMustAliases) { | ||||||
2001 | // Check that all of the pointers in the alias set have the same type. We | ||||||
2002 | // cannot (yet) promote a memory location that is loaded and stored in | ||||||
2003 | // different sizes. | ||||||
2004 | if (SomePtr->getType() != ASIV->getType()) | ||||||
2005 | return false; | ||||||
2006 | |||||||
2007 | for (User *U : ASIV->users()) { | ||||||
2008 | // Ignore instructions that are outside the loop. | ||||||
2009 | Instruction *UI = dyn_cast<Instruction>(U); | ||||||
2010 | if (!UI || !CurLoop->contains(UI)) | ||||||
2011 | continue; | ||||||
2012 | |||||||
2013 | // If there is a non-load/store instruction in the loop, we can't promote | ||||||
2014 | // it. | ||||||
2015 | if (LoadInst *Load = dyn_cast<LoadInst>(UI)) { | ||||||
2016 | if (!Load->isUnordered()) | ||||||
2017 | return false; | ||||||
2018 | |||||||
2019 | SawUnorderedAtomic |= Load->isAtomic(); | ||||||
2020 | SawNotAtomic |= !Load->isAtomic(); | ||||||
2021 | |||||||
2022 | Align InstAlignment = Load->getAlign(); | ||||||
2023 | |||||||
2024 | // Note that proving a load safe to speculate requires proving | ||||||
2025 | // sufficient alignment at the target location. Proving it guaranteed | ||||||
2026 | // to execute does as well. Thus we can increase our guaranteed | ||||||
2027 | // alignment as well. | ||||||
2028 | if (!DereferenceableInPH || (InstAlignment > Alignment)) | ||||||
2029 | if (isSafeToExecuteUnconditionally(*Load, DT, CurLoop, SafetyInfo, | ||||||
2030 | ORE, Preheader->getTerminator())) { | ||||||
2031 | DereferenceableInPH = true; | ||||||
2032 | Alignment = std::max(Alignment, InstAlignment); | ||||||
2033 | } | ||||||
2034 | } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) { | ||||||
2035 | // Stores *of* the pointer are not interesting, only stores *to* the | ||||||
2036 | // pointer. | ||||||
2037 | if (UI->getOperand(1) != ASIV) | ||||||
2038 | continue; | ||||||
2039 | if (!Store->isUnordered()) | ||||||
2040 | return false; | ||||||
2041 | |||||||
2042 | SawUnorderedAtomic |= Store->isAtomic(); | ||||||
2043 | SawNotAtomic |= !Store->isAtomic(); | ||||||
2044 | |||||||
2045 | // If the store is guaranteed to execute, both properties are satisfied. | ||||||
2046 | // We may want to check if a store is guaranteed to execute even if we | ||||||
2047 | // already know that promotion is safe, since it may have higher | ||||||
2048 | // alignment than any other guaranteed stores, in which case we can | ||||||
2049 | // raise the alignment on the promoted store. | ||||||
2050 | Align InstAlignment = Store->getAlign(); | ||||||
2051 | |||||||
2052 | if (!DereferenceableInPH || !SafeToInsertStore || | ||||||
2053 | (InstAlignment > Alignment)) { | ||||||
2054 | if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) { | ||||||
2055 | DereferenceableInPH = true; | ||||||
2056 | SafeToInsertStore = true; | ||||||
2057 | Alignment = std::max(Alignment, InstAlignment); | ||||||
2058 | } | ||||||
2059 | } | ||||||
2060 | |||||||
2061 | // If a store dominates all exit blocks, it is safe to sink. | ||||||
2062 | // As explained above, if an exit block was executed, a dominating | ||||||
2063 | // store must have been executed at least once, so we are not | ||||||
2064 | // introducing stores on paths that did not have them. | ||||||
2065 | // Note that this only looks at explicit exit blocks. If we ever | ||||||
2066 | // start sinking stores into unwind edges (see above), this will break. | ||||||
2067 | if (!SafeToInsertStore) | ||||||
2068 | SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) { | ||||||
2069 | return DT->dominates(Store->getParent(), Exit); | ||||||
2070 | }); | ||||||
2071 | |||||||
2072 | // If the store is not guaranteed to execute, we may still get | ||||||
2073 | // deref info through it. | ||||||
2074 | if (!DereferenceableInPH) { | ||||||
2075 | DereferenceableInPH = isDereferenceableAndAlignedPointer( | ||||||
2076 | Store->getPointerOperand(), Store->getValueOperand()->getType(), | ||||||
2077 | Store->getAlign(), MDL, Preheader->getTerminator(), DT); | ||||||
2078 | } | ||||||
2079 | } else | ||||||
2080 | return false; // Not a load or store. | ||||||
2081 | |||||||
2082 | // Merge the AA tags. | ||||||
2083 | if (LoopUses.empty()) { | ||||||
2084 | // On the first load/store, just take its AA tags. | ||||||
2085 | UI->getAAMetadata(AATags); | ||||||
2086 | } else if (AATags) { | ||||||
2087 | UI->getAAMetadata(AATags, /* Merge = */ true); | ||||||
2088 | } | ||||||
2089 | |||||||
2090 | LoopUses.push_back(UI); | ||||||
2091 | } | ||||||
2092 | } | ||||||
2093 | |||||||
2094 | // If we found both an unordered atomic instruction and a non-atomic memory | ||||||
2095 | // access, bail. We can't blindly promote non-atomic to atomic since we | ||||||
2096 | // might not be able to lower the result. We can't downgrade since that | ||||||
2097 | // would violate the memory model. Also, align 0 is an error for atomics. | ||||||
2098 | if (SawUnorderedAtomic && SawNotAtomic) | ||||||
2099 | return false; | ||||||
2100 | |||||||
2101 | // If we're inserting an atomic load in the preheader, we must be able to | ||||||
2102 | // lower it. We're only guaranteed to be able to lower naturally aligned | ||||||
2103 | // atomics. | ||||||
2104 | auto *SomePtrElemType = SomePtr->getType()->getPointerElementType(); | ||||||
2105 | if (SawUnorderedAtomic && | ||||||
2106 | Alignment < MDL.getTypeStoreSize(SomePtrElemType)) | ||||||
2107 | return false; | ||||||
2108 | |||||||
2109 | // If we couldn't prove we can hoist the load, bail. | ||||||
2110 | if (!DereferenceableInPH) | ||||||
2111 | return false; | ||||||
2112 | |||||||
2113 | // We know we can hoist the load, but don't have a guaranteed store. | ||||||
2114 | // Check whether the location is thread-local. If it is, then we can insert | ||||||
2115 | // stores along paths which originally didn't have them without violating the | ||||||
2116 | // memory model. | ||||||
2117 | if (!SafeToInsertStore) { | ||||||
2118 | if (IsKnownThreadLocalObject) | ||||||
2119 | SafeToInsertStore = true; | ||||||
2120 | else { | ||||||
2121 | Value *Object = getUnderlyingObject(SomePtr); | ||||||
2122 | SafeToInsertStore = | ||||||
2123 | (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) && | ||||||
2124 | !PointerMayBeCaptured(Object, true, true); | ||||||
2125 | } | ||||||
2126 | } | ||||||
2127 | |||||||
2128 | // If we've still failed to prove we can sink the store, give up. | ||||||
2129 | if (!SafeToInsertStore) | ||||||
2130 | return false; | ||||||
2131 | |||||||
2132 | // Otherwise, this is safe to promote, lets do it! | ||||||
2133 | LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtrdo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("licm")) { dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr << '\n'; } } while (false) | ||||||
2134 | << '\n')do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("licm")) { dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr << '\n'; } } while (false); | ||||||
2135 | ORE->emit([&]() { | ||||||
2136 | return OptimizationRemark(DEBUG_TYPE"licm", "PromoteLoopAccessesToScalar", | ||||||
2137 | LoopUses[0]) | ||||||
2138 | << "Moving accesses to memory location out of the loop"; | ||||||
2139 | }); | ||||||
2140 | ++NumPromoted; | ||||||
2141 | |||||||
2142 | // Look at all the loop uses, and try to merge their locations. | ||||||
2143 | std::vector<const DILocation *> LoopUsesLocs; | ||||||
2144 | for (auto U : LoopUses) | ||||||
2145 | LoopUsesLocs.push_back(U->getDebugLoc().get()); | ||||||
2146 | auto DL = DebugLoc(DILocation::getMergedLocations(LoopUsesLocs)); | ||||||
2147 | |||||||
2148 | // We use the SSAUpdater interface to insert phi nodes as required. | ||||||
2149 | SmallVector<PHINode *, 16> NewPHIs; | ||||||
2150 | SSAUpdater SSA(&NewPHIs); | ||||||
2151 | LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks, | ||||||
2152 | InsertPts, MSSAInsertPts, PIC, CurAST, MSSAU, *LI, DL, | ||||||
2153 | Alignment.value(), SawUnorderedAtomic, AATags, | ||||||
2154 | *SafetyInfo); | ||||||
2155 | |||||||
2156 | // Set up the preheader to have a definition of the value. It is the live-out | ||||||
2157 | // value from the preheader that uses in the loop will use. | ||||||
2158 | LoadInst *PreheaderLoad = new LoadInst( | ||||||
2159 | SomePtr->getType()->getPointerElementType(), SomePtr, | ||||||
2160 | SomePtr->getName() + ".promoted", Preheader->getTerminator()); | ||||||
2161 | if (SawUnorderedAtomic) | ||||||
2162 | PreheaderLoad->setOrdering(AtomicOrdering::Unordered); | ||||||
2163 | PreheaderLoad->setAlignment(Alignment); | ||||||
2164 | PreheaderLoad->setDebugLoc(DebugLoc()); | ||||||
2165 | if (AATags) | ||||||
2166 | PreheaderLoad->setAAMetadata(AATags); | ||||||
2167 | SSA.AddAvailableValue(Preheader, PreheaderLoad); | ||||||
2168 | |||||||
2169 | if (MSSAU) { | ||||||
2170 | MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB( | ||||||
2171 | PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End); | ||||||
2172 | MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess); | ||||||
2173 | MSSAU->insertUse(NewMemUse, /*RenameUses=*/true); | ||||||
2174 | } | ||||||
2175 | |||||||
2176 | if (MSSAU && VerifyMemorySSA) | ||||||
2177 | MSSAU->getMemorySSA()->verifyMemorySSA(); | ||||||
2178 | // Rewrite all the loads in the loop and remember all the definitions from | ||||||
2179 | // stores in the loop. | ||||||
2180 | Promoter.run(LoopUses); | ||||||
2181 | |||||||
2182 | if (MSSAU && VerifyMemorySSA) | ||||||
2183 | MSSAU->getMemorySSA()->verifyMemorySSA(); | ||||||
2184 | // If the SSAUpdater didn't use the load in the preheader, just zap it now. | ||||||
2185 | if (PreheaderLoad->use_empty()) | ||||||
2186 | eraseInstruction(*PreheaderLoad, *SafetyInfo, CurAST, MSSAU); | ||||||
2187 | |||||||
2188 | return true; | ||||||
2189 | } | ||||||
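The net effect of a successful promotion, in C-like pseudocode (a sketch of the transformation described in the comments above, not literal pass output):

    // before                        // after
    for (...)                        tmp = *P;          // preheader load
      *P += f(i);                    for (...)
                                       tmp += f(i);     // register update only
                                     *P = tmp;          // store in each exit block

SSAUpdater supplies the PHI nodes that carry tmp across iterations, and the exit-block stores are the ones inserted by LoopPromoter::doExtraRewritesBeforeFinalDeletion above.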
2190 | |||||||
2191 | /// Returns an owning pointer to an alias set which incorporates aliasing info | ||||||
2192 | /// from L and all subloops of L. | ||||||
2193 | std::unique_ptr<AliasSetTracker> | ||||||
2194 | LoopInvariantCodeMotion::collectAliasInfoForLoop(Loop *L, LoopInfo *LI, | ||||||
2195 | AAResults *AA) { | ||||||
2196 | auto CurAST = std::make_unique<AliasSetTracker>(*AA); | ||||||
2197 | |||||||
2198 | // Add everything from all the sub loops. | ||||||
2199 | for (Loop *InnerL : L->getSubLoops()) | ||||||
2200 | for (BasicBlock *BB : InnerL->blocks()) | ||||||
2201 | CurAST->add(*BB); | ||||||
2202 | |||||||
2203 | // And merge in this loop (without anything from inner loops). | ||||||
2204 | for (BasicBlock *BB : L->blocks()) | ||||||
2205 | if (LI->getLoopFor(BB) == L) | ||||||
2206 | CurAST->add(*BB); | ||||||
2207 | |||||||
2208 | return CurAST; | ||||||
2209 | } | ||||||
2210 | |||||||
2211 | std::unique_ptr<AliasSetTracker> | ||||||
2212 | LoopInvariantCodeMotion::collectAliasInfoForLoopWithMSSA( | ||||||
2213 | Loop *L, AAResults *AA, MemorySSAUpdater *MSSAU) { | ||||||
2214 | auto *MSSA = MSSAU->getMemorySSA(); | ||||||
2215 | auto CurAST = std::make_unique<AliasSetTracker>(*AA, MSSA, L); | ||||||
2216 | CurAST->addAllInstructionsInLoopUsingMSSA(); | ||||||
2217 | return CurAST; | ||||||
2218 | } | ||||||
2219 | |||||||
2220 | static bool pointerInvalidatedByLoop(MemoryLocation MemLoc, | ||||||
2221 | AliasSetTracker *CurAST, Loop *CurLoop, | ||||||
2222 | AAResults *AA) { | ||||||
2223 | // First check to see if any of the basic blocks in CurLoop invalidate *V. | ||||||
2224 | bool isInvalidatedAccordingToAST = CurAST->getAliasSetFor(MemLoc).isMod(); | ||||||
2225 | |||||||
2226 | if (!isInvalidatedAccordingToAST || !LICMN2Theshold) | ||||||
2227 | return isInvalidatedAccordingToAST; | ||||||
2228 | |||||||
2229 | // Check with a diagnostic analysis if we can refine the information above. | ||||||
2230 | // This is to identify the limitations of using the AST. | ||||||
2231 | // The alias set mechanism used by LICM has a major weakness in that it | ||||||
2232 | // combines all things which may alias into a single set *before* asking | ||||||
2233 | // modref questions. As a result, a single readonly call within a loop will | ||||||
2234 | // collapse all loads and stores into a single alias set and report | ||||||
2235 | // invalidation if the loop contains any store. For example, readonly calls | ||||||
2236 | // with deopt states have this form and create a general alias set with all | ||||||
2237 | // loads and stores. In order to get any LICM in loops containing possible | ||||||
2238 | // deopt states we need a more precise invalidation of checking the mod ref | ||||||
2239 | // info of each instruction within the loop and LI. This has a complexity of | ||||||
2240 | // O(N^2), so currently, it is used only as a diagnostic tool since the | ||||||
2241 | // default value of LICMN2Threshold is zero. | ||||||
2242 | |||||||
2243 | // Don't look at nested loops. | ||||||
2244 | if (CurLoop->begin() != CurLoop->end()) | ||||||
2245 | return true; | ||||||
2246 | |||||||
2247 | int N = 0; | ||||||
2248 | for (BasicBlock *BB : CurLoop->getBlocks()) | ||||||
2249 | for (Instruction &I : *BB) { | ||||||
2250 | if (N >= LICMN2Theshold) { | ||||||
2251 | LLVM_DEBUG(dbgs() << "Alasing N2 threshold exhausted for "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("licm")) { dbgs() << "Alasing N2 threshold exhausted for " << *(MemLoc.Ptr) << "\n"; } } while (false) | ||||||
2252 | << *(MemLoc.Ptr) << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("licm")) { dbgs() << "Alasing N2 threshold exhausted for " << *(MemLoc.Ptr) << "\n"; } } while (false); | ||||||
2253 | return true; | ||||||
2254 | } | ||||||
2255 | N++; | ||||||
2256 | auto Res = AA->getModRefInfo(&I, MemLoc); | ||||||
2257 | if (isModSet(Res)) { | ||||||
2258 | LLVM_DEBUG(dbgs() << "Aliasing failed on " << I << " for "do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("licm")) { dbgs() << "Aliasing failed on " << I << " for " << *(MemLoc.Ptr) << "\n"; } } while (false ) | ||||||
2259 | << *(MemLoc.Ptr) << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("licm")) { dbgs() << "Aliasing failed on " << I << " for " << *(MemLoc.Ptr) << "\n"; } } while (false ); | ||||||
2260 | return true; | ||||||
2261 | } | ||||||
2262 | } | ||||||
2263 | LLVM_DEBUG(dbgs() << "Aliasing okay for " << *(MemLoc.Ptr) << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("licm")) { dbgs() << "Aliasing okay for " << *(MemLoc .Ptr) << "\n"; } } while (false); | ||||||
2264 | return false; | ||||||
2265 | } | ||||||
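A sketch of the AST weakness the comment describes (all names illustrative, not from LICM.cpp):

    __attribute__((pure)) int ro(const int *, const int *); // readonly call
    for (int i = 0; i < n; ++i) {
      s += ro(a, b);  // the call may-refs a and b: AST folds them, plus c,
                      // into a single alias set
      c[i] = s;       // any store now marks the whole merged set as Mod
      t = *a;         // the invariant load of *a looks invalidated to AST...
    }

...while the per-instruction getModRefInfo walk above can still prove that no instruction in the loop actually modifies *a, at O(N^2) cost.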
2266 | |||||||
2267 | static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU, | ||||||
2268 | Loop *CurLoop, | ||||||
2269 | SinkAndHoistLICMFlags &Flags) { | ||||||
2270 | // For hoisting, use the walker to determine safety | ||||||
2271 | if (!Flags.IsSink) { | ||||||
2272 | MemoryAccess *Source; | ||||||
2273 | // See declaration of SetLicmMssaOptCap for usage details. | ||||||
2274 | if (Flags.LicmMssaOptCounter >= Flags.LicmMssaOptCap) | ||||||
2275 | Source = MU->getDefiningAccess(); | ||||||
2276 | else { | ||||||
2277 | Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU); | ||||||
2278 | Flags.LicmMssaOptCounter++; | ||||||
2279 | } | ||||||
2280 | return !MSSA->isLiveOnEntryDef(Source) && | ||||||
2281 | CurLoop->contains(Source->getBlock()); | ||||||
2282 | } | ||||||
2283 | |||||||
2284 | // For sinking, we'd need to check all Defs below this use. The getClobbering | ||||||
2285 | // call will look on the backedge of the loop, but will check aliasing with | ||||||
2286 | // the instructions on the previous iteration. | ||||||
2287 | // For example: | ||||||
2288 | // for (i ... ) | ||||||
2289 | // load a[i] ( Use (LoE) | ||||||
2290 | // store a[i] ( 1 = Def (2), with 2 = Phi for the loop. | ||||||
2291 | // i++; | ||||||
2292 | // The load sees no clobbering inside the loop, as the backedge alias check | ||||||
2293 | // does phi translation, and will check aliasing against store a[i-1]. | ||||||
2294 | // However sinking the load outside the loop, below the store is incorrect. | ||||||
2295 | |||||||
2296 | // For now, only sink if there are no Defs in the loop, and the existing ones | ||||||
2297 | // precede the use and are in the same block. | ||||||
2298 | // FIXME: Increase precision: Safe to sink if Use post dominates the Def; | ||||||
2299 | // needs PostDominatorTreeAnalysis. | ||||||
2300 | // FIXME: More precise: no Defs that alias this Use. | ||||||
2301 | if (Flags.NoOfMemAccTooLarge) | ||||||
2302 | return true; | ||||||
2303 | for (auto *BB : CurLoop->getBlocks()) | ||||||
2304 | if (auto *Accesses = MSSA->getBlockDefs(BB)) | ||||||
2305 | for (const auto &MA : *Accesses) | ||||||
2306 | if (const auto *MD = dyn_cast<MemoryDef>(&MA)) | ||||||
2307 | if (MU->getBlock() != MD->getBlock() || | ||||||
2308 | !MSSA->locallyDominates(MD, MU)) | ||||||
2309 | return true; | ||||||
2310 | return false; | ||||||
2311 | } | ||||||
2312 | |||||||
2313 | /// Little predicate that returns true if the specified basic block is in | ||||||
2314 | /// a subloop of the current one, not the current one itself. | ||||||
2315 | /// | ||||||
2316 | static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) { | ||||||
2317 | assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop"); | ||||||
2318 | return LI->getLoopFor(BB) != CurLoop; | ||||||
2319 | } |
File: | llvm/include/llvm/IR/Instructions.h |
1 | //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file exposes the class definitions of all of the subclasses of the |
10 | // Instruction class. This is meant to be an easy way to get access to all |
11 | // instruction subclasses. |
12 | // |
13 | //===----------------------------------------------------------------------===// |
14 | |
15 | #ifndef LLVM_IR_INSTRUCTIONS_H |
16 | #define LLVM_IR_INSTRUCTIONS_H |
17 | |
18 | #include "llvm/ADT/ArrayRef.h" |
19 | #include "llvm/ADT/Bitfields.h" |
20 | #include "llvm/ADT/None.h" |
21 | #include "llvm/ADT/STLExtras.h" |
22 | #include "llvm/ADT/SmallVector.h" |
23 | #include "llvm/ADT/StringRef.h" |
24 | #include "llvm/ADT/Twine.h" |
25 | #include "llvm/ADT/iterator.h" |
26 | #include "llvm/ADT/iterator_range.h" |
27 | #include "llvm/IR/Attributes.h" |
28 | #include "llvm/IR/BasicBlock.h" |
29 | #include "llvm/IR/CallingConv.h" |
30 | #include "llvm/IR/CFG.h" |
31 | #include "llvm/IR/Constant.h" |
32 | #include "llvm/IR/DerivedTypes.h" |
33 | #include "llvm/IR/Function.h" |
34 | #include "llvm/IR/InstrTypes.h" |
35 | #include "llvm/IR/Instruction.h" |
36 | #include "llvm/IR/OperandTraits.h" |
37 | #include "llvm/IR/Type.h" |
38 | #include "llvm/IR/Use.h" |
39 | #include "llvm/IR/User.h" |
40 | #include "llvm/IR/Value.h" |
41 | #include "llvm/Support/AtomicOrdering.h" |
42 | #include "llvm/Support/Casting.h" |
43 | #include "llvm/Support/ErrorHandling.h" |
44 | #include <cassert> |
45 | #include <cstddef> |
46 | #include <cstdint> |
47 | #include <iterator> |
48 | |
49 | namespace llvm { |
50 | |
51 | class APInt; |
52 | class ConstantInt; |
53 | class DataLayout; |
54 | class LLVMContext; |
55 | |
56 | //===----------------------------------------------------------------------===// |
57 | // AllocaInst Class |
58 | //===----------------------------------------------------------------------===// |
59 | |
60 | /// an instruction to allocate memory on the stack |
61 | class AllocaInst : public UnaryInstruction { |
62 | Type *AllocatedType; |
63 | |
64 | using AlignmentField = AlignmentBitfieldElementT<0>; |
65 | using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>; |
66 | using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>; |
67 | static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField, |
68 | SwiftErrorField>(), |
69 | "Bitfields must be contiguous"); |
70 | |
71 | protected: |
72 | // Note: Instruction needs to be a friend here to call cloneImpl. |
73 | friend class Instruction; |
74 | |
75 | AllocaInst *cloneImpl() const; |
76 | |
77 | public: |
78 | explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, |
79 | const Twine &Name, Instruction *InsertBefore); |
80 | AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, |
81 | const Twine &Name, BasicBlock *InsertAtEnd); |
82 | |
83 | AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name, |
84 | Instruction *InsertBefore); |
85 | AllocaInst(Type *Ty, unsigned AddrSpace, |
86 | const Twine &Name, BasicBlock *InsertAtEnd); |
87 | |
88 | AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, |
89 | const Twine &Name = "", Instruction *InsertBefore = nullptr); |
90 | AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align, |
91 | const Twine &Name, BasicBlock *InsertAtEnd); |
92 | |
93 | /// Return true if there is an allocation size parameter to the allocation |
94 | /// instruction that is not 1. |
95 | bool isArrayAllocation() const; |
96 | |
97 | /// Get the number of elements allocated. For a simple allocation of a single |
98 | /// element, this will return a constant 1 value. |
99 | const Value *getArraySize() const { return getOperand(0); } |
100 | Value *getArraySize() { return getOperand(0); } |
101 | |
102 | /// Overload to return most specific pointer type. |
103 | PointerType *getType() const { |
104 | return cast<PointerType>(Instruction::getType()); |
105 | } |
106 | |
107 | /// Get allocation size in bits. Returns None if size can't be determined, |
108 | /// e.g. in case of a VLA. |
109 | Optional<uint64_t> getAllocationSizeInBits(const DataLayout &DL) const; |
110 | |
111 | /// Return the type that is being allocated by the instruction. |
112 | Type *getAllocatedType() const { return AllocatedType; } |
113 | /// For use only in special circumstances that need to generically
114 | /// transform a whole instruction (e.g. IR linking and vectorization).
115 | void setAllocatedType(Type *Ty) { AllocatedType = Ty; } |
116 | |
117 | /// Return the alignment of the memory that is being allocated by the |
118 | /// instruction. |
119 | Align getAlign() const { |
120 | return Align(1ULL << getSubclassData<AlignmentField>()); |
121 | } |
122 | |
123 | void setAlignment(Align Align) { |
124 | setSubclassData<AlignmentField>(Log2(Align)); |
125 | } |
126 | |
127 | // FIXME: Remove this once the transition to Align is over.
128 | unsigned getAlignment() const { return getAlign().value(); } |
129 | |
130 | /// Return true if this alloca is in the entry block of the function and is a |
131 | /// constant size. If so, the code generator will fold it into the |
132 | /// prolog/epilog code, so it is basically free. |
133 | bool isStaticAlloca() const; |
134 | |
135 | /// Return true if this alloca is used as an inalloca argument to a call. Such |
136 | /// allocas are never considered static even if they are in the entry block. |
137 | bool isUsedWithInAlloca() const { |
138 | return getSubclassData<UsedWithInAllocaField>(); |
139 | } |
140 | |
141 | /// Specify whether this alloca is used to represent the arguments to a call. |
142 | void setUsedWithInAlloca(bool V) { |
143 | setSubclassData<UsedWithInAllocaField>(V); |
144 | } |
145 | |
146 | /// Return true if this alloca is used as a swifterror argument to a call. |
147 | bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); } |
148 | /// Specify whether this alloca is used to represent a swifterror. |
149 | void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); } |
150 | |
151 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
152 | static bool classof(const Instruction *I) { |
153 | return (I->getOpcode() == Instruction::Alloca); |
154 | } |
155 | static bool classof(const Value *V) { |
156 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
157 | } |
158 | |
159 | private: |
160 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
161 | // method so that subclasses cannot accidentally use it. |
162 | template <typename Bitfield> |
163 | void setSubclassData(typename Bitfield::Type Value) { |
164 | Instruction::setSubclassData<Bitfield>(Value); |
165 | } |
166 | }; |
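A minimal usage sketch of this API (assumes a live insertion block `BB`; the function name is hypothetical, not part of the header):

    #include "llvm/IR/IRBuilder.h"
    #include <cassert>
    using namespace llvm;

    void makeSlot(BasicBlock *BB) {
      IRBuilder<> B(BB);
      AllocaInst *AI = B.CreateAlloca(B.getInt32Ty(), /*ArraySize=*/nullptr, "tmp");
      AI->setAlignment(Align(8));          // stored as log2 in AlignmentField
      assert(AI->getAlign() == Align(8));  // decoded back from the bitfield
    }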
167 | |
168 | //===----------------------------------------------------------------------===// |
169 | // LoadInst Class |
170 | //===----------------------------------------------------------------------===// |
171 | |
172 | /// An instruction for reading from memory. This uses the SubclassData field in |
173 | /// Value to store whether or not the load is volatile. |
174 | class LoadInst : public UnaryInstruction { |
175 | using VolatileField = BoolBitfieldElementT<0>; |
176 | using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; |
177 | using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; |
178 | static_assert( |
179 | Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), |
180 | "Bitfields must be contiguous"); |
181 | |
182 | void AssertOK(); |
183 | |
184 | protected: |
185 | // Note: Instruction needs to be a friend here to call cloneImpl. |
186 | friend class Instruction; |
187 | |
188 | LoadInst *cloneImpl() const; |
189 | |
190 | public: |
191 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, |
192 | Instruction *InsertBefore); |
193 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd); |
194 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
195 | Instruction *InsertBefore); |
196 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
197 | BasicBlock *InsertAtEnd); |
198 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
199 | Align Align, Instruction *InsertBefore = nullptr); |
200 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
201 | Align Align, BasicBlock *InsertAtEnd); |
202 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
203 | Align Align, AtomicOrdering Order, |
204 | SyncScope::ID SSID = SyncScope::System, |
205 | Instruction *InsertBefore = nullptr); |
206 | LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile, |
207 | Align Align, AtomicOrdering Order, SyncScope::ID SSID, |
208 | BasicBlock *InsertAtEnd); |
209 | |
210 | /// Return true if this is a load from a volatile memory location. |
211 | bool isVolatile() const { return getSubclassData<VolatileField>(); } |
212 | |
213 | /// Specify whether this is a volatile load or not. |
214 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } |
215 | |
216 | /// Return the alignment of the access that is being performed. |
217 | /// FIXME: Remove this function once the transition to Align is over.
218 | /// Use getAlign() instead. |
219 | unsigned getAlignment() const { return getAlign().value(); } |
220 | |
221 | /// Return the alignment of the access that is being performed. |
222 | Align getAlign() const { |
223 | return Align(1ULL << (getSubclassData<AlignmentField>())); |
224 | } |
225 | |
226 | void setAlignment(Align Align) { |
227 | setSubclassData<AlignmentField>(Log2(Align)); |
228 | } |
229 | |
230 | /// Returns the ordering constraint of this load instruction. |
231 | AtomicOrdering getOrdering() const { |
232 | return getSubclassData<OrderingField>(); |
233 | } |
234 | /// Sets the ordering constraint of this load instruction. May not be Release |
235 | /// or AcquireRelease. |
236 | void setOrdering(AtomicOrdering Ordering) { |
237 | setSubclassData<OrderingField>(Ordering); |
238 | } |
239 | |
240 | /// Returns the synchronization scope ID of this load instruction. |
241 | SyncScope::ID getSyncScopeID() const { |
242 | return SSID; |
243 | } |
244 | |
245 | /// Sets the synchronization scope ID of this load instruction. |
246 | void setSyncScopeID(SyncScope::ID SSID) { |
247 | this->SSID = SSID; |
248 | } |
249 | |
250 | /// Sets the ordering constraint and the synchronization scope ID of this load |
251 | /// instruction. |
252 | void setAtomic(AtomicOrdering Ordering, |
253 | SyncScope::ID SSID = SyncScope::System) { |
254 | setOrdering(Ordering); |
255 | setSyncScopeID(SSID); |
256 | } |
257 | |
258 | bool isSimple() const { return !isAtomic() && !isVolatile(); } |
259 | |
260 | bool isUnordered() const { |
261 | return (getOrdering() == AtomicOrdering::NotAtomic || |
262 | getOrdering() == AtomicOrdering::Unordered) && |
263 | !isVolatile(); |
264 | } |
265 | |
266 | Value *getPointerOperand() { return getOperand(0); } |
267 | const Value *getPointerOperand() const { return getOperand(0); } |
268 | static unsigned getPointerOperandIndex() { return 0U; } |
269 | Type *getPointerOperandType() const { return getPointerOperand()->getType(); } |
270 | |
271 | /// Returns the address space of the pointer operand. |
272 | unsigned getPointerAddressSpace() const { |
273 | return getPointerOperandType()->getPointerAddressSpace(); |
274 | } |
275 | |
276 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
277 | static bool classof(const Instruction *I) { |
278 | return I->getOpcode() == Instruction::Load; |
279 | } |
280 | static bool classof(const Value *V) { |
281 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
282 | } |
283 | |
284 | private: |
285 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
286 | // method so that subclasses cannot accidentally use it. |
287 | template <typename Bitfield> |
288 | void setSubclassData(typename Bitfield::Type Value) { |
289 | Instruction::setSubclassData<Bitfield>(Value); |
290 | } |
291 | |
292 | /// The synchronization scope ID of this load instruction. Not quite enough |
293 | /// room in SubClassData for everything, so synchronization scope ID gets its |
294 | /// own field. |
295 | SyncScope::ID SSID; |
296 | }; |
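A sketch of building the kind of unordered atomic load that LICM's promotion path accepts (`B` is an IRBuilder<> positioned in a block and `Ptr` an i32 pointer, both assumed from context):

    LoadInst *LI = B.CreateLoad(B.getInt32Ty(), Ptr, "x");
    LI->setAtomic(AtomicOrdering::Unordered);
    LI->setAlignment(Align(4));  // unordered atomics must be naturally aligned
    assert(LI->isUnordered() && !LI->isSimple()); // atomic, hence not "simple"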
297 | |
298 | //===----------------------------------------------------------------------===// |
299 | // StoreInst Class |
300 | //===----------------------------------------------------------------------===// |
301 | |
302 | /// An instruction for storing to memory. |
303 | class StoreInst : public Instruction { |
304 | using VolatileField = BoolBitfieldElementT<0>; |
305 | using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; |
306 | using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; |
307 | static_assert( |
308 | Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), |
309 | "Bitfields must be contiguous"); |
310 | |
311 | void AssertOK(); |
312 | |
313 | protected: |
314 | // Note: Instruction needs to be a friend here to call cloneImpl. |
315 | friend class Instruction; |
316 | |
317 | StoreInst *cloneImpl() const; |
318 | |
319 | public: |
320 | StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore); |
321 | StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd); |
322 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore); |
323 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd); |
324 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, |
325 | Instruction *InsertBefore = nullptr); |
326 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, |
327 | BasicBlock *InsertAtEnd); |
328 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, |
329 | AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System, |
330 | Instruction *InsertBefore = nullptr); |
331 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, |
332 | AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd); |
333 | |
334 | // allocate space for exactly two operands |
335 | void *operator new(size_t s) { |
336 | return User::operator new(s, 2); |
337 | } |
338 | |
339 | /// Return true if this is a store to a volatile memory location. |
340 | bool isVolatile() const { return getSubclassData<VolatileField>(); } |
341 | |
342 | /// Specify whether this is a volatile store or not. |
343 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } |
344 | |
345 | /// Transparently provide more efficient getOperand methods. |
346 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
347 | |
348 | /// Return the alignment of the access that is being performed |
349 | /// FIXME: Remove this function once the transition to Align is over.
350 | /// Use getAlign() instead. |
351 | unsigned getAlignment() const { return getAlign().value(); } |
352 | |
353 | Align getAlign() const { |
354 | return Align(1ULL << (getSubclassData<AlignmentField>())); |
355 | } |
356 | |
357 | void setAlignment(Align Align) { |
358 | setSubclassData<AlignmentField>(Log2(Align)); |
359 | } |
360 | |
361 | /// Returns the ordering constraint of this store instruction. |
362 | AtomicOrdering getOrdering() const { |
363 | return getSubclassData<OrderingField>(); |
364 | } |
365 | |
366 | /// Sets the ordering constraint of this store instruction. May not be |
367 | /// Acquire or AcquireRelease. |
368 | void setOrdering(AtomicOrdering Ordering) { |
369 | setSubclassData<OrderingField>(Ordering); |
370 | } |
371 | |
372 | /// Returns the synchronization scope ID of this store instruction. |
373 | SyncScope::ID getSyncScopeID() const { |
374 | return SSID; |
375 | } |
376 | |
377 | /// Sets the synchronization scope ID of this store instruction. |
378 | void setSyncScopeID(SyncScope::ID SSID) { |
379 | this->SSID = SSID; |
380 | } |
381 | |
382 | /// Sets the ordering constraint and the synchronization scope ID of this |
383 | /// store instruction. |
384 | void setAtomic(AtomicOrdering Ordering, |
385 | SyncScope::ID SSID = SyncScope::System) { |
386 | setOrdering(Ordering); |
387 | setSyncScopeID(SSID); |
388 | } |
389 | |
390 | bool isSimple() const { return !isAtomic() && !isVolatile(); } |
391 | |
392 | bool isUnordered() const { |
393 | return (getOrdering() == AtomicOrdering::NotAtomic || |
394 | getOrdering() == AtomicOrdering::Unordered) && |
395 | !isVolatile(); |
396 | } |
397 | |
398 | Value *getValueOperand() { return getOperand(0); } |
399 | const Value *getValueOperand() const { return getOperand(0); } |
400 | |
401 | Value *getPointerOperand() { return getOperand(1); } |
402 | const Value *getPointerOperand() const { return getOperand(1); } |
403 | static unsigned getPointerOperandIndex() { return 1U; } |
404 | Type *getPointerOperandType() const { return getPointerOperand()->getType(); } |
405 | |
406 | /// Returns the address space of the pointer operand. |
407 | unsigned getPointerAddressSpace() const { |
408 | return getPointerOperandType()->getPointerAddressSpace(); |
409 | } |
410 | |
411 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
412 | static bool classof(const Instruction *I) { |
413 | return I->getOpcode() == Instruction::Store; |
414 | } |
415 | static bool classof(const Value *V) { |
416 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
417 | } |
418 | |
419 | private: |
420 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
421 | // method so that subclasses cannot accidentally use it. |
422 | template <typename Bitfield> |
423 | void setSubclassData(typename Bitfield::Type Value) { |
424 | Instruction::setSubclassData<Bitfield>(Value); |
425 | } |
426 | |
427 | /// The synchronization scope ID of this store instruction. Not quite enough |
428 | /// room in SubClassData for everything, so synchronization scope ID gets its |
429 | /// own field. |
430 | SyncScope::ID SSID; |
431 | }; |
432 | |
433 | template <> |
434 | struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> { |
435 | }; |
436 | |
437 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
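And the matching store side, continuing the same sketch (`Val` and `Ptr` assumed):

    StoreInst *SI = B.CreateStore(Val, Ptr);
    SI->setAtomic(AtomicOrdering::Release); // Release is legal for stores
    assert(SI->getValueOperand() == Val && SI->getPointerOperand() == Ptr);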
438 | |
439 | //===----------------------------------------------------------------------===// |
440 | // FenceInst Class |
441 | //===----------------------------------------------------------------------===// |
442 | |
443 | /// An instruction for ordering other memory operations. |
444 | class FenceInst : public Instruction { |
445 | using OrderingField = AtomicOrderingBitfieldElementT<0>; |
446 | |
447 | void Init(AtomicOrdering Ordering, SyncScope::ID SSID); |
448 | |
449 | protected: |
450 | // Note: Instruction needs to be a friend here to call cloneImpl. |
451 | friend class Instruction; |
452 | |
453 | FenceInst *cloneImpl() const; |
454 | |
455 | public: |
456 | // Ordering may only be Acquire, Release, AcquireRelease, or |
457 | // SequentiallyConsistent. |
458 | FenceInst(LLVMContext &C, AtomicOrdering Ordering, |
459 | SyncScope::ID SSID = SyncScope::System, |
460 | Instruction *InsertBefore = nullptr); |
461 | FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID, |
462 | BasicBlock *InsertAtEnd); |
463 | |
464 | // allocate space for exactly zero operands |
465 | void *operator new(size_t s) { |
466 | return User::operator new(s, 0); |
467 | } |
468 | |
469 | /// Returns the ordering constraint of this fence instruction. |
470 | AtomicOrdering getOrdering() const { |
471 | return getSubclassData<OrderingField>(); |
472 | } |
473 | |
474 | /// Sets the ordering constraint of this fence instruction. May only be |
475 | /// Acquire, Release, AcquireRelease, or SequentiallyConsistent. |
476 | void setOrdering(AtomicOrdering Ordering) { |
477 | setSubclassData<OrderingField>(Ordering); |
478 | } |
479 | |
480 | /// Returns the synchronization scope ID of this fence instruction. |
481 | SyncScope::ID getSyncScopeID() const { |
482 | return SSID; |
483 | } |
484 | |
485 | /// Sets the synchronization scope ID of this fence instruction. |
486 | void setSyncScopeID(SyncScope::ID SSID) { |
487 | this->SSID = SSID; |
488 | } |
489 | |
490 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
491 | static bool classof(const Instruction *I) { |
492 | return I->getOpcode() == Instruction::Fence; |
493 | } |
494 | static bool classof(const Value *V) { |
495 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
496 | } |
497 | |
498 | private: |
499 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
500 | // method so that subclasses cannot accidentally use it. |
501 | template <typename Bitfield> |
502 | void setSubclassData(typename Bitfield::Type Value) { |
503 | Instruction::setSubclassData<Bitfield>(Value); |
504 | } |
505 | |
506 | /// The synchronization scope ID of this fence instruction. Not quite enough |
507 | /// room in SubClassData for everything, so synchronization scope ID gets its |
508 | /// own field. |
509 | SyncScope::ID SSID; |
510 | }; |
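Creating a fence through IRBuilder, for completeness (same sketch, `B` assumed):

    FenceInst *F = B.CreateFence(AtomicOrdering::SequentiallyConsistent);
    assert(F->getOrdering() == AtomicOrdering::SequentiallyConsistent);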
511 | |
512 | //===----------------------------------------------------------------------===// |
513 | // AtomicCmpXchgInst Class |
514 | //===----------------------------------------------------------------------===// |
515 | |
516 | /// An instruction that atomically checks whether a |
517 | /// specified value is in a memory location, and, if it is, stores a new value |
518 | /// there. The value returned by this instruction is a pair containing the |
519 | /// original value as first element, and an i1 indicating success (true) or |
520 | /// failure (false) as second element. |
521 | /// |
522 | class AtomicCmpXchgInst : public Instruction { |
523 | void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align, |
524 | AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering, |
525 | SyncScope::ID SSID); |
526 | |
527 | template <unsigned Offset> |
528 | using AtomicOrderingBitfieldElement = |
529 | typename Bitfield::Element<AtomicOrdering, Offset, 3, |
530 | AtomicOrdering::LAST>; |
531 | |
532 | protected: |
533 | // Note: Instruction needs to be a friend here to call cloneImpl. |
534 | friend class Instruction; |
535 | |
536 | AtomicCmpXchgInst *cloneImpl() const; |
537 | |
538 | public: |
539 | AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, |
540 | AtomicOrdering SuccessOrdering, |
541 | AtomicOrdering FailureOrdering, SyncScope::ID SSID, |
542 | Instruction *InsertBefore = nullptr); |
543 | AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment, |
544 | AtomicOrdering SuccessOrdering, |
545 | AtomicOrdering FailureOrdering, SyncScope::ID SSID, |
546 | BasicBlock *InsertAtEnd); |
547 | |
548 | // allocate space for exactly three operands |
549 | void *operator new(size_t s) { |
550 | return User::operator new(s, 3); |
551 | } |
552 | |
553 | using VolatileField = BoolBitfieldElementT<0>; |
554 | using WeakField = BoolBitfieldElementT<VolatileField::NextBit>; |
555 | using SuccessOrderingField = |
556 | AtomicOrderingBitfieldElementT<WeakField::NextBit>; |
557 | using FailureOrderingField = |
558 | AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>; |
559 | using AlignmentField = |
560 | AlignmentBitfieldElementT<FailureOrderingField::NextBit>; |
561 | static_assert( |
562 | Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField, |
563 | FailureOrderingField, AlignmentField>(), |
564 | "Bitfields must be contiguous"); |
565 | |
566 | /// Return the alignment of the memory that is being allocated by the |
567 | /// instruction. |
568 | Align getAlign() const { |
569 | return Align(1ULL << getSubclassData<AlignmentField>()); |
570 | } |
571 | |
572 | void setAlignment(Align Align) { |
573 | setSubclassData<AlignmentField>(Log2(Align)); |
574 | } |
575 | |
576 | /// Return true if this is a cmpxchg from a volatile memory |
577 | /// location. |
578 | /// |
579 | bool isVolatile() const { return getSubclassData<VolatileField>(); } |
580 | |
581 | /// Specify whether this is a volatile cmpxchg. |
582 | /// |
583 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } |
584 | |
585 | /// Return true if this cmpxchg may spuriously fail. |
586 | bool isWeak() const { return getSubclassData<WeakField>(); } |
587 | |
588 | void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); } |
589 | |
590 | /// Transparently provide more efficient getOperand methods. |
591 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) |
592 | |
593 | /// Returns the success ordering constraint of this cmpxchg instruction. |
594 | AtomicOrdering getSuccessOrdering() const { |
595 | return getSubclassData<SuccessOrderingField>(); |
596 | } |
597 | |
598 | /// Sets the success ordering constraint of this cmpxchg instruction. |
599 | void setSuccessOrdering(AtomicOrdering Ordering) { |
600 | assert(Ordering != AtomicOrdering::NotAtomic && |
601 | "CmpXchg instructions can only be atomic."); |
602 | setSubclassData<SuccessOrderingField>(Ordering); |
603 | } |
604 | |
605 | /// Returns the failure ordering constraint of this cmpxchg instruction. |
606 | AtomicOrdering getFailureOrdering() const { |
607 | return getSubclassData<FailureOrderingField>(); |
608 | } |
609 | |
610 | /// Sets the failure ordering constraint of this cmpxchg instruction. |
611 | void setFailureOrdering(AtomicOrdering Ordering) { |
612 | assert(Ordering != AtomicOrdering::NotAtomic && |
613 | "CmpXchg instructions can only be atomic."); |
614 | setSubclassData<FailureOrderingField>(Ordering); |
615 | } |
616 | |
617 | /// Returns the synchronization scope ID of this cmpxchg instruction. |
618 | SyncScope::ID getSyncScopeID() const { |
619 | return SSID; |
620 | } |
621 | |
622 | /// Sets the synchronization scope ID of this cmpxchg instruction. |
623 | void setSyncScopeID(SyncScope::ID SSID) { |
624 | this->SSID = SSID; |
625 | } |
626 | |
627 | Value *getPointerOperand() { return getOperand(0); } |
628 | const Value *getPointerOperand() const { return getOperand(0); } |
629 | static unsigned getPointerOperandIndex() { return 0U; } |
630 | |
631 | Value *getCompareOperand() { return getOperand(1); } |
632 | const Value *getCompareOperand() const { return getOperand(1); } |
633 | |
634 | Value *getNewValOperand() { return getOperand(2); } |
635 | const Value *getNewValOperand() const { return getOperand(2); } |
636 | |
637 | /// Returns the address space of the pointer operand. |
638 | unsigned getPointerAddressSpace() const { |
639 | return getPointerOperand()->getType()->getPointerAddressSpace(); |
640 | } |
641 | |
642 | /// Returns the strongest permitted ordering on failure, given the |
643 | /// desired ordering on success. |
644 | /// |
645 | /// If the comparison in a cmpxchg operation fails, there is no atomic store |
646 | /// so release semantics cannot be provided. So this function drops explicit |
647 | /// Release requests from the AtomicOrdering. A SequentiallyConsistent |
648 | /// operation would remain SequentiallyConsistent. |
649 | static AtomicOrdering |
650 | getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) { |
651 | switch (SuccessOrdering) { |
652 | default: |
653 | llvm_unreachable("invalid cmpxchg success ordering"); |
654 | case AtomicOrdering::Release: |
655 | case AtomicOrdering::Monotonic: |
656 | return AtomicOrdering::Monotonic; |
657 | case AtomicOrdering::AcquireRelease: |
658 | case AtomicOrdering::Acquire: |
659 | return AtomicOrdering::Acquire; |
660 | case AtomicOrdering::SequentiallyConsistent: |
661 | return AtomicOrdering::SequentiallyConsistent; |
662 | } |
663 | } |
664 | |
665 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
666 | static bool classof(const Instruction *I) { |
667 | return I->getOpcode() == Instruction::AtomicCmpXchg; |
668 | } |
669 | static bool classof(const Value *V) { |
670 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
671 | } |
672 | |
673 | private: |
674 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
675 | // method so that subclasses cannot accidentally use it. |
676 | template <typename Bitfield> |
677 | void setSubclassData(typename Bitfield::Type Value) { |
678 | Instruction::setSubclassData<Bitfield>(Value); |
679 | } |
680 | |
681 | /// The synchronization scope ID of this cmpxchg instruction. Not quite |
682 | /// enough room in SubClassData for everything, so synchronization scope ID |
683 | /// gets its own field. |
684 | SyncScope::ID SSID; |
685 | }; |
686 | |
687 | template <> |
688 | struct OperandTraits<AtomicCmpXchgInst> : |
689 | public FixedNumOperandTraits<AtomicCmpXchgInst, 3> { |
690 | }; |
691 | |
692 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value) |
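A minimal sketch of building a cmpxchg through the constructor declared above; Ptr, Cmp, NewVal and InsertPt are assumed values of matching types, and the failure ordering is derived with getStrongestFailureOrdering:

    AtomicOrdering Success = AtomicOrdering::SequentiallyConsistent;
    auto *CAS = new AtomicCmpXchgInst(
        Ptr, Cmp, NewVal, Align(8), Success,
        AtomicCmpXchgInst::getStrongestFailureOrdering(Success),
        SyncScope::System, /*InsertBefore=*/InsertPt);
    CAS->setWeak(true); // permit spurious failure, reported via the i1 result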
693 | |
694 | //===----------------------------------------------------------------------===// |
695 | // AtomicRMWInst Class |
696 | //===----------------------------------------------------------------------===// |
697 | |
698 | /// An instruction that atomically reads a memory location, |
699 | /// combines it with another value, and then stores the result back. Returns |
700 | /// the old value. |
701 | /// |
702 | class AtomicRMWInst : public Instruction { |
703 | protected: |
704 | // Note: Instruction needs to be a friend here to call cloneImpl. |
705 | friend class Instruction; |
706 | |
707 | AtomicRMWInst *cloneImpl() const; |
708 | |
709 | public: |
710 | /// This enumeration lists the possible modifications atomicrmw can make. In |
711 | /// the descriptions, 'p' is the pointer to the instruction's memory location, |
712 | /// 'old' is the initial value of *p, and 'v' is the other value passed to the |
713 | /// instruction. These instructions always return 'old'. |
714 | enum BinOp : unsigned { |
715 | /// *p = v |
716 | Xchg, |
717 | /// *p = old + v |
718 | Add, |
719 | /// *p = old - v |
720 | Sub, |
721 | /// *p = old & v |
722 | And, |
723 | /// *p = ~(old & v) |
724 | Nand, |
725 | /// *p = old | v |
726 | Or, |
727 | /// *p = old ^ v |
728 | Xor, |
729 | /// *p = old >signed v ? old : v |
730 | Max, |
731 | /// *p = old <signed v ? old : v |
732 | Min, |
733 | /// *p = old >unsigned v ? old : v |
734 | UMax, |
735 | /// *p = old <unsigned v ? old : v |
736 | UMin, |
737 | |
738 | /// *p = old + v (floating point) |
739 | FAdd, |
740 | |
741 | /// *p = old - v (floating point) |
742 | FSub, |
743 | |
744 | FIRST_BINOP = Xchg, |
745 | LAST_BINOP = FSub, |
746 | BAD_BINOP |
747 | }; |
748 | |
749 | private: |
750 | template <unsigned Offset> |
751 | using AtomicOrderingBitfieldElement = |
752 | typename Bitfield::Element<AtomicOrdering, Offset, 3, |
753 | AtomicOrdering::LAST>; |
754 | |
755 | template <unsigned Offset> |
756 | using BinOpBitfieldElement = |
757 | typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>; |
758 | |
759 | public: |
760 | AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, |
761 | AtomicOrdering Ordering, SyncScope::ID SSID, |
762 | Instruction *InsertBefore = nullptr); |
763 | AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment, |
764 | AtomicOrdering Ordering, SyncScope::ID SSID, |
765 | BasicBlock *InsertAtEnd); |
766 | |
767 | // allocate space for exactly two operands |
768 | void *operator new(size_t s) { |
769 | return User::operator new(s, 2); |
770 | } |
771 | |
772 | using VolatileField = BoolBitfieldElementT<0>; |
773 | using AtomicOrderingField = |
774 | AtomicOrderingBitfieldElementT<VolatileField::NextBit>; |
775 | using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>; |
776 | using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>; |
777 | static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField, |
778 | OperationField, AlignmentField>(), |
779 | "Bitfields must be contiguous"); |
780 | |
781 | BinOp getOperation() const { return getSubclassData<OperationField>(); } |
782 | |
783 | static StringRef getOperationName(BinOp Op); |
784 | |
785 | static bool isFPOperation(BinOp Op) { |
786 | switch (Op) { |
787 | case AtomicRMWInst::FAdd: |
788 | case AtomicRMWInst::FSub: |
789 | return true; |
790 | default: |
791 | return false; |
792 | } |
793 | } |
794 | |
795 | void setOperation(BinOp Operation) { |
796 | setSubclassData<OperationField>(Operation); |
797 | } |
798 | |
799 | /// Return the alignment of the memory that is being accessed by the |
800 | /// instruction. |
801 | Align getAlign() const { |
802 | return Align(1ULL << getSubclassData<AlignmentField>()); |
803 | } |
804 | |
805 | void setAlignment(Align Align) { |
806 | setSubclassData<AlignmentField>(Log2(Align)); |
807 | } |
808 | |
809 | /// Return true if this is a RMW on a volatile memory location. |
810 | /// |
811 | bool isVolatile() const { return getSubclassData<VolatileField>(); } |
812 | |
813 | /// Specify whether this is a volatile RMW or not. |
814 | /// |
815 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } |
816 | |
817 | /// Transparently provide more efficient getOperand methods. |
818 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) |
819 | |
820 | /// Returns the ordering constraint of this rmw instruction. |
821 | AtomicOrdering getOrdering() const { |
822 | return getSubclassData<AtomicOrderingField>(); |
823 | } |
824 | |
825 | /// Sets the ordering constraint of this rmw instruction. |
826 | void setOrdering(AtomicOrdering Ordering) { |
827 | assert(Ordering != AtomicOrdering::NotAtomic && |
828 | "atomicrmw instructions can only be atomic."); |
829 | setSubclassData<AtomicOrderingField>(Ordering); |
830 | } |
831 | |
832 | /// Returns the synchronization scope ID of this rmw instruction. |
833 | SyncScope::ID getSyncScopeID() const { |
834 | return SSID; |
835 | } |
836 | |
837 | /// Sets the synchronization scope ID of this rmw instruction. |
838 | void setSyncScopeID(SyncScope::ID SSID) { |
839 | this->SSID = SSID; |
840 | } |
841 | |
842 | Value *getPointerOperand() { return getOperand(0); } |
843 | const Value *getPointerOperand() const { return getOperand(0); } |
844 | static unsigned getPointerOperandIndex() { return 0U; } |
845 | |
846 | Value *getValOperand() { return getOperand(1); } |
847 | const Value *getValOperand() const { return getOperand(1); } |
848 | |
849 | /// Returns the address space of the pointer operand. |
850 | unsigned getPointerAddressSpace() const { |
851 | return getPointerOperand()->getType()->getPointerAddressSpace(); |
852 | } |
853 | |
854 | bool isFloatingPointOperation() const { |
855 | return isFPOperation(getOperation()); |
856 | } |
857 | |
858 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
859 | static bool classof(const Instruction *I) { |
860 | return I->getOpcode() == Instruction::AtomicRMW; |
861 | } |
862 | static bool classof(const Value *V) { |
863 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
864 | } |
865 | |
866 | private: |
867 | void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align, |
868 | AtomicOrdering Ordering, SyncScope::ID SSID); |
869 | |
870 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
871 | // method so that subclasses cannot accidentally use it. |
872 | template <typename Bitfield> |
873 | void setSubclassData(typename Bitfield::Type Value) { |
874 | Instruction::setSubclassData<Bitfield>(Value); |
875 | } |
876 | |
877 | /// The synchronization scope ID of this rmw instruction. Not quite enough |
878 | /// room in SubClassData for everything, so synchronization scope ID gets its |
879 | /// own field. |
880 | SyncScope::ID SSID; |
881 | }; |
882 | |
883 | template <> |
884 | struct OperandTraits<AtomicRMWInst> |
885 | : public FixedNumOperandTraits<AtomicRMWInst,2> { |
886 | }; |
887 | |
888 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value) |
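A matching sketch for atomicrmw; Ptr, Val and InsertPt are assumed, and AtomicRMWInst::Add selects the "*p = old + v" semantics from the BinOp enum above:

    auto *RMW = new AtomicRMWInst(AtomicRMWInst::Add, Ptr, Val, Align(4),
                                  AtomicOrdering::Monotonic, SyncScope::System,
                                  /*InsertBefore=*/InsertPt);
    assert(!RMW->isFloatingPointOperation()); // Add is an integer BinOp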
889 | |
890 | //===----------------------------------------------------------------------===// |
891 | // GetElementPtrInst Class |
892 | //===----------------------------------------------------------------------===// |
893 | |
894 | // checkGEPType - Simple wrapper function to give a better assertion failure |
895 | // message on bad indexes for a gep instruction. |
896 | // |
897 | inline Type *checkGEPType(Type *Ty) { |
898 | assert(Ty && "Invalid GetElementPtrInst indices for type!"); |
899 | return Ty; |
900 | } |
901 | |
902 | /// An instruction for type-safe pointer arithmetic to |
903 | /// access elements of arrays and structs. |
904 | /// |
905 | class GetElementPtrInst : public Instruction { |
906 | Type *SourceElementType; |
907 | Type *ResultElementType; |
908 | |
909 | GetElementPtrInst(const GetElementPtrInst &GEPI); |
910 | |
911 | /// Constructors - Create a getelementptr instruction with a base pointer and |
912 | /// a list of indices. The first ctor can optionally insert before an existing |
913 | /// instruction, the second appends the new instruction to the specified |
914 | /// BasicBlock. |
915 | inline GetElementPtrInst(Type *PointeeType, Value *Ptr, |
916 | ArrayRef<Value *> IdxList, unsigned Values, |
917 | const Twine &NameStr, Instruction *InsertBefore); |
918 | inline GetElementPtrInst(Type *PointeeType, Value *Ptr, |
919 | ArrayRef<Value *> IdxList, unsigned Values, |
920 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
921 | |
922 | void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr); |
923 | |
924 | protected: |
925 | // Note: Instruction needs to be a friend here to call cloneImpl. |
926 | friend class Instruction; |
927 | |
928 | GetElementPtrInst *cloneImpl() const; |
929 | |
930 | public: |
931 | static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, |
932 | ArrayRef<Value *> IdxList, |
933 | const Twine &NameStr = "", |
934 | Instruction *InsertBefore = nullptr) { |
935 | unsigned Values = 1 + unsigned(IdxList.size()); |
936 | if (!PointeeType) |
937 | PointeeType = |
938 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(); |
939 | else |
940 | assert( |
941 | PointeeType == |
942 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()); |
943 | return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, |
944 | NameStr, InsertBefore); |
945 | } |
946 | |
947 | static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, |
948 | ArrayRef<Value *> IdxList, |
949 | const Twine &NameStr, |
950 | BasicBlock *InsertAtEnd) { |
951 | unsigned Values = 1 + unsigned(IdxList.size()); |
952 | if (!PointeeType) |
953 | PointeeType = |
954 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType(); |
955 | else |
956 | assert( |
957 | PointeeType == |
958 | cast<PointerType>(Ptr->getType()->getScalarType())->getElementType()); |
959 | return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, |
960 | NameStr, InsertAtEnd); |
961 | } |
962 | |
963 | /// Create an "inbounds" getelementptr. See the documentation for the |
964 | /// "inbounds" flag in LangRef.html for details. |
965 | static GetElementPtrInst *CreateInBounds(Value *Ptr, |
966 | ArrayRef<Value *> IdxList, |
967 | const Twine &NameStr = "", |
968 | Instruction *InsertBefore = nullptr){ |
969 | return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertBefore); |
970 | } |
971 | |
972 | static GetElementPtrInst * |
973 | CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList, |
974 | const Twine &NameStr = "", |
975 | Instruction *InsertBefore = nullptr) { |
976 | GetElementPtrInst *GEP = |
977 | Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore); |
978 | GEP->setIsInBounds(true); |
979 | return GEP; |
980 | } |
981 | |
982 | static GetElementPtrInst *CreateInBounds(Value *Ptr, |
983 | ArrayRef<Value *> IdxList, |
984 | const Twine &NameStr, |
985 | BasicBlock *InsertAtEnd) { |
986 | return CreateInBounds(nullptr, Ptr, IdxList, NameStr, InsertAtEnd); |
987 | } |
988 | |
989 | static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr, |
990 | ArrayRef<Value *> IdxList, |
991 | const Twine &NameStr, |
992 | BasicBlock *InsertAtEnd) { |
993 | GetElementPtrInst *GEP = |
994 | Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd); |
995 | GEP->setIsInBounds(true); |
996 | return GEP; |
997 | } |
998 | |
999 | /// Transparently provide more efficient getOperand methods. |
1000 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value) |
1001 | |
1002 | Type *getSourceElementType() const { return SourceElementType; } |
1003 | |
1004 | void setSourceElementType(Type *Ty) { SourceElementType = Ty; } |
1005 | void setResultElementType(Type *Ty) { ResultElementType = Ty; } |
1006 | |
1007 | Type *getResultElementType() const { |
1008 | assert(ResultElementType == |
1009 | cast<PointerType>(getType()->getScalarType())->getElementType()); |
1010 | return ResultElementType; |
1011 | } |
1012 | |
1013 | /// Returns the address space of this instruction's pointer type. |
1014 | unsigned getAddressSpace() const { |
1015 | // Note that this is always the same as the pointer operand's address space |
1016 | // and that is cheaper to compute, so cheat here. |
1017 | return getPointerAddressSpace(); |
1018 | } |
1019 | |
1020 | /// Returns the result type of a getelementptr with the given source |
1021 | /// element type and indexes. |
1022 | /// |
1023 | /// Null is returned if the indices are invalid for the specified |
1024 | /// source element type. |
1025 | static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList); |
1026 | static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList); |
1027 | static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList); |
1028 | |
1029 | /// Return the type of the element at the given index of an indexable |
1030 | /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})". |
1031 | /// |
1032 | /// Returns null if the type can't be indexed, or the given index is not |
1033 | /// legal for the given type. |
1034 | static Type *getTypeAtIndex(Type *Ty, Value *Idx); |
1035 | static Type *getTypeAtIndex(Type *Ty, uint64_t Idx); |
1036 | |
1037 | inline op_iterator idx_begin() { return op_begin()+1; } |
1038 | inline const_op_iterator idx_begin() const { return op_begin()+1; } |
1039 | inline op_iterator idx_end() { return op_end(); } |
1040 | inline const_op_iterator idx_end() const { return op_end(); } |
1041 | |
1042 | inline iterator_range<op_iterator> indices() { |
1043 | return make_range(idx_begin(), idx_end()); |
1044 | } |
1045 | |
1046 | inline iterator_range<const_op_iterator> indices() const { |
1047 | return make_range(idx_begin(), idx_end()); |
1048 | } |
1049 | |
1050 | Value *getPointerOperand() { |
1051 | return getOperand(0); |
1052 | } |
1053 | const Value *getPointerOperand() const { |
1054 | return getOperand(0); |
1055 | } |
1056 | static unsigned getPointerOperandIndex() { |
1057 | return 0U; // get index for modifying correct operand. |
1058 | } |
1059 | |
1060 | /// Method to return the type of the pointer |
1061 | /// operand, which may be a vector of pointers. |
1062 | Type *getPointerOperandType() const { |
1063 | return getPointerOperand()->getType(); |
1064 | } |
1065 | |
1066 | /// Returns the address space of the pointer operand. |
1067 | unsigned getPointerAddressSpace() const { |
1068 | return getPointerOperandType()->getPointerAddressSpace(); |
1069 | } |
1070 | |
1071 | /// Returns the pointer type returned by the GEP |
1072 | /// instruction, which may be a vector of pointers. |
1073 | static Type *getGEPReturnType(Type *ElTy, Value *Ptr, |
1074 | ArrayRef<Value *> IdxList) { |
1075 | Type *PtrTy = PointerType::get(checkGEPType(getIndexedType(ElTy, IdxList)), |
1076 | Ptr->getType()->getPointerAddressSpace()); |
1077 | // Vector GEP |
1078 | if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) { |
1079 | ElementCount EltCount = PtrVTy->getElementCount(); |
1080 | return VectorType::get(PtrTy, EltCount); |
1081 | } |
1082 | for (Value *Index : IdxList) |
1083 | if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) { |
1084 | ElementCount EltCount = IndexVTy->getElementCount(); |
1085 | return VectorType::get(PtrTy, EltCount); |
1086 | } |
1087 | // Scalar GEP |
1088 | return PtrTy; |
1089 | } |
1090 | |
1091 | unsigned getNumIndices() const { // Note: always non-negative |
1092 | return getNumOperands() - 1; |
1093 | } |
1094 | |
1095 | bool hasIndices() const { |
1096 | return getNumOperands() > 1; |
1097 | } |
1098 | |
1099 | /// Return true if all of the indices of this GEP are |
1100 | /// zeros. If so, the result pointer and the first operand have the same |
1101 | /// value, just potentially different types. |
1102 | bool hasAllZeroIndices() const; |
1103 | |
1104 | /// Return true if all of the indices of this GEP are |
1105 | /// constant integers. If so, the result pointer and the first operand have |
1106 | /// a constant offset between them. |
1107 | bool hasAllConstantIndices() const; |
1108 | |
1109 | /// Set or clear the inbounds flag on this GEP instruction. |
1110 | /// See LangRef.html for the meaning of inbounds on a getelementptr. |
1111 | void setIsInBounds(bool b = true); |
1112 | |
1113 | /// Determine whether the GEP has the inbounds flag. |
1114 | bool isInBounds() const; |
1115 | |
1116 | /// Accumulate the constant address offset of this GEP if possible. |
1117 | /// |
1118 | /// This routine accepts an APInt into which it will accumulate the constant |
1119 | /// offset of this GEP if the GEP is in fact constant. If the GEP is not |
1120 | /// all-constant, it returns false and the value of the offset APInt is |
1121 | /// undefined (it is *not* preserved!). The APInt passed into this routine |
1122 | /// must be at least as wide as the IntPtr type for the address space of |
1123 | /// the base GEP pointer. |
1124 | bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const; |
1125 | |
1126 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1127 | static bool classof(const Instruction *I) { |
1128 | return (I->getOpcode() == Instruction::GetElementPtr); |
1129 | } |
1130 | static bool classof(const Value *V) { |
1131 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1132 | } |
1133 | }; |
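A minimal sketch combining the CreateInBounds factory with accumulateConstantOffset, both declared above; Ctx, Base (an assumed i32* value), DL and InsertPt are assumptions:

    Value *Idx = ConstantInt::get(Type::getInt64Ty(Ctx), 4);
    auto *GEP = GetElementPtrInst::CreateInBounds(
        Type::getInt32Ty(Ctx), Base, {Idx}, "elt", /*InsertBefore=*/InsertPt);
    APInt Offset(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
    if (GEP->accumulateConstantOffset(DL, Offset))
      assert(Offset == 16 && "4 x i32 = 16 bytes"); // all indices constant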
1134 | |
1135 | template <> |
1136 | struct OperandTraits<GetElementPtrInst> : |
1137 | public VariadicOperandTraits<GetElementPtrInst, 1> { |
1138 | }; |
1139 | |
1140 | GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, |
1141 | ArrayRef<Value *> IdxList, unsigned Values, |
1142 | const Twine &NameStr, |
1143 | Instruction *InsertBefore) |
1144 | : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, |
1145 | OperandTraits<GetElementPtrInst>::op_end(this) - Values, |
1146 | Values, InsertBefore), |
1147 | SourceElementType(PointeeType), |
1148 | ResultElementType(getIndexedType(PointeeType, IdxList)) { |
1149 | assert(ResultElementType == |
1150 | cast<PointerType>(getType()->getScalarType())->getElementType()); |
1151 | init(Ptr, IdxList, NameStr); |
1152 | } |
1153 | |
1154 | GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr, |
1155 | ArrayRef<Value *> IdxList, unsigned Values, |
1156 | const Twine &NameStr, |
1157 | BasicBlock *InsertAtEnd) |
1158 | : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr, |
1159 | OperandTraits<GetElementPtrInst>::op_end(this) - Values, |
1160 | Values, InsertAtEnd), |
1161 | SourceElementType(PointeeType), |
1162 | ResultElementType(getIndexedType(PointeeType, IdxList)) { |
1163 | assert(ResultElementType == |
1164 | cast<PointerType>(getType()->getScalarType())->getElementType()); |
1165 | init(Ptr, IdxList, NameStr); |
1166 | } |
1167 | |
1168 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value) |
1169 | |
1170 | //===----------------------------------------------------------------------===// |
1171 | // ICmpInst Class |
1172 | //===----------------------------------------------------------------------===// |
1173 | |
1174 | /// This instruction compares its operands according to the predicate given |
1175 | /// to the constructor. It only operates on integers or pointers. The operands |
1176 | /// must be identical types. |
1177 | /// Represent an integer comparison operator. |
1178 | class ICmpInst: public CmpInst { |
1179 | void AssertOK() { |
1180 | assert(isIntPredicate() && |
1181 | "Invalid ICmp predicate value"); |
1182 | assert(getOperand(0)->getType() == getOperand(1)->getType() && |
1183 | "Both operands to ICmp instruction are not of the same type!"); |
1184 | // Check that the operands are the right type |
1185 | assert((getOperand(0)->getType()->isIntOrIntVectorTy() || |
1186 | getOperand(0)->getType()->isPtrOrPtrVectorTy()) && |
1187 | "Invalid operand types for ICmp instruction"); |
1188 | } |
1189 | |
1190 | protected: |
1191 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1192 | friend class Instruction; |
1193 | |
1194 | /// Clone an identical ICmpInst |
1195 | ICmpInst *cloneImpl() const; |
1196 | |
1197 | public: |
1198 | /// Constructor with insert-before-instruction semantics. |
1199 | ICmpInst( |
1200 | Instruction *InsertBefore, ///< Where to insert |
1201 | Predicate pred, ///< The predicate to use for the comparison |
1202 | Value *LHS, ///< The left-hand-side of the expression |
1203 | Value *RHS, ///< The right-hand-side of the expression |
1204 | const Twine &NameStr = "" ///< Name of the instruction |
1205 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1206 | Instruction::ICmp, pred, LHS, RHS, NameStr, |
1207 | InsertBefore) { |
1208 | #ifndef NDEBUG |
1209 | AssertOK(); |
1210 | #endif |
1211 | } |
1212 | |
1213 | /// Constructor with insert-at-end semantics. |
1214 | ICmpInst( |
1215 | BasicBlock &InsertAtEnd, ///< Block to insert into. |
1216 | Predicate pred, ///< The predicate to use for the comparison |
1217 | Value *LHS, ///< The left-hand-side of the expression |
1218 | Value *RHS, ///< The right-hand-side of the expression |
1219 | const Twine &NameStr = "" ///< Name of the instruction |
1220 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1221 | Instruction::ICmp, pred, LHS, RHS, NameStr, |
1222 | &InsertAtEnd) { |
1223 | #ifndef NDEBUG |
1224 | AssertOK(); |
1225 | #endif |
1226 | } |
1227 | |
1228 | /// Constructor with no-insertion semantics |
1229 | ICmpInst( |
1230 | Predicate pred, ///< The predicate to use for the comparison |
1231 | Value *LHS, ///< The left-hand-side of the expression |
1232 | Value *RHS, ///< The right-hand-side of the expression |
1233 | const Twine &NameStr = "" ///< Name of the instruction |
1234 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1235 | Instruction::ICmp, pred, LHS, RHS, NameStr) { |
1236 | #ifndef NDEBUG |
1237 | AssertOK(); |
1238 | #endif |
1239 | } |
1240 | |
1241 | /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc. |
1242 | /// @returns the predicate that would be the result if the operand were |
1243 | /// regarded as signed. |
1244 | /// Return the signed version of the predicate |
1245 | Predicate getSignedPredicate() const { |
1246 | return getSignedPredicate(getPredicate()); |
1247 | } |
1248 | |
1249 | /// This is a static version that you can use without an instruction. |
1250 | /// Return the signed version of the predicate. |
1251 | static Predicate getSignedPredicate(Predicate pred); |
1252 | |
1253 | /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc. |
1254 | /// @returns the predicate that would be the result if the operand were |
1255 | /// regarded as unsigned. |
1256 | /// Return the unsigned version of the predicate |
1257 | Predicate getUnsignedPredicate() const { |
1258 | return getUnsignedPredicate(getPredicate()); |
1259 | } |
1260 | |
1261 | /// This is a static version that you can use without an instruction. |
1262 | /// Return the unsigned version of the predicate. |
1263 | static Predicate getUnsignedPredicate(Predicate pred); |
1264 | |
1265 | /// Return true if this predicate is either EQ or NE. This also |
1266 | /// tests for commutativity. |
1267 | static bool isEquality(Predicate P) { |
1268 | return P == ICMP_EQ || P == ICMP_NE; |
1269 | } |
1270 | |
1271 | /// Return true if this predicate is either EQ or NE. This also |
1272 | /// tests for commutativity. |
1273 | bool isEquality() const { |
1274 | return isEquality(getPredicate()); |
1275 | } |
1276 | |
1277 | /// @returns true if the predicate of this ICmpInst is commutative |
1278 | /// Determine if this relation is commutative. |
1279 | bool isCommutative() const { return isEquality(); } |
1280 | |
1281 | /// Return true if the predicate is relational (not EQ or NE). |
1282 | /// |
1283 | bool isRelational() const { |
1284 | return !isEquality(); |
1285 | } |
1286 | |
1287 | /// Return true if the predicate is relational (not EQ or NE). |
1288 | /// |
1289 | static bool isRelational(Predicate P) { |
1290 | return !isEquality(P); |
1291 | } |
1292 | |
1293 | /// Return true if the predicate is SGT or UGT. |
1294 | /// |
1295 | static bool isGT(Predicate P) { |
1296 | return P == ICMP_SGT || P == ICMP_UGT; |
1297 | } |
1298 | |
1299 | /// Return true if the predicate is SLT or ULT. |
1300 | /// |
1301 | static bool isLT(Predicate P) { |
1302 | return P == ICMP_SLT || P == ICMP_ULT; |
1303 | } |
1304 | |
1305 | /// Return true if the predicate is SGE or UGE. |
1306 | /// |
1307 | static bool isGE(Predicate P) { |
1308 | return P == ICMP_SGE || P == ICMP_UGE; |
1309 | } |
1310 | |
1311 | /// Return true if the predicate is SLE or ULE. |
1312 | /// |
1313 | static bool isLE(Predicate P) { |
1314 | return P == ICMP_SLE || P == ICMP_ULE; |
1315 | } |
1316 | |
1317 | /// Exchange the two operands to this instruction in such a way that it does |
1318 | /// not modify the semantics of the instruction. The predicate value may be |
1319 | /// changed to retain the same result if the predicate is order dependent |
1320 | /// (e.g. ult). |
1321 | /// Swap operands and adjust predicate. |
1322 | void swapOperands() { |
1323 | setPredicate(getSwappedPredicate()); |
1324 | Op<0>().swap(Op<1>()); |
1325 | } |
1326 | |
1327 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
1328 | static bool classof(const Instruction *I) { |
1329 | return I->getOpcode() == Instruction::ICmp; |
1330 | } |
1331 | static bool classof(const Value *V) { |
1332 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1333 | } |
1334 | }; |
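A short sketch of the predicate helpers above in use; A and B are assumed integer Values:

    auto *Cmp = new ICmpInst(ICmpInst::ICMP_ULT, A, B, "cmp");
    assert(ICmpInst::isLT(Cmp->getPredicate()) && Cmp->isRelational());
    Cmp->swapOperands(); // predicate flips to ICMP_UGT; the result is unchanged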
1335 | |
1336 | //===----------------------------------------------------------------------===// |
1337 | // FCmpInst Class |
1338 | //===----------------------------------------------------------------------===// |
1339 | |
1340 | /// This instruction compares its operands according to the predicate given |
1341 | /// to the constructor. It only operates on floating point values or packed |
1342 | /// vectors of floating point values. The operands must be identical types. |
1343 | /// Represents a floating point comparison operator. |
1344 | class FCmpInst: public CmpInst { |
1345 | void AssertOK() { |
1346 | assert(isFPPredicate() && "Invalid FCmp predicate value"); |
1347 | assert(getOperand(0)->getType() == getOperand(1)->getType() && |
1348 | "Both operands to FCmp instruction are not of the same type!"); |
1349 | // Check that the operands are the right type |
1350 | assert(getOperand(0)->getType()->isFPOrFPVectorTy() && |
1351 | "Invalid operand types for FCmp instruction"); |
1352 | } |
1353 | |
1354 | protected: |
1355 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1356 | friend class Instruction; |
1357 | |
1358 | /// Clone an identical FCmpInst |
1359 | FCmpInst *cloneImpl() const; |
1360 | |
1361 | public: |
1362 | /// Constructor with insert-before-instruction semantics. |
1363 | FCmpInst( |
1364 | Instruction *InsertBefore, ///< Where to insert |
1365 | Predicate pred, ///< The predicate to use for the comparison |
1366 | Value *LHS, ///< The left-hand-side of the expression |
1367 | Value *RHS, ///< The right-hand-side of the expression |
1368 | const Twine &NameStr = "" ///< Name of the instruction |
1369 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1370 | Instruction::FCmp, pred, LHS, RHS, NameStr, |
1371 | InsertBefore) { |
1372 | AssertOK(); |
1373 | } |
1374 | |
1375 | /// Constructor with insert-at-end semantics. |
1376 | FCmpInst( |
1377 | BasicBlock &InsertAtEnd, ///< Block to insert into. |
1378 | Predicate pred, ///< The predicate to use for the comparison |
1379 | Value *LHS, ///< The left-hand-side of the expression |
1380 | Value *RHS, ///< The right-hand-side of the expression |
1381 | const Twine &NameStr = "" ///< Name of the instruction |
1382 | ) : CmpInst(makeCmpResultType(LHS->getType()), |
1383 | Instruction::FCmp, pred, LHS, RHS, NameStr, |
1384 | &InsertAtEnd) { |
1385 | AssertOK(); |
1386 | } |
1387 | |
1388 | /// Constructor with no-insertion semantics |
1389 | FCmpInst( |
1390 | Predicate Pred, ///< The predicate to use for the comparison |
1391 | Value *LHS, ///< The left-hand-side of the expression |
1392 | Value *RHS, ///< The right-hand-side of the expression |
1393 | const Twine &NameStr = "", ///< Name of the instruction |
1394 | Instruction *FlagsSource = nullptr |
1395 | ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS, |
1396 | RHS, NameStr, nullptr, FlagsSource) { |
1397 | AssertOK(); |
1398 | } |
1399 | |
1400 | /// @returns true if the predicate of this instruction is EQ or NE. |
1401 | /// Determine if this is an equality predicate. |
1402 | static bool isEquality(Predicate Pred) { |
1403 | return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ || |
1404 | Pred == FCMP_UNE; |
1405 | } |
1406 | |
1407 | /// @returns true if the predicate of this instruction is EQ or NE. |
1408 | /// Determine if this is an equality predicate. |
1409 | bool isEquality() const { return isEquality(getPredicate()); } |
1410 | |
1411 | /// @returns true if the predicate of this instruction is commutative. |
1412 | /// Determine if this is a commutative predicate. |
1413 | bool isCommutative() const { |
1414 | return isEquality() || |
1415 | getPredicate() == FCMP_FALSE || |
1416 | getPredicate() == FCMP_TRUE || |
1417 | getPredicate() == FCMP_ORD || |
1418 | getPredicate() == FCMP_UNO; |
1419 | } |
1420 | |
1421 | /// @returns true if the predicate is relational (not EQ or NE). |
1422 | /// Determine if this is a relational predicate. |
1423 | bool isRelational() const { return !isEquality(); } |
1424 | |
1425 | /// Exchange the two operands to this instruction in such a way that it does |
1426 | /// not modify the semantics of the instruction. The predicate value may be |
1427 | /// changed to retain the same result if the predicate is order dependent |
1428 | /// (e.g. ult). |
1429 | /// Swap operands and adjust predicate. |
1430 | void swapOperands() { |
1431 | setPredicate(getSwappedPredicate()); |
1432 | Op<0>().swap(Op<1>()); |
1433 | } |
1434 | |
1435 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
1436 | static bool classof(const Instruction *I) { |
1437 | return I->getOpcode() == Instruction::FCmp; |
1438 | } |
1439 | static bool classof(const Value *V) { |
1440 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1441 | } |
1442 | }; |
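The floating-point counterpart; X and Y are assumed float Values. FCMP_OLT is relational, whereas FCMP_OEQ or FCMP_UNO would satisfy isEquality() or isCommutative() respectively:

    auto *FC = new FCmpInst(FCmpInst::FCMP_OLT, X, Y, "fcmp");
    assert(FC->isRelational() && !FC->isCommutative());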
1443 | |
1444 | //===----------------------------------------------------------------------===// |
1445 | /// This class represents a function call, abstracting a target |
1446 | /// machine's calling convention. This class uses the low bit of the SubClassData |
1447 | /// field to indicate whether or not this is a tail call. The rest of the bits |
1448 | /// hold the calling convention of the call. |
1449 | /// |
1450 | class CallInst : public CallBase { |
1451 | CallInst(const CallInst &CI); |
1452 | |
1453 | /// Construct a CallInst given a range of arguments, inserting |
1454 | /// immediately before the given instruction. |
1455 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1456 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
1457 | Instruction *InsertBefore); |
1458 | |
1459 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1460 | const Twine &NameStr, Instruction *InsertBefore) |
1461 | : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {} |
1462 | |
1463 | /// Construct a CallInst given a range of arguments, appending |
1464 | /// to the end of the given basic block. |
1465 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1466 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
1467 | BasicBlock *InsertAtEnd); |
1468 | |
1469 | explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr, |
1470 | Instruction *InsertBefore); |
1471 | |
1472 | CallInst(FunctionType *ty, Value *F, const Twine &NameStr, |
1473 | BasicBlock *InsertAtEnd); |
1474 | |
1475 | void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args, |
1476 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); |
1477 | void init(FunctionType *FTy, Value *Func, const Twine &NameStr); |
1478 | |
1479 | /// Compute the number of operands to allocate. |
1480 | static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { |
1481 | // We need one operand for the called function, plus the input operand |
1482 | // counts provided. |
1483 | return 1 + NumArgs + NumBundleInputs; |
1484 | } |
1485 | |
1486 | protected: |
1487 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1488 | friend class Instruction; |
1489 | |
1490 | CallInst *cloneImpl() const; |
1491 | |
1492 | public: |
1493 | static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "", |
1494 | Instruction *InsertBefore = nullptr) { |
1495 | return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore); |
1496 | } |
1497 | |
1498 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1499 | const Twine &NameStr, |
1500 | Instruction *InsertBefore = nullptr) { |
1501 | return new (ComputeNumOperands(Args.size())) |
1502 | CallInst(Ty, Func, Args, None, NameStr, InsertBefore); |
1503 | } |
1504 | |
1505 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1506 | ArrayRef<OperandBundleDef> Bundles = None, |
1507 | const Twine &NameStr = "", |
1508 | Instruction *InsertBefore = nullptr) { |
1509 | const int NumOperands = |
1510 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
1511 | const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
1512 | |
1513 | return new (NumOperands, DescriptorBytes) |
1514 | CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore); |
1515 | } |
1516 | |
1517 | static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr, |
1518 | BasicBlock *InsertAtEnd) { |
1519 | return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd); |
1520 | } |
1521 | |
1522 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1523 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1524 | return new (ComputeNumOperands(Args.size())) |
1525 | CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd); |
1526 | } |
1527 | |
1528 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1529 | ArrayRef<OperandBundleDef> Bundles, |
1530 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1531 | const int NumOperands = |
1532 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
1533 | const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
1534 | |
1535 | return new (NumOperands, DescriptorBytes) |
1536 | CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd); |
1537 | } |
1538 | |
1539 | static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "", |
1540 | Instruction *InsertBefore = nullptr) { |
1541 | return Create(Func.getFunctionType(), Func.getCallee(), NameStr, |
1542 | InsertBefore); |
1543 | } |
1544 | |
1545 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, |
1546 | ArrayRef<OperandBundleDef> Bundles = None, |
1547 | const Twine &NameStr = "", |
1548 | Instruction *InsertBefore = nullptr) { |
1549 | return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, |
1550 | NameStr, InsertBefore); |
1551 | } |
1552 | |
1553 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, |
1554 | const Twine &NameStr, |
1555 | Instruction *InsertBefore = nullptr) { |
1556 | return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, |
1557 | InsertBefore); |
1558 | } |
1559 | |
1560 | static CallInst *Create(FunctionCallee Func, const Twine &NameStr, |
1561 | BasicBlock *InsertAtEnd) { |
1562 | return Create(Func.getFunctionType(), Func.getCallee(), NameStr, |
1563 | InsertAtEnd); |
1564 | } |
1565 | |
1566 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, |
1567 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1568 | return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, |
1569 | InsertAtEnd); |
1570 | } |
1571 | |
1572 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, |
1573 | ArrayRef<OperandBundleDef> Bundles, |
1574 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
1575 | return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, |
1576 | NameStr, InsertAtEnd); |
1577 | } |
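// -- Editor's sketch (not part of the original header) --
// Typical use of the FunctionCallee overloads above. Assumes a Module *M and
// an IRBuilder positioned at the insertion point; "puts" and the helper name
// are illustrative.
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"
using namespace llvm;

CallInst *emitPuts(Module *M, IRBuilder<> &B, Value *Str) {
  // getOrInsertFunction hands back a FunctionCallee, which carries both the
  // callee Value and its FunctionType -- exactly what Create() needs.
  FunctionCallee Puts =
      M->getOrInsertFunction("puts", B.getInt32Ty(), Str->getType());
  CallInst *CI = CallInst::Create(Puts, {Str}, "calltmp");
  B.Insert(CI);
  return CI;
}
// -- end editor's sketch --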
1578 | |
1579 | /// Create a clone of \p CI with a different set of operand bundles and |
1580 | /// insert it before \p InsertPt. |
1581 | /// |
1582 | /// The returned call instruction is identical to \p CI in every way except that |
1583 | /// the operand bundles for the new instruction are set to the operand bundles |
1584 | /// in \p Bundles. |
1585 | static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles, |
1586 | Instruction *InsertPt = nullptr); |
1587 | |
1588 | /// Create a clone of \p CI with a different set of operand bundles and |
1589 | /// insert it before \p InsertPt. |
1590 | /// |
1591 | /// The returned call instruction is identical to \p CI in every way except that |
1592 | /// the operand bundle for the new instruction is set to the operand bundle |
1593 | /// in \p Bundle. |
1594 | static CallInst *CreateWithReplacedBundle(CallInst *CI, |
1595 | OperandBundleDef Bundle, |
1596 | Instruction *InsertPt = nullptr); |
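// -- Editor's sketch (not part of the original header) --
// Replacing a call's "deopt" bundle via CreateWithReplacedBundle(). This
// assumes CI already carries a "deopt" bundle; NewState and the function
// name are illustrative.
#include "llvm/IR/Instructions.h"
using namespace llvm;

CallInst *replaceDeoptState(CallInst *CI, ArrayRef<Value *> NewState) {
  OperandBundleDef NewDeopt("deopt", NewState);
  // The clone is identical to CI except for the swapped-in bundle; inserting
  // it before CI lets the caller RAUW and erase the old call afterwards.
  return CallInst::CreateWithReplacedBundle(CI, NewDeopt, CI);
}
// -- end editor's sketch --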
1597 | |
1598 | /// Generate the IR for a call to malloc: |
1599 | /// 1. Compute the malloc call's argument as the specified type's size, |
1600 | /// possibly multiplied by the array size if the array size is not |
1601 | /// constant 1. |
1602 | /// 2. Call malloc with that argument. |
1603 | /// 3. Bitcast the result of the malloc call to the specified type. |
1604 | static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, |
1605 | Type *AllocTy, Value *AllocSize, |
1606 | Value *ArraySize = nullptr, |
1607 | Function *MallocF = nullptr, |
1608 | const Twine &Name = ""); |
1609 | static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, |
1610 | Type *AllocTy, Value *AllocSize, |
1611 | Value *ArraySize = nullptr, |
1612 | Function *MallocF = nullptr, |
1613 | const Twine &Name = ""); |
1614 | static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, |
1615 | Type *AllocTy, Value *AllocSize, |
1616 | Value *ArraySize = nullptr, |
1617 | ArrayRef<OperandBundleDef> Bundles = None, |
1618 | Function *MallocF = nullptr, |
1619 | const Twine &Name = ""); |
1620 | static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, |
1621 | Type *AllocTy, Value *AllocSize, |
1622 | Value *ArraySize = nullptr, |
1623 | ArrayRef<OperandBundleDef> Bundles = None, |
1624 | Function *MallocF = nullptr, |
1625 | const Twine &Name = ""); |
1626 | /// Generate the IR for a call to the builtin free function. |
1627 | static Instruction *CreateFree(Value *Source, Instruction *InsertBefore); |
1628 | static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd); |
1629 | static Instruction *CreateFree(Value *Source, |
1630 | ArrayRef<OperandBundleDef> Bundles, |
1631 | Instruction *InsertBefore); |
1632 | static Instruction *CreateFree(Value *Source, |
1633 | ArrayRef<OperandBundleDef> Bundles, |
1634 | BasicBlock *InsertAtEnd); |
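// -- Editor's sketch (not part of the original header) --
// Using CreateMalloc for a single object of type FooTy, following steps 1-3
// in the comment above. Assumes a DataLayout DL describing the target; names
// are illustrative.
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

Instruction *emitHeapObject(Type *FooTy, const DataLayout &DL,
                            Instruction *InsertBefore) {
  Type *IntPtrTy = DL.getIntPtrType(FooTy->getContext());
  Value *Size = ConstantInt::get(IntPtrTy, DL.getTypeAllocSize(FooTy));
  // Emits a call to @malloc with the computed size, then a bitcast of the
  // returned i8* to FooTy's pointer type.
  return CallInst::CreateMalloc(InsertBefore, IntPtrTy, FooTy, Size,
                                /*ArraySize=*/nullptr, /*MallocF=*/nullptr,
                                "obj");
}
// -- end editor's sketch --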
1635 | |
1636 | // Note that 'musttail' implies 'tail'. |
1637 | enum TailCallKind : unsigned { |
1638 | TCK_None = 0, |
1639 | TCK_Tail = 1, |
1640 | TCK_MustTail = 2, |
1641 | TCK_NoTail = 3, |
1642 | TCK_LAST = TCK_NoTail |
1643 | }; |
1644 | |
1645 | using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>; |
1646 | static_assert( |
1647 | Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(), |
1648 | "Bitfields must be contiguous"); |
1649 | |
1650 | TailCallKind getTailCallKind() const { |
1651 | return getSubclassData<TailCallKindField>(); |
1652 | } |
1653 | |
1654 | bool isTailCall() const { |
1655 | TailCallKind Kind = getTailCallKind(); |
1656 | return Kind == TCK_Tail || Kind == TCK_MustTail; |
1657 | } |
1658 | |
1659 | bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; } |
1660 | |
1661 | bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; } |
1662 | |
1663 | void setTailCallKind(TailCallKind TCK) { |
1664 | setSubclassData<TailCallKindField>(TCK); |
1665 | } |
1666 | |
1667 | void setTailCall(bool IsTc = true) { |
1668 | setTailCallKind(IsTc ? TCK_Tail : TCK_None); |
1669 | } |
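// -- Editor's sketch (not part of the original header) --
// How the tail-call accessors above compose; CI is any CallInst.
#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;

void markMustTail(CallInst *CI) {
  CI->setTailCallKind(CallInst::TCK_MustTail);
  // 'musttail' implies 'tail', so both predicates hold afterwards.
  assert(CI->isMustTailCall() && CI->isTailCall());
  assert(!CI->isNoTailCall());
}
// -- end editor's sketch --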
1670 | |
1671 | /// Return true if the call can return twice. |
1672 | bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); } |
1673 | void setCanReturnTwice() { |
1674 | addAttribute(AttributeList::FunctionIndex, Attribute::ReturnsTwice); |
1675 | } |
1676 | |
1677 | // Methods to support type inquiry through isa, cast, and dyn_cast: |
1678 | static bool classof(const Instruction *I) { |
1679 | return I->getOpcode() == Instruction::Call; |
1680 | } |
1681 | static bool classof(const Value *V) { |
1682 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1683 | } |
1684 | |
1685 | /// Updates profile metadata by scaling it by \p S / \p T. |
1686 | void updateProfWeight(uint64_t S, uint64_t T); |
1687 | |
1688 | private: |
1689 | // Shadow Instruction::setSubclassData with a private forwarding method |
1690 | // so that subclasses cannot accidentally use it. |
1691 | template <typename Bitfield> |
1692 | void setSubclassData(typename Bitfield::Type Value) { |
1693 | Instruction::setSubclassData<Bitfield>(Value); |
1694 | } |
1695 | }; |
1696 | |
1697 | CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1698 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
1699 | BasicBlock *InsertAtEnd) |
1700 | : CallBase(Ty->getReturnType(), Instruction::Call, |
1701 | OperandTraits<CallBase>::op_end(this) - |
1702 | (Args.size() + CountBundleInputs(Bundles) + 1), |
1703 | unsigned(Args.size() + CountBundleInputs(Bundles) + 1), |
1704 | InsertAtEnd) { |
1705 | init(Ty, Func, Args, Bundles, NameStr); |
1706 | } |
1707 | |
1708 | CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, |
1709 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, |
1710 | Instruction *InsertBefore) |
1711 | : CallBase(Ty->getReturnType(), Instruction::Call, |
1712 | OperandTraits<CallBase>::op_end(this) - |
1713 | (Args.size() + CountBundleInputs(Bundles) + 1), |
1714 | unsigned(Args.size() + CountBundleInputs(Bundles) + 1), |
1715 | InsertBefore) { |
1716 | init(Ty, Func, Args, Bundles, NameStr); |
1717 | } |
1718 | |
1719 | //===----------------------------------------------------------------------===// |
1720 | // SelectInst Class |
1721 | //===----------------------------------------------------------------------===// |
1722 | |
1723 | /// This class represents the LLVM 'select' instruction. |
1724 | /// |
1725 | class SelectInst : public Instruction { |
1726 | SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, |
1727 | Instruction *InsertBefore) |
1728 | : Instruction(S1->getType(), Instruction::Select, |
1729 | &Op<0>(), 3, InsertBefore) { |
1730 | init(C, S1, S2); |
1731 | setName(NameStr); |
1732 | } |
1733 | |
1734 | SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, |
1735 | BasicBlock *InsertAtEnd) |
1736 | : Instruction(S1->getType(), Instruction::Select, |
1737 | &Op<0>(), 3, InsertAtEnd) { |
1738 | init(C, S1, S2); |
1739 | setName(NameStr); |
1740 | } |
1741 | |
1742 | void init(Value *C, Value *S1, Value *S2) { |
1743 | assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select"); |
1744 | Op<0>() = C; |
1745 | Op<1>() = S1; |
1746 | Op<2>() = S2; |
1747 | } |
1748 | |
1749 | protected: |
1750 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1751 | friend class Instruction; |
1752 | |
1753 | SelectInst *cloneImpl() const; |
1754 | |
1755 | public: |
1756 | static SelectInst *Create(Value *C, Value *S1, Value *S2, |
1757 | const Twine &NameStr = "", |
1758 | Instruction *InsertBefore = nullptr, |
1759 | Instruction *MDFrom = nullptr) { |
1760 | SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore); |
1761 | if (MDFrom) |
1762 | Sel->copyMetadata(*MDFrom); |
1763 | return Sel; |
1764 | } |
1765 | |
1766 | static SelectInst *Create(Value *C, Value *S1, Value *S2, |
1767 | const Twine &NameStr, |
1768 | BasicBlock *InsertAtEnd) { |
1769 | return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd); |
1770 | } |
1771 | |
1772 | const Value *getCondition() const { return Op<0>(); } |
1773 | const Value *getTrueValue() const { return Op<1>(); } |
1774 | const Value *getFalseValue() const { return Op<2>(); } |
1775 | Value *getCondition() { return Op<0>(); } |
1776 | Value *getTrueValue() { return Op<1>(); } |
1777 | Value *getFalseValue() { return Op<2>(); } |
1778 | |
1779 | void setCondition(Value *V) { Op<0>() = V; } |
1780 | void setTrueValue(Value *V) { Op<1>() = V; } |
1781 | void setFalseValue(Value *V) { Op<2>() = V; } |
1782 | |
1783 | /// Swap the true and false values of the select instruction. |
1784 | /// This doesn't swap prof metadata. |
1785 | void swapValues() { Op<1>().swap(Op<2>()); } |
1786 | |
1787 | /// Return a string if the specified operands are invalid |
1788 | /// for a select operation, otherwise return null. |
1789 | static const char *areInvalidOperands(Value *Cond, Value *True, Value *False); |
1790 | |
1791 | /// Transparently provide more efficient getOperand methods. |
1792 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
1793 | |
1794 | OtherOps getOpcode() const { |
1795 | return static_cast<OtherOps>(Instruction::getOpcode()); |
1796 | } |
1797 | |
1798 | // Methods to support type inquiry through isa, cast, and dyn_cast: |
1799 | static bool classof(const Instruction *I) { |
1800 | return I->getOpcode() == Instruction::Select; |
1801 | } |
1802 | static bool classof(const Value *V) { |
1803 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1804 | } |
1805 | }; |
1806 | |
1807 | template <> |
1808 | struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> { |
1809 | }; |
1810 | |
1811 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value) |
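// -- Editor's sketch (not part of the original header) --
// Building the equivalent of `X > Y ? X : Y` with SelectInst::Create();
// assumes X and Y are integer Values of the same type, names illustrative.
#include "llvm/IR/IRBuilder.h"
#include <cassert>
using namespace llvm;

Value *emitSMax(IRBuilder<> &B, Value *X, Value *Y) {
  Value *Cond = B.CreateICmpSGT(X, Y, "cmp");
  // areInvalidOperands() returns null when (Cond, X, Y) form a legal select.
  assert(!SelectInst::areInvalidOperands(Cond, X, Y));
  return B.Insert(SelectInst::Create(Cond, X, Y, "smax"));
}
// -- end editor's sketch --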
1812 | |
1813 | //===----------------------------------------------------------------------===// |
1814 | // VAArgInst Class |
1815 | //===----------------------------------------------------------------------===// |
1816 | |
1817 | /// This class represents the va_arg LLVM instruction, which returns an |
1818 | /// argument of the specified type given a va_list, and increments that list. |
1819 | /// |
1820 | class VAArgInst : public UnaryInstruction { |
1821 | protected: |
1822 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1823 | friend class Instruction; |
1824 | |
1825 | VAArgInst *cloneImpl() const; |
1826 | |
1827 | public: |
1828 | VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "", |
1829 | Instruction *InsertBefore = nullptr) |
1830 | : UnaryInstruction(Ty, VAArg, List, InsertBefore) { |
1831 | setName(NameStr); |
1832 | } |
1833 | |
1834 | VAArgInst(Value *List, Type *Ty, const Twine &NameStr, |
1835 | BasicBlock *InsertAtEnd) |
1836 | : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) { |
1837 | setName(NameStr); |
1838 | } |
1839 | |
1840 | Value *getPointerOperand() { return getOperand(0); } |
1841 | const Value *getPointerOperand() const { return getOperand(0); } |
1842 | static unsigned getPointerOperandIndex() { return 0U; } |
1843 | |
1844 | // Methods to support type inquiry through isa, cast, and dyn_cast: |
1845 | static bool classof(const Instruction *I) { |
1846 | return I->getOpcode() == VAArg; |
1847 | } |
1848 | static bool classof(const Value *V) { |
1849 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1850 | } |
1851 | }; |
1852 | |
1853 | //===----------------------------------------------------------------------===// |
1854 | // ExtractElementInst Class |
1855 | //===----------------------------------------------------------------------===// |
1856 | |
1857 | /// This instruction extracts a single (scalar) |
1858 | /// element from a VectorType value |
1859 | /// |
1860 | class ExtractElementInst : public Instruction { |
1861 | ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "", |
1862 | Instruction *InsertBefore = nullptr); |
1863 | ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr, |
1864 | BasicBlock *InsertAtEnd); |
1865 | |
1866 | protected: |
1867 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1868 | friend class Instruction; |
1869 | |
1870 | ExtractElementInst *cloneImpl() const; |
1871 | |
1872 | public: |
1873 | static ExtractElementInst *Create(Value *Vec, Value *Idx, |
1874 | const Twine &NameStr = "", |
1875 | Instruction *InsertBefore = nullptr) { |
1876 | return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore); |
1877 | } |
1878 | |
1879 | static ExtractElementInst *Create(Value *Vec, Value *Idx, |
1880 | const Twine &NameStr, |
1881 | BasicBlock *InsertAtEnd) { |
1882 | return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd); |
1883 | } |
1884 | |
1885 | /// Return true if an extractelement instruction can be |
1886 | /// formed with the specified operands. |
1887 | static bool isValidOperands(const Value *Vec, const Value *Idx); |
1888 | |
1889 | Value *getVectorOperand() { return Op<0>(); } |
1890 | Value *getIndexOperand() { return Op<1>(); } |
1891 | const Value *getVectorOperand() const { return Op<0>(); } |
1892 | const Value *getIndexOperand() const { return Op<1>(); } |
1893 | |
1894 | VectorType *getVectorOperandType() const { |
1895 | return cast<VectorType>(getVectorOperand()->getType()); |
1896 | } |
1897 | |
1898 | /// Transparently provide more efficient getOperand methods. |
1899 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
1900 | |
1901 | // Methods to support type inquiry through isa, cast, and dyn_cast: |
1902 | static bool classof(const Instruction *I) { |
1903 | return I->getOpcode() == Instruction::ExtractElement; |
1904 | } |
1905 | static bool classof(const Value *V) { |
1906 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1907 | } |
1908 | }; |
1909 | |
1910 | template <> |
1911 | struct OperandTraits<ExtractElementInst> : |
1912 | public FixedNumOperandTraits<ExtractElementInst, 2> { |
1913 | }; |
1914 | |
1915 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value) |
1916 | |
1917 | //===----------------------------------------------------------------------===// |
1918 | // InsertElementInst Class |
1919 | //===----------------------------------------------------------------------===// |
1920 | |
1921 | /// This instruction inserts a single (scalar) |
1922 | /// element into a VectorType value |
1923 | /// |
1924 | class InsertElementInst : public Instruction { |
1925 | InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, |
1926 | const Twine &NameStr = "", |
1927 | Instruction *InsertBefore = nullptr); |
1928 | InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, |
1929 | BasicBlock *InsertAtEnd); |
1930 | |
1931 | protected: |
1932 | // Note: Instruction needs to be a friend here to call cloneImpl. |
1933 | friend class Instruction; |
1934 | |
1935 | InsertElementInst *cloneImpl() const; |
1936 | |
1937 | public: |
1938 | static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, |
1939 | const Twine &NameStr = "", |
1940 | Instruction *InsertBefore = nullptr) { |
1941 | return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); |
1942 | } |
1943 | |
1944 | static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, |
1945 | const Twine &NameStr, |
1946 | BasicBlock *InsertAtEnd) { |
1947 | return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd); |
1948 | } |
1949 | |
1950 | /// Return true if an insertelement instruction can be |
1951 | /// formed with the specified operands. |
1952 | static bool isValidOperands(const Value *Vec, const Value *NewElt, |
1953 | const Value *Idx); |
1954 | |
1955 | /// Overload to return the most specific vector type. |
1956 | /// |
1957 | VectorType *getType() const { |
1958 | return cast<VectorType>(Instruction::getType()); |
1959 | } |
1960 | |
1961 | /// Transparently provide more efficient getOperand methods. |
1962 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
1963 | |
1964 | // Methods to support type inquiry through isa, cast, and dyn_cast: |
1965 | static bool classof(const Instruction *I) { |
1966 | return I->getOpcode() == Instruction::InsertElement; |
1967 | } |
1968 | static bool classof(const Value *V) { |
1969 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
1970 | } |
1971 | }; |
1972 | |
1973 | template <> |
1974 | struct OperandTraits<InsertElementInst> : |
1975 | public FixedNumOperandTraits<InsertElementInst, 3> { |
1976 | }; |
1977 | |
1978 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value) |
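// -- Editor's sketch (not part of the original header) --
// Reading and rewriting lane 0 of a vector with the two classes above; Vec
// is a fixed-width vector Value, NewElt a scalar of its element type.
#include "llvm/IR/IRBuilder.h"
#include <cassert>
using namespace llvm;

Value *swapOutLane0(IRBuilder<> &B, Value *Vec, Value *NewElt) {
  Value *Idx = B.getInt32(0);
  assert(ExtractElementInst::isValidOperands(Vec, Idx));
  Value *Old = B.Insert(ExtractElementInst::Create(Vec, Idx, "lane0"));
  (void)Old; // e.g. stash the displaced element somewhere
  assert(InsertElementInst::isValidOperands(Vec, NewElt, Idx));
  return B.Insert(InsertElementInst::Create(Vec, NewElt, Idx, "upd"));
}
// -- end editor's sketch --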
1979 | |
1980 | //===----------------------------------------------------------------------===// |
1981 | // ShuffleVectorInst Class |
1982 | //===----------------------------------------------------------------------===// |
1983 | |
1984 | constexpr int UndefMaskElem = -1; |
1985 | |
1986 | /// This instruction constructs a fixed permutation of two |
1987 | /// input vectors. |
1988 | /// |
1989 | /// For each element of the result vector, the shuffle mask selects an element |
1990 | /// from one of the input vectors to copy to the result. Non-negative elements |
1991 | /// in the mask represent an index into the concatenated pair of input vectors. |
1992 | /// UndefMaskElem (-1) specifies that the result element is undefined. |
1993 | /// |
1994 | /// For scalable vectors, all the elements of the mask must be 0 or -1. This |
1995 | /// requirement may be relaxed in the future. |
1996 | class ShuffleVectorInst : public Instruction { |
1997 | SmallVector<int, 4> ShuffleMask; |
1998 | Constant *ShuffleMaskForBitcode; |
1999 | |
2000 | protected: |
2001 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2002 | friend class Instruction; |
2003 | |
2004 | ShuffleVectorInst *cloneImpl() const; |
2005 | |
2006 | public: |
2007 | ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, |
2008 | const Twine &NameStr = "", |
2009 | Instruction *InsertBefore = nullptr); |
2010 | ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, |
2011 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2012 | ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, |
2013 | const Twine &NameStr = "", |
2014 | Instruction *InsertBefore = nullptr); |
2015 | ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, |
2016 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2017 | |
2018 | void *operator new(size_t s) { return User::operator new(s, 2); } |
2019 | |
2020 | /// Swap the operands and adjust the mask to preserve the semantics |
2021 | /// of the instruction. |
2022 | void commute(); |
2023 | |
2024 | /// Return true if a shufflevector instruction can be |
2025 | /// formed with the specified operands. |
2026 | static bool isValidOperands(const Value *V1, const Value *V2, |
2027 | const Value *Mask); |
2028 | static bool isValidOperands(const Value *V1, const Value *V2, |
2029 | ArrayRef<int> Mask); |
2030 | |
2031 | /// Overload to return the most specific vector type. |
2032 | /// |
2033 | VectorType *getType() const { |
2034 | return cast<VectorType>(Instruction::getType()); |
2035 | } |
2036 | |
2037 | /// Transparently provide more efficient getOperand methods. |
2038 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
2039 | |
2040 | /// Return the shuffle mask value of this instruction for the given element |
2041 | /// index. Return UndefMaskElem if the element is undef. |
2042 | int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; } |
2043 | |
2044 | /// Convert the input shuffle mask operand to a vector of integers. Undefined |
2045 | /// elements of the mask are returned as UndefMaskElem. |
2046 | static void getShuffleMask(const Constant *Mask, |
2047 | SmallVectorImpl<int> &Result); |
2048 | |
2049 | /// Return the mask for this instruction as a vector of integers. Undefined |
2050 | /// elements of the mask are returned as UndefMaskElem. |
2051 | void getShuffleMask(SmallVectorImpl<int> &Result) const { |
2052 | Result.assign(ShuffleMask.begin(), ShuffleMask.end()); |
2053 | } |
2054 | |
2055 | /// Return the mask for this instruction, for use in bitcode. |
2056 | /// |
2057 | /// TODO: This is temporary until we decide a new bitcode encoding for |
2058 | /// shufflevector. |
2059 | Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; } |
2060 | |
2061 | static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask, |
2062 | Type *ResultTy); |
2063 | |
2064 | void setShuffleMask(ArrayRef<int> Mask); |
2065 | |
2066 | ArrayRef<int> getShuffleMask() const { return ShuffleMask; } |
2067 | |
2068 | /// Return true if this shuffle returns a vector with a different number of |
2069 | /// elements than its source vectors. |
2070 | /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3> |
2071 | /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5> |
2072 | bool changesLength() const { |
2073 | unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) |
2074 | ->getElementCount() |
2075 | .getKnownMinValue(); |
2076 | unsigned NumMaskElts = ShuffleMask.size(); |
2077 | return NumSourceElts != NumMaskElts; |
2078 | } |
2079 | |
2080 | /// Return true if this shuffle returns a vector with a greater number of |
2081 | /// elements than its source vectors. |
2082 | /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3> |
2083 | bool increasesLength() const { |
2084 | unsigned NumSourceElts = |
2085 | cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); |
2086 | unsigned NumMaskElts = ShuffleMask.size(); |
2087 | return NumSourceElts < NumMaskElts; |
2088 | } |
2089 | |
2090 | /// Return true if this shuffle mask chooses elements from exactly one source |
2091 | /// vector. |
2092 | /// Example: <7,5,undef,7> |
2093 | /// This assumes that vector operands are the same length as the mask. |
2094 | static bool isSingleSourceMask(ArrayRef<int> Mask); |
2095 | static bool isSingleSourceMask(const Constant *Mask) { |
2096 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2097 | SmallVector<int, 16> MaskAsInts; |
2098 | getShuffleMask(Mask, MaskAsInts); |
2099 | return isSingleSourceMask(MaskAsInts); |
2100 | } |
2101 | |
2102 | /// Return true if this shuffle chooses elements from exactly one source |
2103 | /// vector without changing the length of that vector. |
2104 | /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3> |
2105 | /// TODO: Optionally allow length-changing shuffles. |
2106 | bool isSingleSource() const { |
2107 | return !changesLength() && isSingleSourceMask(ShuffleMask); |
2108 | } |
2109 | |
2110 | /// Return true if this shuffle mask chooses elements from exactly one source |
2111 | /// vector without lane crossings. A shuffle using this mask is not |
2112 | /// necessarily a no-op because it may change the number of elements from its |
2113 | /// input vectors or it may provide demanded bits knowledge via undef lanes. |
2114 | /// Example: <undef,undef,2,3> |
2115 | static bool isIdentityMask(ArrayRef<int> Mask); |
2116 | static bool isIdentityMask(const Constant *Mask) { |
2117 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2118 | SmallVector<int, 16> MaskAsInts; |
2119 | getShuffleMask(Mask, MaskAsInts); |
2120 | return isIdentityMask(MaskAsInts); |
2121 | } |
2122 | |
2123 | /// Return true if this shuffle chooses elements from exactly one source |
2124 | /// vector without lane crossings and does not change the number of elements |
2125 | /// from its input vectors. |
2126 | /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef> |
2127 | bool isIdentity() const { |
2128 | return !changesLength() && isIdentityMask(ShuffleMask); |
2129 | } |
2130 | |
2131 | /// Return true if this shuffle lengthens exactly one source vector with |
2132 | /// undefs in the high elements. |
2133 | bool isIdentityWithPadding() const; |
2134 | |
2135 | /// Return true if this shuffle extracts the first N elements of exactly one |
2136 | /// source vector. |
2137 | bool isIdentityWithExtract() const; |
2138 | |
2139 | /// Return true if this shuffle concatenates its 2 source vectors. This |
2140 | /// returns false if either input is undefined. In that case, the shuffle |
2141 | /// is better classified as an identity with padding operation. |
2142 | bool isConcat() const; |
2143 | |
2144 | /// Return true if this shuffle mask chooses elements from its source vectors |
2145 | /// without lane crossings. A shuffle using this mask would be |
2146 | /// equivalent to a vector select with a constant condition operand. |
2147 | /// Example: <4,1,6,undef> |
2148 | /// This returns false if the mask does not choose from both input vectors. |
2149 | /// In that case, the shuffle is better classified as an identity shuffle. |
2150 | /// This assumes that vector operands are the same length as the mask |
2151 | /// (a length-changing shuffle can never be equivalent to a vector select). |
2152 | static bool isSelectMask(ArrayRef<int> Mask); |
2153 | static bool isSelectMask(const Constant *Mask) { |
2154 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2155 | SmallVector<int, 16> MaskAsInts; |
2156 | getShuffleMask(Mask, MaskAsInts); |
2157 | return isSelectMask(MaskAsInts); |
2158 | } |
2159 | |
2160 | /// Return true if this shuffle chooses elements from its source vectors |
2161 | /// without lane crossings and all operands have the same number of elements. |
2162 | /// In other words, this shuffle is equivalent to a vector select with a |
2163 | /// constant condition operand. |
2164 | /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3> |
2165 | /// This returns false if the mask does not choose from both input vectors. |
2166 | /// In that case, the shuffle is better classified as an identity shuffle. |
2167 | /// TODO: Optionally allow length-changing shuffles. |
2168 | bool isSelect() const { |
2169 | return !changesLength() && isSelectMask(ShuffleMask); |
2170 | } |
2171 | |
2172 | /// Return true if this shuffle mask swaps the order of elements from exactly |
2173 | /// one source vector. |
2174 | /// Example: <7,6,undef,4> |
2175 | /// This assumes that vector operands are the same length as the mask. |
2176 | static bool isReverseMask(ArrayRef<int> Mask); |
2177 | static bool isReverseMask(const Constant *Mask) { |
2178 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2179 | SmallVector<int, 16> MaskAsInts; |
2180 | getShuffleMask(Mask, MaskAsInts); |
2181 | return isReverseMask(MaskAsInts); |
2182 | } |
2183 | |
2184 | /// Return true if this shuffle swaps the order of elements from exactly |
2185 | /// one source vector. |
2186 | /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef> |
2187 | /// TODO: Optionally allow length-changing shuffles. |
2188 | bool isReverse() const { |
2189 | return !changesLength() && isReverseMask(ShuffleMask); |
2190 | } |
2191 | |
2192 | /// Return true if this shuffle mask chooses all elements with the same value |
2193 | /// as the first element of exactly one source vector. |
2194 | /// Example: <4,undef,undef,4> |
2195 | /// This assumes that vector operands are the same length as the mask. |
2196 | static bool isZeroEltSplatMask(ArrayRef<int> Mask); |
2197 | static bool isZeroEltSplatMask(const Constant *Mask) { |
2198 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2199 | SmallVector<int, 16> MaskAsInts; |
2200 | getShuffleMask(Mask, MaskAsInts); |
2201 | return isZeroEltSplatMask(MaskAsInts); |
2202 | } |
2203 | |
2204 | /// Return true if all elements of this shuffle are the same value as the |
2205 | /// first element of exactly one source vector without changing the length |
2206 | /// of that vector. |
2207 | /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0> |
2208 | /// TODO: Optionally allow length-changing shuffles. |
2209 | /// TODO: Optionally allow splats from other elements. |
2210 | bool isZeroEltSplat() const { |
2211 | return !changesLength() && isZeroEltSplatMask(ShuffleMask); |
2212 | } |
2213 | |
2214 | /// Return true if this shuffle mask is a transpose mask. |
2215 | /// Transpose vector masks transpose a 2xn matrix. They read corresponding |
2216 | /// even- or odd-numbered vector elements from two n-dimensional source |
2217 | /// vectors and write each result into consecutive elements of an |
2218 | /// n-dimensional destination vector. Two shuffles are necessary to complete |
2219 | /// the transpose, one for the even elements and another for the odd elements. |
2220 | /// This description closely follows how the TRN1 and TRN2 AArch64 |
2221 | /// instructions operate. |
2222 | /// |
2223 | /// For example, a simple 2x2 matrix can be transposed with: |
2224 | /// |
2225 | /// ; Original matrix |
2226 | /// m0 = < a, b > |
2227 | /// m1 = < c, d > |
2228 | /// |
2229 | /// ; Transposed matrix |
2230 | /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 > |
2231 | /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 > |
2232 | /// |
2233 | /// For matrices having greater than n columns, the resulting nx2 transposed |
2234 | /// matrix is stored in two result vectors such that one vector contains |
2235 | /// interleaved elements from all the even-numbered rows and the other vector |
2236 | /// contains interleaved elements from all the odd-numbered rows. For example, |
2237 | /// a 2x4 matrix can be transposed with: |
2238 | /// |
2239 | /// ; Original matrix |
2240 | /// m0 = < a, b, c, d > |
2241 | /// m1 = < e, f, g, h > |
2242 | /// |
2243 | /// ; Transposed matrix |
2244 | /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 > |
2245 | /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 > |
2246 | static bool isTransposeMask(ArrayRef<int> Mask); |
2247 | static bool isTransposeMask(const Constant *Mask) { |
2248 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2249 | SmallVector<int, 16> MaskAsInts; |
2250 | getShuffleMask(Mask, MaskAsInts); |
2251 | return isTransposeMask(MaskAsInts); |
2252 | } |
2253 | |
2254 | /// Return true if this shuffle transposes the elements of its inputs without |
2255 | /// changing the length of the vectors. This operation may also be known as a |
2256 | /// merge or interleave. See the description for isTransposeMask() for the |
2257 | /// exact specification. |
2258 | /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6> |
2259 | bool isTranspose() const { |
2260 | return !changesLength() && isTransposeMask(ShuffleMask); |
2261 | } |
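// -- Editor's sketch (not part of the original header) --
// The 2x2 transpose from the isTransposeMask() comment, written out; M0/M1
// are the <2 x n> row vectors and all names are illustrative.
#include "llvm/IR/IRBuilder.h"
#include <cassert>
using namespace llvm;

void transpose2x2(IRBuilder<> &B, Value *M0, Value *M1,
                  Value *&T0, Value *&T1) {
  T0 = B.CreateShuffleVector(M0, M1, ArrayRef<int>{0, 2}, "t0"); // <a, c>
  T1 = B.CreateShuffleVector(M0, M1, ArrayRef<int>{1, 3}, "t1"); // <b, d>
  assert(ShuffleVectorInst::isTransposeMask(ArrayRef<int>{0, 2}) &&
         ShuffleVectorInst::isTransposeMask(ArrayRef<int>{1, 3}));
}
// -- end editor's sketch --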
2262 | |
2263 | /// Return true if this shuffle mask is an extract subvector mask. |
2264 | /// A valid extract subvector mask returns a smaller vector from a single |
2265 | /// source operand. The base extraction index is returned as well. |
2266 | static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, |
2267 | int &Index); |
2268 | static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts, |
2269 | int &Index) { |
2270 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant."); |
2271 | SmallVector<int, 16> MaskAsInts; |
2272 | getShuffleMask(Mask, MaskAsInts); |
2273 | return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index); |
2274 | } |
2275 | |
2276 | /// Return true if this shuffle mask is an extract subvector mask. |
2277 | bool isExtractSubvectorMask(int &Index) const { |
2278 | int NumSrcElts = |
2279 | cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); |
2280 | return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index); |
2281 | } |
2282 | |
2283 | /// Change values in a shuffle permute mask assuming the two vector operands |
2284 | /// of length InVecNumElts have swapped position. |
2285 | static void commuteShuffleMask(MutableArrayRef<int> Mask, |
2286 | unsigned InVecNumElts) { |
2287 | for (int &Idx : Mask) { |
2288 | if (Idx == -1) |
2289 | continue; |
2290 | Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts; |
2291 | assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 && |
2292 | "shufflevector mask index out of range"); |
2293 | } |
2294 | } |
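// -- Editor's note (not part of the original header) --
// commuteShuffleMask() worked on a concrete mask with two <4 x n> inputs
// A and B (names illustrative).
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Instructions.h"
#include <cassert>
using namespace llvm;

void demoCommute() {
  SmallVector<int, 4> M = {0, 5, -1, 3}; // selects A[0], B[1], undef, A[3]
  ShuffleVectorInst::commuteShuffleMask(M, /*InVecNumElts=*/4);
  // With A and B swapped, the same lanes are selected by {4, 1, -1, 7};
  // the undef sentinel (-1) is left untouched.
  assert(M[0] == 4 && M[1] == 1 && M[2] == -1 && M[3] == 7);
}
// -- end editor's note --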
2295 | |
2296 | // Methods to support type inquiry through isa, cast, and dyn_cast: |
2297 | static bool classof(const Instruction *I) { |
2298 | return I->getOpcode() == Instruction::ShuffleVector; |
2299 | } |
2300 | static bool classof(const Value *V) { |
2301 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2302 | } |
2303 | }; |
2304 | |
2305 | template <> |
2306 | struct OperandTraits<ShuffleVectorInst> |
2307 | : public FixedNumOperandTraits<ShuffleVectorInst, 2> {}; |
2308 | |
2309 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value) |
2310 | |
2311 | //===----------------------------------------------------------------------===// |
2312 | // ExtractValueInst Class |
2313 | //===----------------------------------------------------------------------===// |
2314 | |
2315 | /// This instruction extracts a struct member or array |
2316 | /// element value from an aggregate value. |
2317 | /// |
2318 | class ExtractValueInst : public UnaryInstruction { |
2319 | SmallVector<unsigned, 4> Indices; |
2320 | |
2321 | ExtractValueInst(const ExtractValueInst &EVI); |
2322 | |
2323 | /// Constructors - Create an extractvalue instruction with a base aggregate |
2324 | /// value and a list of indices. The first ctor can optionally insert before |
2325 | /// an existing instruction, the second appends the new instruction to the |
2326 | /// specified BasicBlock. |
2327 | inline ExtractValueInst(Value *Agg, |
2328 | ArrayRef<unsigned> Idxs, |
2329 | const Twine &NameStr, |
2330 | Instruction *InsertBefore); |
2331 | inline ExtractValueInst(Value *Agg, |
2332 | ArrayRef<unsigned> Idxs, |
2333 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2334 | |
2335 | void init(ArrayRef<unsigned> Idxs, const Twine &NameStr); |
2336 | |
2337 | protected: |
2338 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2339 | friend class Instruction; |
2340 | |
2341 | ExtractValueInst *cloneImpl() const; |
2342 | |
2343 | public: |
2344 | static ExtractValueInst *Create(Value *Agg, |
2345 | ArrayRef<unsigned> Idxs, |
2346 | const Twine &NameStr = "", |
2347 | Instruction *InsertBefore = nullptr) { |
2348 | return new |
2349 | ExtractValueInst(Agg, Idxs, NameStr, InsertBefore); |
2350 | } |
2351 | |
2352 | static ExtractValueInst *Create(Value *Agg, |
2353 | ArrayRef<unsigned> Idxs, |
2354 | const Twine &NameStr, |
2355 | BasicBlock *InsertAtEnd) { |
2356 | return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd); |
2357 | } |
2358 | |
2359 | /// Returns the type of the element that would be extracted |
2360 | /// with an extractvalue instruction with the specified parameters. |
2361 | /// |
2362 | /// Null is returned if the indices are invalid for the specified type. |
2363 | static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs); |
2364 | |
2365 | using idx_iterator = const unsigned*; |
2366 | |
2367 | inline idx_iterator idx_begin() const { return Indices.begin(); } |
2368 | inline idx_iterator idx_end() const { return Indices.end(); } |
2369 | inline iterator_range<idx_iterator> indices() const { |
2370 | return make_range(idx_begin(), idx_end()); |
2371 | } |
2372 | |
2373 | Value *getAggregateOperand() { |
2374 | return getOperand(0); |
2375 | } |
2376 | const Value *getAggregateOperand() const { |
2377 | return getOperand(0); |
2378 | } |
2379 | static unsigned getAggregateOperandIndex() { |
2380 | return 0U; // get index for modifying correct operand |
2381 | } |
2382 | |
2383 | ArrayRef<unsigned> getIndices() const { |
2384 | return Indices; |
2385 | } |
2386 | |
2387 | unsigned getNumIndices() const { |
2388 | return (unsigned)Indices.size(); |
2389 | } |
2390 | |
2391 | bool hasIndices() const { |
2392 | return true; |
2393 | } |
2394 | |
2395 | // Methods to support type inquiry through isa, cast, and dyn_cast: |
2396 | static bool classof(const Instruction *I) { |
2397 | return I->getOpcode() == Instruction::ExtractValue; |
2398 | } |
2399 | static bool classof(const Value *V) { |
2400 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2401 | } |
2402 | }; |
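// -- Editor's sketch (not part of the original header) --
// Pulling index 1 out of a two-field aggregate, e.g. the overflow bit of an
// @llvm.sadd.with.overflow result; Agg and the names are illustrative.
#include "llvm/IR/IRBuilder.h"
#include <cassert>
using namespace llvm;

Value *getSecondField(IRBuilder<> &B, Value *Agg) {
  // getIndexedType() returns null for out-of-range indices, so it doubles
  // as a validity check before constructing the instruction.
  assert(ExtractValueInst::getIndexedType(Agg->getType(), {1}));
  return B.Insert(ExtractValueInst::Create(Agg, {1}, "ovf"));
}
// -- end editor's sketch --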
2403 | |
2404 | ExtractValueInst::ExtractValueInst(Value *Agg, |
2405 | ArrayRef<unsigned> Idxs, |
2406 | const Twine &NameStr, |
2407 | Instruction *InsertBefore) |
2408 | : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), |
2409 | ExtractValue, Agg, InsertBefore) { |
2410 | init(Idxs, NameStr); |
2411 | } |
2412 | |
2413 | ExtractValueInst::ExtractValueInst(Value *Agg, |
2414 | ArrayRef<unsigned> Idxs, |
2415 | const Twine &NameStr, |
2416 | BasicBlock *InsertAtEnd) |
2417 | : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), |
2418 | ExtractValue, Agg, InsertAtEnd) { |
2419 | init(Idxs, NameStr); |
2420 | } |
2421 | |
2422 | //===----------------------------------------------------------------------===// |
2423 | // InsertValueInst Class |
2424 | //===----------------------------------------------------------------------===// |
2425 | |
2426 | /// This instruction inserts a struct field or array element |
2427 | /// value into an aggregate value. |
2428 | /// |
2429 | class InsertValueInst : public Instruction { |
2430 | SmallVector<unsigned, 4> Indices; |
2431 | |
2432 | InsertValueInst(const InsertValueInst &IVI); |
2433 | |
2434 | /// Constructors - Create an insertvalue instruction with a base aggregate |
2435 | /// value, a value to insert, and a list of indices. The first ctor can |
2436 | /// optionally insert before an existing instruction, the second appends |
2437 | /// the new instruction to the specified BasicBlock. |
2438 | inline InsertValueInst(Value *Agg, Value *Val, |
2439 | ArrayRef<unsigned> Idxs, |
2440 | const Twine &NameStr, |
2441 | Instruction *InsertBefore); |
2442 | inline InsertValueInst(Value *Agg, Value *Val, |
2443 | ArrayRef<unsigned> Idxs, |
2444 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2445 | |
2446 | /// Constructors - These two constructors are convenience methods because |
2447 | /// one- and two-index insertvalue instructions are so common. |
2448 | InsertValueInst(Value *Agg, Value *Val, unsigned Idx, |
2449 | const Twine &NameStr = "", |
2450 | Instruction *InsertBefore = nullptr); |
2451 | InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr, |
2452 | BasicBlock *InsertAtEnd); |
2453 | |
2454 | void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, |
2455 | const Twine &NameStr); |
2456 | |
2457 | protected: |
2458 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2459 | friend class Instruction; |
2460 | |
2461 | InsertValueInst *cloneImpl() const; |
2462 | |
2463 | public: |
2464 | // allocate space for exactly two operands |
2465 | void *operator new(size_t s) { |
2466 | return User::operator new(s, 2); |
2467 | } |
2468 | |
2469 | static InsertValueInst *Create(Value *Agg, Value *Val, |
2470 | ArrayRef<unsigned> Idxs, |
2471 | const Twine &NameStr = "", |
2472 | Instruction *InsertBefore = nullptr) { |
2473 | return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore); |
2474 | } |
2475 | |
2476 | static InsertValueInst *Create(Value *Agg, Value *Val, |
2477 | ArrayRef<unsigned> Idxs, |
2478 | const Twine &NameStr, |
2479 | BasicBlock *InsertAtEnd) { |
2480 | return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd); |
2481 | } |
2482 | |
2483 | /// Transparently provide more efficient getOperand methods. |
2484 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value); |
2485 | |
2486 | using idx_iterator = const unsigned*; |
2487 | |
2488 | inline idx_iterator idx_begin() const { return Indices.begin(); } |
2489 | inline idx_iterator idx_end() const { return Indices.end(); } |
2490 | inline iterator_range<idx_iterator> indices() const { |
2491 | return make_range(idx_begin(), idx_end()); |
2492 | } |
2493 | |
2494 | Value *getAggregateOperand() { |
2495 | return getOperand(0); |
2496 | } |
2497 | const Value *getAggregateOperand() const { |
2498 | return getOperand(0); |
2499 | } |
2500 | static unsigned getAggregateOperandIndex() { |
2501 | return 0U; // get index for modifying correct operand |
2502 | } |
2503 | |
2504 | Value *getInsertedValueOperand() { |
2505 | return getOperand(1); |
2506 | } |
2507 | const Value *getInsertedValueOperand() const { |
2508 | return getOperand(1); |
2509 | } |
2510 | static unsigned getInsertedValueOperandIndex() { |
2511 | return 1U; // get index for modifying correct operand |
2512 | } |
2513 | |
2514 | ArrayRef<unsigned> getIndices() const { |
2515 | return Indices; |
2516 | } |
2517 | |
2518 | unsigned getNumIndices() const { |
2519 | return (unsigned)Indices.size(); |
2520 | } |
2521 | |
2522 | bool hasIndices() const { |
2523 | return true; |
2524 | } |
2525 | |
2526 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2527 | static bool classof(const Instruction *I) { |
2528 | return I->getOpcode() == Instruction::InsertValue; |
2529 | } |
2530 | static bool classof(const Value *V) { |
2531 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2532 | } |
2533 | }; |
2534 | |
2535 | template <> |
2536 | struct OperandTraits<InsertValueInst> : |
2537 | public FixedNumOperandTraits<InsertValueInst, 2> { |
2538 | }; |
2539 | |
2540 | InsertValueInst::InsertValueInst(Value *Agg, |
2541 | Value *Val, |
2542 | ArrayRef<unsigned> Idxs, |
2543 | const Twine &NameStr, |
2544 | Instruction *InsertBefore) |
2545 | : Instruction(Agg->getType(), InsertValue, |
2546 | OperandTraits<InsertValueInst>::op_begin(this), |
2547 | 2, InsertBefore) { |
2548 | init(Agg, Val, Idxs, NameStr); |
2549 | } |
2550 | |
2551 | InsertValueInst::InsertValueInst(Value *Agg, |
2552 | Value *Val, |
2553 | ArrayRef<unsigned> Idxs, |
2554 | const Twine &NameStr, |
2555 | BasicBlock *InsertAtEnd) |
2556 | : Instruction(Agg->getType(), InsertValue, |
2557 | OperandTraits<InsertValueInst>::op_begin(this), |
2558 | 2, InsertAtEnd) { |
2559 | init(Agg, Val, Idxs, NameStr); |
2560 | } |
2561 | |
2562 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)
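
A minimal usage sketch for the class above, under assumed names (STy, Val, and InsertPt are hypothetical and not part of this header):

  // Build an insertvalue that writes Val into field 1 of an undef aggregate.
  Value *Agg = UndefValue::get(STy);              // assumed StructType *STy
  InsertValueInst *IV =
      InsertValueInst::Create(Agg, Val, {1u}, "agg", InsertPt);
  // Operand 0 is the aggregate, operand 1 the inserted value:
  //   IV->getAggregateOperand() == Agg
  //   IV->getInsertedValueOperand() == Val
  //   IV->getIndices() yields {1}, and hasIndices() is always true.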
2563 | |
2564 | //===----------------------------------------------------------------------===// |
2565 | // PHINode Class |
2566 | //===----------------------------------------------------------------------===// |
2567 | |
2568 | // PHINode - The PHINode class is used to represent the magical mystical PHI
2569 | // node, which cannot exist in nature, but can be synthesized in a computer
2570 | // scientist's overactive imagination.
2571 | // |
2572 | class PHINode : public Instruction { |
2573 | /// The number of operands actually allocated. NumOperands is |
2574 | /// the number actually in use. |
2575 | unsigned ReservedSpace; |
2576 | |
2577 | PHINode(const PHINode &PN); |
2578 | |
2579 | explicit PHINode(Type *Ty, unsigned NumReservedValues, |
2580 | const Twine &NameStr = "", |
2581 | Instruction *InsertBefore = nullptr) |
2582 | : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore), |
2583 | ReservedSpace(NumReservedValues) { |
2584 | setName(NameStr); |
2585 | allocHungoffUses(ReservedSpace); |
2586 | } |
2587 | |
2588 | PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, |
2589 | BasicBlock *InsertAtEnd) |
2590 | : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd), |
2591 | ReservedSpace(NumReservedValues) { |
2592 | setName(NameStr); |
2593 | allocHungoffUses(ReservedSpace); |
2594 | } |
2595 | |
2596 | protected: |
2597 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2598 | friend class Instruction; |
2599 | |
2600 | PHINode *cloneImpl() const; |
2601 | |
2602 | // allocHungoffUses - this is more complicated than the generic |
2603 | // User::allocHungoffUses, because we have to allocate Uses for the incoming |
2604 | // values and pointers to the incoming blocks, all in one allocation. |
2605 | void allocHungoffUses(unsigned N) { |
2606 | User::allocHungoffUses(N, /* IsPhi */ true); |
2607 | } |
2608 | |
2609 | public: |
2610 | /// Constructors - NumReservedValues is a hint for the number of incoming |
2611 | /// edges that this phi node will have (use 0 if you really have no idea). |
2612 | static PHINode *Create(Type *Ty, unsigned NumReservedValues, |
2613 | const Twine &NameStr = "", |
2614 | Instruction *InsertBefore = nullptr) { |
2615 | return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore); |
2616 | } |
2617 | |
2618 | static PHINode *Create(Type *Ty, unsigned NumReservedValues, |
2619 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
2620 | return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd); |
2621 | } |
2622 | |
2623 | /// Provide fast operand accessors |
2624 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2625 | |
2626 | // Block iterator interface. This provides access to the list of incoming |
2627 | // basic blocks, which parallels the list of incoming values. |
2628 | |
2629 | using block_iterator = BasicBlock **; |
2630 | using const_block_iterator = BasicBlock * const *; |
2631 | |
2632 | block_iterator block_begin() { |
2633 | return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace); |
2634 | } |
2635 | |
2636 | const_block_iterator block_begin() const { |
2637 | return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace); |
2638 | } |
2639 | |
2640 | block_iterator block_end() { |
2641 | return block_begin() + getNumOperands(); |
2642 | } |
2643 | |
2644 | const_block_iterator block_end() const { |
2645 | return block_begin() + getNumOperands(); |
2646 | } |
2647 | |
2648 | iterator_range<block_iterator> blocks() { |
2649 | return make_range(block_begin(), block_end()); |
2650 | } |
2651 | |
2652 | iterator_range<const_block_iterator> blocks() const { |
2653 | return make_range(block_begin(), block_end()); |
2654 | } |
2655 | |
2656 | op_range incoming_values() { return operands(); } |
2657 | |
2658 | const_op_range incoming_values() const { return operands(); } |
2659 | |
2660 | /// Return the number of incoming edges |
2661 | /// |
2662 | unsigned getNumIncomingValues() const { return getNumOperands(); } |
2663 | |
2664 | /// Return incoming value number \p i.
2665 | /// |
2666 | Value *getIncomingValue(unsigned i) const { |
2667 | return getOperand(i); |
2668 | } |
2669 | void setIncomingValue(unsigned i, Value *V) { |
2670 | assert(V && "PHI node got a null value!");
2671 | assert(getType() == V->getType() &&
2672 | "All operands to PHI node must be the same type as the PHI node!");
2673 | setOperand(i, V); |
2674 | } |
2675 | |
2676 | static unsigned getOperandNumForIncomingValue(unsigned i) { |
2677 | return i; |
2678 | } |
2679 | |
2680 | static unsigned getIncomingValueNumForOperand(unsigned i) { |
2681 | return i; |
2682 | } |
2683 | |
2684 | /// Return incoming basic block number @p i. |
2685 | /// |
2686 | BasicBlock *getIncomingBlock(unsigned i) const { |
2687 | return block_begin()[i]; |
2688 | } |
2689 | |
2690 | /// Return incoming basic block corresponding |
2691 | /// to an operand of the PHI. |
2692 | /// |
2693 | BasicBlock *getIncomingBlock(const Use &U) const { |
2694 | assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
2695 | return getIncomingBlock(unsigned(&U - op_begin())); |
2696 | } |
2697 | |
2698 | /// Return incoming basic block corresponding |
2699 | /// to value use iterator. |
2700 | /// |
2701 | BasicBlock *getIncomingBlock(Value::const_user_iterator I) const { |
2702 | return getIncomingBlock(I.getUse()); |
2703 | } |
2704 | |
2705 | void setIncomingBlock(unsigned i, BasicBlock *BB) { |
2706 | assert(BB && "PHI node got a null basic block!");
2707 | block_begin()[i] = BB; |
2708 | } |
2709 | |
2710 | /// Replace every incoming basic block \p Old to basic block \p New. |
2711 | void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) { |
2712 | assert(New && Old && "PHI node got a null basic block!");
2713 | for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) |
2714 | if (getIncomingBlock(Op) == Old) |
2715 | setIncomingBlock(Op, New); |
2716 | } |
2717 | |
2718 | /// Add an incoming value to the end of the PHI list |
2719 | /// |
2720 | void addIncoming(Value *V, BasicBlock *BB) { |
2721 | if (getNumOperands() == ReservedSpace) |
2722 | growOperands(); // Get more space! |
2723 | // Initialize some new operands. |
2724 | setNumHungOffUseOperands(getNumOperands() + 1); |
2725 | setIncomingValue(getNumOperands() - 1, V); |
2726 | setIncomingBlock(getNumOperands() - 1, BB); |
2727 | } |
2728 | |
2729 | /// Remove an incoming value. This is useful if a |
2730 | /// predecessor basic block is deleted. The value removed is returned. |
2731 | /// |
2732 | /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty |
2733 | /// is true), the PHI node is destroyed and any uses of it are replaced with |
2734 | /// dummy values. The only time there should be zero incoming values to a PHI |
2735 | /// node is when the block is dead, so this strategy is sound. |
2736 | /// |
2737 | Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true); |
2738 | |
2739 | Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) { |
2740 | int Idx = getBasicBlockIndex(BB); |
2741 | assert(Idx >= 0 && "Invalid basic block argument to remove!");
2742 | return removeIncomingValue(Idx, DeletePHIIfEmpty); |
2743 | } |
2744 | |
2745 | /// Return the first index of the specified basic |
2746 | /// block in the value list for this PHI. Returns -1 if no instance. |
2747 | /// |
2748 | int getBasicBlockIndex(const BasicBlock *BB) const { |
2749 | for (unsigned i = 0, e = getNumOperands(); i != e; ++i) |
2750 | if (block_begin()[i] == BB) |
2751 | return i; |
2752 | return -1; |
2753 | } |
2754 | |
2755 | Value *getIncomingValueForBlock(const BasicBlock *BB) const { |
2756 | int Idx = getBasicBlockIndex(BB); |
2757 | assert(Idx >= 0 && "Invalid basic block argument!");
2758 | return getIncomingValue(Idx); |
2759 | } |
2760 | |
2761 | /// Set every incoming value for block \p BB to \p V.
2762 | void setIncomingValueForBlock(const BasicBlock *BB, Value *V) { |
2763 | assert(BB && "PHI node got a null basic block!");
2764 | bool Found = false; |
2765 | for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) |
2766 | if (getIncomingBlock(Op) == BB) { |
2767 | Found = true; |
2768 | setIncomingValue(Op, V); |
2769 | } |
2770 | (void)Found; |
2771 | assert(Found && "Invalid basic block argument to set!");
2772 | } |
2773 | |
2774 | /// If the specified PHI node always merges together the |
2775 | /// same value, return the value, otherwise return null. |
2776 | Value *hasConstantValue() const; |
2777 | |
2778 | /// Whether the specified PHI node always merges |
2779 | /// together the same value, assuming undefs are equal to a unique |
2780 | /// non-undef value. |
2781 | bool hasConstantOrUndefValue() const; |
2782 | |
2783 | /// Return true if the PHI node is complete, i.e., all of its parent's
2784 | /// predecessors have an incoming value in this PHI; otherwise return false.
2785 | bool isComplete() const { |
2786 | return llvm::all_of(predecessors(getParent()), |
2787 | [this](const BasicBlock *Pred) { |
2788 | return getBasicBlockIndex(Pred) >= 0; |
2789 | }); |
2790 | } |
2791 | |
2792 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
2793 | static bool classof(const Instruction *I) { |
2794 | return I->getOpcode() == Instruction::PHI; |
2795 | } |
2796 | static bool classof(const Value *V) { |
2797 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2798 | } |
2799 | |
2800 | private: |
2801 | void growOperands(); |
2802 | }; |
2803 | |
2804 | template <> |
2805 | struct OperandTraits<PHINode> : public HungoffOperandTraits<2> { |
2806 | }; |
2807 | |
2808 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)
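
A minimal usage sketch, under assumed names (I32Ty, V0, V1, BB0, BB1, and MergeBB are hypothetical):

  // Reserve space for two incoming edges up front; addIncoming grows the
  // hung-off use list on demand if the hint turns out to be too small.
  PHINode *PN = PHINode::Create(I32Ty, /*NumReservedValues=*/2, "merge",
                                /*InsertAtEnd=*/MergeBB);
  PN->addIncoming(V0, BB0);
  PN->addIncoming(V1, BB1);
  // Incoming values and blocks are parallel lists:
  //   PN->getIncomingValueForBlock(BB1) == V1
  //   PN->getBasicBlockIndex(BB0) == 0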
2809 | |
2810 | //===----------------------------------------------------------------------===// |
2811 | // LandingPadInst Class |
2812 | //===----------------------------------------------------------------------===// |
2813 | |
2814 | //===--------------------------------------------------------------------------- |
2815 | /// The landingpad instruction holds all of the information |
2816 | /// necessary to generate correct exception handling. The landingpad instruction |
2817 | /// cannot be moved from the top of a landing pad block, which itself is |
2818 | /// accessible only from the 'unwind' edge of an invoke. This uses the |
2819 | /// SubclassData field in Value to store whether or not the landingpad is a |
2820 | /// cleanup. |
2821 | /// |
2822 | class LandingPadInst : public Instruction { |
2823 | using CleanupField = BoolBitfieldElementT<0>; |
2824 | |
2825 | /// The number of operands actually allocated. NumOperands is |
2826 | /// the number actually in use. |
2827 | unsigned ReservedSpace; |
2828 | |
2829 | LandingPadInst(const LandingPadInst &LP); |
2830 | |
2831 | public: |
2832 | enum ClauseType { Catch, Filter }; |
2833 | |
2834 | private: |
2835 | explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, |
2836 | const Twine &NameStr, Instruction *InsertBefore); |
2837 | explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, |
2838 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2839 | |
2840 | // Allocate space for exactly zero operands. |
2841 | void *operator new(size_t s) { |
2842 | return User::operator new(s); |
2843 | } |
2844 | |
2845 | void growOperands(unsigned Size); |
2846 | void init(unsigned NumReservedValues, const Twine &NameStr); |
2847 | |
2848 | protected: |
2849 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2850 | friend class Instruction; |
2851 | |
2852 | LandingPadInst *cloneImpl() const; |
2853 | |
2854 | public: |
2855 | /// Constructors - NumReservedClauses is a hint for the number of incoming |
2856 | /// clauses that this landingpad will have (use 0 if you really have no idea). |
2857 | static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, |
2858 | const Twine &NameStr = "", |
2859 | Instruction *InsertBefore = nullptr); |
2860 | static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, |
2861 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
2862 | |
2863 | /// Provide fast operand accessors |
2864 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2865 | |
2866 | /// Return 'true' if this landingpad instruction is a |
2867 | /// cleanup. I.e., it should be run when unwinding even if its landing pad |
2868 | /// doesn't catch the exception. |
2869 | bool isCleanup() const { return getSubclassData<CleanupField>(); } |
2870 | |
2871 | /// Indicate that this landingpad instruction is a cleanup. |
2872 | void setCleanup(bool V) { setSubclassData<CleanupField>(V); } |
2873 | |
2874 | /// Add a catch or filter clause to the landing pad. |
2875 | void addClause(Constant *ClauseVal); |
2876 | |
2877 | /// Get the value of the clause at index Idx. Use isCatch/isFilter to |
2878 | /// determine what type of clause this is. |
2879 | Constant *getClause(unsigned Idx) const { |
2880 | return cast<Constant>(getOperandList()[Idx]); |
2881 | } |
2882 | |
2883 | /// Return 'true' if the clause at index Idx is a catch clause.
2884 | bool isCatch(unsigned Idx) const { |
2885 | return !isa<ArrayType>(getOperandList()[Idx]->getType()); |
2886 | } |
2887 | |
2888 | /// Return 'true' if the clause at index Idx is a filter clause.
2889 | bool isFilter(unsigned Idx) const { |
2890 | return isa<ArrayType>(getOperandList()[Idx]->getType()); |
2891 | } |
2892 | |
2893 | /// Get the number of clauses for this landing pad. |
2894 | unsigned getNumClauses() const { return getNumOperands(); } |
2895 | |
2896 | /// Grow the size of the operand list to accommodate the new |
2897 | /// number of clauses. |
2898 | void reserveClauses(unsigned Size) { growOperands(Size); } |
2899 | |
2900 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2901 | static bool classof(const Instruction *I) { |
2902 | return I->getOpcode() == Instruction::LandingPad; |
2903 | } |
2904 | static bool classof(const Value *V) { |
2905 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2906 | } |
2907 | }; |
2908 | |
2909 | template <> |
2910 | struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> { |
2911 | }; |
2912 | |
2913 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
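
A minimal usage sketch, under assumed names (LPadTy, TypeInfo, and LPadBB are hypothetical; TypeInfo stands for a type-info Constant*):

  LandingPadInst *LP =
      LandingPadInst::Create(LPadTy, /*NumReservedClauses=*/1, "lpad",
                             /*InsertAtEnd=*/LPadBB);
  LP->setCleanup(true);      // run the pad even if no clause catches
  LP->addClause(TypeInfo);   // non-array clause type => catch clause
  // LP->isCatch(0) is true, LP->isFilter(0) is false (filters use array
  // types), and LP->getNumClauses() == 1.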
2914 | |
2915 | //===----------------------------------------------------------------------===// |
2916 | // ReturnInst Class |
2917 | //===----------------------------------------------------------------------===// |
2918 | |
2919 | //===--------------------------------------------------------------------------- |
2920 | /// Return a value (possibly void), from a function. Execution |
2921 | /// does not continue in this function any longer. |
2922 | /// |
2923 | class ReturnInst : public Instruction { |
2924 | ReturnInst(const ReturnInst &RI); |
2925 | |
2926 | private: |
2927 | // ReturnInst constructors: |
2928 | // ReturnInst() - 'ret void' instruction |
2929 | // ReturnInst( null) - 'ret void' instruction |
2930 | // ReturnInst(Value* X) - 'ret X' instruction |
2931 | // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I |
2932 | // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I |
2933 | // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B |
2934 | // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B |
2935 | // |
2936 | // NOTE: If the Value* passed is of type void then the constructor behaves as |
2937 | // if it was passed NULL. |
2938 | explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr, |
2939 | Instruction *InsertBefore = nullptr); |
2940 | ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd); |
2941 | explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd); |
2942 | |
2943 | protected: |
2944 | // Note: Instruction needs to be a friend here to call cloneImpl. |
2945 | friend class Instruction; |
2946 | |
2947 | ReturnInst *cloneImpl() const; |
2948 | |
2949 | public: |
2950 | static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr, |
2951 | Instruction *InsertBefore = nullptr) { |
2952 | return new(!!retVal) ReturnInst(C, retVal, InsertBefore); |
2953 | } |
2954 | |
2955 | static ReturnInst* Create(LLVMContext &C, Value *retVal, |
2956 | BasicBlock *InsertAtEnd) { |
2957 | return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd); |
2958 | } |
2959 | |
2960 | static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) { |
2961 | return new(0) ReturnInst(C, InsertAtEnd); |
2962 | } |
2963 | |
2964 | /// Provide fast operand accessors |
2965 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2966 | |
2967 | /// Convenience accessor. Returns null if there is no return value. |
2968 | Value *getReturnValue() const { |
2969 | return getNumOperands() != 0 ? getOperand(0) : nullptr; |
2970 | } |
2971 | |
2972 | unsigned getNumSuccessors() const { return 0; } |
2973 | |
2974 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
2975 | static bool classof(const Instruction *I) { |
2976 | return (I->getOpcode() == Instruction::Ret); |
2977 | } |
2978 | static bool classof(const Value *V) { |
2979 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
2980 | } |
2981 | |
2982 | private: |
2983 | BasicBlock *getSuccessor(unsigned idx) const { |
2984 | llvm_unreachable("ReturnInst has no successors!");
2985 | } |
2986 | |
2987 | void setSuccessor(unsigned idx, BasicBlock *B) { |
2988 | llvm_unreachable("ReturnInst has no successors!");
2989 | } |
2990 | }; |
2991 | |
2992 | template <> |
2993 | struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> { |
2994 | }; |
2995 | |
2996 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
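
A minimal usage sketch, under assumed names (Ctx, RV, VoidBB, and ValBB are hypothetical):

  ReturnInst *RetVoid = ReturnInst::Create(Ctx, VoidBB);     // 'ret void'
  ReturnInst *RetVal  = ReturnInst::Create(Ctx, RV, ValBB);  // 'ret <ty> RV'
  // A void return allocates zero operands, so:
  //   RetVoid->getReturnValue() == nullptr
  //   RetVal->getReturnValue() == RV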
2997 | |
2998 | //===----------------------------------------------------------------------===// |
2999 | // BranchInst Class |
3000 | //===----------------------------------------------------------------------===// |
3001 | |
3002 | //===--------------------------------------------------------------------------- |
3003 | /// Conditional or Unconditional Branch instruction. |
3004 | /// |
3005 | class BranchInst : public Instruction { |
3006 | /// Ops list - Branches are strange. The operands are ordered: |
3007 | /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because |
3008 | /// they don't have to check for cond/uncond branchness. These are mostly |
3009 | /// accessed relative to op_end().
3010 | BranchInst(const BranchInst &BI); |
3011 | // BranchInst constructors (where {B, T, F} are blocks, and C is a condition): |
3012 | // BranchInst(BB *B) - 'br B' |
3013 | // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F' |
3014 | // BranchInst(BB* B, Inst *I) - 'br B' insert before I |
3015 | // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I |
3016 | // BranchInst(BB* B, BB *I) - 'br B' insert at end |
3017 | // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end |
3018 | explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr); |
3019 | BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, |
3020 | Instruction *InsertBefore = nullptr); |
3021 | BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd); |
3022 | BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond, |
3023 | BasicBlock *InsertAtEnd); |
3024 | |
3025 | void AssertOK(); |
3026 | |
3027 | protected: |
3028 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3029 | friend class Instruction; |
3030 | |
3031 | BranchInst *cloneImpl() const; |
3032 | |
3033 | public: |
3034 | /// Iterator type that casts an operand to a basic block. |
3035 | /// |
3036 | /// This only makes sense because the successors are stored as adjacent |
3037 | /// operands for branch instructions. |
3038 | struct succ_op_iterator |
3039 | : iterator_adaptor_base<succ_op_iterator, value_op_iterator, |
3040 | std::random_access_iterator_tag, BasicBlock *, |
3041 | ptrdiff_t, BasicBlock *, BasicBlock *> { |
3042 | explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} |
3043 | |
3044 | BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3045 | BasicBlock *operator->() const { return operator*(); } |
3046 | }; |
3047 | |
3048 | /// The const version of `succ_op_iterator`. |
3049 | struct const_succ_op_iterator |
3050 | : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, |
3051 | std::random_access_iterator_tag, |
3052 | const BasicBlock *, ptrdiff_t, const BasicBlock *, |
3053 | const BasicBlock *> { |
3054 | explicit const_succ_op_iterator(const_value_op_iterator I) |
3055 | : iterator_adaptor_base(I) {} |
3056 | |
3057 | const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3058 | const BasicBlock *operator->() const { return operator*(); } |
3059 | }; |
3060 | |
3061 | static BranchInst *Create(BasicBlock *IfTrue, |
3062 | Instruction *InsertBefore = nullptr) { |
3063 | return new(1) BranchInst(IfTrue, InsertBefore); |
3064 | } |
3065 | |
3066 | static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, |
3067 | Value *Cond, Instruction *InsertBefore = nullptr) { |
3068 | return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore); |
3069 | } |
3070 | |
3071 | static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) { |
3072 | return new(1) BranchInst(IfTrue, InsertAtEnd); |
3073 | } |
3074 | |
3075 | static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse, |
3076 | Value *Cond, BasicBlock *InsertAtEnd) { |
3077 | return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd); |
3078 | } |
3079 | |
3080 | /// Transparently provide more efficient getOperand methods. |
3081 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
3082 | |
3083 | bool isUnconditional() const { return getNumOperands() == 1; } |
3084 | bool isConditional() const { return getNumOperands() == 3; } |
3085 | |
3086 | Value *getCondition() const { |
3087 | assert(isConditional() && "Cannot get condition of an uncond branch!");
3088 | return Op<-3>(); |
3089 | } |
3090 | |
3091 | void setCondition(Value *V) { |
3092 | assert(isConditional() && "Cannot set condition of unconditional branch!");
3093 | Op<-3>() = V; |
3094 | } |
3095 | |
3096 | unsigned getNumSuccessors() const { return 1+isConditional(); } |
3097 | |
3098 | BasicBlock *getSuccessor(unsigned i) const { |
3099 | assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
3100 | return cast_or_null<BasicBlock>((&Op<-1>() - i)->get()); |
3101 | } |
3102 | |
3103 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { |
3104 | assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
3105 | *(&Op<-1>() - idx) = NewSucc; |
3106 | } |
3107 | |
3108 | /// Swap the successors of this branch instruction. |
3109 | /// |
3110 | /// Swaps the successors of the branch instruction. This also swaps any |
3111 | /// branch weight metadata associated with the instruction so that it |
3112 | /// continues to map correctly to each operand. |
3113 | void swapSuccessors(); |
3114 | |
3115 | iterator_range<succ_op_iterator> successors() { |
3116 | return make_range( |
3117 | succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)), |
3118 | succ_op_iterator(value_op_end())); |
3119 | } |
3120 | |
3121 | iterator_range<const_succ_op_iterator> successors() const { |
3122 | return make_range(const_succ_op_iterator( |
3123 | std::next(value_op_begin(), isConditional() ? 1 : 0)), |
3124 | const_succ_op_iterator(value_op_end())); |
3125 | } |
3126 | |
3127 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3128 | static bool classof(const Instruction *I) { |
3129 | return (I->getOpcode() == Instruction::Br); |
3130 | } |
3131 | static bool classof(const Value *V) { |
3132 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3133 | } |
3134 | }; |
3135 | |
3136 | template <> |
3137 | struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> { |
3138 | }; |
3139 | |
3140 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
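
A minimal usage sketch, under assumed names (Cond, ThenBB, ElseBB, and EntryBB are hypothetical; Cond is an i1 value):

  BranchInst *Br = BranchInst::Create(ThenBB, ElseBB, Cond,
                                      /*InsertAtEnd=*/EntryBB);
  // Operands are stored [Cond, FalseDest, TrueDest] and successors are read
  // back-to-front from op_end(), so:
  //   Br->isConditional() == true
  //   Br->getSuccessor(0) == ThenBB, Br->getSuccessor(1) == ElseBB
  Br->swapSuccessors();   // also swaps any attached branch-weight metadata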
3141 | |
3142 | //===----------------------------------------------------------------------===// |
3143 | // SwitchInst Class |
3144 | //===----------------------------------------------------------------------===// |
3145 | |
3146 | //===--------------------------------------------------------------------------- |
3147 | /// Multiway switch |
3148 | /// |
3149 | class SwitchInst : public Instruction { |
3150 | unsigned ReservedSpace; |
3151 | |
3152 | // Operand[0] = Value to switch on |
3153 | // Operand[1] = Default basic block destination |
3154 | // Operand[2n ] = Value to match |
3155 | // Operand[2n+1] = BasicBlock to go to on match |
3156 | SwitchInst(const SwitchInst &SI); |
3157 | |
3158 | /// Create a new switch instruction, specifying a value to switch on and a |
3159 | /// default destination. The number of additional cases can be specified here |
3160 | /// to make memory allocation more efficient. This constructor can also |
3161 | /// auto-insert before another instruction. |
3162 | SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, |
3163 | Instruction *InsertBefore); |
3164 | |
3165 | /// Create a new switch instruction, specifying a value to switch on and a |
3166 | /// default destination. The number of additional cases can be specified here |
3167 | /// to make memory allocation more efficient. This constructor also |
3168 | /// auto-inserts at the end of the specified BasicBlock. |
3169 | SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, |
3170 | BasicBlock *InsertAtEnd); |
3171 | |
3172 | // allocate space for exactly zero operands |
3173 | void *operator new(size_t s) { |
3174 | return User::operator new(s); |
3175 | } |
3176 | |
3177 | void init(Value *Value, BasicBlock *Default, unsigned NumReserved); |
3178 | void growOperands(); |
3179 | |
3180 | protected: |
3181 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3182 | friend class Instruction; |
3183 | |
3184 | SwitchInst *cloneImpl() const; |
3185 | |
3186 | public: |
3187 | // -2 as a signed value; a pseudo-index marking the default case.
3188 | static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1); |
3189 | |
3190 | template <typename CaseHandleT> class CaseIteratorImpl; |
3191 | |
3192 | /// A handle to a particular switch case. It exposes a convenient interface |
3193 | /// to both the case value and the successor block. |
3194 | /// |
3195 | /// We define this as a template and instantiate it to form both a const and |
3196 | /// non-const handle. |
3197 | template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT> |
3198 | class CaseHandleImpl { |
3199 | // Directly befriend both const and non-const iterators. |
3200 | friend class SwitchInst::CaseIteratorImpl< |
3201 | CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>; |
3202 | |
3203 | protected: |
3204 | // Expose the switch type we're parameterized with to the iterator. |
3205 | using SwitchInstType = SwitchInstT; |
3206 | |
3207 | SwitchInstT *SI; |
3208 | ptrdiff_t Index; |
3209 | |
3210 | CaseHandleImpl() = default; |
3211 | CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {} |
3212 | |
3213 | public: |
3214 | /// Resolves the case value for the current case.
3215 | ConstantIntT *getCaseValue() const { |
3216 | assert((unsigned)Index < SI->getNumCases() &&
3217 | "Index out the number of cases.");
3218 | return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2)); |
3219 | } |
3220 | |
3221 | /// Resolves the successor for the current case.
3222 | BasicBlockT *getCaseSuccessor() const { |
3223 | assert(((unsigned)Index < SI->getNumCases() ||
3224 | (unsigned)Index == DefaultPseudoIndex) &&
3225 | "Index out the number of cases.");
3226 | return SI->getSuccessor(getSuccessorIndex()); |
3227 | } |
3228 | |
3229 | /// Returns the index of the current case.
3230 | unsigned getCaseIndex() const { return Index; } |
3231 | |
3232 | /// Returns the successor index for the current case's successor.
3233 | unsigned getSuccessorIndex() const { |
3234 | assert(((unsigned)Index == DefaultPseudoIndex ||
3235 | (unsigned)Index < SI->getNumCases()) &&
3236 | "Index out the number of cases.");
3237 | return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0; |
3238 | } |
3239 | |
3240 | bool operator==(const CaseHandleImpl &RHS) const { |
3241 | assert(SI == RHS.SI && "Incompatible operators.");
3242 | return Index == RHS.Index; |
3243 | } |
3244 | }; |
3245 | |
3246 | using ConstCaseHandle = |
3247 | CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>; |
3248 | |
3249 | class CaseHandle |
3250 | : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> { |
3251 | friend class SwitchInst::CaseIteratorImpl<CaseHandle>; |
3252 | |
3253 | public: |
3254 | CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {} |
3255 | |
3256 | /// Sets the new value for the current case.
3257 | void setValue(ConstantInt *V) { |
3258 | assert((unsigned)Index < SI->getNumCases() &&
3259 | "Index out the number of cases.");
3260 | SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V)); |
3261 | } |
3262 | |
3263 | /// Sets the new successor for the current case.
3264 | void setSuccessor(BasicBlock *S) { |
3265 | SI->setSuccessor(getSuccessorIndex(), S); |
3266 | } |
3267 | }; |
3268 | |
3269 | template <typename CaseHandleT> |
3270 | class CaseIteratorImpl |
3271 | : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>, |
3272 | std::random_access_iterator_tag, |
3273 | CaseHandleT> { |
3274 | using SwitchInstT = typename CaseHandleT::SwitchInstType; |
3275 | |
3276 | CaseHandleT Case; |
3277 | |
3278 | public: |
3279 | /// Default constructed iterator is in an invalid state until assigned to |
3280 | /// a case for a particular switch. |
3281 | CaseIteratorImpl() = default; |
3282 | |
3283 | /// Initializes case iterator for given SwitchInst and for given |
3284 | /// case number. |
3285 | CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {} |
3286 | |
3287 | /// Initializes case iterator for given SwitchInst and for given |
3288 | /// successor index. |
3289 | static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI, |
3290 | unsigned SuccessorIndex) { |
3291 | assert(SuccessorIndex < SI->getNumSuccessors() &&
3292 | "Successor index # out of range!");
3293 | return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1) |
3294 | : CaseIteratorImpl(SI, DefaultPseudoIndex); |
3295 | } |
3296 | |
3297 | /// Support converting to the const variant. This will be a no-op for the
3298 | /// const variant.
3299 | operator CaseIteratorImpl<ConstCaseHandle>() const { |
3300 | return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index); |
3301 | } |
3302 | |
3303 | CaseIteratorImpl &operator+=(ptrdiff_t N) { |
3304 | // Check index correctness after addition. |
3305 | // Note: Index == getNumCases() means end(). |
3306 | assert(Case.Index + N >= 0 &&
3307 | (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
3308 | "Case.Index out the number of cases.");
3309 | Case.Index += N; |
3310 | return *this; |
3311 | } |
3312 | CaseIteratorImpl &operator-=(ptrdiff_t N) { |
3313 | // Check index correctness after subtraction. |
3314 | // Note: Case.Index == getNumCases() means end(). |
3315 | assert(Case.Index - N >= 0 &&
3316 | (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
3317 | "Case.Index out the number of cases.");
3318 | Case.Index -= N; |
3319 | return *this; |
3320 | } |
3321 | ptrdiff_t operator-(const CaseIteratorImpl &RHS) const { |
3322 | assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3323 | return Case.Index - RHS.Case.Index; |
3324 | } |
3325 | bool operator==(const CaseIteratorImpl &RHS) const { |
3326 | return Case == RHS.Case; |
3327 | } |
3328 | bool operator<(const CaseIteratorImpl &RHS) const { |
3329 | assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
3330 | return Case.Index < RHS.Case.Index; |
3331 | } |
3332 | CaseHandleT &operator*() { return Case; } |
3333 | const CaseHandleT &operator*() const { return Case; } |
3334 | }; |
3335 | |
3336 | using CaseIt = CaseIteratorImpl<CaseHandle>; |
3337 | using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>; |
3338 | |
3339 | static SwitchInst *Create(Value *Value, BasicBlock *Default, |
3340 | unsigned NumCases, |
3341 | Instruction *InsertBefore = nullptr) { |
3342 | return new SwitchInst(Value, Default, NumCases, InsertBefore); |
3343 | } |
3344 | |
3345 | static SwitchInst *Create(Value *Value, BasicBlock *Default, |
3346 | unsigned NumCases, BasicBlock *InsertAtEnd) { |
3347 | return new SwitchInst(Value, Default, NumCases, InsertAtEnd); |
3348 | } |
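/// A minimal creation sketch; Cond, DefaultBB, CaseBB and the LLVMContext
/// Ctx are assumed to already exist in the caller:
/// \code
///   SwitchInst *SI = SwitchInst::Create(Cond, DefaultBB, /*NumCases=*/1);
///   SI->addCase(ConstantInt::get(IntegerType::get(Ctx, 32), 42), CaseBB);
/// \endcode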
3349 | |
3350 | /// Provide fast operand accessors |
3351 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
3352 | |
3353 | // Accessor Methods for Switch stmt |
3354 | Value *getCondition() const { return getOperand(0); } |
3355 | void setCondition(Value *V) { setOperand(0, V); } |
3356 | |
3357 | BasicBlock *getDefaultDest() const { |
3358 | return cast<BasicBlock>(getOperand(1)); |
3359 | } |
3360 | |
3361 | void setDefaultDest(BasicBlock *DefaultCase) { |
3362 | setOperand(1, reinterpret_cast<Value*>(DefaultCase)); |
3363 | } |
3364 | |
3365 | /// Return the number of 'cases' in this switch instruction, excluding the |
3366 | /// default case. |
3367 | unsigned getNumCases() const { |
3368 | return getNumOperands()/2 - 1; |
3369 | } |
3370 | |
3371 | /// Returns a read/write iterator that points to the first case in the |
3372 | /// SwitchInst. |
3373 | CaseIt case_begin() { |
3374 | return CaseIt(this, 0); |
3375 | } |
3376 | |
3377 | /// Returns a read-only iterator that points to the first case in the |
3378 | /// SwitchInst. |
3379 | ConstCaseIt case_begin() const { |
3380 | return ConstCaseIt(this, 0); |
3381 | } |
3382 | |
3383 | /// Returns a read/write iterator that points one past the last case in
3384 | /// the SwitchInst.
3385 | CaseIt case_end() { |
3386 | return CaseIt(this, getNumCases()); |
3387 | } |
3388 | |
3389 | /// Returns a read-only iterator that points one past the last case in
3390 | /// the SwitchInst.
3391 | ConstCaseIt case_end() const { |
3392 | return ConstCaseIt(this, getNumCases()); |
3393 | } |
3394 | |
3395 | /// Iteration adapter for range-for loops. |
3396 | iterator_range<CaseIt> cases() { |
3397 | return make_range(case_begin(), case_end()); |
3398 | } |
3399 | |
3400 | /// Constant iteration adapter for range-for loops. |
3401 | iterator_range<ConstCaseIt> cases() const { |
3402 | return make_range(case_begin(), case_end()); |
3403 | } |
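/// A short sketch of the adapters above; SI is assumed to be an existing
/// SwitchInst*:
/// \code
///   for (const auto &Case : SI->cases()) {
///     ConstantInt *Val = Case.getCaseValue();
///     BasicBlock *Succ = Case.getCaseSuccessor();
///     // ... inspect Val and Succ ...
///   }
/// \endcode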
3404 | |
3405 | /// Returns an iterator that points to the default case. |
3406 | /// Note: this iterator can only resolve the successor. Attempting to
3407 | /// resolve the case value causes an assertion.
3408 | /// Also note that incrementing or decrementing this iterator causes an
3409 | /// assertion and invalidates it.
3410 | CaseIt case_default() { |
3411 | return CaseIt(this, DefaultPseudoIndex); |
3412 | } |
3413 | ConstCaseIt case_default() const { |
3414 | return ConstCaseIt(this, DefaultPseudoIndex); |
3415 | } |
3416 | |
3417 | /// Search all of the case values for the specified constant. If it is
3418 | /// explicitly handled, return its case iterator; otherwise return the
3419 | /// default case iterator to indicate that the value is handled by the
3420 | /// default handler.
3421 | CaseIt findCaseValue(const ConstantInt *C) { |
3422 | CaseIt I = llvm::find_if( |
3423 | cases(), [C](CaseHandle &Case) { return Case.getCaseValue() == C; }); |
3424 | if (I != case_end()) |
3425 | return I; |
3426 | |
3427 | return case_default(); |
3428 | } |
3429 | ConstCaseIt findCaseValue(const ConstantInt *C) const { |
3430 | ConstCaseIt I = llvm::find_if(cases(), [C](ConstCaseHandle &Case) { |
3431 | return Case.getCaseValue() == C; |
3432 | }); |
3433 | if (I != case_end()) |
3434 | return I; |
3435 | |
3436 | return case_default(); |
3437 | } |
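/// A sketch of the lookup above; SI and the ConstantInt *C are assumed to
/// exist. findCaseValue() never returns case_end(), so compare against
/// case_default() instead:
/// \code
///   SwitchInst::CaseIt It = SI->findCaseValue(C);
///   if (It != SI->case_default()) {
///     BasicBlock *Target = It->getCaseSuccessor();
///     // ... C is explicitly handled and jumps to Target ...
///   }
/// \endcode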
3438 | |
3439 | /// Finds the unique case value for a given successor. Returns null if the |
3440 | /// successor is not found, not unique, or is the default case. |
3441 | ConstantInt *findCaseDest(BasicBlock *BB) { |
3442 | if (BB == getDefaultDest()) |
3443 | return nullptr; |
3444 | |
3445 | ConstantInt *CI = nullptr; |
3446 | for (auto Case : cases()) { |
3447 | if (Case.getCaseSuccessor() != BB) |
3448 | continue; |
3449 | |
3450 | if (CI) |
3451 | return nullptr; // Multiple cases lead to BB. |
3452 | |
3453 | CI = Case.getCaseValue(); |
3454 | } |
3455 | |
3456 | return CI; |
3457 | } |
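/// A sketch; SI and BB are assumed to exist. A null result means BB is the
/// default destination or is reached by zero or several case values:
/// \code
///   if (ConstantInt *OnlyVal = SI->findCaseDest(BB)) {
///     // BB is reached exactly through the case with value OnlyVal.
///   }
/// \endcode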
3458 | |
3459 | /// Add an entry to the switch instruction. |
3460 | /// Note:
3461 | /// This action invalidates case_end(). The old case_end() iterator will
3462 | /// point to the added case.
3463 | void addCase(ConstantInt *OnVal, BasicBlock *Dest); |
3464 | |
3465 | /// This method removes the specified case and its successor from the switch |
3466 | /// instruction. Note that this operation may reorder the remaining cases
3467 | /// following the removed one.
3468 | /// Note: |
3469 | /// This action invalidates iterators for all cases following the one removed, |
3470 | /// including the case_end() iterator. It returns an iterator for the next |
3471 | /// case. |
3472 | CaseIt removeCase(CaseIt I); |
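/// Because removeCase() invalidates the following iterators, erase loops
/// should continue from the returned iterator, as in this sketch (SI is
/// assumed to exist; shouldRemove() is a hypothetical predicate):
/// \code
///   for (SwitchInst::CaseIt It = SI->case_begin(); It != SI->case_end();) {
///     if (shouldRemove(*It))       // shouldRemove() is hypothetical.
///       It = SI->removeCase(It);   // Continue from the returned iterator.
///     else
///       ++It;
///   }
/// \endcode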
3473 | |
3474 | unsigned getNumSuccessors() const { return getNumOperands()/2; } |
3475 | BasicBlock *getSuccessor(unsigned idx) const { |
3476 | assert(idx < getNumSuccessors() && "Successor idx out of range for switch!");
3477 | return cast<BasicBlock>(getOperand(idx*2+1)); |
3478 | } |
3479 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { |
3480 | assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
3481 | setOperand(idx * 2 + 1, NewSucc); |
3482 | } |
3483 | |
3484 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3485 | static bool classof(const Instruction *I) { |
3486 | return I->getOpcode() == Instruction::Switch; |
3487 | } |
3488 | static bool classof(const Value *V) { |
3489 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3490 | } |
3491 | }; |
3492 | |
3493 | /// A wrapper class to simplify modification of SwitchInst cases along with |
3494 | /// their prof branch_weights metadata. |
3495 | class SwitchInstProfUpdateWrapper { |
3496 | SwitchInst &SI; |
3497 | Optional<SmallVector<uint32_t, 8> > Weights = None; |
3498 | bool Changed = false; |
3499 | |
3500 | protected: |
3501 | static MDNode *getProfBranchWeightsMD(const SwitchInst &SI); |
3502 | |
3503 | MDNode *buildProfBranchWeightsMD(); |
3504 | |
3505 | void init(); |
3506 | |
3507 | public: |
3508 | using CaseWeightOpt = Optional<uint32_t>; |
3509 | SwitchInst *operator->() { return &SI; } |
3510 | SwitchInst &operator*() { return SI; } |
3511 | operator SwitchInst *() { return &SI; } |
3512 | |
3513 | SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); } |
3514 | |
3515 | ~SwitchInstProfUpdateWrapper() { |
3516 | if (Changed) |
3517 | SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD()); |
3518 | } |
3519 | |
3520 | /// Delegate the call to the underlying SwitchInst::removeCase() and remove
3521 | /// the corresponding branch weight.
3522 | SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I); |
3523 | |
3524 | /// Delegate the call to the underlying SwitchInst::addCase() and set the |
3525 | /// specified branch weight for the added case. |
3526 | void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W); |
3527 | |
3528 | /// Delegate the call to the underlying SwitchInst::eraseFromParent() and
3529 | /// mark this object so that its destructor leaves the SwitchInst untouched.
3530 | SymbolTableList<Instruction>::iterator eraseFromParent(); |
3531 | |
3532 | void setSuccessorWeight(unsigned idx, CaseWeightOpt W); |
3533 | CaseWeightOpt getSuccessorWeight(unsigned idx); |
3534 | |
3535 | static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx); |
3536 | }; |
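/// A usage sketch for the wrapper above; SI, OnVal and Dest are assumed to
/// exist. The updated branch_weights metadata is written back when the
/// wrapper is destroyed:
/// \code
///   {
///     SwitchInstProfUpdateWrapper SIW(*SI);
///     SIW.addCase(OnVal, Dest, /*W=*/10);
///   } // !prof metadata is rebuilt here, in ~SwitchInstProfUpdateWrapper.
/// \endcode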
3537 | |
3538 | template <> |
3539 | struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> { |
3540 | }; |
3541 | |
3542 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3543 | |
3544 | //===----------------------------------------------------------------------===// |
3545 | // IndirectBrInst Class |
3546 | //===----------------------------------------------------------------------===// |
3547 | |
3548 | //===--------------------------------------------------------------------------- |
3549 | /// Indirect Branch Instruction. |
3550 | /// |
3551 | class IndirectBrInst : public Instruction { |
3552 | unsigned ReservedSpace; |
3553 | |
3554 | // Operand[0] = Address to jump to |
3555 | // Operand[n+1] = n-th destination |
3556 | IndirectBrInst(const IndirectBrInst &IBI); |
3557 | |
3558 | /// Create a new indirectbr instruction, specifying an |
3559 | /// Address to jump to. The number of expected destinations can be specified |
3560 | /// here to make memory allocation more efficient. This constructor can also |
3561 | /// autoinsert before another instruction. |
3562 | IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore); |
3563 | |
3564 | /// Create a new indirectbr instruction, specifying an |
3565 | /// Address to jump to. The number of expected destinations can be specified |
3566 | /// here to make memory allocation more efficient. This constructor also |
3567 | /// autoinserts at the end of the specified BasicBlock. |
3568 | IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd); |
3569 | |
3570 | // allocate space for exactly zero operands |
3571 | void *operator new(size_t s) { |
3572 | return User::operator new(s); |
3573 | } |
3574 | |
3575 | void init(Value *Address, unsigned NumDests); |
3576 | void growOperands(); |
3577 | |
3578 | protected: |
3579 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3580 | friend class Instruction; |
3581 | |
3582 | IndirectBrInst *cloneImpl() const; |
3583 | |
3584 | public: |
3585 | /// Iterator type that casts an operand to a basic block. |
3586 | /// |
3587 | /// This only makes sense because the successors are stored as adjacent |
3588 | /// operands for indirectbr instructions. |
3589 | struct succ_op_iterator |
3590 | : iterator_adaptor_base<succ_op_iterator, value_op_iterator, |
3591 | std::random_access_iterator_tag, BasicBlock *, |
3592 | ptrdiff_t, BasicBlock *, BasicBlock *> { |
3593 | explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} |
3594 | |
3595 | BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3596 | BasicBlock *operator->() const { return operator*(); } |
3597 | }; |
3598 | |
3599 | /// The const version of `succ_op_iterator`. |
3600 | struct const_succ_op_iterator |
3601 | : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, |
3602 | std::random_access_iterator_tag, |
3603 | const BasicBlock *, ptrdiff_t, const BasicBlock *, |
3604 | const BasicBlock *> { |
3605 | explicit const_succ_op_iterator(const_value_op_iterator I) |
3606 | : iterator_adaptor_base(I) {} |
3607 | |
3608 | const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } |
3609 | const BasicBlock *operator->() const { return operator*(); } |
3610 | }; |
3611 | |
3612 | static IndirectBrInst *Create(Value *Address, unsigned NumDests, |
3613 | Instruction *InsertBefore = nullptr) { |
3614 | return new IndirectBrInst(Address, NumDests, InsertBefore); |
3615 | } |
3616 | |
3617 | static IndirectBrInst *Create(Value *Address, unsigned NumDests, |
3618 | BasicBlock *InsertAtEnd) { |
3619 | return new IndirectBrInst(Address, NumDests, InsertAtEnd); |
3620 | } |
3621 | |
3622 | /// Provide fast operand accessors. |
3623 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
3624 | |
3625 | // Accessor Methods for IndirectBrInst instruction. |
3626 | Value *getAddress() { return getOperand(0); } |
3627 | const Value *getAddress() const { return getOperand(0); } |
3628 | void setAddress(Value *V) { setOperand(0, V); } |
3629 | |
3630 | /// Return the number of possible destinations in this
3631 | /// indirectbr instruction.
3632 | unsigned getNumDestinations() const { return getNumOperands()-1; } |
3633 | |
3634 | /// Return the specified destination. |
3635 | BasicBlock *getDestination(unsigned i) { return getSuccessor(i); } |
3636 | const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); } |
3637 | |
3638 | /// Add a destination. |
3639 | /// |
3640 | void addDestination(BasicBlock *Dest); |
3641 | |
3642 | /// This method removes the specified successor from the |
3643 | /// indirectbr instruction. |
3644 | void removeDestination(unsigned i); |
3645 | |
3646 | unsigned getNumSuccessors() const { return getNumOperands()-1; } |
3647 | BasicBlock *getSuccessor(unsigned i) const { |
3648 | return cast<BasicBlock>(getOperand(i+1)); |
3649 | } |
3650 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { |
3651 | setOperand(i + 1, NewSucc); |
3652 | } |
3653 | |
3654 | iterator_range<succ_op_iterator> successors() { |
3655 | return make_range(succ_op_iterator(std::next(value_op_begin())), |
3656 | succ_op_iterator(value_op_end())); |
3657 | } |
3658 | |
3659 | iterator_range<const_succ_op_iterator> successors() const { |
3660 | return make_range(const_succ_op_iterator(std::next(value_op_begin())), |
3661 | const_succ_op_iterator(value_op_end())); |
3662 | } |
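/// A small construction sketch; the blockaddress value Addr and the blocks
/// BB1 and BB2 are assumed to exist:
/// \code
///   IndirectBrInst *IBI = IndirectBrInst::Create(Addr, /*NumDests=*/2);
///   IBI->addDestination(BB1);
///   IBI->addDestination(BB2);
///   for (BasicBlock *Succ : IBI->successors()) {
///     // ... visit each possible destination ...
///   }
/// \endcode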
3663 | |
3664 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3665 | static bool classof(const Instruction *I) { |
3666 | return I->getOpcode() == Instruction::IndirectBr; |
3667 | } |
3668 | static bool classof(const Value *V) { |
3669 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3670 | } |
3671 | }; |
3672 | |
3673 | template <> |
3674 | struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> { |
3675 | }; |
3676 | |
3677 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
3678 | |
3679 | //===----------------------------------------------------------------------===// |
3680 | // InvokeInst Class |
3681 | //===----------------------------------------------------------------------===// |
3682 | |
3683 | /// Invoke instruction. The SubclassData field is used to hold the |
3684 | /// calling convention of the call. |
3685 | /// |
3686 | class InvokeInst : public CallBase { |
3687 | /// The number of operands for this call beyond the called function, |
3688 | /// arguments, and operand bundles. |
3689 | static constexpr int NumExtraOperands = 2; |
3690 | |
3691 | /// The index from the end of the operand array to the normal destination. |
3692 | static constexpr int NormalDestOpEndIdx = -3; |
3693 | |
3694 | /// The index from the end of the operand array to the unwind destination. |
3695 | static constexpr int UnwindDestOpEndIdx = -2; |
3696 | |
3697 | InvokeInst(const InvokeInst &BI); |
3698 | |
3699 | /// Construct an InvokeInst given a range of arguments.
3702 | inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3703 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3704 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3705 | const Twine &NameStr, Instruction *InsertBefore); |
3706 | |
3707 | inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3708 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3709 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3710 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
3711 | |
3712 | void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3713 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3714 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); |
3715 | |
3716 | /// Compute the number of operands to allocate. |
3717 | static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { |
3718 | // We need one operand for the called function, plus our extra operands and |
3719 | // the input operand counts provided. |
3720 | return 1 + NumExtraOperands + NumArgs + NumBundleInputs; |
3721 | } |
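  // For example, invoking f(a, b) with no operand bundles allocates
  // 1 (callee) + 2 (normal + unwind dests) + 2 (args) = 5 operands.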
3722 | |
3723 | protected: |
3724 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3725 | friend class Instruction; |
3726 | |
3727 | InvokeInst *cloneImpl() const; |
3728 | |
3729 | public: |
3730 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3731 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3732 | const Twine &NameStr, |
3733 | Instruction *InsertBefore = nullptr) { |
3734 | int NumOperands = ComputeNumOperands(Args.size()); |
3735 | return new (NumOperands) |
3736 | InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands, |
3737 | NameStr, InsertBefore); |
3738 | } |
3739 | |
3740 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3741 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3742 | ArrayRef<OperandBundleDef> Bundles = None, |
3743 | const Twine &NameStr = "", |
3744 | Instruction *InsertBefore = nullptr) { |
3745 | int NumOperands = |
3746 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
3747 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3748 | |
3749 | return new (NumOperands, DescriptorBytes) |
3750 | InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, |
3751 | NameStr, InsertBefore); |
3752 | } |
3753 | |
3754 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3755 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3756 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3757 | int NumOperands = ComputeNumOperands(Args.size()); |
3758 | return new (NumOperands) |
3759 | InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands, |
3760 | NameStr, InsertAtEnd); |
3761 | } |
3762 | |
3763 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3764 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3765 | ArrayRef<OperandBundleDef> Bundles, |
3766 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3767 | int NumOperands = |
3768 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); |
3769 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3770 | |
3771 | return new (NumOperands, DescriptorBytes) |
3772 | InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, |
3773 | NameStr, InsertAtEnd); |
3774 | } |
3775 | |
3776 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3777 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3778 | const Twine &NameStr, |
3779 | Instruction *InsertBefore = nullptr) { |
3780 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3781 | IfException, Args, None, NameStr, InsertBefore); |
3782 | } |
3783 | |
3784 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3785 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3786 | ArrayRef<OperandBundleDef> Bundles = None, |
3787 | const Twine &NameStr = "", |
3788 | Instruction *InsertBefore = nullptr) { |
3789 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3790 | IfException, Args, Bundles, NameStr, InsertBefore); |
3791 | } |
3792 | |
3793 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3794 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3795 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3796 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3797 | IfException, Args, NameStr, InsertAtEnd); |
3798 | } |
3799 | |
3800 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, |
3801 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3802 | ArrayRef<OperandBundleDef> Bundles, |
3803 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3804 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, |
3805 | IfException, Args, Bundles, NameStr, InsertAtEnd); |
3806 | } |
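/// A minimal creation sketch; Callee (a FunctionCallee), Arg, NormalBB and
/// UnwindBB are assumed to exist. The invoke is left uninserted here:
/// \code
///   InvokeInst *II =
///       InvokeInst::Create(Callee, NormalBB, UnwindBB, {Arg}, "call");
/// \endcode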
3807 | |
3808 | /// Create a clone of \p II with a different set of operand bundles and |
3809 | /// insert it before \p InsertPt. |
3810 | /// |
3811 | /// The returned invoke instruction is identical to \p II in every way except |
3812 | /// that the operand bundles for the new instruction are set to the operand |
3813 | /// bundles in \p Bundles. |
3814 | static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles, |
3815 | Instruction *InsertPt = nullptr); |
3816 | |
3817 | /// Create a clone of \p II with a different set of operand bundles and |
3818 | /// insert it before \p InsertPt. |
3819 | /// |
3820 | /// The returned invoke instruction is identical to \p II in every way except |
3821 | /// that the operand bundle for the new instruction is set to the operand |
3822 | /// bundle in \p Bundle. |
3823 | static InvokeInst *CreateWithReplacedBundle(InvokeInst *II, |
3824 | OperandBundleDef Bundles, |
3825 | Instruction *InsertPt = nullptr); |
3826 | |
3827 | // get*Dest - Return the destination basic blocks... |
3828 | BasicBlock *getNormalDest() const { |
3829 | return cast<BasicBlock>(Op<NormalDestOpEndIdx>()); |
3830 | } |
3831 | BasicBlock *getUnwindDest() const { |
3832 | return cast<BasicBlock>(Op<UnwindDestOpEndIdx>()); |
3833 | } |
3834 | void setNormalDest(BasicBlock *B) { |
3835 | Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B); |
3836 | } |
3837 | void setUnwindDest(BasicBlock *B) { |
3838 | Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B); |
3839 | } |
3840 | |
3841 | /// Get the landingpad instruction from the landing pad |
3842 | /// block (the unwind destination). |
3843 | LandingPadInst *getLandingPadInst() const; |
3844 | |
3845 | BasicBlock *getSuccessor(unsigned i) const { |
3846 | assert(i < 2 && "Successor # out of range for invoke!");
3847 | return i == 0 ? getNormalDest() : getUnwindDest(); |
3848 | } |
3849 | |
3850 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { |
3851 | assert(i < 2 && "Successor # out of range for invoke!");
3852 | if (i == 0) |
3853 | setNormalDest(NewSucc); |
3854 | else |
3855 | setUnwindDest(NewSucc); |
3856 | } |
3857 | |
3858 | unsigned getNumSuccessors() const { return 2; } |
3859 | |
3860 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
3861 | static bool classof(const Instruction *I) { |
3862 | return (I->getOpcode() == Instruction::Invoke); |
3863 | } |
3864 | static bool classof(const Value *V) { |
3865 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
3866 | } |
3867 | |
3868 | private: |
3869 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
3870 | // method so that subclasses cannot accidentally use it. |
3871 | template <typename Bitfield> |
3872 | void setSubclassData(typename Bitfield::Type Value) { |
3873 | Instruction::setSubclassData<Bitfield>(Value); |
3874 | } |
3875 | }; |
3876 | |
3877 | InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3878 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3879 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3880 | const Twine &NameStr, Instruction *InsertBefore) |
3881 | : CallBase(Ty->getReturnType(), Instruction::Invoke, |
3882 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
3883 | InsertBefore) { |
3884 | init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); |
3885 | } |
3886 | |
3887 | InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, |
3888 | BasicBlock *IfException, ArrayRef<Value *> Args, |
3889 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3890 | const Twine &NameStr, BasicBlock *InsertAtEnd) |
3891 | : CallBase(Ty->getReturnType(), Instruction::Invoke, |
3892 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
3893 | InsertAtEnd) { |
3894 | init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); |
3895 | } |
3896 | |
3897 | //===----------------------------------------------------------------------===// |
3898 | // CallBrInst Class |
3899 | //===----------------------------------------------------------------------===// |
3900 | |
3901 | /// CallBr instruction, tracking function calls that may not return control but |
3902 | /// instead transfer it to a third location. The SubclassData field is used to |
3903 | /// hold the calling convention of the call. |
3904 | /// |
3905 | class CallBrInst : public CallBase { |
3906 | |
3907 | unsigned NumIndirectDests; |
3908 | |
3909 | CallBrInst(const CallBrInst &BI); |
3910 | |
3911 | /// Construct a CallBrInst given a range of arguments.
3914 | inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
3915 | ArrayRef<BasicBlock *> IndirectDests, |
3916 | ArrayRef<Value *> Args, |
3917 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3918 | const Twine &NameStr, Instruction *InsertBefore); |
3919 | |
3920 | inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
3921 | ArrayRef<BasicBlock *> IndirectDests, |
3922 | ArrayRef<Value *> Args, |
3923 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
3924 | const Twine &NameStr, BasicBlock *InsertAtEnd); |
3925 | |
3926 | void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest, |
3927 | ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, |
3928 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); |
3929 | |
3930 | /// Should the Indirect Destinations change, scan + update the Arg list. |
3931 | void updateArgBlockAddresses(unsigned i, BasicBlock *B); |
3932 | |
3933 | /// Compute the number of operands to allocate. |
3934 | static int ComputeNumOperands(int NumArgs, int NumIndirectDests, |
3935 | int NumBundleInputs = 0) { |
3936 | // We need one operand for the called function, plus our extra operands and |
3937 | // the input operand counts provided. |
3938 | return 2 + NumIndirectDests + NumArgs + NumBundleInputs; |
3939 | } |
3940 | |
3941 | protected: |
3942 | // Note: Instruction needs to be a friend here to call cloneImpl. |
3943 | friend class Instruction; |
3944 | |
3945 | CallBrInst *cloneImpl() const; |
3946 | |
3947 | public: |
3948 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
3949 | BasicBlock *DefaultDest, |
3950 | ArrayRef<BasicBlock *> IndirectDests, |
3951 | ArrayRef<Value *> Args, const Twine &NameStr, |
3952 | Instruction *InsertBefore = nullptr) { |
3953 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); |
3954 | return new (NumOperands) |
3955 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, |
3956 | NumOperands, NameStr, InsertBefore); |
3957 | } |
3958 | |
3959 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
3960 | BasicBlock *DefaultDest, |
3961 | ArrayRef<BasicBlock *> IndirectDests, |
3962 | ArrayRef<Value *> Args, |
3963 | ArrayRef<OperandBundleDef> Bundles = None, |
3964 | const Twine &NameStr = "", |
3965 | Instruction *InsertBefore = nullptr) { |
3966 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), |
3967 | CountBundleInputs(Bundles)); |
3968 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3969 | |
3970 | return new (NumOperands, DescriptorBytes) |
3971 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, |
3972 | NumOperands, NameStr, InsertBefore); |
3973 | } |
3974 | |
3975 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
3976 | BasicBlock *DefaultDest, |
3977 | ArrayRef<BasicBlock *> IndirectDests, |
3978 | ArrayRef<Value *> Args, const Twine &NameStr, |
3979 | BasicBlock *InsertAtEnd) { |
3980 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); |
3981 | return new (NumOperands) |
3982 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, |
3983 | NumOperands, NameStr, InsertAtEnd); |
3984 | } |
3985 | |
3986 | static CallBrInst *Create(FunctionType *Ty, Value *Func, |
3987 | BasicBlock *DefaultDest, |
3988 | ArrayRef<BasicBlock *> IndirectDests, |
3989 | ArrayRef<Value *> Args, |
3990 | ArrayRef<OperandBundleDef> Bundles, |
3991 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
3992 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), |
3993 | CountBundleInputs(Bundles)); |
3994 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); |
3995 | |
3996 | return new (NumOperands, DescriptorBytes) |
3997 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, |
3998 | NumOperands, NameStr, InsertAtEnd); |
3999 | } |
4000 | |
4001 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, |
4002 | ArrayRef<BasicBlock *> IndirectDests, |
4003 | ArrayRef<Value *> Args, const Twine &NameStr, |
4004 | Instruction *InsertBefore = nullptr) { |
4005 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4006 | IndirectDests, Args, NameStr, InsertBefore); |
4007 | } |
4008 | |
4009 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, |
4010 | ArrayRef<BasicBlock *> IndirectDests, |
4011 | ArrayRef<Value *> Args, |
4012 | ArrayRef<OperandBundleDef> Bundles = None, |
4013 | const Twine &NameStr = "", |
4014 | Instruction *InsertBefore = nullptr) { |
4015 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4016 | IndirectDests, Args, Bundles, NameStr, InsertBefore); |
4017 | } |
4018 | |
4019 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, |
4020 | ArrayRef<BasicBlock *> IndirectDests, |
4021 | ArrayRef<Value *> Args, const Twine &NameStr, |
4022 | BasicBlock *InsertAtEnd) { |
4023 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4024 | IndirectDests, Args, NameStr, InsertAtEnd); |
4025 | } |
4026 | |
4027 | static CallBrInst *Create(FunctionCallee Func, |
4028 | BasicBlock *DefaultDest, |
4029 | ArrayRef<BasicBlock *> IndirectDests, |
4030 | ArrayRef<Value *> Args, |
4031 | ArrayRef<OperandBundleDef> Bundles, |
4032 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4033 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, |
4034 | IndirectDests, Args, Bundles, NameStr, InsertAtEnd); |
4035 | } |
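/// A minimal creation sketch (callbr typically models 'asm goto'); Callee,
/// Arg, the fallthrough block DefaultBB and the indirect target IndirectBB
/// are assumed to exist:
/// \code
///   CallBrInst *CBI =
///       CallBrInst::Create(Callee, DefaultBB, {IndirectBB}, {Arg}, "res");
/// \endcode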
4036 | |
4037 | /// Create a clone of \p CBI with a different set of operand bundles and |
4038 | /// insert it before \p InsertPt. |
4039 | /// |
4040 | /// The returned callbr instruction is identical to \p CBI in every way |
4041 | /// except that the operand bundles for the new instruction are set to the |
4042 | /// operand bundles in \p Bundles. |
4043 | static CallBrInst *Create(CallBrInst *CBI, |
4044 | ArrayRef<OperandBundleDef> Bundles, |
4045 | Instruction *InsertPt = nullptr); |
4046 | |
4047 | /// Return the number of callbr indirect dest labels. |
4048 | /// |
4049 | unsigned getNumIndirectDests() const { return NumIndirectDests; } |
4050 | |
4051 | /// Return the i-th indirect dest label.
4052 | ///
4053 | Value *getIndirectDestLabel(unsigned i) const { |
4054 | assert(i < getNumIndirectDests() && "Out of bounds!");
4055 | return getOperand(i + getNumArgOperands() + getNumTotalBundleOperands() + |
4056 | 1); |
4057 | } |
4058 | |
4059 | Value *getIndirectDestLabelUse(unsigned i) const { |
4060 | assert(i < getNumIndirectDests() && "Out of bounds!");
4061 | return getOperandUse(i + getNumArgOperands() + getNumTotalBundleOperands() + |
4062 | 1); |
4063 | } |
4064 | |
4065 | // Return the destination basic blocks... |
4066 | BasicBlock *getDefaultDest() const { |
4067 | return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); |
4068 | } |
4069 | BasicBlock *getIndirectDest(unsigned i) const { |
4070 | return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); |
4071 | } |
4072 | SmallVector<BasicBlock *, 16> getIndirectDests() const { |
4073 | SmallVector<BasicBlock *, 16> IndirectDests; |
4074 | for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) |
4075 | IndirectDests.push_back(getIndirectDest(i)); |
4076 | return IndirectDests; |
4077 | } |
4078 | void setDefaultDest(BasicBlock *B) { |
4079 | *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); |
4080 | } |
4081 | void setIndirectDest(unsigned i, BasicBlock *B) { |
4082 | updateArgBlockAddresses(i, B); |
4083 | *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); |
4084 | } |
4085 | |
4086 | BasicBlock *getSuccessor(unsigned i) const { |
4087 | assert(i < getNumSuccessors() + 1 &&
4088 |        "Successor # out of range for callbr!");
4089 | return i == 0 ? getDefaultDest() : getIndirectDest(i - 1); |
4090 | } |
4091 | |
4092 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { |
4093 | assert(i < getNumIndirectDests() + 1 &&
4094 |        "Successor # out of range for callbr!");
4095 | return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc); |
4096 | } |
4097 | |
4098 | unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } |
4099 | |
4100 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4101 | static bool classof(const Instruction *I) { |
4102 | return (I->getOpcode() == Instruction::CallBr); |
4103 | } |
4104 | static bool classof(const Value *V) { |
4105 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4106 | } |
4107 | |
4108 | private: |
4109 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
4110 | // method so that subclasses cannot accidentally use it. |
4111 | template <typename Bitfield> |
4112 | void setSubclassData(typename Bitfield::Type Value) { |
4113 | Instruction::setSubclassData<Bitfield>(Value); |
4114 | } |
4115 | }; |
4116 | |
4117 | CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
4118 | ArrayRef<BasicBlock *> IndirectDests, |
4119 | ArrayRef<Value *> Args, |
4120 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
4121 | const Twine &NameStr, Instruction *InsertBefore) |
4122 | : CallBase(Ty->getReturnType(), Instruction::CallBr, |
4123 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
4124 | InsertBefore) { |
4125 | init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); |
4126 | } |
4127 | |
4128 | CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, |
4129 | ArrayRef<BasicBlock *> IndirectDests, |
4130 | ArrayRef<Value *> Args, |
4131 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, |
4132 | const Twine &NameStr, BasicBlock *InsertAtEnd) |
4133 | : CallBase(Ty->getReturnType(), Instruction::CallBr, |
4134 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, |
4135 | InsertAtEnd) { |
4136 | init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); |
4137 | } |
4138 | |
4139 | //===----------------------------------------------------------------------===// |
4140 | // ResumeInst Class |
4141 | //===----------------------------------------------------------------------===// |
4142 | |
4143 | //===--------------------------------------------------------------------------- |
4144 | /// Resume the propagation of an exception. |
4145 | /// |
4146 | class ResumeInst : public Instruction { |
4147 | ResumeInst(const ResumeInst &RI); |
4148 | |
4149 | explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr); |
4150 | ResumeInst(Value *Exn, BasicBlock *InsertAtEnd); |
4151 | |
4152 | protected: |
4153 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4154 | friend class Instruction; |
4155 | |
4156 | ResumeInst *cloneImpl() const; |
4157 | |
4158 | public: |
4159 | static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) { |
4160 | return new(1) ResumeInst(Exn, InsertBefore); |
4161 | } |
4162 | |
4163 | static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) { |
4164 | return new(1) ResumeInst(Exn, InsertAtEnd); |
4165 | } |
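/// A minimal sketch; Exn (the exception value, e.g. from a landingpad) and
/// the block BB are assumed to exist. resume is a terminator with no
/// successors:
/// \code
///   ResumeInst::Create(Exn, /*InsertAtEnd=*/BB);
/// \endcode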
4166 | |
4167 | /// Provide fast operand accessors |
4168 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
4169 | |
4170 | /// Convenience accessor. |
4171 | Value *getValue() const { return Op<0>(); } |
4172 | |
4173 | unsigned getNumSuccessors() const { return 0; } |
4174 | |
4175 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4176 | static bool classof(const Instruction *I) { |
4177 | return I->getOpcode() == Instruction::Resume; |
4178 | } |
4179 | static bool classof(const Value *V) { |
4180 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4181 | } |
4182 | |
4183 | private: |
4184 | BasicBlock *getSuccessor(unsigned idx) const { |
4185 | llvm_unreachable("ResumeInst has no successors!");
4186 | } |
4187 | |
4188 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { |
4189 | llvm_unreachable("ResumeInst has no successors!");
4190 | } |
4191 | }; |
4192 | |
4193 | template <> |
4194 | struct OperandTraits<ResumeInst> : |
4195 | public FixedNumOperandTraits<ResumeInst, 1> { |
4196 | }; |
4197 | |
4198 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)
4199 | |
4200 | //===----------------------------------------------------------------------===// |
4201 | // CatchSwitchInst Class |
4202 | //===----------------------------------------------------------------------===// |
4203 | class CatchSwitchInst : public Instruction { |
4204 | using UnwindDestField = BoolBitfieldElementT<0>; |
4205 | |
4206 | /// The number of operands actually allocated. NumOperands is |
4207 | /// the number actually in use. |
4208 | unsigned ReservedSpace; |
4209 | |
4210 | // Operand[0] = Outer scope |
4211 | // Operand[1] = Unwind block destination |
4212 | // Operand[n] = BasicBlock to go to on match |
4213 | CatchSwitchInst(const CatchSwitchInst &CSI); |
4214 | |
4215 | /// Create a new catchswitch instruction, specifying an
4216 | /// unwind destination. The number of handlers can be specified
4217 | /// here to make memory allocation more efficient.
4218 | /// This constructor can also autoinsert before another instruction. |
4219 | CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, |
4220 | unsigned NumHandlers, const Twine &NameStr, |
4221 | Instruction *InsertBefore); |
4222 | |
4223 | /// Create a new catchswitch instruction, specifying an
4224 | /// unwind destination. The number of handlers can be specified
4225 | /// here to make memory allocation more efficient.
4226 | /// This constructor also autoinserts at the end of the specified BasicBlock. |
4227 | CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, |
4228 | unsigned NumHandlers, const Twine &NameStr, |
4229 | BasicBlock *InsertAtEnd); |
4230 | |
4231 | // allocate space for exactly zero operands |
4232 | void *operator new(size_t s) { return User::operator new(s); } |
4233 | |
4234 | void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved); |
4235 | void growOperands(unsigned Size); |
4236 | |
4237 | protected: |
4238 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4239 | friend class Instruction; |
4240 | |
4241 | CatchSwitchInst *cloneImpl() const; |
4242 | |
4243 | public: |
4244 | static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, |
4245 | unsigned NumHandlers, |
4246 | const Twine &NameStr = "", |
4247 | Instruction *InsertBefore = nullptr) { |
4248 | return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, |
4249 | InsertBefore); |
4250 | } |
4251 | |
4252 | static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, |
4253 | unsigned NumHandlers, const Twine &NameStr, |
4254 | BasicBlock *InsertAtEnd) { |
4255 | return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, |
4256 | InsertAtEnd); |
4257 | } |
4258 | |
4259 | /// Provide fast operand accessors |
4260 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
4261 | |
4262 | // Accessor Methods for CatchSwitch stmt |
4263 | Value *getParentPad() const { return getOperand(0); } |
4264 | void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); } |
4265 | |
4266 | // Accessor Methods for CatchSwitch stmt |
4267 | bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } |
4268 | bool unwindsToCaller() const { return !hasUnwindDest(); } |
4269 | BasicBlock *getUnwindDest() const { |
4270 | if (hasUnwindDest()) |
4271 | return cast<BasicBlock>(getOperand(1)); |
4272 | return nullptr; |
4273 | } |
4274 | void setUnwindDest(BasicBlock *UnwindDest) { |
4275 | assert(UnwindDest);
4276 | assert(hasUnwindDest());
4277 | setOperand(1, UnwindDest); |
4278 | } |
4279 | |
4280 | /// Return the number of 'handlers' in this catchswitch
4281 | /// instruction, excluding the default handler.
4282 | unsigned getNumHandlers() const { |
4283 | if (hasUnwindDest()) |
4284 | return getNumOperands() - 2; |
4285 | return getNumOperands() - 1; |
4286 | } |
4287 | |
4288 | private: |
4289 | static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); } |
4290 | static const BasicBlock *handler_helper(const Value *V) { |
4291 | return cast<BasicBlock>(V); |
4292 | } |
4293 | |
4294 | public: |
4295 | using DerefFnTy = BasicBlock *(*)(Value *); |
4296 | using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>; |
4297 | using handler_range = iterator_range<handler_iterator>; |
4298 | using ConstDerefFnTy = const BasicBlock *(*)(const Value *); |
4299 | using const_handler_iterator = |
4300 | mapped_iterator<const_op_iterator, ConstDerefFnTy>; |
4301 | using const_handler_range = iterator_range<const_handler_iterator>; |
4302 | |
4303 | /// Returns an iterator that points to the first handler in CatchSwitchInst. |
4304 | handler_iterator handler_begin() { |
4305 | op_iterator It = op_begin() + 1; |
4306 | if (hasUnwindDest()) |
4307 | ++It; |
4308 | return handler_iterator(It, DerefFnTy(handler_helper)); |
4309 | } |
4310 | |
4311 | /// Returns an iterator that points to the first handler in the |
4312 | /// CatchSwitchInst. |
4313 | const_handler_iterator handler_begin() const { |
4314 | const_op_iterator It = op_begin() + 1; |
4315 | if (hasUnwindDest()) |
4316 | ++It; |
4317 | return const_handler_iterator(It, ConstDerefFnTy(handler_helper)); |
4318 | } |
4319 | |
4320 | /// Returns a read-only iterator that points one past the last |
4321 | /// handler in the CatchSwitchInst. |
4322 | handler_iterator handler_end() { |
4323 | return handler_iterator(op_end(), DerefFnTy(handler_helper)); |
4324 | } |
4325 | |
4326 | /// Returns an iterator that points one past the last handler in the |
4327 | /// CatchSwitchInst. |
4328 | const_handler_iterator handler_end() const { |
4329 | return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper)); |
4330 | } |
4331 | |
4332 | /// iteration adapter for range-for loops. |
4333 | handler_range handlers() { |
4334 | return make_range(handler_begin(), handler_end()); |
4335 | } |
4336 | |
4337 | /// iteration adapter for range-for loops. |
4338 | const_handler_range handlers() const { |
4339 | return make_range(handler_begin(), handler_end()); |
4340 | } |
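/// A short sketch of the adapters above; CSI is assumed to be an existing
/// CatchSwitchInst*:
/// \code
///   for (BasicBlock *Handler : CSI->handlers()) {
///     // ... visit each catchpad handler block ...
///   }
/// \endcode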
4341 | |
4342 | /// Add an entry (handler) to this catchswitch instruction.
4343 | /// Note:
4344 | /// This action invalidates handler_end(). The old handler_end() iterator
4345 | /// will point to the added handler.
4346 | void addHandler(BasicBlock *Dest); |
4347 | |
4348 | void removeHandler(handler_iterator HI); |
4349 | |
4350 | unsigned getNumSuccessors() const { return getNumOperands() - 1; } |
4351 | BasicBlock *getSuccessor(unsigned Idx) const { |
4352 | assert(Idx < getNumSuccessors() &&
4353 |        "Successor # out of range for catchswitch!");
4354 | return cast<BasicBlock>(getOperand(Idx + 1)); |
4355 | } |
4356 | void setSuccessor(unsigned Idx, BasicBlock *NewSucc) { |
4357 | assert(Idx < getNumSuccessors() &&
4358 |        "Successor # out of range for catchswitch!");
4359 | setOperand(Idx + 1, NewSucc); |
4360 | } |
4361 | |
4362 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4363 | static bool classof(const Instruction *I) { |
4364 | return I->getOpcode() == Instruction::CatchSwitch; |
4365 | } |
4366 | static bool classof(const Value *V) { |
4367 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4368 | } |
4369 | }; |
4370 | |
4371 | template <> |
4372 | struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {}; |
4373 | |
4374 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)
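| // Usage sketch (illustrative annotation, not part of the original header):
| // the handler_range adapter lets clients walk every catchpad block of a
| // catchswitch with a range-for. 'CS' is an assumed, already-built
| // CatchSwitchInst; note that addHandler invalidates any saved
| // handler_end() iterator.
| //
| //   void collectHandlers(const CatchSwitchInst *CS,
| //                        SmallVectorImpl<const BasicBlock *> &Out) {
| //     for (const BasicBlock *Handler : CS->handlers())
| //       Out.push_back(Handler);
| //   }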
4375 | |
4376 | //===----------------------------------------------------------------------===// |
4377 | // CleanupPadInst Class |
4378 | //===----------------------------------------------------------------------===// |
4379 | class CleanupPadInst : public FuncletPadInst { |
4380 | private: |
4381 | explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, |
4382 | unsigned Values, const Twine &NameStr, |
4383 | Instruction *InsertBefore) |
4384 | : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, |
4385 | NameStr, InsertBefore) {} |
4386 | explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, |
4387 | unsigned Values, const Twine &NameStr, |
4388 | BasicBlock *InsertAtEnd) |
4389 | : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, |
4390 | NameStr, InsertAtEnd) {} |
4391 | |
4392 | public: |
4393 | static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None, |
4394 | const Twine &NameStr = "", |
4395 | Instruction *InsertBefore = nullptr) { |
4396 | unsigned Values = 1 + Args.size(); |
4397 | return new (Values) |
4398 | CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore); |
4399 | } |
4400 | |
4401 | static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args, |
4402 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4403 | unsigned Values = 1 + Args.size(); |
4404 | return new (Values) |
4405 | CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd); |
4406 | } |
4407 | |
4408 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4409 | static bool classof(const Instruction *I) { |
4410 | return I->getOpcode() == Instruction::CleanupPad; |
4411 | } |
4412 | static bool classof(const Value *V) { |
4413 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4414 | } |
4415 | }; |
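| // Usage sketch (illustrative, not part of the original header): creating a
| // cleanuppad appended to a block. 'ParentPad' and 'BB' are assumed
| // caller-provided; the None argument means "no extra funclet arguments".
| //
| //   CleanupPadInst *makeCleanupPad(Value *ParentPad, BasicBlock *BB) {
| //     return CleanupPadInst::Create(ParentPad, None, "cleanup", BB);
| //   }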
4416 | |
4417 | //===----------------------------------------------------------------------===// |
4418 | // CatchPadInst Class |
4419 | //===----------------------------------------------------------------------===// |
4420 | class CatchPadInst : public FuncletPadInst { |
4421 | private: |
4422 | explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, |
4423 | unsigned Values, const Twine &NameStr, |
4424 | Instruction *InsertBefore) |
4425 | : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, |
4426 | NameStr, InsertBefore) {} |
4427 | explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, |
4428 | unsigned Values, const Twine &NameStr, |
4429 | BasicBlock *InsertAtEnd) |
4430 | : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, |
4431 | NameStr, InsertAtEnd) {} |
4432 | |
4433 | public: |
4434 | static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, |
4435 | const Twine &NameStr = "", |
4436 | Instruction *InsertBefore = nullptr) { |
4437 | unsigned Values = 1 + Args.size(); |
4438 | return new (Values) |
4439 | CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore); |
4440 | } |
4441 | |
4442 | static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, |
4443 | const Twine &NameStr, BasicBlock *InsertAtEnd) { |
4444 | unsigned Values = 1 + Args.size(); |
4445 | return new (Values) |
4446 | CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd); |
4447 | } |
4448 | |
4449 | /// Convenience accessors |
4450 | CatchSwitchInst *getCatchSwitch() const { |
4451 | return cast<CatchSwitchInst>(Op<-1>()); |
4452 | } |
4453 | void setCatchSwitch(Value *CatchSwitch) { |
4454 | assert(CatchSwitch);
4455 | Op<-1>() = CatchSwitch; |
4456 | } |
4457 | |
4458 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4459 | static bool classof(const Instruction *I) { |
4460 | return I->getOpcode() == Instruction::CatchPad; |
4461 | } |
4462 | static bool classof(const Value *V) { |
4463 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4464 | } |
4465 | }; |
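| // Usage sketch (illustrative): a catchpad's final operand links back to its
| // owning catchswitch, so pad-to-switch navigation is a single accessor.
| // 'CPI' is an assumed existing CatchPadInst.
| //
| //   const BasicBlock *unwindDestOfPad(const CatchPadInst *CPI) {
| //     return CPI->getCatchSwitch()->getUnwindDest(); // may be nullptr
| //   }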
4466 | |
4467 | //===----------------------------------------------------------------------===// |
4468 | // CatchReturnInst Class |
4469 | //===----------------------------------------------------------------------===// |
4470 | |
4471 | class CatchReturnInst : public Instruction { |
4472 | CatchReturnInst(const CatchReturnInst &RI); |
4473 | CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore); |
4474 | CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd); |
4475 | |
4476 | void init(Value *CatchPad, BasicBlock *BB); |
4477 | |
4478 | protected: |
4479 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4480 | friend class Instruction; |
4481 | |
4482 | CatchReturnInst *cloneImpl() const; |
4483 | |
4484 | public: |
4485 | static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, |
4486 | Instruction *InsertBefore = nullptr) { |
4487 | assert(CatchPad);
4488 | assert(BB);
4489 | return new (2) CatchReturnInst(CatchPad, BB, InsertBefore); |
4490 | } |
4491 | |
4492 | static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, |
4493 | BasicBlock *InsertAtEnd) { |
4494 | assert(CatchPad);
4495 | assert(BB);
4496 | return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd); |
4497 | } |
4498 | |
4499 | /// Provide fast operand accessors |
4500 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
4501 | |
4502 | /// Convenience accessors. |
4503 | CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); } |
4504 | void setCatchPad(CatchPadInst *CatchPad) { |
4505 | assert(CatchPad);
4506 | Op<0>() = CatchPad; |
4507 | } |
4508 | |
4509 | BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); } |
4510 | void setSuccessor(BasicBlock *NewSucc) { |
4511 | assert(NewSucc);
4512 | Op<1>() = NewSucc; |
4513 | } |
4514 | unsigned getNumSuccessors() const { return 1; } |
4515 | |
4516 | /// Get the parentPad of this catchret's catchpad's catchswitch. |
4517 | /// The successor block is implicitly a member of this funclet. |
4518 | Value *getCatchSwitchParentPad() const { |
4519 | return getCatchPad()->getCatchSwitch()->getParentPad(); |
4520 | } |
4521 | |
4522 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4523 | static bool classof(const Instruction *I) { |
4524 | return (I->getOpcode() == Instruction::CatchRet); |
4525 | } |
4526 | static bool classof(const Value *V) { |
4527 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4528 | } |
4529 | |
4530 | private: |
4531 | BasicBlock *getSuccessor(unsigned Idx) const { |
4532 | assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4533 | return getSuccessor(); |
4534 | } |
4535 | |
4536 | void setSuccessor(unsigned Idx, BasicBlock *B) { |
4537 | assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!");
4538 | setSuccessor(B); |
4539 | } |
4540 | }; |
4541 | |
4542 | template <> |
4543 | struct OperandTraits<CatchReturnInst> |
4544 | : public FixedNumOperandTraits<CatchReturnInst, 2> {}; |
4545 | |
4546 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)
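| // Usage sketch (illustrative): a catchret carries exactly two operands, the
| // catchpad it exits and its single successor block. 'CPI', 'Succ', and
| // 'AtEndOf' are assumed caller-provided values.
| //
| //   CatchReturnInst *makeCatchRet(CatchPadInst *CPI, BasicBlock *Succ,
| //                                 BasicBlock *AtEndOf) {
| //     return CatchReturnInst::Create(CPI, Succ, AtEndOf);
| //   }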
4547 | |
4548 | //===----------------------------------------------------------------------===// |
4549 | // CleanupReturnInst Class |
4550 | //===----------------------------------------------------------------------===// |
4551 | |
4552 | class CleanupReturnInst : public Instruction { |
4553 | using UnwindDestField = BoolBitfieldElementT<0>; |
4554 | |
4555 | private: |
4556 | CleanupReturnInst(const CleanupReturnInst &RI); |
4557 | CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, |
4558 | Instruction *InsertBefore = nullptr); |
4559 | CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, |
4560 | BasicBlock *InsertAtEnd); |
4561 | |
4562 | void init(Value *CleanupPad, BasicBlock *UnwindBB); |
4563 | |
4564 | protected: |
4565 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4566 | friend class Instruction; |
4567 | |
4568 | CleanupReturnInst *cloneImpl() const; |
4569 | |
4570 | public: |
4571 | static CleanupReturnInst *Create(Value *CleanupPad, |
4572 | BasicBlock *UnwindBB = nullptr, |
4573 | Instruction *InsertBefore = nullptr) { |
4574 | assert(CleanupPad);
4575 | unsigned Values = 1; |
4576 | if (UnwindBB) |
4577 | ++Values; |
4578 | return new (Values) |
4579 | CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore); |
4580 | } |
4581 | |
4582 | static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB, |
4583 | BasicBlock *InsertAtEnd) { |
4584 | assert(CleanupPad);
4585 | unsigned Values = 1; |
4586 | if (UnwindBB) |
4587 | ++Values; |
4588 | return new (Values) |
4589 | CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd); |
4590 | } |
4591 | |
4592 | /// Provide fast operand accessors |
4593 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)
4594 | |
4595 | bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } |
4596 | bool unwindsToCaller() const { return !hasUnwindDest(); } |
4597 | |
4598 | /// Convenience accessor. |
4599 | CleanupPadInst *getCleanupPad() const { |
4600 | return cast<CleanupPadInst>(Op<0>()); |
4601 | } |
4602 | void setCleanupPad(CleanupPadInst *CleanupPad) { |
4603 | assert(CleanupPad);
4604 | Op<0>() = CleanupPad; |
4605 | } |
4606 | |
4607 | unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; } |
4608 | |
4609 | BasicBlock *getUnwindDest() const { |
4610 | return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr; |
4611 | } |
4612 | void setUnwindDest(BasicBlock *NewDest) { |
4613 | assert(NewDest);
4614 | assert(hasUnwindDest());
4615 | Op<1>() = NewDest; |
4616 | } |
4617 | |
4618 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4619 | static bool classof(const Instruction *I) { |
4620 | return (I->getOpcode() == Instruction::CleanupRet); |
4621 | } |
4622 | static bool classof(const Value *V) { |
4623 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4624 | } |
4625 | |
4626 | private: |
4627 | BasicBlock *getSuccessor(unsigned Idx) const { |
4628 | assert(Idx == 0);
4629 | return getUnwindDest(); |
4630 | } |
4631 | |
4632 | void setSuccessor(unsigned Idx, BasicBlock *B) { |
4633 | assert(Idx == 0);
4634 | setUnwindDest(B); |
4635 | } |
4636 | |
4637 | // Shadow Instruction::setInstructionSubclassData with a private forwarding |
4638 | // method so that subclasses cannot accidentally use it. |
4639 | template <typename Bitfield> |
4640 | void setSubclassData(typename Bitfield::Type Value) { |
4641 | Instruction::setSubclassData<Bitfield>(Value); |
4642 | } |
4643 | }; |
4644 | |
4645 | template <> |
4646 | struct OperandTraits<CleanupReturnInst> |
4647 | : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {}; |
4648 | |
4649 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)
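| // Usage sketch (illustrative): the unwind destination is optional, so a
| // cleanupret has either zero or one successor depending on whether it
| // unwinds to the caller.
| //
| //   bool exitsFunclet(const CleanupReturnInst *CRI) {
| //     return CRI->unwindsToCaller();  // true when getNumSuccessors() == 0
| //   }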
4650 | |
4651 | //===----------------------------------------------------------------------===// |
4652 | // UnreachableInst Class |
4653 | //===----------------------------------------------------------------------===// |
4654 | |
4655 | //===--------------------------------------------------------------------------- |
4656 | /// This instruction has undefined behavior. In particular, its presence
4657 | /// indicates some higher-level knowledge that the end of the block
4658 | /// cannot be reached.
4659 | /// |
4660 | class UnreachableInst : public Instruction { |
4661 | protected: |
4662 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4663 | friend class Instruction; |
4664 | |
4665 | UnreachableInst *cloneImpl() const; |
4666 | |
4667 | public: |
4668 | explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr); |
4669 | explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd); |
4670 | |
4671 | // allocate space for exactly zero operands |
4672 | void *operator new(size_t s) { |
4673 | return User::operator new(s, 0); |
4674 | } |
4675 | |
4676 | unsigned getNumSuccessors() const { return 0; } |
4677 | |
4678 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
4679 | static bool classof(const Instruction *I) { |
4680 | return I->getOpcode() == Instruction::Unreachable; |
4681 | } |
4682 | static bool classof(const Value *V) { |
4683 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4684 | } |
4685 | |
4686 | private: |
4687 | BasicBlock *getSuccessor(unsigned idx) const { |
4688 | llvm_unreachable("UnreachableInst has no successors!");
4689 | } |
4690 | |
4691 | void setSuccessor(unsigned idx, BasicBlock *B) { |
4692 | llvm_unreachable("UnreachableInst has no successors!");
4693 | } |
4694 | }; |
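| // Usage sketch (illustrative): marking a block end as unreachable. 'Ctx' and
| // 'BB' are assumed caller-provided; this constructor appends the new
| // instruction as BB's terminator.
| //
| //   void sealBlock(LLVMContext &Ctx, BasicBlock *BB) {
| //     new UnreachableInst(Ctx, BB);
| //   }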
4695 | |
4696 | //===----------------------------------------------------------------------===// |
4697 | // TruncInst Class |
4698 | //===----------------------------------------------------------------------===// |
4699 | |
4700 | /// This class represents a truncation of integer types. |
4701 | class TruncInst : public CastInst { |
4702 | protected: |
4703 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4704 | friend class Instruction; |
4705 | |
4706 | /// Clone an identical TruncInst |
4707 | TruncInst *cloneImpl() const; |
4708 | |
4709 | public: |
4710 | /// Constructor with insert-before-instruction semantics |
4711 | TruncInst( |
4712 | Value *S, ///< The value to be truncated |
4713 | Type *Ty, ///< The (smaller) type to truncate to |
4714 | const Twine &NameStr = "", ///< A name for the new instruction |
4715 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4716 | ); |
4717 | |
4718 | /// Constructor with insert-at-end-of-block semantics |
4719 | TruncInst( |
4720 | Value *S, ///< The value to be truncated |
4721 | Type *Ty, ///< The (smaller) type to truncate to |
4722 | const Twine &NameStr, ///< A name for the new instruction |
4723 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4724 | ); |
4725 | |
4726 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4727 | static bool classof(const Instruction *I) { |
4728 | return I->getOpcode() == Trunc; |
4729 | } |
4730 | static bool classof(const Value *V) { |
4731 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4732 | } |
4733 | }; |
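| // Usage sketch (illustrative): each cast class below follows this shape,
| // where construction creates and optionally inserts the instruction in one
| // step. 'V', 'I32Ty', and 'InsertPt' are assumed caller-provided.
| //
| //   Value *truncToI32(Value *V, Type *I32Ty, Instruction *InsertPt) {
| //     return new TruncInst(V, I32Ty, "lo32", InsertPt);
| //   }
| //
| // The same pattern applies to ZExtInst, SExtInst, and the remaining cast
| // classes in this file.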
4734 | |
4735 | //===----------------------------------------------------------------------===// |
4736 | // ZExtInst Class |
4737 | //===----------------------------------------------------------------------===// |
4738 | |
4739 | /// This class represents zero extension of integer types. |
4740 | class ZExtInst : public CastInst { |
4741 | protected: |
4742 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4743 | friend class Instruction; |
4744 | |
4745 | /// Clone an identical ZExtInst |
4746 | ZExtInst *cloneImpl() const; |
4747 | |
4748 | public: |
4749 | /// Constructor with insert-before-instruction semantics |
4750 | ZExtInst( |
4751 | Value *S, ///< The value to be zero extended |
4752 | Type *Ty, ///< The type to zero extend to |
4753 | const Twine &NameStr = "", ///< A name for the new instruction |
4754 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4755 | ); |
4756 | |
4757 | /// Constructor with insert-at-end semantics. |
4758 | ZExtInst( |
4759 | Value *S, ///< The value to be zero extended |
4760 | Type *Ty, ///< The type to zero extend to |
4761 | const Twine &NameStr, ///< A name for the new instruction |
4762 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4763 | ); |
4764 | |
4765 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4766 | static bool classof(const Instruction *I) { |
4767 | return I->getOpcode() == ZExt; |
4768 | } |
4769 | static bool classof(const Value *V) { |
4770 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4771 | } |
4772 | }; |
4773 | |
4774 | //===----------------------------------------------------------------------===// |
4775 | // SExtInst Class |
4776 | //===----------------------------------------------------------------------===// |
4777 | |
4778 | /// This class represents a sign extension of integer types. |
4779 | class SExtInst : public CastInst { |
4780 | protected: |
4781 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4782 | friend class Instruction; |
4783 | |
4784 | /// Clone an identical SExtInst |
4785 | SExtInst *cloneImpl() const; |
4786 | |
4787 | public: |
4788 | /// Constructor with insert-before-instruction semantics |
4789 | SExtInst( |
4790 | Value *S, ///< The value to be sign extended |
4791 | Type *Ty, ///< The type to sign extend to |
4792 | const Twine &NameStr = "", ///< A name for the new instruction |
4793 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4794 | ); |
4795 | |
4796 | /// Constructor with insert-at-end-of-block semantics |
4797 | SExtInst( |
4798 | Value *S, ///< The value to be sign extended |
4799 | Type *Ty, ///< The type to sign extend to |
4800 | const Twine &NameStr, ///< A name for the new instruction |
4801 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4802 | ); |
4803 | |
4804 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4805 | static bool classof(const Instruction *I) { |
4806 | return I->getOpcode() == SExt; |
4807 | } |
4808 | static bool classof(const Value *V) { |
4809 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4810 | } |
4811 | }; |
4812 | |
4813 | //===----------------------------------------------------------------------===// |
4814 | // FPTruncInst Class |
4815 | //===----------------------------------------------------------------------===// |
4816 | |
4817 | /// This class represents a truncation of floating point types. |
4818 | class FPTruncInst : public CastInst { |
4819 | protected: |
4820 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4821 | friend class Instruction; |
4822 | |
4823 | /// Clone an identical FPTruncInst |
4824 | FPTruncInst *cloneImpl() const; |
4825 | |
4826 | public: |
4827 | /// Constructor with insert-before-instruction semantics |
4828 | FPTruncInst( |
4829 | Value *S, ///< The value to be truncated |
4830 | Type *Ty, ///< The type to truncate to |
4831 | const Twine &NameStr = "", ///< A name for the new instruction |
4832 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4833 | ); |
4834 | |
4835 | /// Constructor with insert-at-end-of-block semantics
4836 | FPTruncInst( |
4837 | Value *S, ///< The value to be truncated |
4838 | Type *Ty, ///< The type to truncate to |
4839 | const Twine &NameStr, ///< A name for the new instruction |
4840 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4841 | ); |
4842 | |
4843 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4844 | static bool classof(const Instruction *I) { |
4845 | return I->getOpcode() == FPTrunc; |
4846 | } |
4847 | static bool classof(const Value *V) { |
4848 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4849 | } |
4850 | }; |
4851 | |
4852 | //===----------------------------------------------------------------------===// |
4853 | // FPExtInst Class |
4854 | //===----------------------------------------------------------------------===// |
4855 | |
4856 | /// This class represents an extension of floating point types. |
4857 | class FPExtInst : public CastInst { |
4858 | protected: |
4859 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4860 | friend class Instruction; |
4861 | |
4862 | /// Clone an identical FPExtInst |
4863 | FPExtInst *cloneImpl() const; |
4864 | |
4865 | public: |
4866 | /// Constructor with insert-before-instruction semantics |
4867 | FPExtInst( |
4868 | Value *S, ///< The value to be extended |
4869 | Type *Ty, ///< The type to extend to |
4870 | const Twine &NameStr = "", ///< A name for the new instruction |
4871 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4872 | ); |
4873 | |
4874 | /// Constructor with insert-at-end-of-block semantics |
4875 | FPExtInst( |
4876 | Value *S, ///< The value to be extended |
4877 | Type *Ty, ///< The type to extend to |
4878 | const Twine &NameStr, ///< A name for the new instruction |
4879 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4880 | ); |
4881 | |
4882 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4883 | static bool classof(const Instruction *I) { |
4884 | return I->getOpcode() == FPExt; |
4885 | } |
4886 | static bool classof(const Value *V) { |
4887 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4888 | } |
4889 | }; |
4890 | |
4891 | //===----------------------------------------------------------------------===// |
4892 | // UIToFPInst Class |
4893 | //===----------------------------------------------------------------------===// |
4894 | |
4895 | /// This class represents a cast from unsigned integer to floating point.
4896 | class UIToFPInst : public CastInst { |
4897 | protected: |
4898 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4899 | friend class Instruction; |
4900 | |
4901 | /// Clone an identical UIToFPInst |
4902 | UIToFPInst *cloneImpl() const; |
4903 | |
4904 | public: |
4905 | /// Constructor with insert-before-instruction semantics |
4906 | UIToFPInst( |
4907 | Value *S, ///< The value to be converted |
4908 | Type *Ty, ///< The type to convert to |
4909 | const Twine &NameStr = "", ///< A name for the new instruction |
4910 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4911 | ); |
4912 | |
4913 | /// Constructor with insert-at-end-of-block semantics |
4914 | UIToFPInst( |
4915 | Value *S, ///< The value to be converted |
4916 | Type *Ty, ///< The type to convert to |
4917 | const Twine &NameStr, ///< A name for the new instruction |
4918 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4919 | ); |
4920 | |
4921 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4922 | static bool classof(const Instruction *I) { |
4923 | return I->getOpcode() == UIToFP; |
4924 | } |
4925 | static bool classof(const Value *V) { |
4926 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4927 | } |
4928 | }; |
4929 | |
4930 | //===----------------------------------------------------------------------===// |
4931 | // SIToFPInst Class |
4932 | //===----------------------------------------------------------------------===// |
4933 | |
4934 | /// This class represents a cast from signed integer to floating point. |
4935 | class SIToFPInst : public CastInst { |
4936 | protected: |
4937 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4938 | friend class Instruction; |
4939 | |
4940 | /// Clone an identical SIToFPInst |
4941 | SIToFPInst *cloneImpl() const; |
4942 | |
4943 | public: |
4944 | /// Constructor with insert-before-instruction semantics |
4945 | SIToFPInst( |
4946 | Value *S, ///< The value to be converted |
4947 | Type *Ty, ///< The type to convert to |
4948 | const Twine &NameStr = "", ///< A name for the new instruction |
4949 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4950 | ); |
4951 | |
4952 | /// Constructor with insert-at-end-of-block semantics |
4953 | SIToFPInst( |
4954 | Value *S, ///< The value to be converted |
4955 | Type *Ty, ///< The type to convert to |
4956 | const Twine &NameStr, ///< A name for the new instruction |
4957 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
4958 | ); |
4959 | |
4960 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
4961 | static bool classof(const Instruction *I) { |
4962 | return I->getOpcode() == SIToFP; |
4963 | } |
4964 | static bool classof(const Value *V) { |
4965 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
4966 | } |
4967 | }; |
4968 | |
4969 | //===----------------------------------------------------------------------===// |
4970 | // FPToUIInst Class |
4971 | //===----------------------------------------------------------------------===// |
4972 | |
4973 | /// This class represents a cast from floating point to unsigned integer.
4974 | class FPToUIInst : public CastInst { |
4975 | protected: |
4976 | // Note: Instruction needs to be a friend here to call cloneImpl. |
4977 | friend class Instruction; |
4978 | |
4979 | /// Clone an identical FPToUIInst |
4980 | FPToUIInst *cloneImpl() const; |
4981 | |
4982 | public: |
4983 | /// Constructor with insert-before-instruction semantics |
4984 | FPToUIInst( |
4985 | Value *S, ///< The value to be converted |
4986 | Type *Ty, ///< The type to convert to |
4987 | const Twine &NameStr = "", ///< A name for the new instruction |
4988 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
4989 | ); |
4990 | |
4991 | /// Constructor with insert-at-end-of-block semantics |
4992 | FPToUIInst( |
4993 | Value *S, ///< The value to be converted |
4994 | Type *Ty, ///< The type to convert to |
4995 | const Twine &NameStr, ///< A name for the new instruction |
4996 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into
4997 | ); |
4998 | |
4999 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
5000 | static bool classof(const Instruction *I) { |
5001 | return I->getOpcode() == FPToUI; |
5002 | } |
5003 | static bool classof(const Value *V) { |
5004 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5005 | } |
5006 | }; |
5007 | |
5008 | //===----------------------------------------------------------------------===// |
5009 | // FPToSIInst Class |
5010 | //===----------------------------------------------------------------------===// |
5011 | |
5012 | /// This class represents a cast from floating point to signed integer. |
5013 | class FPToSIInst : public CastInst { |
5014 | protected: |
5015 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5016 | friend class Instruction; |
5017 | |
5018 | /// Clone an identical FPToSIInst |
5019 | FPToSIInst *cloneImpl() const; |
5020 | |
5021 | public: |
5022 | /// Constructor with insert-before-instruction semantics |
5023 | FPToSIInst( |
5024 | Value *S, ///< The value to be converted |
5025 | Type *Ty, ///< The type to convert to |
5026 | const Twine &NameStr = "", ///< A name for the new instruction |
5027 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5028 | ); |
5029 | |
5030 | /// Constructor with insert-at-end-of-block semantics |
5031 | FPToSIInst( |
5032 | Value *S, ///< The value to be converted |
5033 | Type *Ty, ///< The type to convert to |
5034 | const Twine &NameStr, ///< A name for the new instruction |
5035 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5036 | ); |
5037 | |
5038 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
5039 | static bool classof(const Instruction *I) { |
5040 | return I->getOpcode() == FPToSI; |
5041 | } |
5042 | static bool classof(const Value *V) { |
5043 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5044 | } |
5045 | }; |
5046 | |
5047 | //===----------------------------------------------------------------------===// |
5048 | // IntToPtrInst Class |
5049 | //===----------------------------------------------------------------------===// |
5050 | |
5051 | /// This class represents a cast from an integer to a pointer. |
5052 | class IntToPtrInst : public CastInst { |
5053 | public: |
5054 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5055 | friend class Instruction; |
5056 | |
5057 | /// Constructor with insert-before-instruction semantics |
5058 | IntToPtrInst( |
5059 | Value *S, ///< The value to be converted |
5060 | Type *Ty, ///< The type to convert to |
5061 | const Twine &NameStr = "", ///< A name for the new instruction |
5062 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5063 | ); |
5064 | |
5065 | /// Constructor with insert-at-end-of-block semantics |
5066 | IntToPtrInst( |
5067 | Value *S, ///< The value to be converted |
5068 | Type *Ty, ///< The type to convert to |
5069 | const Twine &NameStr, ///< A name for the new instruction |
5070 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5071 | ); |
5072 | |
5073 | /// Clone an identical IntToPtrInst. |
5074 | IntToPtrInst *cloneImpl() const; |
5075 | |
5076 | /// Returns the address space of this instruction's pointer type. |
5077 | unsigned getAddressSpace() const { |
5078 | return getType()->getPointerAddressSpace(); |
5079 | } |
5080 | |
5081 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
5082 | static bool classof(const Instruction *I) { |
5083 | return I->getOpcode() == IntToPtr; |
5084 | } |
5085 | static bool classof(const Value *V) { |
5086 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5087 | } |
5088 | }; |
5089 | |
5090 | //===----------------------------------------------------------------------===// |
5091 | // PtrToIntInst Class |
5092 | //===----------------------------------------------------------------------===// |
5093 | |
5094 | /// This class represents a cast from a pointer to an integer. |
5095 | class PtrToIntInst : public CastInst { |
5096 | protected: |
5097 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5098 | friend class Instruction; |
5099 | |
5100 | /// Clone an identical PtrToIntInst. |
5101 | PtrToIntInst *cloneImpl() const; |
5102 | |
5103 | public: |
5104 | /// Constructor with insert-before-instruction semantics |
5105 | PtrToIntInst( |
5106 | Value *S, ///< The value to be converted |
5107 | Type *Ty, ///< The type to convert to |
5108 | const Twine &NameStr = "", ///< A name for the new instruction |
5109 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5110 | ); |
5111 | |
5112 | /// Constructor with insert-at-end-of-block semantics |
5113 | PtrToIntInst( |
5114 | Value *S, ///< The value to be converted |
5115 | Type *Ty, ///< The type to convert to |
5116 | const Twine &NameStr, ///< A name for the new instruction |
5117 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5118 | ); |
5119 | |
5120 | /// Gets the pointer operand. |
5121 | Value *getPointerOperand() { return getOperand(0); } |
5122 | /// Gets the pointer operand. |
5123 | const Value *getPointerOperand() const { return getOperand(0); } |
5124 | /// Gets the operand index of the pointer operand. |
5125 | static unsigned getPointerOperandIndex() { return 0U; } |
5126 | |
5127 | /// Returns the address space of the pointer operand. |
5128 | unsigned getPointerAddressSpace() const { |
5129 | return getPointerOperand()->getType()->getPointerAddressSpace(); |
5130 | } |
5131 | |
5132 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
5133 | static bool classof(const Instruction *I) { |
5134 | return I->getOpcode() == PtrToInt; |
5135 | } |
5136 | static bool classof(const Value *V) { |
5137 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5138 | } |
5139 | }; |
5140 | |
5141 | //===----------------------------------------------------------------------===// |
5142 | // BitCastInst Class |
5143 | //===----------------------------------------------------------------------===// |
5144 | |
5145 | /// This class represents a no-op cast from one type to another. |
5146 | class BitCastInst : public CastInst { |
5147 | protected: |
5148 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5149 | friend class Instruction; |
5150 | |
5151 | /// Clone an identical BitCastInst. |
5152 | BitCastInst *cloneImpl() const; |
5153 | |
5154 | public: |
5155 | /// Constructor with insert-before-instruction semantics |
5156 | BitCastInst( |
5157 | Value *S, ///< The value to be casted |
5158 | Type *Ty, ///< The type to cast to
5159 | const Twine &NameStr = "", ///< A name for the new instruction |
5160 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5161 | ); |
5162 | |
5163 | /// Constructor with insert-at-end-of-block semantics |
5164 | BitCastInst( |
5165 | Value *S, ///< The value to be casted |
5166 | Type *Ty, ///< The type to cast to
5167 | const Twine &NameStr, ///< A name for the new instruction |
5168 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5169 | ); |
5170 | |
5171 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
5172 | static bool classof(const Instruction *I) { |
5173 | return I->getOpcode() == BitCast; |
5174 | } |
5175 | static bool classof(const Value *V) { |
5176 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5177 | } |
5178 | }; |
5179 | |
5180 | //===----------------------------------------------------------------------===// |
5181 | // AddrSpaceCastInst Class |
5182 | //===----------------------------------------------------------------------===// |
5183 | |
5184 | /// This class represents a conversion between pointers from one address space |
5185 | /// to another. |
5186 | class AddrSpaceCastInst : public CastInst { |
5187 | protected: |
5188 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5189 | friend class Instruction; |
5190 | |
5191 | /// Clone an identical AddrSpaceCastInst. |
5192 | AddrSpaceCastInst *cloneImpl() const; |
5193 | |
5194 | public: |
5195 | /// Constructor with insert-before-instruction semantics |
5196 | AddrSpaceCastInst( |
5197 | Value *S, ///< The value to be casted |
5198 | Type *Ty, ///< The type to cast to
5199 | const Twine &NameStr = "", ///< A name for the new instruction |
5200 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction |
5201 | ); |
5202 | |
5203 | /// Constructor with insert-at-end-of-block semantics |
5204 | AddrSpaceCastInst( |
5205 | Value *S, ///< The value to be casted |
5206 | Type *Ty, ///< The type to cast to
5207 | const Twine &NameStr, ///< A name for the new instruction |
5208 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into |
5209 | ); |
5210 | |
5211 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
5212 | static bool classof(const Instruction *I) { |
5213 | return I->getOpcode() == AddrSpaceCast; |
5214 | } |
5215 | static bool classof(const Value *V) { |
5216 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5217 | } |
5218 | |
5219 | /// Gets the pointer operand. |
5220 | Value *getPointerOperand() { |
5221 | return getOperand(0); |
5222 | } |
5223 | |
5224 | /// Gets the pointer operand. |
5225 | const Value *getPointerOperand() const { |
5226 | return getOperand(0); |
5227 | } |
5228 | |
5229 | /// Gets the operand index of the pointer operand. |
5230 | static unsigned getPointerOperandIndex() { |
5231 | return 0U; |
5232 | } |
5233 | |
5234 | /// Returns the address space of the pointer operand. |
5235 | unsigned getSrcAddressSpace() const { |
5236 | return getPointerOperand()->getType()->getPointerAddressSpace(); |
5237 | } |
5238 | |
5239 | /// Returns the address space of the result. |
5240 | unsigned getDestAddressSpace() const { |
5241 | return getType()->getPointerAddressSpace(); |
5242 | } |
5243 | }; |
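| // Usage sketch (illustrative): in well-formed IR an addrspacecast changes
| // the address space, so its source and destination spaces should differ.
| // 'ASC' is an assumed existing AddrSpaceCastInst.
| //
| //   bool changesAddressSpace(const AddrSpaceCastInst *ASC) {
| //     return ASC->getSrcAddressSpace() != ASC->getDestAddressSpace();
| //   }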
5244 | |
5245 | /// A helper function that returns the pointer operand of a load or store
5246 | /// instruction. Returns nullptr if \p V is not a load or store.
5247 | inline const Value *getLoadStorePointerOperand(const Value *V) { |
5248 | if (auto *Load = dyn_cast<LoadInst>(V)) |
5249 | return Load->getPointerOperand(); |
5250 | if (auto *Store = dyn_cast<StoreInst>(V)) |
5251 | return Store->getPointerOperand(); |
5252 | return nullptr; |
5253 | } |
5254 | inline Value *getLoadStorePointerOperand(Value *V) { |
5255 | return const_cast<Value *>( |
5256 | getLoadStorePointerOperand(static_cast<const Value *>(V))); |
5257 | } |
5258 | |
5259 | /// A helper function that returns the pointer operand of a load, store,
5260 | /// or GEP instruction. Returns nullptr if \p V is not a load, store, or GEP.
5261 | inline const Value *getPointerOperand(const Value *V) { |
5262 | if (auto *Ptr = getLoadStorePointerOperand(V)) |
5263 | return Ptr; |
5264 | if (auto *Gep = dyn_cast<GetElementPtrInst>(V)) |
5265 | return Gep->getPointerOperand(); |
5266 | return nullptr; |
5267 | } |
5268 | inline Value *getPointerOperand(Value *V) { |
5269 | return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V))); |
5270 | } |
5271 | |
5272 | /// A helper function that returns the alignment of a load or store instruction.
5273 | inline Align getLoadStoreAlignment(Value *I) { |
5274 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5275 |        "Expected Load or Store instruction");
5276 | if (auto *LI = dyn_cast<LoadInst>(I)) |
5277 | return LI->getAlign(); |
5278 | return cast<StoreInst>(I)->getAlign(); |
5279 | } |
5280 | |
5281 | /// A helper function that returns the address space of the pointer operand
5282 | /// of a load or store instruction.
5283 | inline unsigned getLoadStoreAddressSpace(Value *I) { |
5284 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&
5285 |        "Expected Load or Store instruction");
5286 | if (auto *LI = dyn_cast<LoadInst>(I)) |
5287 | return LI->getPointerAddressSpace(); |
5288 | return cast<StoreInst>(I)->getPointerAddressSpace(); |
5289 | } |
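| // Usage sketch (illustrative): these helpers let generic code treat loads
| // and stores uniformly. The short-circuit keeps the assertion in
| // getLoadStoreAddressSpace from firing on other instruction kinds.
| //
| //   bool accessesAddrSpaceZero(Value *I) {
| //     return getLoadStorePointerOperand(I) != nullptr &&
| //            getLoadStoreAddressSpace(I) == 0;
| //   }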
5290 | |
5291 | //===----------------------------------------------------------------------===// |
5292 | // FreezeInst Class |
5293 | //===----------------------------------------------------------------------===// |
5294 | |
5295 | /// This class represents a freeze instruction, which returns an arbitrary
5296 | /// concrete value when its operand is either a poison value or an undef value.
5297 | class FreezeInst : public UnaryInstruction { |
5298 | protected: |
5299 | // Note: Instruction needs to be a friend here to call cloneImpl. |
5300 | friend class Instruction; |
5301 | |
5302 | /// Clone an identical FreezeInst |
5303 | FreezeInst *cloneImpl() const; |
5304 | |
5305 | public: |
5306 | explicit FreezeInst(Value *S, |
5307 | const Twine &NameStr = "", |
5308 | Instruction *InsertBefore = nullptr); |
5309 | FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd); |
5310 | |
5311 | // Methods for support type inquiry through isa, cast, and dyn_cast: |
5312 | static inline bool classof(const Instruction *I) { |
5313 | return I->getOpcode() == Freeze; |
5314 | } |
5315 | static inline bool classof(const Value *V) { |
5316 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); |
5317 | } |
5318 | }; |
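| // Usage sketch (illustrative): freezing a possibly-poison condition so every
| // later use observes one consistent value. 'Cond' and 'InsertPt' are assumed
| // caller-provided.
| //
| //   Value *freezeCond(Value *Cond, Instruction *InsertPt) {
| //     return new FreezeInst(Cond, Cond->getName() + ".fr", InsertPt);
| //   }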
5319 | |
5320 | } // end namespace llvm |
5321 | |
5322 | #endif // LLVM_IR_INSTRUCTIONS_H |
1 | //===-- llvm/Instruction.h - Instruction class definition -------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file contains the declaration of the Instruction class, which is the |
10 | // base class for all of the LLVM instructions. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #ifndef LLVM_IR_INSTRUCTION_H |
15 | #define LLVM_IR_INSTRUCTION_H |
16 | |
17 | #include "llvm/ADT/ArrayRef.h" |
18 | #include "llvm/ADT/Bitfields.h" |
19 | #include "llvm/ADT/None.h" |
20 | #include "llvm/ADT/StringRef.h" |
21 | #include "llvm/ADT/ilist_node.h" |
22 | #include "llvm/IR/DebugLoc.h" |
23 | #include "llvm/IR/SymbolTableListTraits.h" |
24 | #include "llvm/IR/User.h" |
25 | #include "llvm/IR/Value.h" |
26 | #include "llvm/Support/AtomicOrdering.h" |
27 | #include "llvm/Support/Casting.h" |
28 | #include <algorithm> |
29 | #include <cassert> |
30 | #include <cstdint> |
31 | #include <utility> |
32 | |
33 | namespace llvm { |
34 | |
35 | class BasicBlock; |
36 | class FastMathFlags; |
37 | class MDNode; |
38 | class Module; |
39 | struct AAMDNodes; |
40 | |
41 | template <> struct ilist_alloc_traits<Instruction> { |
42 | static inline void deleteNode(Instruction *V); |
43 | }; |
44 | |
45 | class Instruction : public User, |
46 | public ilist_node_with_parent<Instruction, BasicBlock> { |
47 | BasicBlock *Parent; |
48 | DebugLoc DbgLoc; // 'dbg' Metadata cache. |
49 | |
50 | /// Relative order of this instruction in its parent basic block. Used for |
51 | /// O(1) local dominance checks between instructions. |
52 | mutable unsigned Order = 0; |
53 | |
54 | protected: |
55 | // The first 15 bits of `Value::SubclassData` are available for subclasses of
56 | // `Instruction` to use.
57 | using OpaqueField = Bitfield::Element<uint16_t, 0, 15>; |
58 | |
59 | // Template alias so that all Instruction subclasses storing an alignment use
60 | // the same definition.
61 | // Valid alignments are powers of two from 2^0 to 2^MaxAlignmentExponent = |
62 | // 2^29. We store them as Log2(Alignment), so we need 5 bits to encode the 30 |
63 | // possible values. |
64 | template <unsigned Offset> |
65 | using AlignmentBitfieldElementT = |
66 | typename Bitfield::Element<unsigned, Offset, 5, |
67 | Value::MaxAlignmentExponent>; |
68 | |
69 | template <unsigned Offset> |
70 | using BoolBitfieldElementT = typename Bitfield::Element<bool, Offset, 1>; |
71 | |
72 | template <unsigned Offset> |
73 | using AtomicOrderingBitfieldElementT = |
74 | typename Bitfield::Element<AtomicOrdering, Offset, 3, |
75 | AtomicOrdering::LAST>; |
76 | |
77 | private: |
78 | // The last bit is used to store whether the instruction has metadata attached |
79 | // or not. |
80 | using HasMetadataField = Bitfield::Element<bool, 15, 1>; |
81 | |
82 | protected: |
83 | ~Instruction(); // Use deleteValue() to delete a generic Instruction. |
84 | |
85 | public: |
86 | Instruction(const Instruction &) = delete; |
87 | Instruction &operator=(const Instruction &) = delete; |
88 | |
89 | /// Specialize the methods defined in Value, as we know that an instruction |
90 | /// can only be used by other instructions. |
91 | Instruction *user_back() { return cast<Instruction>(*user_begin());} |
92 | const Instruction *user_back() const { return cast<Instruction>(*user_begin());} |
93 | |
94 | inline const BasicBlock *getParent() const { return Parent; } |
95 | inline BasicBlock *getParent() { return Parent; } |
96 | |
97 | /// Return the module owning the function this instruction belongs to,
98 | /// or nullptr if the function does not have a module.
99 | /// |
100 | /// Note: this is undefined behavior if the instruction does not have a |
101 | /// parent, or the parent basic block does not have a parent function. |
102 | const Module *getModule() const; |
103 | Module *getModule() { |
104 | return const_cast<Module *>( |
105 | static_cast<const Instruction *>(this)->getModule()); |
106 | } |
107 | |
108 | /// Return the function this instruction belongs to. |
109 | /// |
110 | /// Note: it is undefined behavior to call this on an instruction not |
111 | /// currently inserted into a function. |
112 | const Function *getFunction() const; |
113 | Function *getFunction() { |
114 | return const_cast<Function *>( |
115 | static_cast<const Instruction *>(this)->getFunction()); |
116 | } |
117 | |
118 | /// This method unlinks 'this' from the containing basic block, but does not |
119 | /// delete it. |
120 | void removeFromParent(); |
121 | |
122 | /// This method unlinks 'this' from the containing basic block and deletes it. |
123 | /// |
124 | /// \returns an iterator pointing to the element after the erased one |
125 | SymbolTableList<Instruction>::iterator eraseFromParent(); |
126 | |
127 | /// Insert an unlinked instruction into a basic block immediately before |
128 | /// the specified instruction. |
129 | void insertBefore(Instruction *InsertPos); |
130 | |
131 | /// Insert an unlinked instruction into a basic block immediately after the |
132 | /// specified instruction. |
133 | void insertAfter(Instruction *InsertPos); |
134 | |
135 | /// Unlink this instruction from its current basic block and insert it into |
136 | /// the basic block that MovePos lives in, right before MovePos. |
137 | void moveBefore(Instruction *MovePos); |
138 | |
139 | /// Unlink this instruction and insert into BB before I. |
140 | /// |
141 | /// \pre I is a valid iterator into BB. |
142 | void moveBefore(BasicBlock &BB, SymbolTableList<Instruction>::iterator I); |
143 | |
144 | /// Unlink this instruction from its current basic block and insert it into |
145 | /// the basic block that MovePos lives in, right after MovePos. |
146 | void moveAfter(Instruction *MovePos); |
147 | |
148 | /// Given an instruction Other in the same basic block as this instruction,
149 | /// return true if this instruction comes before Other. In the worst case,
150 | /// this takes linear time in the number of instructions in the block. The
151 | /// results are cached, so in common cases when the block remains unmodified,
152 | /// it takes constant time.
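 | ///
 | /// Example (editor's sketch; A and B are assumed to be in the same block):
 | /// \code
 | ///   Instruction *First = A->comesBefore(B) ? A : B;
 | /// \endcode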
153 | bool comesBefore(const Instruction *Other) const; |
154 | |
155 | //===--------------------------------------------------------------------===// |
156 | // Subclass classification. |
157 | //===--------------------------------------------------------------------===// |
158 | |
159 | /// Returns a member of one of the enums like Instruction::Add. |
160 | unsigned getOpcode() const { return getValueID() - InstructionVal; } |
161 | |
162 | const char *getOpcodeName() const { return getOpcodeName(getOpcode()); } |
163 | bool isTerminator() const { return isTerminator(getOpcode()); } |
164 | bool isUnaryOp() const { return isUnaryOp(getOpcode()); } |
165 | bool isBinaryOp() const { return isBinaryOp(getOpcode()); } |
166 | bool isIntDivRem() const { return isIntDivRem(getOpcode()); } |
167 | bool isShift() const { return isShift(getOpcode()); } |
168 | bool isCast() const { return isCast(getOpcode()); } |
169 | bool isFuncletPad() const { return isFuncletPad(getOpcode()); } |
170 | bool isExceptionalTerminator() const { |
171 | return isExceptionalTerminator(getOpcode()); |
172 | } |
173 | bool isIndirectTerminator() const { |
174 | return isIndirectTerminator(getOpcode()); |
175 | } |
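 |
 | // Example (editor's sketch): these predicates are commonly used for
 | // opcode-based dispatch; visitBinOp/visitCast are hypothetical helpers.
 | //
 | //   if (I->isBinaryOp())
 | //     visitBinOp(cast<BinaryOperator>(I));
 | //   else if (I->isCast())
 | //     visitCast(cast<CastInst>(I));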
176 | |
177 | static const char *getOpcodeName(unsigned OpCode);
178 | |
179 | static inline bool isTerminator(unsigned OpCode) { |
180 | return OpCode >= TermOpsBegin && OpCode < TermOpsEnd; |
181 | } |
182 | |
183 | static inline bool isUnaryOp(unsigned Opcode) { |
184 | return Opcode >= UnaryOpsBegin && Opcode < UnaryOpsEnd; |
185 | } |
186 | static inline bool isBinaryOp(unsigned Opcode) { |
187 | return Opcode >= BinaryOpsBegin && Opcode < BinaryOpsEnd; |
188 | } |
189 | |
190 | static inline bool isIntDivRem(unsigned Opcode) { |
191 | return Opcode == UDiv || Opcode == SDiv || Opcode == URem || Opcode == SRem; |
192 | } |
193 | |
194 | /// Determine if the Opcode is one of the shift instructions. |
195 | static inline bool isShift(unsigned Opcode) { |
196 | return Opcode >= Shl && Opcode <= AShr; |
197 | } |
198 | |
199 | /// Return true if this is a logical shift left or a logical shift right. |
200 | inline bool isLogicalShift() const { |
201 | return getOpcode() == Shl || getOpcode() == LShr; |
202 | } |
203 | |
204 | /// Return true if this is an arithmetic shift right. |
205 | inline bool isArithmeticShift() const { |
206 | return getOpcode() == AShr; |
207 | } |
208 | |
209 | /// Determine if the Opcode is and/or/xor. |
210 | static inline bool isBitwiseLogicOp(unsigned Opcode) { |
211 | return Opcode == And || Opcode == Or || Opcode == Xor; |
212 | } |
213 | |
214 | /// Return true if this is and/or/xor. |
215 | inline bool isBitwiseLogicOp() const { |
216 | return isBitwiseLogicOp(getOpcode()); |
217 | } |
218 | |
219 | /// Determine if the OpCode is one of the CastInst instructions. |
220 | static inline bool isCast(unsigned OpCode) { |
221 | return OpCode >= CastOpsBegin && OpCode < CastOpsEnd; |
222 | } |
223 | |
224 | /// Determine if the OpCode is one of the FuncletPadInst instructions. |
225 | static inline bool isFuncletPad(unsigned OpCode) { |
226 | return OpCode >= FuncletPadOpsBegin && OpCode < FuncletPadOpsEnd; |
227 | } |
228 | |
229 | /// Returns true if the OpCode is a terminator related to exception handling. |
230 | static inline bool isExceptionalTerminator(unsigned OpCode) { |
231 | switch (OpCode) { |
232 | case Instruction::CatchSwitch: |
233 | case Instruction::CatchRet: |
234 | case Instruction::CleanupRet: |
235 | case Instruction::Invoke: |
236 | case Instruction::Resume: |
237 | return true; |
238 | default: |
239 | return false; |
240 | } |
241 | } |
242 | |
243 | /// Returns true if the OpCode is a terminator with indirect targets. |
244 | static inline bool isIndirectTerminator(unsigned OpCode) { |
245 | switch (OpCode) { |
246 | case Instruction::IndirectBr: |
247 | case Instruction::CallBr: |
248 | return true; |
249 | default: |
250 | return false; |
251 | } |
252 | } |
253 | |
254 | //===--------------------------------------------------------------------===// |
255 | // Metadata manipulation. |
256 | //===--------------------------------------------------------------------===// |
257 | |
258 | /// Return true if this instruction has any metadata attached to it. |
259 | bool hasMetadata() const { return DbgLoc || Value::hasMetadata(); } |
260 | |
261 | /// Return true if this instruction has metadata attached to it other than a |
262 | /// debug location. |
263 | bool hasMetadataOtherThanDebugLoc() const { return Value::hasMetadata(); } |
264 | |
265 | /// Return true if this instruction has the given type of metadata attached. |
266 | bool hasMetadata(unsigned KindID) const { |
267 | return getMetadata(KindID) != nullptr; |
268 | } |
269 | |
270 | /// Return true if this instruction has the given type of metadata attached. |
271 | bool hasMetadata(StringRef Kind) const { |
272 | return getMetadata(Kind) != nullptr; |
273 | } |
274 | |
275 | /// Get the metadata of given kind attached to this Instruction. |
276 | /// If the metadata is not found then return null. |
277 | MDNode *getMetadata(unsigned KindID) const { |
278 | if (!hasMetadata()) return nullptr; |
279 | return getMetadataImpl(KindID); |
280 | } |
281 | |
282 | /// Get the metadata of given kind attached to this Instruction. |
283 | /// If the metadata is not found then return null. |
284 | MDNode *getMetadata(StringRef Kind) const { |
285 | if (!hasMetadata()) return nullptr; |
286 | return getMetadataImpl(Kind); |
287 | } |
288 | |
289 | /// Get all metadata attached to this Instruction. The first element of each |
290 | /// pair returned is the KindID, the second element is the metadata value. |
291 | /// This list is returned sorted by the KindID. |
292 | void |
293 | getAllMetadata(SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const { |
294 | if (hasMetadata()) |
295 | getAllMetadataImpl(MDs); |
296 | } |
297 | |
298 | /// This does the same thing as getAllMetadata, except that it filters out the |
299 | /// debug location. |
300 | void getAllMetadataOtherThanDebugLoc( |
301 | SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const { |
302 | Value::getAllMetadata(MDs); |
303 | } |
304 | |
305 | /// Fills the AAMDNodes structure with AA metadata from this instruction. |
306 | /// When Merge is true, the existing AA metadata is merged with that from this |
307 | /// instruction providing the most-general result. |
308 | void getAAMetadata(AAMDNodes &N, bool Merge = false) const; |
309 | |
310 | /// Set the metadata of the specified kind to the specified node. This updates |
311 | /// or replaces metadata if already present, or removes it if Node is null. |
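 | ///
 | /// Example (editor's sketch; "my.pass.note" is a hypothetical metadata kind
 | /// and Note an assumed MDNode*):
 | /// \code
 | ///   if (MDNode *Prof = I->getMetadata(LLVMContext::MD_prof))
 | ///     (void)Prof; // Branch-weight metadata is attached.
 | ///   I->setMetadata("my.pass.note", Note); // Passing nullptr removes it.
 | /// \endcode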
312 | void setMetadata(unsigned KindID, MDNode *Node); |
313 | void setMetadata(StringRef Kind, MDNode *Node); |
314 | |
315 | /// Copy metadata from \p SrcInst to this instruction. \p WL, if not empty, |
316 | /// specifies the list of meta data that needs to be copied. If \p WL is |
317 | /// empty, all meta data will be copied. |
318 | void copyMetadata(const Instruction &SrcInst, |
319 | ArrayRef<unsigned> WL = ArrayRef<unsigned>()); |
320 | |
321 | /// If the instruction has "branch_weights" MD_prof metadata and the MDNode |
322 | /// has three operands (including name string), swap the order of the |
323 | /// metadata. |
324 | void swapProfMetadata(); |
325 | |
326 | /// Drop all unknown metadata except for debug locations. |
327 | /// @{ |
328 | /// Passes are required to drop metadata they don't understand. This is a |
329 | /// convenience method for passes to do so. |
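 | ///
 | /// Example (editor's sketch): a pass that only knows how to preserve TBAA
 | /// could drop everything else:
 | /// \code
 | ///   I->dropUnknownNonDebugMetadata(LLVMContext::MD_tbaa);
 | /// \endcode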
330 | void dropUnknownNonDebugMetadata(ArrayRef<unsigned> KnownIDs); |
331 | void dropUnknownNonDebugMetadata() { |
332 | return dropUnknownNonDebugMetadata(None); |
333 | } |
334 | void dropUnknownNonDebugMetadata(unsigned ID1) { |
335 | return dropUnknownNonDebugMetadata(makeArrayRef(ID1)); |
336 | } |
337 | void dropUnknownNonDebugMetadata(unsigned ID1, unsigned ID2) { |
338 | unsigned IDs[] = {ID1, ID2}; |
339 | return dropUnknownNonDebugMetadata(IDs); |
340 | } |
341 | /// @} |
342 | |
343 | /// Sets the metadata on this instruction from the AAMDNodes structure. |
344 | void setAAMetadata(const AAMDNodes &N); |
345 | |
346 | /// Retrieve the raw weight values of a conditional branch or select. |
347 | /// Returns true on success with profile weights filled in. |
348 | /// Returns false if no metadata or invalid metadata was found. |
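 | ///
 | /// Example (editor's sketch; Br is an assumed conditional BranchInst*):
 | /// \code
 | ///   uint64_t TrueWeight, FalseWeight;
 | ///   if (Br->extractProfMetadata(TrueWeight, FalseWeight))
 | ///     (void)TrueWeight; // Both weights are valid here.
 | /// \endcode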
349 | bool extractProfMetadata(uint64_t &TrueVal, uint64_t &FalseVal) const; |
350 | |
351 | /// Retrieve total raw weight values of a branch. |
352 | /// Returns true on success with profile total weights filled in. |
353 | /// Returns false if no metadata was found. |
354 | bool extractProfTotalWeight(uint64_t &TotalVal) const; |
355 | |
356 | /// Set the debug location information for this instruction. |
357 | void setDebugLoc(DebugLoc Loc) { DbgLoc = std::move(Loc); } |
358 | |
359 | /// Return the debug location for this node as a DebugLoc. |
360 | const DebugLoc &getDebugLoc() const { return DbgLoc; } |
361 | |
362 | /// Set or clear the nuw flag on this instruction, which must be an operator |
363 | /// which supports this flag. See LangRef.html for the meaning of this flag. |
364 | void setHasNoUnsignedWrap(bool b = true); |
365 | |
366 | /// Set or clear the nsw flag on this instruction, which must be an operator |
367 | /// which supports this flag. See LangRef.html for the meaning of this flag. |
368 | void setHasNoSignedWrap(bool b = true); |
369 | |
370 | /// Set or clear the exact flag on this instruction, which must be an operator |
371 | /// which supports this flag. See LangRef.html for the meaning of this flag. |
372 | void setIsExact(bool b = true); |
373 | |
374 | /// Determine whether the no unsigned wrap flag is set. |
375 | bool hasNoUnsignedWrap() const; |
376 | |
377 | /// Determine whether the no signed wrap flag is set. |
378 | bool hasNoSignedWrap() const; |
379 | |
380 | /// Drops flags that may cause this instruction to evaluate to poison despite |
381 | /// having non-poison inputs. |
382 | void dropPoisonGeneratingFlags(); |
383 | |
384 | /// Determine whether the exact flag is set. |
385 | bool isExact() const; |
386 | |
387 | /// Set or clear all fast-math-flags on this instruction, which must be an |
388 | /// operator which supports this flag. See LangRef.html for the meaning of |
389 | /// this flag. |
390 | void setFast(bool B); |
391 | |
392 | /// Set or clear the reassociation flag on this instruction, which must be |
393 | /// an operator which supports this flag. See LangRef.html for the meaning of |
394 | /// this flag. |
395 | void setHasAllowReassoc(bool B); |
396 | |
397 | /// Set or clear the no-nans flag on this instruction, which must be an |
398 | /// operator which supports this flag. See LangRef.html for the meaning of |
399 | /// this flag. |
400 | void setHasNoNaNs(bool B); |
401 | |
402 | /// Set or clear the no-infs flag on this instruction, which must be an |
403 | /// operator which supports this flag. See LangRef.html for the meaning of |
404 | /// this flag. |
405 | void setHasNoInfs(bool B); |
406 | |
407 | /// Set or clear the no-signed-zeros flag on this instruction, which must be |
408 | /// an operator which supports this flag. See LangRef.html for the meaning of |
409 | /// this flag. |
410 | void setHasNoSignedZeros(bool B); |
411 | |
412 | /// Set or clear the allow-reciprocal flag on this instruction, which must be |
413 | /// an operator which supports this flag. See LangRef.html for the meaning of |
414 | /// this flag. |
415 | void setHasAllowReciprocal(bool B); |
416 | |
417 | /// Set or clear the allow-contract flag on this instruction, which must be |
418 | /// an operator which supports this flag. See LangRef.html for the meaning of |
419 | /// this flag. |
420 | void setHasAllowContract(bool B); |
421 | |
422 | /// Set or clear the approximate-math-functions flag on this instruction, |
423 | /// which must be an operator which supports this flag. See LangRef.html for |
424 | /// the meaning of this flag. |
425 | void setHasApproxFunc(bool B); |
426 | |
427 | /// Convenience function for setting multiple fast-math flags on this |
428 | /// instruction, which must be an operator which supports these flags. See |
429 | /// LangRef.html for the meaning of these flags. |
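 | ///
 | /// Example (editor's sketch; FPOp is an assumed floating-point operator):
 | /// \code
 | ///   FastMathFlags FMF;
 | ///   FMF.setAllowReassoc();
 | ///   FMF.setNoNaNs();
 | ///   FPOp->setFastMathFlags(FMF);
 | /// \endcode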
430 | void setFastMathFlags(FastMathFlags FMF); |
431 | |
432 | /// Convenience function for transferring all fast-math flag values to this |
433 | /// instruction, which must be an operator which supports these flags. See |
434 | /// LangRef.html for the meaning of these flags. |
435 | void copyFastMathFlags(FastMathFlags FMF); |
436 | |
437 | /// Determine whether all fast-math-flags are set. |
438 | bool isFast() const; |
439 | |
440 | /// Determine whether the allow-reassociation flag is set. |
441 | bool hasAllowReassoc() const; |
442 | |
443 | /// Determine whether the no-NaNs flag is set. |
444 | bool hasNoNaNs() const; |
445 | |
446 | /// Determine whether the no-infs flag is set. |
447 | bool hasNoInfs() const; |
448 | |
449 | /// Determine whether the no-signed-zeros flag is set. |
450 | bool hasNoSignedZeros() const; |
451 | |
452 | /// Determine whether the allow-reciprocal flag is set. |
453 | bool hasAllowReciprocal() const; |
454 | |
455 | /// Determine whether the allow-contract flag is set. |
456 | bool hasAllowContract() const; |
457 | |
458 | /// Determine whether the approximate-math-functions flag is set. |
459 | bool hasApproxFunc() const; |
460 | |
461 | /// Convenience function for getting all the fast-math flags, which must be an |
462 | /// operator which supports these flags. See LangRef.html for the meaning of |
463 | /// these flags. |
464 | FastMathFlags getFastMathFlags() const; |
465 | |
466 | /// Copy I's fast-math flags |
467 | void copyFastMathFlags(const Instruction *I); |
468 | |
469 | /// Convenience method to copy supported exact, fast-math, and (optionally) |
470 | /// wrapping flags from V to this instruction. |
471 | void copyIRFlags(const Value *V, bool IncludeWrapFlags = true); |
472 | |
473 | /// Logical 'and' of any supported wrapping, exact, and fast-math flags of |
474 | /// V and this instruction. |
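 | ///
 | /// Example (editor's sketch): when folding instructions A and B into NewI,
 | /// the conservative pattern is to copy one flag set and intersect:
 | /// \code
 | ///   NewI->copyIRFlags(A); // Take A's flags...
 | ///   NewI->andIRFlags(B);  // ...then keep only those B also has.
 | /// \endcode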
475 | void andIRFlags(const Value *V); |
476 | |
477 | /// Merge 2 debug locations and apply it to the Instruction. If the
478 | /// instruction is a CallInst, we need to traverse the inline chain to find
479 | /// the common scope. This is not efficient for N-way merging as each time
480 | /// you merge 2 locations, you need to rebuild the hashmap to find the
481 | /// common scope. However, we still choose this API because:
482 | ///   1) Simplicity: it takes 2 locations instead of a list of locations.
483 | ///   2) In the worst case, it increases the complexity from O(N*I) to
484 | ///      O(2*N*I), where N is # of Instructions to merge, and I is the
485 | ///      maximum level of the inline stack, so it is still linear.
486 | ///   3) Merging of call instructions should be extremely rare in real
487 | ///      applications, thus N-way merging should be a cold code path.
488 | /// The DebugLoc attached to this instruction will be overwritten by the
489 | /// merged DebugLoc.
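 | ///
 | /// Example (editor's sketch, the usual pattern when combining A and B into
 | /// NewI):
 | /// \code
 | ///   NewI->applyMergedLocation(A->getDebugLoc(), B->getDebugLoc());
 | /// \endcode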
490 | void applyMergedLocation(const DILocation *LocA, const DILocation *LocB); |
491 | |
492 | /// Updates the debug location given that the instruction has been hoisted |
493 | /// from a block to a predecessor of that block. |
494 | /// Note: it is undefined behavior to call this on an instruction not |
495 | /// currently inserted into a function. |
496 | void updateLocationAfterHoist(); |
497 | |
498 | /// Drop the instruction's debug location. This does not guarantee removal |
499 | /// of the !dbg source location attachment, as it must set a line 0 location |
500 | /// with scope information attached on call instructions. To guarantee |
501 | /// removal of the !dbg attachment, use the \ref setDebugLoc() API. |
502 | /// Note: it is undefined behavior to call this on an instruction not |
503 | /// currently inserted into a function. |
504 | void dropLocation(); |
505 | |
506 | private: |
507 | // These are all implemented in Metadata.cpp. |
508 | MDNode *getMetadataImpl(unsigned KindID) const; |
509 | MDNode *getMetadataImpl(StringRef Kind) const; |
510 | void |
511 | getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned, MDNode *>> &) const; |
512 | |
513 | public: |
514 | //===--------------------------------------------------------------------===// |
515 | // Predicates and helper methods. |
516 | //===--------------------------------------------------------------------===// |
517 | |
518 | /// Return true if the instruction is associative: |
519 | /// |
520 | /// Associative operators satisfy: x op (y op z) === (x op y) op z |
521 | /// |
522 | /// In LLVM, the Add, Mul, And, Or, and Xor operators are associative. |
523 | /// |
524 | bool isAssociative() const LLVM_READONLY;
525 | static bool isAssociative(unsigned Opcode) { |
526 | return Opcode == And || Opcode == Or || Opcode == Xor || |
527 | Opcode == Add || Opcode == Mul; |
528 | } |
529 | |
530 | /// Return true if the instruction is commutative: |
531 | /// |
532 | /// Commutative operators satisfy: (x op y) === (y op x) |
533 | /// |
534 | /// In LLVM, these are the commutative operators, plus the EQ and NE forms
535 | /// of the compare instructions, when applied to any type.
536 | /// |
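 | /// Example (editor's note): integer Add is both commutative and
 | /// associative; FAdd is commutative, but is only treated as associative
 | /// when suitable fast-math flags are present:
 | /// \code
 | ///   bool CanSwapOperands = BO->isCommutative(); // x op y == y op x
 | ///   bool CanReassociate = BO->isAssociative();  // (x op y) op z == x op (y op z)
 | /// \endcode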
537 | bool isCommutative() const LLVM_READONLY;
538 | static bool isCommutative(unsigned Opcode) { |
539 | switch (Opcode) { |
540 | case Add: case FAdd: |
541 | case Mul: case FMul: |
542 | case And: case Or: case Xor: |
543 | return true; |
544 | default: |
545 | return false; |
546 | } |
547 | } |
548 | |
549 | /// Return true if the instruction is idempotent: |
550 | /// |
551 | /// Idempotent operators satisfy: x op x === x |
552 | /// |
553 | /// In LLVM, the And and Or operators are idempotent. |
554 | /// |
555 | bool isIdempotent() const { return isIdempotent(getOpcode()); } |
556 | static bool isIdempotent(unsigned Opcode) { |
557 | return Opcode == And || Opcode == Or; |
558 | } |
559 | |
560 | /// Return true if the instruction is nilpotent: |
561 | /// |
562 | /// Nilpotent operators satisfy: x op x === Id, |
563 | /// |
564 | /// where Id is the identity for the operator, i.e. a constant such that |
565 | /// x op Id === x and Id op x === x for all x. |
566 | /// |
567 | /// In LLVM, the Xor operator is nilpotent. |
568 | /// |
569 | bool isNilpotent() const { return isNilpotent(getOpcode()); } |
570 | static bool isNilpotent(unsigned Opcode) { |
571 | return Opcode == Xor; |
572 | } |
573 | |
574 | /// Return true if this instruction may modify memory. |
575 | bool mayWriteToMemory() const; |
576 | |
577 | /// Return true if this instruction may read memory. |
578 | bool mayReadFromMemory() const; |
579 | |
580 | /// Return true if this instruction may read or write memory. |
581 | bool mayReadOrWriteMemory() const { |
582 | return mayReadFromMemory() || mayWriteToMemory(); |
583 | } |
584 | |
585 | /// Return true if this instruction has an AtomicOrdering of unordered or |
586 | /// higher. |
587 | bool isAtomic() const; |
588 | |
589 | /// Return true if this atomic instruction loads from memory. |
590 | bool hasAtomicLoad() const; |
591 | |
592 | /// Return true if this atomic instruction stores to memory. |
593 | bool hasAtomicStore() const; |
594 | |
595 | /// Return true if this instruction may throw an exception. |
596 | bool mayThrow() const; |
597 | |
598 | /// Return true if this instruction behaves like a memory fence: it can load |
599 | /// from or store to memory without being given a memory location.
600 | bool isFenceLike() const { |
601 | switch (getOpcode()) { |
602 | default: |
603 | return false; |
604 | // This list should be kept in sync with the list in mayWriteToMemory for |
605 | // all opcodes which don't have a memory location. |
606 | case Instruction::Fence: |
607 | case Instruction::CatchPad: |
608 | case Instruction::CatchRet: |
609 | case Instruction::Call: |
610 | case Instruction::Invoke: |
611 | return true; |
612 | } |
613 | } |
614 | |
615 | /// Return true if the instruction may have side effects. |
616 | /// |
617 | /// Note that this does not consider malloc and alloca to have side |
618 | /// effects because the newly allocated memory is completely invisible to |
619 | /// instructions which don't use the returned value. For cases where this |
620 | /// matters, isSafeToSpeculativelyExecute may be more appropriate. |
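 | ///
 | /// Example (editor's sketch of the classic trivial dead-code guard):
 | /// \code
 | ///   if (I->use_empty() && !I->mayHaveSideEffects() && !I->isTerminator())
 | ///     I->eraseFromParent();
 | /// \endcode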
621 | bool mayHaveSideEffects() const { return mayWriteToMemory() || mayThrow(); } |
622 | |
623 | /// Return true if the instruction can be removed if the result is unused. |
624 | /// |
625 | /// When constant folding, some instructions cannot be removed even if their
626 | /// results are unused. Specifically, terminator instructions and calls that
627 | /// may have side effects cannot be removed without semantically changing the
628 | /// generated program.
629 | bool isSafeToRemove() const; |
630 | |
631 | /// Return true if the instruction is a variety of EH-block. |
632 | bool isEHPad() const { |
633 | switch (getOpcode()) { |
634 | case Instruction::CatchSwitch: |
635 | case Instruction::CatchPad: |
636 | case Instruction::CleanupPad: |
637 | case Instruction::LandingPad: |
638 | return true; |
639 | default: |
640 | return false; |
641 | } |
642 | } |
643 | |
644 | /// Return true if the instruction is an llvm.lifetime.start or
645 | /// llvm.lifetime.end marker.
646 | bool isLifetimeStartOrEnd() const; |
647 | |
648 | /// Return a pointer to the next non-debug instruction in the same basic |
649 | /// block as 'this', or nullptr if no such instruction exists. |
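 | ///
 | /// Example (editor's sketch; visit is a hypothetical callback):
 | /// \code
 | ///   for (const Instruction *I = &BB.front(); I;
 | ///        I = I->getNextNonDebugInstruction())
 | ///     visit(*I);
 | /// \endcode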
650 | const Instruction *getNextNonDebugInstruction() const; |
651 | Instruction *getNextNonDebugInstruction() { |
652 | return const_cast<Instruction *>( |
653 | static_cast<const Instruction *>(this)->getNextNonDebugInstruction()); |
654 | } |
655 | |
656 | /// Return a pointer to the previous non-debug instruction in the same basic |
657 | /// block as 'this', or nullptr if no such instruction exists. |
658 | const Instruction *getPrevNonDebugInstruction() const; |
659 | Instruction *getPrevNonDebugInstruction() { |
660 | return const_cast<Instruction *>( |
661 | static_cast<const Instruction *>(this)->getPrevNonDebugInstruction()); |
662 | } |
663 | |
664 | /// Create a copy of 'this' instruction that is identical in all ways except |
665 | /// the following: |
666 | /// * The instruction has no parent |
667 | /// * The instruction has no name |
668 | /// |
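 | /// Example (editor's sketch): the copy must be explicitly inserted (or
 | /// deleted), since it starts out parentless:
 | /// \code
 | ///   Instruction *Copy = I->clone();
 | ///   Copy->insertBefore(I);
 | /// \endcode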
669 | Instruction *clone() const; |
670 | |
671 | /// Return true if the specified instruction is exactly identical to the
672 | /// current one. This means that all operands match and any extra information
673 | /// (e.g. whether a load is volatile) agrees.
674 | bool isIdenticalTo(const Instruction *I) const; |
675 | |
676 | /// This is like isIdenticalTo, except that it ignores the |
677 | /// SubclassOptionalData flags, which may specify conditions under which the |
678 | /// instruction's result is undefined. |
679 | bool isIdenticalToWhenDefined(const Instruction *I) const; |
680 | |
681 | /// When checking for operation equivalence (using isSameOperationAs) it is |
682 | /// sometimes useful to ignore certain attributes. |
683 | enum OperationEquivalenceFlags { |
684 | /// Check for equivalence ignoring load/store alignment. |
685 | CompareIgnoringAlignment = 1<<0, |
686 | /// Check for equivalence treating a type and a vector of that type |
687 | /// as equivalent. |
688 | CompareUsingScalarTypes = 1<<1 |
689 | }; |
690 | |
691 | /// This function determines if the specified instruction executes the same |
692 | /// operation as the current one. This means that the opcodes, type, operand |
693 | /// types and any other factors affecting the operation must be the same. This |
694 | /// is similar to isIdenticalTo except the operands themselves don't have to |
695 | /// be identical. |
696 | /// @returns true if the specified instruction is the same operation as |
697 | /// the current one. |
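 | ///
 | /// Example (editor's sketch; L1 and L2 are assumed load instructions):
 | /// \code
 | ///   bool Same =
 | ///       L1->isSameOperationAs(L2, Instruction::CompareIgnoringAlignment);
 | /// \endcode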
699 | bool isSameOperationAs(const Instruction *I, unsigned flags = 0) const; |
700 | |
701 | /// Return true if there are any uses of this instruction in blocks other than |
702 | /// the specified block. Note that PHI nodes are considered to evaluate their |
703 | /// operands in the corresponding predecessor block. |
704 | bool isUsedOutsideOfBlock(const BasicBlock *BB) const; |
705 | |
706 | /// Return the number of successors that this instruction has. The instruction |
707 | /// must be a terminator. |
708 | unsigned getNumSuccessors() const; |
709 | |
710 | /// Return the specified successor. This instruction must be a terminator. |
711 | BasicBlock *getSuccessor(unsigned Idx) const; |
712 | |
713 | /// Update the specified successor to point at the provided block. This |
714 | /// instruction must be a terminator. |
715 | void setSuccessor(unsigned Idx, BasicBlock *BB); |
716 | |
717 | /// Replace the specified successor OldBB with the provided block NewBB.
718 | /// This instruction must be a terminator.
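 | ///
 | /// Example (editor's note): this is roughly equivalent to
 | /// \code
 | ///   for (unsigned I = 0, E = Term->getNumSuccessors(); I != E; ++I)
 | ///     if (Term->getSuccessor(I) == OldBB)
 | ///       Term->setSuccessor(I, NewBB);
 | /// \endcode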
719 | void replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB); |
720 | |
721 | /// Methods for support type inquiry through isa, cast, and dyn_cast: |
722 | static bool classof(const Value *V) { |
723 | return V->getValueID() >= Value::InstructionVal; |
724 | } |
725 | |
726 | //---------------------------------------------------------------------- |
727 | // Exported enumerations. |
728 | // |
729 | enum TermOps { // These terminate basic blocks |
730 | #define FIRST_TERM_INST(N) TermOpsBegin = N, |
731 | #define HANDLE_TERM_INST(N, OPC, CLASS) OPC = N, |
732 | #define LAST_TERM_INST(N) TermOpsEnd = N+1 |
733 | #include "llvm/IR/Instruction.def" |
734 | }; |
735 | |
736 | enum UnaryOps { |
737 | #define FIRST_UNARY_INST(N) UnaryOpsBegin = N, |
738 | #define HANDLE_UNARY_INST(N, OPC, CLASS) OPC = N, |
739 | #define LAST_UNARY_INST(N) UnaryOpsEnd = N+1 |
740 | #include "llvm/IR/Instruction.def" |
741 | }; |
742 | |
743 | enum BinaryOps { |
744 | #define FIRST_BINARY_INST(N) BinaryOpsBegin = N, |
745 | #define HANDLE_BINARY_INST(N, OPC, CLASS) OPC = N, |
746 | #define LAST_BINARY_INST(N) BinaryOpsEnd = N+1 |
747 | #include "llvm/IR/Instruction.def" |
748 | }; |
749 | |
750 | enum MemoryOps { |
751 | #define FIRST_MEMORY_INST(N) MemoryOpsBegin = N, |
752 | #define HANDLE_MEMORY_INST(N, OPC, CLASS) OPC = N, |
753 | #define LAST_MEMORY_INST(N) MemoryOpsEnd = N+1 |
754 | #include "llvm/IR/Instruction.def" |
755 | }; |
756 | |
757 | enum CastOps { |
758 | #define FIRST_CAST_INST(N) CastOpsBegin = N, |
759 | #define HANDLE_CAST_INST(N, OPC, CLASS) OPC = N, |
760 | #define LAST_CAST_INST(N) CastOpsEnd = N+1 |
761 | #include "llvm/IR/Instruction.def" |
762 | }; |
763 | |
764 | enum FuncletPadOps { |
765 | #define FIRST_FUNCLETPAD_INST(N) FuncletPadOpsBegin = N, |
766 | #define HANDLE_FUNCLETPAD_INST(N, OPC, CLASS) OPC = N, |
767 | #define LAST_FUNCLETPAD_INST(N) FuncletPadOpsEnd = N+1 |
768 | #include "llvm/IR/Instruction.def" |
769 | }; |
770 | |
771 | enum OtherOps { |
772 | #define FIRST_OTHER_INST(N) OtherOpsBegin = N, |
773 | #define HANDLE_OTHER_INST(N, OPC, CLASS) OPC = N, |
774 | #define LAST_OTHER_INST(N) OtherOpsEnd = N+1 |
775 | #include "llvm/IR/Instruction.def" |
776 | }; |
777 | |
778 | private: |
779 | friend class SymbolTableListTraits<Instruction>; |
780 | friend class BasicBlock; // For renumbering. |
781 | |
782 | // Shadow Value::setValueSubclassData with a private forwarding method so that |
783 | // subclasses cannot accidentally use it. |
784 | void setValueSubclassData(unsigned short D) { |
785 | Value::setValueSubclassData(D); |
786 | } |
787 | |
788 | unsigned short getSubclassDataFromValue() const { |
789 | return Value::getSubclassDataFromValue(); |
790 | } |
791 | |
792 | void setParent(BasicBlock *P); |
793 | |
794 | protected: |
795 | // Instruction subclasses can stick up to 15 bits of data into the
796 | // SubclassData field of the instruction with these members.
797 | |
798 | template <typename BitfieldElement> |
799 | typename BitfieldElement::Type getSubclassData() const { |
800 | static_assert( |
801 | std::is_same<BitfieldElement, HasMetadataField>::value || |
802 | !Bitfield::isOverlapping<BitfieldElement, HasMetadataField>(), |
803 | "Must not overlap with the metadata bit"); |
804 | return Bitfield::get<BitfieldElement>(getSubclassDataFromValue()); |
805 | } |
806 | |
807 | template <typename BitfieldElement> |
808 | void setSubclassData(typename BitfieldElement::Type Value) { |
809 | static_assert( |
810 | std::is_same<BitfieldElement, HasMetadataField>::value || |
811 | !Bitfield::isOverlapping<BitfieldElement, HasMetadataField>(), |
812 | "Must not overlap with the metadata bit"); |
813 | auto Storage = getSubclassDataFromValue(); |
814 | Bitfield::set<BitfieldElement>(Storage, Value); |
815 | setValueSubclassData(Storage); |
816 | } |
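 |
 | // Example (editor's sketch): subclasses typically wrap these accessors in
 | // named getters/setters over one of the bitfield aliases declared above
 | // (`VolatileField` here is hypothetical):
 | //
 | //   bool isVolatile() const { return getSubclassData<VolatileField>(); }
 | //   void setVolatile(bool V) { setSubclassData<VolatileField>(V); }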
817 | |
818 | Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps, |
819 | Instruction *InsertBefore = nullptr); |
820 | Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps, |
821 | BasicBlock *InsertAtEnd); |
822 | |
823 | private: |
824 | /// Create a copy of this instruction. |
825 | Instruction *cloneImpl() const; |
826 | }; |
827 | |
828 | inline void ilist_alloc_traits<Instruction>::deleteNode(Instruction *V) { |
829 | V->deleteValue(); |
830 | } |
831 | |
832 | } // end namespace llvm |
833 | |
834 | #endif // LLVM_IR_INSTRUCTION_H |