File: | build/llvm-toolchain-snapshot-15~++20220420111733+e13d2efed663/llvm/include/llvm/IR/Instructions.h |
Warning: | line 1242, column 33 Called C++ object pointer is null |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===------- LoopBoundSplit.cpp - Split Loop Bound --------------*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | ||||
9 | #include "llvm/Transforms/Scalar/LoopBoundSplit.h" | |||
10 | #include "llvm/ADT/Sequence.h" | |||
11 | #include "llvm/Analysis/LoopAnalysisManager.h" | |||
12 | #include "llvm/Analysis/LoopInfo.h" | |||
13 | #include "llvm/Analysis/ScalarEvolution.h" | |||
14 | #include "llvm/Analysis/ScalarEvolutionExpressions.h" | |||
15 | #include "llvm/IR/PatternMatch.h" | |||
16 | #include "llvm/Transforms/Scalar/LoopPassManager.h" | |||
17 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" | |||
18 | #include "llvm/Transforms/Utils/Cloning.h" | |||
19 | #include "llvm/Transforms/Utils/LoopSimplify.h" | |||
20 | #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h" | |||
21 | ||||
22 | #define DEBUG_TYPE"loop-bound-split" "loop-bound-split" | |||
23 | ||||
24 | namespace llvm { | |||
25 | ||||
26 | using namespace PatternMatch; | |||
27 | ||||
28 | namespace { | |||
29 | struct ConditionInfo { | |||
30 | /// Branch instruction with this condition | |||
31 | BranchInst *BI = nullptr; | |||
32 | /// ICmp instruction with this condition | |||
33 | ICmpInst *ICmp = nullptr; | |||
34 | /// Preciate info | |||
35 | ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; | |||
36 | /// AddRec llvm value | |||
37 | Value *AddRecValue = nullptr; | |||
38 | /// Non PHI AddRec llvm value | |||
39 | Value *NonPHIAddRecValue; | |||
40 | /// Bound llvm value | |||
41 | Value *BoundValue = nullptr; | |||
42 | /// AddRec SCEV | |||
43 | const SCEVAddRecExpr *AddRecSCEV = nullptr; | |||
44 | /// Bound SCEV | |||
45 | const SCEV *BoundSCEV = nullptr; | |||
46 | ||||
47 | ConditionInfo() = default; | |||
48 | }; | |||
49 | } // namespace | |||
50 | ||||
51 | static void analyzeICmp(ScalarEvolution &SE, ICmpInst *ICmp, | |||
52 | ConditionInfo &Cond, const Loop &L) { | |||
53 | Cond.ICmp = ICmp; | |||
54 | if (match(ICmp, m_ICmp(Cond.Pred, m_Value(Cond.AddRecValue), | |||
55 | m_Value(Cond.BoundValue)))) { | |||
56 | const SCEV *AddRecSCEV = SE.getSCEV(Cond.AddRecValue); | |||
57 | const SCEV *BoundSCEV = SE.getSCEV(Cond.BoundValue); | |||
58 | const SCEVAddRecExpr *LHSAddRecSCEV = dyn_cast<SCEVAddRecExpr>(AddRecSCEV); | |||
59 | const SCEVAddRecExpr *RHSAddRecSCEV = dyn_cast<SCEVAddRecExpr>(BoundSCEV); | |||
60 | // Locate AddRec in LHSSCEV and Bound in RHSSCEV. | |||
61 | if (!LHSAddRecSCEV && RHSAddRecSCEV) { | |||
62 | std::swap(Cond.AddRecValue, Cond.BoundValue); | |||
63 | std::swap(AddRecSCEV, BoundSCEV); | |||
64 | Cond.Pred = ICmpInst::getSwappedPredicate(Cond.Pred); | |||
65 | } | |||
66 | ||||
67 | Cond.AddRecSCEV = dyn_cast<SCEVAddRecExpr>(AddRecSCEV); | |||
68 | Cond.BoundSCEV = BoundSCEV; | |||
69 | Cond.NonPHIAddRecValue = Cond.AddRecValue; | |||
70 | ||||
71 | // If the Cond.AddRecValue is PHI node, update Cond.NonPHIAddRecValue with | |||
72 | // value from backedge. | |||
73 | if (Cond.AddRecSCEV && isa<PHINode>(Cond.AddRecValue)) { | |||
74 | PHINode *PN = cast<PHINode>(Cond.AddRecValue); | |||
75 | Cond.NonPHIAddRecValue = PN->getIncomingValueForBlock(L.getLoopLatch()); | |||
76 | } | |||
77 | } | |||
78 | } | |||
79 | ||||
80 | static bool calculateUpperBound(const Loop &L, ScalarEvolution &SE, | |||
81 | ConditionInfo &Cond, bool IsExitCond) { | |||
82 | if (IsExitCond) { | |||
83 | const SCEV *ExitCount = SE.getExitCount(&L, Cond.ICmp->getParent()); | |||
84 | if (isa<SCEVCouldNotCompute>(ExitCount)) | |||
85 | return false; | |||
86 | ||||
87 | Cond.BoundSCEV = ExitCount; | |||
88 | return true; | |||
89 | } | |||
90 | ||||
91 | // For non-exit condtion, if pred is LT, keep existing bound. | |||
92 | if (Cond.Pred == ICmpInst::ICMP_SLT || Cond.Pred == ICmpInst::ICMP_ULT) | |||
93 | return true; | |||
94 | ||||
95 | // For non-exit condition, if pre is LE, try to convert it to LT. | |||
96 | // Range Range | |||
97 | // AddRec <= Bound --> AddRec < Bound + 1 | |||
98 | if (Cond.Pred != ICmpInst::ICMP_ULE && Cond.Pred != ICmpInst::ICMP_SLE) | |||
99 | return false; | |||
100 | ||||
101 | if (IntegerType *BoundSCEVIntType = | |||
102 | dyn_cast<IntegerType>(Cond.BoundSCEV->getType())) { | |||
103 | unsigned BitWidth = BoundSCEVIntType->getBitWidth(); | |||
104 | APInt Max = ICmpInst::isSigned(Cond.Pred) | |||
105 | ? APInt::getSignedMaxValue(BitWidth) | |||
106 | : APInt::getMaxValue(BitWidth); | |||
107 | const SCEV *MaxSCEV = SE.getConstant(Max); | |||
108 | // Check Bound < INT_MAX | |||
109 | ICmpInst::Predicate Pred = | |||
110 | ICmpInst::isSigned(Cond.Pred) ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT; | |||
111 | if (SE.isKnownPredicate(Pred, Cond.BoundSCEV, MaxSCEV)) { | |||
112 | const SCEV *BoundPlusOneSCEV = | |||
113 | SE.getAddExpr(Cond.BoundSCEV, SE.getOne(BoundSCEVIntType)); | |||
114 | Cond.BoundSCEV = BoundPlusOneSCEV; | |||
115 | Cond.Pred = Pred; | |||
116 | return true; | |||
117 | } | |||
118 | } | |||
119 | ||||
120 | // ToDo: Support ICMP_NE/EQ. | |||
121 | ||||
122 | return false; | |||
123 | } | |||
124 | ||||
125 | static bool hasProcessableCondition(const Loop &L, ScalarEvolution &SE, | |||
126 | ICmpInst *ICmp, ConditionInfo &Cond, | |||
127 | bool IsExitCond) { | |||
128 | analyzeICmp(SE, ICmp, Cond, L); | |||
129 | ||||
130 | // The BoundSCEV should be evaluated at loop entry. | |||
131 | if (!SE.isAvailableAtLoopEntry(Cond.BoundSCEV, &L)) | |||
132 | return false; | |||
133 | ||||
134 | // Allowed AddRec as induction variable. | |||
135 | if (!Cond.AddRecSCEV) | |||
136 | return false; | |||
137 | ||||
138 | if (!Cond.AddRecSCEV->isAffine()) | |||
139 | return false; | |||
140 | ||||
141 | const SCEV *StepRecSCEV = Cond.AddRecSCEV->getStepRecurrence(SE); | |||
142 | // Allowed constant step. | |||
143 | if (!isa<SCEVConstant>(StepRecSCEV)) | |||
144 | return false; | |||
145 | ||||
146 | ConstantInt *StepCI = cast<SCEVConstant>(StepRecSCEV)->getValue(); | |||
147 | // Allowed positive step for now. | |||
148 | // TODO: Support negative step. | |||
149 | if (StepCI->isNegative() || StepCI->isZero()) | |||
150 | return false; | |||
151 | ||||
152 | // Calculate upper bound. | |||
153 | if (!calculateUpperBound(L, SE, Cond, IsExitCond)) | |||
154 | return false; | |||
155 | ||||
156 | return true; | |||
157 | } | |||
158 | ||||
159 | static bool isProcessableCondBI(const ScalarEvolution &SE, | |||
160 | const BranchInst *BI) { | |||
161 | BasicBlock *TrueSucc = nullptr; | |||
162 | BasicBlock *FalseSucc = nullptr; | |||
163 | ICmpInst::Predicate Pred; | |||
164 | Value *LHS, *RHS; | |||
165 | if (!match(BI, m_Br(m_ICmp(Pred, m_Value(LHS), m_Value(RHS)), | |||
166 | m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) | |||
167 | return false; | |||
168 | ||||
169 | if (!SE.isSCEVable(LHS->getType())) | |||
170 | return false; | |||
171 | assert(SE.isSCEVable(RHS->getType()) && "Expected RHS's type is SCEVable")(static_cast <bool> (SE.isSCEVable(RHS->getType()) && "Expected RHS's type is SCEVable") ? void (0) : __assert_fail ("SE.isSCEVable(RHS->getType()) && \"Expected RHS's type is SCEVable\"" , "llvm/lib/Transforms/Scalar/LoopBoundSplit.cpp", 171, __extension__ __PRETTY_FUNCTION__)); | |||
172 | ||||
173 | if (TrueSucc == FalseSucc) | |||
174 | return false; | |||
175 | ||||
176 | return true; | |||
177 | } | |||
178 | ||||
179 | static bool canSplitLoopBound(const Loop &L, const DominatorTree &DT, | |||
180 | ScalarEvolution &SE, ConditionInfo &Cond) { | |||
181 | // Skip function with optsize. | |||
182 | if (L.getHeader()->getParent()->hasOptSize()) | |||
183 | return false; | |||
184 | ||||
185 | // Split only innermost loop. | |||
186 | if (!L.isInnermost()) | |||
187 | return false; | |||
188 | ||||
189 | // Check loop is in simplified form. | |||
190 | if (!L.isLoopSimplifyForm()) | |||
191 | return false; | |||
192 | ||||
193 | // Check loop is in LCSSA form. | |||
194 | if (!L.isLCSSAForm(DT)) | |||
195 | return false; | |||
196 | ||||
197 | // Skip loop that cannot be cloned. | |||
198 | if (!L.isSafeToClone()) | |||
199 | return false; | |||
200 | ||||
201 | BasicBlock *ExitingBB = L.getExitingBlock(); | |||
202 | // Assumed only one exiting block. | |||
203 | if (!ExitingBB) | |||
204 | return false; | |||
205 | ||||
206 | BranchInst *ExitingBI = dyn_cast<BranchInst>(ExitingBB->getTerminator()); | |||
207 | if (!ExitingBI) | |||
208 | return false; | |||
209 | ||||
210 | // Allowed only conditional branch with ICmp. | |||
211 | if (!isProcessableCondBI(SE, ExitingBI)) | |||
212 | return false; | |||
213 | ||||
214 | // Check the condition is processable. | |||
215 | ICmpInst *ICmp = cast<ICmpInst>(ExitingBI->getCondition()); | |||
216 | if (!hasProcessableCondition(L, SE, ICmp, Cond, /*IsExitCond*/ true)) | |||
217 | return false; | |||
218 | ||||
219 | Cond.BI = ExitingBI; | |||
220 | return true; | |||
221 | } | |||
222 | ||||
223 | static bool isProfitableToTransform(const Loop &L, const BranchInst *BI) { | |||
224 | // If the conditional branch splits a loop into two halves, we could | |||
225 | // generally say it is profitable. | |||
226 | // | |||
227 | // ToDo: Add more profitable cases here. | |||
228 | ||||
229 | // Check this branch causes diamond CFG. | |||
230 | BasicBlock *Succ0 = BI->getSuccessor(0); | |||
231 | BasicBlock *Succ1 = BI->getSuccessor(1); | |||
232 | ||||
233 | BasicBlock *Succ0Succ = Succ0->getSingleSuccessor(); | |||
234 | BasicBlock *Succ1Succ = Succ1->getSingleSuccessor(); | |||
235 | if (!Succ0Succ || !Succ1Succ || Succ0Succ != Succ1Succ) | |||
236 | return false; | |||
237 | ||||
238 | // ToDo: Calculate each successor's instruction cost. | |||
239 | ||||
240 | return true; | |||
241 | } | |||
242 | ||||
243 | static BranchInst *findSplitCandidate(const Loop &L, ScalarEvolution &SE, | |||
244 | ConditionInfo &ExitingCond, | |||
245 | ConditionInfo &SplitCandidateCond) { | |||
246 | for (auto *BB : L.blocks()) { | |||
247 | // Skip condition of backedge. | |||
248 | if (L.getLoopLatch() == BB) | |||
249 | continue; | |||
250 | ||||
251 | auto *BI = dyn_cast<BranchInst>(BB->getTerminator()); | |||
252 | if (!BI) | |||
253 | continue; | |||
254 | ||||
255 | // Check conditional branch with ICmp. | |||
256 | if (!isProcessableCondBI(SE, BI)) | |||
257 | continue; | |||
258 | ||||
259 | // Skip loop invariant condition. | |||
260 | if (L.isLoopInvariant(BI->getCondition())) | |||
261 | continue; | |||
262 | ||||
263 | // Check the condition is processable. | |||
264 | ICmpInst *ICmp = cast<ICmpInst>(BI->getCondition()); | |||
265 | if (!hasProcessableCondition(L, SE, ICmp, SplitCandidateCond, | |||
266 | /*IsExitCond*/ false)) | |||
267 | continue; | |||
268 | ||||
269 | if (ExitingCond.BoundSCEV->getType() != | |||
270 | SplitCandidateCond.BoundSCEV->getType()) | |||
271 | continue; | |||
272 | ||||
273 | // After transformation, we assume the split condition of the pre-loop is | |||
274 | // always true. In order to guarantee it, we need to check the start value | |||
275 | // of the split cond AddRec satisfies the split condition. | |||
276 | if (!SE.isLoopEntryGuardedByCond(&L, SplitCandidateCond.Pred, | |||
277 | SplitCandidateCond.AddRecSCEV->getStart(), | |||
278 | SplitCandidateCond.BoundSCEV)) | |||
279 | continue; | |||
280 | ||||
281 | SplitCandidateCond.BI = BI; | |||
282 | return BI; | |||
283 | } | |||
284 | ||||
285 | return nullptr; | |||
286 | } | |||
287 | ||||
288 | static bool splitLoopBound(Loop &L, DominatorTree &DT, LoopInfo &LI, | |||
289 | ScalarEvolution &SE, LPMUpdater &U) { | |||
290 | ConditionInfo SplitCandidateCond; | |||
291 | ConditionInfo ExitingCond; | |||
292 | ||||
293 | // Check we can split this loop's bound. | |||
294 | if (!canSplitLoopBound(L, DT, SE, ExitingCond)) | |||
295 | return false; | |||
296 | ||||
297 | if (!findSplitCandidate(L, SE, ExitingCond, SplitCandidateCond)) | |||
298 | return false; | |||
299 | ||||
300 | if (!isProfitableToTransform(L, SplitCandidateCond.BI)) | |||
301 | return false; | |||
302 | ||||
303 | // Now, we have a split candidate. Let's build a form as below. | |||
304 | // +--------------------+ | |||
305 | // | preheader | | |||
306 | // | set up newbound | | |||
307 | // +--------------------+ | |||
308 | // | /----------------\ | |||
309 | // +--------v----v------+ | | |||
310 | // | header |---\ | | |||
311 | // | with true condition| | | | |||
312 | // +--------------------+ | | | |||
313 | // | | | | |||
314 | // +--------v-----------+ | | | |||
315 | // | if.then.BB | | | | |||
316 | // +--------------------+ | | | |||
317 | // | | | | |||
318 | // +--------v-----------<---/ | | |||
319 | // | latch >----------/ | |||
320 | // | with newbound | | |||
321 | // +--------------------+ | |||
322 | // | | |||
323 | // +--------v-----------+ | |||
324 | // | preheader2 |--------------\ | |||
325 | // | if (AddRec i != | | | |||
326 | // | org bound) | | | |||
327 | // +--------------------+ | | |||
328 | // | /----------------\ | | |||
329 | // +--------v----v------+ | | | |||
330 | // | header2 |---\ | | | |||
331 | // | conditional branch | | | | | |||
332 | // |with false condition| | | | | |||
333 | // +--------------------+ | | | | |||
334 | // | | | | | |||
335 | // +--------v-----------+ | | | | |||
336 | // | if.then.BB2 | | | | | |||
337 | // +--------------------+ | | | | |||
338 | // | | | | | |||
339 | // +--------v-----------<---/ | | | |||
340 | // | latch2 >----------/ | | |||
341 | // | with org bound | | | |||
342 | // +--------v-----------+ | | |||
343 | // | | | |||
344 | // | +---------------+ | | |||
345 | // +--> exit <-------/ | |||
346 | // +---------------+ | |||
347 | ||||
348 | // Let's create post loop. | |||
349 | SmallVector<BasicBlock *, 8> PostLoopBlocks; | |||
350 | Loop *PostLoop; | |||
351 | ValueToValueMapTy VMap; | |||
352 | BasicBlock *PreHeader = L.getLoopPreheader(); | |||
353 | BasicBlock *SplitLoopPH = SplitEdge(PreHeader, L.getHeader(), &DT, &LI); | |||
354 | PostLoop = cloneLoopWithPreheader(L.getExitBlock(), SplitLoopPH, &L, VMap, | |||
355 | ".split", &LI, &DT, PostLoopBlocks); | |||
356 | remapInstructionsInBlocks(PostLoopBlocks, VMap); | |||
357 | ||||
358 | BasicBlock *PostLoopPreHeader = PostLoop->getLoopPreheader(); | |||
359 | IRBuilder<> Builder(&PostLoopPreHeader->front()); | |||
360 | ||||
361 | // Update phi nodes in header of post-loop. | |||
362 | bool isExitingLatch = | |||
363 | (L.getExitingBlock() == L.getLoopLatch()) ? true : false; | |||
364 | Value *ExitingCondLCSSAPhi = nullptr; | |||
365 | for (PHINode &PN : L.getHeader()->phis()) { | |||
366 | // Create LCSSA phi node in preheader of post-loop. | |||
367 | PHINode *LCSSAPhi = | |||
368 | Builder.CreatePHI(PN.getType(), 1, PN.getName() + ".lcssa"); | |||
369 | LCSSAPhi->setDebugLoc(PN.getDebugLoc()); | |||
370 | // If the exiting block is loop latch, the phi does not have the update at | |||
371 | // last iteration. In this case, update lcssa phi with value from backedge. | |||
372 | LCSSAPhi->addIncoming( | |||
373 | isExitingLatch ? PN.getIncomingValueForBlock(L.getLoopLatch()) : &PN, | |||
374 | L.getExitingBlock()); | |||
375 | ||||
376 | // Update the start value of phi node in post-loop with the LCSSA phi node. | |||
377 | PHINode *PostLoopPN = cast<PHINode>(VMap[&PN]); | |||
378 | PostLoopPN->setIncomingValueForBlock(PostLoopPreHeader, LCSSAPhi); | |||
379 | ||||
380 | // Find PHI with exiting condition from pre-loop. The PHI should be | |||
381 | // SCEVAddRecExpr and have same incoming value from backedge with | |||
382 | // ExitingCond. | |||
383 | if (!SE.isSCEVable(PN.getType())) | |||
384 | continue; | |||
385 | ||||
386 | const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN)); | |||
387 | if (PhiSCEV && ExitingCond.NonPHIAddRecValue == | |||
388 | PN.getIncomingValueForBlock(L.getLoopLatch())) | |||
389 | ExitingCondLCSSAPhi = LCSSAPhi; | |||
390 | } | |||
391 | ||||
392 | // Add conditional branch to check we can skip post-loop in its preheader. | |||
393 | Instruction *OrigBI = PostLoopPreHeader->getTerminator(); | |||
394 | ICmpInst::Predicate Pred = ICmpInst::ICMP_NE; | |||
395 | Value *Cond = | |||
396 | Builder.CreateICmp(Pred, ExitingCondLCSSAPhi, ExitingCond.BoundValue); | |||
397 | Builder.CreateCondBr(Cond, PostLoop->getHeader(), PostLoop->getExitBlock()); | |||
398 | OrigBI->eraseFromParent(); | |||
399 | ||||
400 | // Create new loop bound and add it into preheader of pre-loop. | |||
401 | const SCEV *NewBoundSCEV = ExitingCond.BoundSCEV; | |||
402 | const SCEV *SplitBoundSCEV = SplitCandidateCond.BoundSCEV; | |||
403 | NewBoundSCEV = ICmpInst::isSigned(ExitingCond.Pred) | |||
404 | ? SE.getSMinExpr(NewBoundSCEV, SplitBoundSCEV) | |||
405 | : SE.getUMinExpr(NewBoundSCEV, SplitBoundSCEV); | |||
406 | ||||
407 | SCEVExpander Expander( | |||
408 | SE, L.getHeader()->getParent()->getParent()->getDataLayout(), "split"); | |||
409 | Instruction *InsertPt = SplitLoopPH->getTerminator(); | |||
410 | Value *NewBoundValue = | |||
411 | Expander.expandCodeFor(NewBoundSCEV, NewBoundSCEV->getType(), InsertPt); | |||
412 | NewBoundValue->setName("new.bound"); | |||
413 | ||||
414 | // Replace exiting bound value of pre-loop NewBound. | |||
415 | ExitingCond.ICmp->setOperand(1, NewBoundValue); | |||
416 | ||||
417 | // Replace SplitCandidateCond.BI's condition of pre-loop by True. | |||
418 | LLVMContext &Context = PreHeader->getContext(); | |||
419 | SplitCandidateCond.BI->setCondition(ConstantInt::getTrue(Context)); | |||
420 | ||||
421 | // Replace cloned SplitCandidateCond.BI's condition in post-loop by False. | |||
422 | BranchInst *ClonedSplitCandidateBI = | |||
423 | cast<BranchInst>(VMap[SplitCandidateCond.BI]); | |||
424 | ClonedSplitCandidateBI->setCondition(ConstantInt::getFalse(Context)); | |||
425 | ||||
426 | // Replace exit branch target of pre-loop by post-loop's preheader. | |||
427 | if (L.getExitBlock() == ExitingCond.BI->getSuccessor(0)) | |||
428 | ExitingCond.BI->setSuccessor(0, PostLoopPreHeader); | |||
429 | else | |||
430 | ExitingCond.BI->setSuccessor(1, PostLoopPreHeader); | |||
431 | ||||
432 | // Update phi node in exit block of post-loop. | |||
433 | Builder.SetInsertPoint(&PostLoopPreHeader->front()); | |||
434 | for (PHINode &PN : PostLoop->getExitBlock()->phis()) { | |||
435 | for (auto i : seq<int>(0, PN.getNumOperands())) { | |||
436 | // Check incoming block is pre-loop's exiting block. | |||
437 | if (PN.getIncomingBlock(i) == L.getExitingBlock()) { | |||
438 | Value *IncomingValue = PN.getIncomingValue(i); | |||
439 | ||||
440 | // Create LCSSA phi node for incoming value. | |||
441 | PHINode *LCSSAPhi = | |||
442 | Builder.CreatePHI(PN.getType(), 1, PN.getName() + ".lcssa"); | |||
443 | LCSSAPhi->setDebugLoc(PN.getDebugLoc()); | |||
444 | LCSSAPhi->addIncoming(IncomingValue, PN.getIncomingBlock(i)); | |||
445 | ||||
446 | // Replace pre-loop's exiting block by post-loop's preheader. | |||
447 | PN.setIncomingBlock(i, PostLoopPreHeader); | |||
448 | // Replace incoming value by LCSSAPhi. | |||
449 | PN.setIncomingValue(i, LCSSAPhi); | |||
450 | // Add a new incoming value with post-loop's exiting block. | |||
451 | PN.addIncoming(VMap[IncomingValue], PostLoop->getExitingBlock()); | |||
452 | } | |||
453 | } | |||
454 | } | |||
455 | ||||
456 | // Update dominator tree. | |||
457 | DT.changeImmediateDominator(PostLoopPreHeader, L.getExitingBlock()); | |||
458 | DT.changeImmediateDominator(PostLoop->getExitBlock(), PostLoopPreHeader); | |||
459 | ||||
460 | // Invalidate cached SE information. | |||
461 | SE.forgetLoop(&L); | |||
462 | ||||
463 | // Canonicalize loops. | |||
464 | simplifyLoop(&L, &DT, &LI, &SE, nullptr, nullptr, true); | |||
465 | simplifyLoop(PostLoop, &DT, &LI, &SE, nullptr, nullptr, true); | |||
466 | ||||
467 | // Add new post-loop to loop pass manager. | |||
468 | U.addSiblingLoops(PostLoop); | |||
469 | ||||
470 | return true; | |||
471 | } | |||
472 | ||||
473 | PreservedAnalyses LoopBoundSplitPass::run(Loop &L, LoopAnalysisManager &AM, | |||
474 | LoopStandardAnalysisResults &AR, | |||
475 | LPMUpdater &U) { | |||
476 | Function &F = *L.getHeader()->getParent(); | |||
477 | (void)F; | |||
478 | ||||
479 | LLVM_DEBUG(dbgs() << "Spliting bound of loop in " << F.getName() << ": " << Ldo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-bound-split")) { dbgs() << "Spliting bound of loop in " << F.getName() << ": " << L << "\n"; } } while (false) | |||
| ||||
480 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-bound-split")) { dbgs() << "Spliting bound of loop in " << F.getName() << ": " << L << "\n"; } } while (false); | |||
481 | ||||
482 | if (!splitLoopBound(L, AR.DT, AR.LI, AR.SE, U)) | |||
483 | return PreservedAnalyses::all(); | |||
484 | ||||
485 | assert(AR.DT.verify(DominatorTree::VerificationLevel::Fast))(static_cast <bool> (AR.DT.verify(DominatorTree::VerificationLevel ::Fast)) ? void (0) : __assert_fail ("AR.DT.verify(DominatorTree::VerificationLevel::Fast)" , "llvm/lib/Transforms/Scalar/LoopBoundSplit.cpp", 485, __extension__ __PRETTY_FUNCTION__)); | |||
486 | AR.LI.verify(AR.DT); | |||
487 | ||||
488 | return getLoopPassPreservedAnalyses(); | |||
489 | } | |||
490 | ||||
491 | } // end namespace llvm |
1 | //===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines the IRBuilder class, which is used as a convenient way |
10 | // to create LLVM instructions with a consistent and simplified interface. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #ifndef LLVM_IR_IRBUILDER_H |
15 | #define LLVM_IR_IRBUILDER_H |
16 | |
17 | #include "llvm-c/Types.h" |
18 | #include "llvm/ADT/ArrayRef.h" |
19 | #include "llvm/ADT/None.h" |
20 | #include "llvm/ADT/STLExtras.h" |
21 | #include "llvm/ADT/StringRef.h" |
22 | #include "llvm/ADT/Twine.h" |
23 | #include "llvm/IR/BasicBlock.h" |
24 | #include "llvm/IR/Constant.h" |
25 | #include "llvm/IR/ConstantFolder.h" |
26 | #include "llvm/IR/Constants.h" |
27 | #include "llvm/IR/DataLayout.h" |
28 | #include "llvm/IR/DebugLoc.h" |
29 | #include "llvm/IR/DerivedTypes.h" |
30 | #include "llvm/IR/FPEnv.h" |
31 | #include "llvm/IR/Function.h" |
32 | #include "llvm/IR/GlobalVariable.h" |
33 | #include "llvm/IR/InstrTypes.h" |
34 | #include "llvm/IR/Instruction.h" |
35 | #include "llvm/IR/Instructions.h" |
36 | #include "llvm/IR/Intrinsics.h" |
37 | #include "llvm/IR/LLVMContext.h" |
38 | #include "llvm/IR/Module.h" |
39 | #include "llvm/IR/Operator.h" |
40 | #include "llvm/IR/Type.h" |
41 | #include "llvm/IR/Value.h" |
42 | #include "llvm/IR/ValueHandle.h" |
43 | #include "llvm/Support/AtomicOrdering.h" |
44 | #include "llvm/Support/CBindingWrapping.h" |
45 | #include "llvm/Support/Casting.h" |
46 | #include <cassert> |
47 | #include <cstdint> |
48 | #include <functional> |
49 | #include <utility> |
50 | |
51 | namespace llvm { |
52 | |
53 | class APInt; |
54 | class Use; |
55 | |
56 | /// This provides the default implementation of the IRBuilder |
57 | /// 'InsertHelper' method that is called whenever an instruction is created by |
58 | /// IRBuilder and needs to be inserted. |
59 | /// |
60 | /// By default, this inserts the instruction at the insertion point. |
61 | class IRBuilderDefaultInserter { |
62 | public: |
63 | virtual ~IRBuilderDefaultInserter(); |
64 | |
65 | virtual void InsertHelper(Instruction *I, const Twine &Name, |
66 | BasicBlock *BB, |
67 | BasicBlock::iterator InsertPt) const { |
68 | if (BB) BB->getInstList().insert(InsertPt, I); |
69 | I->setName(Name); |
70 | } |
71 | }; |
72 | |
73 | /// Provides an 'InsertHelper' that calls a user-provided callback after |
74 | /// performing the default insertion. |
75 | class IRBuilderCallbackInserter : public IRBuilderDefaultInserter { |
76 | std::function<void(Instruction *)> Callback; |
77 | |
78 | public: |
79 | ~IRBuilderCallbackInserter() override; |
80 | |
81 | IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback) |
82 | : Callback(std::move(Callback)) {} |
83 | |
84 | void InsertHelper(Instruction *I, const Twine &Name, |
85 | BasicBlock *BB, |
86 | BasicBlock::iterator InsertPt) const override { |
87 | IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt); |
88 | Callback(I); |
89 | } |
90 | }; |
91 | |
92 | /// Common base class shared among various IRBuilders. |
93 | class IRBuilderBase { |
94 | /// Pairs of (metadata kind, MDNode *) that should be added to all newly |
95 | /// created instructions, like !dbg metadata. |
96 | SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy; |
97 | |
98 | /// Add or update the an entry (Kind, MD) to MetadataToCopy, if \p MD is not |
99 | /// null. If \p MD is null, remove the entry with \p Kind. |
100 | void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) { |
101 | if (!MD) { |
102 | erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) { |
103 | return KV.first == Kind; |
104 | }); |
105 | return; |
106 | } |
107 | |
108 | for (auto &KV : MetadataToCopy) |
109 | if (KV.first == Kind) { |
110 | KV.second = MD; |
111 | return; |
112 | } |
113 | |
114 | MetadataToCopy.emplace_back(Kind, MD); |
115 | } |
116 | |
117 | protected: |
118 | BasicBlock *BB; |
119 | BasicBlock::iterator InsertPt; |
120 | LLVMContext &Context; |
121 | const IRBuilderFolder &Folder; |
122 | const IRBuilderDefaultInserter &Inserter; |
123 | |
124 | MDNode *DefaultFPMathTag; |
125 | FastMathFlags FMF; |
126 | |
127 | bool IsFPConstrained = false; |
128 | fp::ExceptionBehavior DefaultConstrainedExcept = fp::ebStrict; |
129 | RoundingMode DefaultConstrainedRounding = RoundingMode::Dynamic; |
130 | |
131 | ArrayRef<OperandBundleDef> DefaultOperandBundles; |
132 | |
133 | public: |
134 | IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder, |
135 | const IRBuilderDefaultInserter &Inserter, MDNode *FPMathTag, |
136 | ArrayRef<OperandBundleDef> OpBundles) |
137 | : Context(context), Folder(Folder), Inserter(Inserter), |
138 | DefaultFPMathTag(FPMathTag), DefaultOperandBundles(OpBundles) { |
139 | ClearInsertionPoint(); |
140 | } |
141 | |
142 | /// Insert and return the specified instruction. |
143 | template<typename InstTy> |
144 | InstTy *Insert(InstTy *I, const Twine &Name = "") const { |
145 | Inserter.InsertHelper(I, Name, BB, InsertPt); |
146 | AddMetadataToInst(I); |
147 | return I; |
148 | } |
149 | |
150 | /// No-op overload to handle constants. |
151 | Constant *Insert(Constant *C, const Twine& = "") const { |
152 | return C; |
153 | } |
154 | |
155 | Value *Insert(Value *V, const Twine &Name = "") const { |
156 | if (Instruction *I = dyn_cast<Instruction>(V)) |
157 | return Insert(I, Name); |
158 | assert(isa<Constant>(V))(static_cast <bool> (isa<Constant>(V)) ? void (0) : __assert_fail ("isa<Constant>(V)", "llvm/include/llvm/IR/IRBuilder.h" , 158, __extension__ __PRETTY_FUNCTION__)); |
159 | return V; |
160 | } |
161 | |
162 | //===--------------------------------------------------------------------===// |
163 | // Builder configuration methods |
164 | //===--------------------------------------------------------------------===// |
165 | |
166 | /// Clear the insertion point: created instructions will not be |
167 | /// inserted into a block. |
168 | void ClearInsertionPoint() { |
169 | BB = nullptr; |
170 | InsertPt = BasicBlock::iterator(); |
171 | } |
172 | |
173 | BasicBlock *GetInsertBlock() const { return BB; } |
174 | BasicBlock::iterator GetInsertPoint() const { return InsertPt; } |
175 | LLVMContext &getContext() const { return Context; } |
176 | |
177 | /// This specifies that created instructions should be appended to the |
178 | /// end of the specified block. |
179 | void SetInsertPoint(BasicBlock *TheBB) { |
180 | BB = TheBB; |
181 | InsertPt = BB->end(); |
182 | } |
183 | |
184 | /// This specifies that created instructions should be inserted before |
185 | /// the specified instruction. |
186 | void SetInsertPoint(Instruction *I) { |
187 | BB = I->getParent(); |
188 | InsertPt = I->getIterator(); |
189 | assert(InsertPt != BB->end() && "Can't read debug loc from end()")(static_cast <bool> (InsertPt != BB->end() && "Can't read debug loc from end()") ? void (0) : __assert_fail ("InsertPt != BB->end() && \"Can't read debug loc from end()\"" , "llvm/include/llvm/IR/IRBuilder.h", 189, __extension__ __PRETTY_FUNCTION__ )); |
190 | SetCurrentDebugLocation(I->getDebugLoc()); |
191 | } |
192 | |
193 | /// This specifies that created instructions should be inserted at the |
194 | /// specified point. |
195 | void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) { |
196 | BB = TheBB; |
197 | InsertPt = IP; |
198 | if (IP != TheBB->end()) |
199 | SetCurrentDebugLocation(IP->getDebugLoc()); |
200 | } |
201 | |
202 | /// Set location information used by debugging information. |
203 | void SetCurrentDebugLocation(DebugLoc L) { |
204 | AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode()); |
205 | } |
206 | |
207 | /// Collect metadata with IDs \p MetadataKinds from \p Src which should be |
208 | /// added to all created instructions. Entries present in MedataDataToCopy but |
209 | /// not on \p Src will be dropped from MetadataToCopy. |
210 | void CollectMetadataToCopy(Instruction *Src, |
211 | ArrayRef<unsigned> MetadataKinds) { |
212 | for (unsigned K : MetadataKinds) |
213 | AddOrRemoveMetadataToCopy(K, Src->getMetadata(K)); |
214 | } |
215 | |
216 | /// Get location information used by debugging information. |
217 | DebugLoc getCurrentDebugLocation() const; |
218 | |
219 | /// If this builder has a current debug location, set it on the |
220 | /// specified instruction. |
221 | void SetInstDebugLocation(Instruction *I) const; |
222 | |
223 | /// Add all entries in MetadataToCopy to \p I. |
224 | void AddMetadataToInst(Instruction *I) const { |
225 | for (auto &KV : MetadataToCopy) |
226 | I->setMetadata(KV.first, KV.second); |
227 | } |
228 | |
229 | /// Get the return type of the current function that we're emitting |
230 | /// into. |
231 | Type *getCurrentFunctionReturnType() const; |
232 | |
233 | /// InsertPoint - A saved insertion point. |
234 | class InsertPoint { |
235 | BasicBlock *Block = nullptr; |
236 | BasicBlock::iterator Point; |
237 | |
238 | public: |
239 | /// Creates a new insertion point which doesn't point to anything. |
240 | InsertPoint() = default; |
241 | |
242 | /// Creates a new insertion point at the given location. |
243 | InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint) |
244 | : Block(InsertBlock), Point(InsertPoint) {} |
245 | |
246 | /// Returns true if this insert point is set. |
247 | bool isSet() const { return (Block != nullptr); } |
248 | |
249 | BasicBlock *getBlock() const { return Block; } |
250 | BasicBlock::iterator getPoint() const { return Point; } |
251 | }; |
252 | |
253 | /// Returns the current insert point. |
254 | InsertPoint saveIP() const { |
255 | return InsertPoint(GetInsertBlock(), GetInsertPoint()); |
256 | } |
257 | |
258 | /// Returns the current insert point, clearing it in the process. |
259 | InsertPoint saveAndClearIP() { |
260 | InsertPoint IP(GetInsertBlock(), GetInsertPoint()); |
261 | ClearInsertionPoint(); |
262 | return IP; |
263 | } |
264 | |
265 | /// Sets the current insert point to a previously-saved location. |
266 | void restoreIP(InsertPoint IP) { |
267 | if (IP.isSet()) |
268 | SetInsertPoint(IP.getBlock(), IP.getPoint()); |
269 | else |
270 | ClearInsertionPoint(); |
271 | } |
272 | |
  /// Get the floating point math metadata being used.
  MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }

  /// Get the flags to be applied to created floating point ops
  FastMathFlags getFastMathFlags() const { return FMF; }

  /// Get a mutable reference to the fast-math flags (used e.g. by
  /// FastMathFlagGuard below).
  FastMathFlags &getFastMathFlags() { return FMF; }

  /// Clear the fast-math flags.
  void clearFastMathFlags() { FMF.clear(); }

  /// Set the floating point math metadata to be used.
  void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }

  /// Set the fast-math flags to be used with generated fp-math operators
  void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }

  /// Enable/Disable use of constrained floating point math. When
  /// enabled the CreateF<op>() calls instead create constrained
  /// floating point intrinsic calls. Fast math flags are unaffected
  /// by this setting.
  void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }

  /// Query for the use of constrained floating point math
  bool getIsFPConstrained() { return IsFPConstrained; }

  /// Set the exception handling to be used with constrained floating point
  void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
#ifndef NDEBUG
    // Debug-only sanity check: the behavior must have a known string form.
    Optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(NewExcept);
    assert(ExceptStr.hasValue() && "Garbage strict exception behavior!")(static_cast <bool> (ExceptStr.hasValue() && "Garbage strict exception behavior!" ) ? void (0) : __assert_fail ("ExceptStr.hasValue() && \"Garbage strict exception behavior!\"" , "llvm/include/llvm/IR/IRBuilder.h", 303, __extension__ __PRETTY_FUNCTION__ ));
#endif
    DefaultConstrainedExcept = NewExcept;
  }

  /// Set the rounding mode handling to be used with constrained floating point
  void setDefaultConstrainedRounding(RoundingMode NewRounding) {
#ifndef NDEBUG
    // Debug-only sanity check: the rounding mode must have a string form.
    Optional<StringRef> RoundingStr = convertRoundingModeToStr(NewRounding);
    assert(RoundingStr.hasValue() && "Garbage strict rounding mode!")(static_cast <bool> (RoundingStr.hasValue() && "Garbage strict rounding mode!" ) ? void (0) : __assert_fail ("RoundingStr.hasValue() && \"Garbage strict rounding mode!\"" , "llvm/include/llvm/IR/IRBuilder.h", 312, __extension__ __PRETTY_FUNCTION__ ));
#endif
    DefaultConstrainedRounding = NewRounding;
  }

  /// Get the exception handling used with constrained floating point
  fp::ExceptionBehavior getDefaultConstrainedExcept() {
    return DefaultConstrainedExcept;
  }

  /// Get the rounding mode handling used with constrained floating point
  RoundingMode getDefaultConstrainedRounding() {
    return DefaultConstrainedRounding;
  }

  /// Add the StrictFP attribute to the function containing the current
  /// insertion block, if it is not already present. Requires an insertion
  /// point to be set.
  void setConstrainedFPFunctionAttr() {
    assert(BB && "Must have a basic block to set any function attributes!")(static_cast <bool> (BB && "Must have a basic block to set any function attributes!" ) ? void (0) : __assert_fail ("BB && \"Must have a basic block to set any function attributes!\"" , "llvm/include/llvm/IR/IRBuilder.h", 328, __extension__ __PRETTY_FUNCTION__ ));

    Function *F = BB->getParent();
    if (!F->hasFnAttribute(Attribute::StrictFP)) {
      F->addFnAttr(Attribute::StrictFP);
    }
  }

  /// Mark an individual call site as strict FP.
  void setConstrainedFPCallAttr(CallBase *I) {
    I->addFnAttr(Attribute::StrictFP);
  }

  /// Set the operand bundles to be attached by default to created calls.
  void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
    DefaultOperandBundles = OpBundles;
  }
343 | |
344 | //===--------------------------------------------------------------------===// |
345 | // RAII helpers. |
346 | //===--------------------------------------------------------------------===// |
347 | |
  // RAII object that stores the current insertion point and restores it
  // when the object is destroyed. This includes the debug location.
  class InsertPointGuard {
    IRBuilderBase &Builder;
    // The saved block is held through an AssertingVH value handle rather
    // than a raw pointer.
    AssertingVH<BasicBlock> Block;
    BasicBlock::iterator Point;
    DebugLoc DbgLoc;

  public:
    // Snapshot the builder's insertion point and debug location.
    InsertPointGuard(IRBuilderBase &B)
        : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
          DbgLoc(B.getCurrentDebugLocation()) {}

    InsertPointGuard(const InsertPointGuard &) = delete;
    InsertPointGuard &operator=(const InsertPointGuard &) = delete;

    // Re-establish the saved insertion point and debug location.
    ~InsertPointGuard() {
      Builder.restoreIP(InsertPoint(Block, Point));
      Builder.SetCurrentDebugLocation(DbgLoc);
    }
  };
369 | |
  // RAII object that stores the current fast math settings and restores
  // them when the object is destroyed.
  class FastMathFlagGuard {
    IRBuilderBase &Builder;
    // Saved copies of every FP-related builder setting.
    FastMathFlags FMF;
    MDNode *FPMathTag;
    bool IsFPConstrained;
    fp::ExceptionBehavior DefaultConstrainedExcept;
    RoundingMode DefaultConstrainedRounding;

  public:
    // Snapshot all floating-point state from the builder.
    FastMathFlagGuard(IRBuilderBase &B)
        : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag),
          IsFPConstrained(B.IsFPConstrained),
          DefaultConstrainedExcept(B.DefaultConstrainedExcept),
          DefaultConstrainedRounding(B.DefaultConstrainedRounding) {}

    FastMathFlagGuard(const FastMathFlagGuard &) = delete;
    FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;

    // Write every saved setting back to the builder.
    ~FastMathFlagGuard() {
      Builder.FMF = FMF;
      Builder.DefaultFPMathTag = FPMathTag;
      Builder.IsFPConstrained = IsFPConstrained;
      Builder.DefaultConstrainedExcept = DefaultConstrainedExcept;
      Builder.DefaultConstrainedRounding = DefaultConstrainedRounding;
    }
  };
398 | |
  // RAII object that stores the current default operand bundles and restores
  // them when the object is destroyed.
  class OperandBundlesGuard {
    IRBuilderBase &Builder;
    ArrayRef<OperandBundleDef> DefaultOperandBundles;

  public:
    // Snapshot the builder's default operand bundles.
    OperandBundlesGuard(IRBuilderBase &B)
        : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {}

    OperandBundlesGuard(const OperandBundlesGuard &) = delete;
    OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete;

    // Restore the saved bundles.
    ~OperandBundlesGuard() {
      Builder.DefaultOperandBundles = DefaultOperandBundles;
    }
  };
416 | |
417 | |
418 | //===--------------------------------------------------------------------===// |
419 | // Miscellaneous creation methods. |
420 | //===--------------------------------------------------------------------===// |
421 | |
422 | /// Make a new global variable with initializer type i8* |
423 | /// |
424 | /// Make a new global variable with an initializer that has array of i8 type |
425 | /// filled in with the null terminated string value specified. The new global |
426 | /// variable will be marked mergable with any others of the same contents. If |
427 | /// Name is specified, it is the name of the global variable created. |
428 | /// |
429 | /// If no module is given via \p M, it is take from the insertion point basic |
430 | /// block. |
431 | GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "", |
432 | unsigned AddressSpace = 0, |
433 | Module *M = nullptr); |
434 | |
  /// Get a constant value representing either true or false.
  ConstantInt *getInt1(bool V) {
    return ConstantInt::get(getInt1Ty(), V);
  }

  /// Get the constant value for i1 true.
  ConstantInt *getTrue() {
    return ConstantInt::getTrue(Context);
  }

  /// Get the constant value for i1 false.
  ConstantInt *getFalse() {
    return ConstantInt::getFalse(Context);
  }

  /// Get a constant 8-bit value.
  ConstantInt *getInt8(uint8_t C) {
    return ConstantInt::get(getInt8Ty(), C);
  }

  /// Get a constant 16-bit value.
  ConstantInt *getInt16(uint16_t C) {
    return ConstantInt::get(getInt16Ty(), C);
  }

  /// Get a constant 32-bit value.
  ConstantInt *getInt32(uint32_t C) {
    return ConstantInt::get(getInt32Ty(), C);
  }

  /// Get a constant 64-bit value.
  ConstantInt *getInt64(uint64_t C) {
    return ConstantInt::get(getInt64Ty(), C);
  }

  /// Get a constant N-bit value, zero extended or truncated from
  /// a 64-bit value.
  ConstantInt *getIntN(unsigned N, uint64_t C) {
    return ConstantInt::get(getIntNTy(N), C);
  }

  /// Get a constant integer value whose width and value are taken from \p AI.
  ConstantInt *getInt(const APInt &AI) {
    return ConstantInt::get(Context, AI);
  }
480 | |
481 | //===--------------------------------------------------------------------===// |
482 | // Type creation methods |
483 | //===--------------------------------------------------------------------===// |
484 | |
  /// Fetch the type representing a single bit
  IntegerType *getInt1Ty() {
    return Type::getInt1Ty(Context);
  }

  /// Fetch the type representing an 8-bit integer.
  IntegerType *getInt8Ty() {
    return Type::getInt8Ty(Context);
  }

  /// Fetch the type representing a 16-bit integer.
  IntegerType *getInt16Ty() {
    return Type::getInt16Ty(Context);
  }

  /// Fetch the type representing a 32-bit integer.
  IntegerType *getInt32Ty() {
    return Type::getInt32Ty(Context);
  }

  /// Fetch the type representing a 64-bit integer.
  IntegerType *getInt64Ty() {
    return Type::getInt64Ty(Context);
  }

  /// Fetch the type representing a 128-bit integer.
  IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }

  /// Fetch the type representing an N-bit integer.
  IntegerType *getIntNTy(unsigned N) {
    return Type::getIntNTy(Context, N);
  }

  /// Fetch the type representing a 16-bit floating point value.
  Type *getHalfTy() {
    return Type::getHalfTy(Context);
  }

  /// Fetch the type representing a 16-bit brain floating point value.
  Type *getBFloatTy() {
    return Type::getBFloatTy(Context);
  }

  /// Fetch the type representing a 32-bit floating point value.
  Type *getFloatTy() {
    return Type::getFloatTy(Context);
  }

  /// Fetch the type representing a 64-bit floating point value.
  Type *getDoubleTy() {
    return Type::getDoubleTy(Context);
  }

  /// Fetch the type representing void.
  Type *getVoidTy() {
    return Type::getVoidTy(Context);
  }

  /// Fetch the type representing a pointer to an 8-bit integer value.
  PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
    return Type::getInt8PtrTy(Context, AddrSpace);
  }

  /// Fetch the type representing a pointer-sized integer, as defined by the
  /// given data layout for the given address space.
  IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
    return DL.getIntPtrType(Context, AddrSpace);
  }
552 | |
553 | //===--------------------------------------------------------------------===// |
554 | // Intrinsic creation methods |
555 | //===--------------------------------------------------------------------===// |
556 | |
557 | /// Create and insert a memset to the specified pointer and the |
558 | /// specified value. |
559 | /// |
560 | /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is |
561 | /// specified, it will be added to the instruction. Likewise with alias.scope |
562 | /// and noalias tags. |
563 | CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, |
564 | MaybeAlign Align, bool isVolatile = false, |
565 | MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr, |
566 | MDNode *NoAliasTag = nullptr) { |
567 | return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile, |
568 | TBAATag, ScopeTag, NoAliasTag); |
569 | } |
570 | |
571 | CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align, |
572 | bool isVolatile = false, MDNode *TBAATag = nullptr, |
573 | MDNode *ScopeTag = nullptr, |
574 | MDNode *NoAliasTag = nullptr); |
575 | |
576 | /// Create and insert an element unordered-atomic memset of the region of |
577 | /// memory starting at the given pointer to the given value. |
578 | /// |
579 | /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is |
580 | /// specified, it will be added to the instruction. Likewise with alias.scope |
581 | /// and noalias tags. |
582 | CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val, |
583 | uint64_t Size, Align Alignment, |
584 | uint32_t ElementSize, |
585 | MDNode *TBAATag = nullptr, |
586 | MDNode *ScopeTag = nullptr, |
587 | MDNode *NoAliasTag = nullptr) { |
588 | return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size), |
589 | Align(Alignment), ElementSize, |
590 | TBAATag, ScopeTag, NoAliasTag); |
591 | } |
592 | |
593 | CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val, |
594 | Value *Size, Align Alignment, |
595 | uint32_t ElementSize, |
596 | MDNode *TBAATag = nullptr, |
597 | MDNode *ScopeTag = nullptr, |
598 | MDNode *NoAliasTag = nullptr); |
599 | |
600 | /// Create and insert a memcpy between the specified pointers. |
601 | /// |
602 | /// If the pointers aren't i8*, they will be converted. If a TBAA tag is |
603 | /// specified, it will be added to the instruction. Likewise with alias.scope |
604 | /// and noalias tags. |
605 | CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, |
606 | MaybeAlign SrcAlign, uint64_t Size, |
607 | bool isVolatile = false, MDNode *TBAATag = nullptr, |
608 | MDNode *TBAAStructTag = nullptr, |
609 | MDNode *ScopeTag = nullptr, |
610 | MDNode *NoAliasTag = nullptr) { |
611 | return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size), |
612 | isVolatile, TBAATag, TBAAStructTag, ScopeTag, |
613 | NoAliasTag); |
614 | } |
615 | |
616 | CallInst *CreateMemTransferInst( |
617 | Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src, |
618 | MaybeAlign SrcAlign, Value *Size, bool isVolatile = false, |
619 | MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr, |
620 | MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr); |
621 | |
  /// Create and insert a memcpy between the specified pointers with a
  /// Value-typed size; thin wrapper that fixes the intrinsic ID to memcpy.
  CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
                         MaybeAlign SrcAlign, Value *Size,
                         bool isVolatile = false, MDNode *TBAATag = nullptr,
                         MDNode *TBAAStructTag = nullptr,
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr) {
    return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src,
                                 SrcAlign, Size, isVolatile, TBAATag,
                                 TBAAStructTag, ScopeTag, NoAliasTag);
  }
632 | |
633 | CallInst * |
634 | CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src, |
635 | MaybeAlign SrcAlign, Value *Size, bool IsVolatile = false, |
636 | MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr, |
637 | MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr); |
638 | |
639 | /// Create and insert an element unordered-atomic memcpy between the |
640 | /// specified pointers. |
641 | /// |
642 | /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively. |
643 | /// |
644 | /// If the pointers aren't i8*, they will be converted. If a TBAA tag is |
645 | /// specified, it will be added to the instruction. Likewise with alias.scope |
646 | /// and noalias tags. |
647 | CallInst *CreateElementUnorderedAtomicMemCpy( |
648 | Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, |
649 | uint32_t ElementSize, MDNode *TBAATag = nullptr, |
650 | MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr, |
651 | MDNode *NoAliasTag = nullptr); |
652 | |
653 | CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src, |
654 | MaybeAlign SrcAlign, uint64_t Size, |
655 | bool isVolatile = false, MDNode *TBAATag = nullptr, |
656 | MDNode *ScopeTag = nullptr, |
657 | MDNode *NoAliasTag = nullptr) { |
658 | return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size), |
659 | isVolatile, TBAATag, ScopeTag, NoAliasTag); |
660 | } |
661 | |
662 | CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src, |
663 | MaybeAlign SrcAlign, Value *Size, |
664 | bool isVolatile = false, MDNode *TBAATag = nullptr, |
665 | MDNode *ScopeTag = nullptr, |
666 | MDNode *NoAliasTag = nullptr); |
667 | |
668 | /// \brief Create and insert an element unordered-atomic memmove between the |
669 | /// specified pointers. |
670 | /// |
671 | /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, |
672 | /// respectively. |
673 | /// |
674 | /// If the pointers aren't i8*, they will be converted. If a TBAA tag is |
675 | /// specified, it will be added to the instruction. Likewise with alias.scope |
676 | /// and noalias tags. |
677 | CallInst *CreateElementUnorderedAtomicMemMove( |
678 | Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, |
679 | uint32_t ElementSize, MDNode *TBAATag = nullptr, |
680 | MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr, |
681 | MDNode *NoAliasTag = nullptr); |
682 | |
683 | /// Create a sequential vector fadd reduction intrinsic of the source vector. |
684 | /// The first parameter is a scalar accumulator value. An unordered reduction |
685 | /// can be created by adding the reassoc fast-math flag to the resulting |
686 | /// sequential reduction. |
687 | CallInst *CreateFAddReduce(Value *Acc, Value *Src); |
688 | |
689 | /// Create a sequential vector fmul reduction intrinsic of the source vector. |
690 | /// The first parameter is a scalar accumulator value. An unordered reduction |
691 | /// can be created by adding the reassoc fast-math flag to the resulting |
692 | /// sequential reduction. |
693 | CallInst *CreateFMulReduce(Value *Acc, Value *Src); |
694 | |
695 | /// Create a vector int add reduction intrinsic of the source vector. |
696 | CallInst *CreateAddReduce(Value *Src); |
697 | |
698 | /// Create a vector int mul reduction intrinsic of the source vector. |
699 | CallInst *CreateMulReduce(Value *Src); |
700 | |
701 | /// Create a vector int AND reduction intrinsic of the source vector. |
702 | CallInst *CreateAndReduce(Value *Src); |
703 | |
704 | /// Create a vector int OR reduction intrinsic of the source vector. |
705 | CallInst *CreateOrReduce(Value *Src); |
706 | |
707 | /// Create a vector int XOR reduction intrinsic of the source vector. |
708 | CallInst *CreateXorReduce(Value *Src); |
709 | |
710 | /// Create a vector integer max reduction intrinsic of the source |
711 | /// vector. |
712 | CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false); |
713 | |
714 | /// Create a vector integer min reduction intrinsic of the source |
715 | /// vector. |
716 | CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false); |
717 | |
718 | /// Create a vector float max reduction intrinsic of the source |
719 | /// vector. |
720 | CallInst *CreateFPMaxReduce(Value *Src); |
721 | |
722 | /// Create a vector float min reduction intrinsic of the source |
723 | /// vector. |
724 | CallInst *CreateFPMinReduce(Value *Src); |
725 | |
726 | /// Create a lifetime.start intrinsic. |
727 | /// |
728 | /// If the pointer isn't i8* it will be converted. |
729 | CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr); |
730 | |
731 | /// Create a lifetime.end intrinsic. |
732 | /// |
733 | /// If the pointer isn't i8* it will be converted. |
734 | CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr); |
735 | |
736 | /// Create a call to invariant.start intrinsic. |
737 | /// |
738 | /// If the pointer isn't i8* it will be converted. |
739 | CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr); |
740 | |
741 | /// Create a call to Masked Load intrinsic |
742 | CallInst *CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, |
743 | Value *PassThru = nullptr, const Twine &Name = ""); |
744 | |
745 | /// Create a call to Masked Store intrinsic |
746 | CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, |
747 | Value *Mask); |
748 | |
749 | /// Create a call to Masked Gather intrinsic |
750 | CallInst *CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment, |
751 | Value *Mask = nullptr, Value *PassThru = nullptr, |
752 | const Twine &Name = ""); |
753 | |
754 | /// Create a call to Masked Scatter intrinsic |
755 | CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment, |
756 | Value *Mask = nullptr); |
757 | |
758 | /// Create an assume intrinsic call that allows the optimizer to |
759 | /// assume that the provided condition will be true. |
760 | /// |
761 | /// The optional argument \p OpBundles specifies operand bundles that are |
762 | /// added to the call instruction. |
763 | CallInst *CreateAssumption(Value *Cond, |
764 | ArrayRef<OperandBundleDef> OpBundles = llvm::None); |
765 | |
766 | /// Create a llvm.experimental.noalias.scope.decl intrinsic call. |
767 | Instruction *CreateNoAliasScopeDeclaration(Value *Scope); |
768 | Instruction *CreateNoAliasScopeDeclaration(MDNode *ScopeTag) { |
769 | return CreateNoAliasScopeDeclaration( |
770 | MetadataAsValue::get(Context, ScopeTag)); |
771 | } |
772 | |
773 | /// Create a call to the experimental.gc.statepoint intrinsic to |
774 | /// start a new statepoint sequence. |
775 | CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes, |
776 | FunctionCallee ActualCallee, |
777 | ArrayRef<Value *> CallArgs, |
778 | Optional<ArrayRef<Value *>> DeoptArgs, |
779 | ArrayRef<Value *> GCArgs, |
780 | const Twine &Name = ""); |
781 | |
782 | /// Create a call to the experimental.gc.statepoint intrinsic to |
783 | /// start a new statepoint sequence. |
784 | CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes, |
785 | FunctionCallee ActualCallee, uint32_t Flags, |
786 | ArrayRef<Value *> CallArgs, |
787 | Optional<ArrayRef<Use>> TransitionArgs, |
788 | Optional<ArrayRef<Use>> DeoptArgs, |
789 | ArrayRef<Value *> GCArgs, |
790 | const Twine &Name = ""); |
791 | |
  /// Convenience function for the common case when CallArgs are filled
793 | /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be |
794 | /// .get()'ed to get the Value pointer. |
795 | CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes, |
796 | FunctionCallee ActualCallee, |
797 | ArrayRef<Use> CallArgs, |
798 | Optional<ArrayRef<Value *>> DeoptArgs, |
799 | ArrayRef<Value *> GCArgs, |
800 | const Twine &Name = ""); |
801 | |
802 | /// Create an invoke to the experimental.gc.statepoint intrinsic to |
803 | /// start a new statepoint sequence. |
804 | InvokeInst * |
805 | CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes, |
806 | FunctionCallee ActualInvokee, BasicBlock *NormalDest, |
807 | BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs, |
808 | Optional<ArrayRef<Value *>> DeoptArgs, |
809 | ArrayRef<Value *> GCArgs, const Twine &Name = ""); |
810 | |
811 | /// Create an invoke to the experimental.gc.statepoint intrinsic to |
812 | /// start a new statepoint sequence. |
813 | InvokeInst *CreateGCStatepointInvoke( |
814 | uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee, |
815 | BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags, |
816 | ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs, |
817 | Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs, |
818 | const Twine &Name = ""); |
819 | |
820 | // Convenience function for the common case when CallArgs are filled in using |
821 | // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to |
822 | // get the Value *. |
823 | InvokeInst * |
824 | CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes, |
825 | FunctionCallee ActualInvokee, BasicBlock *NormalDest, |
826 | BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs, |
827 | Optional<ArrayRef<Value *>> DeoptArgs, |
828 | ArrayRef<Value *> GCArgs, const Twine &Name = ""); |
829 | |
830 | /// Create a call to the experimental.gc.result intrinsic to extract |
831 | /// the result from a call wrapped in a statepoint. |
832 | CallInst *CreateGCResult(Instruction *Statepoint, |
833 | Type *ResultType, |
834 | const Twine &Name = ""); |
835 | |
836 | /// Create a call to the experimental.gc.relocate intrinsics to |
837 | /// project the relocated value of one pointer from the statepoint. |
838 | CallInst *CreateGCRelocate(Instruction *Statepoint, |
839 | int BaseOffset, |
840 | int DerivedOffset, |
841 | Type *ResultType, |
842 | const Twine &Name = ""); |
843 | |
844 | /// Create a call to the experimental.gc.pointer.base intrinsic to get the |
845 | /// base pointer for the specified derived pointer. |
846 | CallInst *CreateGCGetPointerBase(Value *DerivedPtr, const Twine &Name = ""); |
847 | |
848 | /// Create a call to the experimental.gc.get.pointer.offset intrinsic to get |
849 | /// the offset of the specified derived pointer from its base. |
850 | CallInst *CreateGCGetPointerOffset(Value *DerivedPtr, const Twine &Name = ""); |
851 | |
852 | /// Create a call to llvm.vscale, multiplied by \p Scaling. The type of VScale |
853 | /// will be the same type as that of \p Scaling. |
854 | Value *CreateVScale(Constant *Scaling, const Twine &Name = ""); |
855 | |
856 | /// Creates a vector of type \p DstType with the linear sequence <0, 1, ...> |
857 | Value *CreateStepVector(Type *DstType, const Twine &Name = ""); |
858 | |
859 | /// Create a call to intrinsic \p ID with 1 operand which is mangled on its |
860 | /// type. |
861 | CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, |
862 | Instruction *FMFSource = nullptr, |
863 | const Twine &Name = ""); |
864 | |
865 | /// Create a call to intrinsic \p ID with 2 operands which is mangled on the |
866 | /// first type. |
867 | CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, |
868 | Instruction *FMFSource = nullptr, |
869 | const Twine &Name = ""); |
870 | |
871 | /// Create a call to intrinsic \p ID with \p args, mangled using \p Types. If |
872 | /// \p FMFSource is provided, copy fast-math-flags from that instruction to |
873 | /// the intrinsic. |
874 | CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types, |
875 | ArrayRef<Value *> Args, |
876 | Instruction *FMFSource = nullptr, |
877 | const Twine &Name = ""); |
878 | |
  /// Create call to the minnum intrinsic.
  /// Convenience wrapper over CreateBinaryIntrinsic with no FMF source.
  CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name);
  }

  /// Create call to the maxnum intrinsic.
  CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name);
  }

  /// Create call to the minimum intrinsic.
  CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
  }

  /// Create call to the maximum intrinsic.
  CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
  }

  /// Create a call to the arithmetic_fence intrinsic.
  CallInst *CreateArithmeticFence(Value *Val, Type *DstType,
                                  const Twine &Name = "") {
    return CreateIntrinsic(Intrinsic::arithmetic_fence, DstType, Val, nullptr,
                           Name);
  }
905 | |
906 | /// Create a call to the experimental.vector.extract intrinsic. |
907 | CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx, |
908 | const Twine &Name = "") { |
909 | return CreateIntrinsic(Intrinsic::experimental_vector_extract, |
910 | {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr, |
911 | Name); |
912 | } |
913 | |
914 | /// Create a call to the experimental.vector.insert intrinsic. |
915 | CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec, |
916 | Value *Idx, const Twine &Name = "") { |
917 | return CreateIntrinsic(Intrinsic::experimental_vector_insert, |
918 | {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx}, |
919 | nullptr, Name); |
920 | } |
921 | |
922 | private: |
923 | /// Create a call to a masked intrinsic with given Id. |
924 | CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops, |
925 | ArrayRef<Type *> OverloadedTypes, |
926 | const Twine &Name = ""); |
927 | |
928 | Value *getCastedInt8PtrValue(Value *Ptr); |
929 | |
930 | //===--------------------------------------------------------------------===// |
931 | // Instruction creation methods: Terminators |
932 | //===--------------------------------------------------------------------===// |
933 | |
934 | private: |
935 | /// Helper to add branch weight and unpredictable metadata onto an |
936 | /// instruction. |
937 | /// \returns The annotated instruction. |
938 | template <typename InstTy> |
939 | InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) { |
940 | if (Weights) |
941 | I->setMetadata(LLVMContext::MD_prof, Weights); |
942 | if (Unpredictable) |
943 | I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable); |
944 | return I; |
945 | } |
946 | |
947 | public: |
948 | /// Create a 'ret void' instruction. |
949 | ReturnInst *CreateRetVoid() { |
950 | return Insert(ReturnInst::Create(Context)); |
951 | } |
952 | |
953 | /// Create a 'ret <val>' instruction. |
954 | ReturnInst *CreateRet(Value *V) { |
955 | return Insert(ReturnInst::Create(Context, V)); |
956 | } |
957 | |
958 | /// Create a sequence of N insertvalue instructions, |
959 | /// with one Value from the retVals array each, that build a aggregate |
960 | /// return value one value at a time, and a ret instruction to return |
961 | /// the resulting aggregate value. |
962 | /// |
963 | /// This is a convenience function for code that uses aggregate return values |
964 | /// as a vehicle for having multiple return values. |
965 | ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) { |
966 | Value *V = UndefValue::get(getCurrentFunctionReturnType()); |
967 | for (unsigned i = 0; i != N; ++i) |
968 | V = CreateInsertValue(V, retVals[i], i, "mrv"); |
969 | return Insert(ReturnInst::Create(Context, V)); |
970 | } |
971 | |
972 | /// Create an unconditional 'br label X' instruction. |
973 | BranchInst *CreateBr(BasicBlock *Dest) { |
974 | return Insert(BranchInst::Create(Dest)); |
975 | } |
976 | |
  /// Create a conditional 'br Cond, TrueDest, FalseDest'
  /// instruction, optionally annotated with branch-weight and
  /// "unpredictable" metadata.
  BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
                           MDNode *BranchWeights = nullptr,
                           MDNode *Unpredictable = nullptr) {
    return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
                                    BranchWeights, Unpredictable));
  }

  /// Create a conditional 'br Cond, TrueDest, FalseDest'
  /// instruction. Copy branch meta data if available.
  BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
                           Instruction *MDSrc) {
    BranchInst *Br = BranchInst::Create(True, False, Cond);
    if (MDSrc) {
      // Only these four metadata kinds are meaningful on a branch; anything
      // else attached to MDSrc is deliberately not copied.
      unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
                        LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
      Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4));
    }
    return Insert(Br);
  }
998 | |
  /// Create a switch instruction with the specified value, default dest,
  /// and with a hint for the number of cases that will be added (for efficient
  /// allocation). Branch-weight / "unpredictable" metadata is attached when
  /// provided.
  SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
                           MDNode *BranchWeights = nullptr,
                           MDNode *Unpredictable = nullptr) {
    return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
                                    BranchWeights, Unpredictable));
  }

  /// Create an indirect branch instruction with the specified address
  /// operand, with an optional hint for the number of destinations that will be
  /// added (for efficient allocation).
  IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
    return Insert(IndirectBrInst::Create(Addr, NumDests));
  }
1015 | |
  /// Create an invoke instruction with explicit callee type and operand
  /// bundles. When the builder is in constrained-FP mode, the strictfp call
  /// attribute is added to the new invoke.
  InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
                           BasicBlock *NormalDest, BasicBlock *UnwindDest,
                           ArrayRef<Value *> Args,
                           ArrayRef<OperandBundleDef> OpBundles,
                           const Twine &Name = "") {
    InvokeInst *II =
        InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles);
    if (IsFPConstrained)
      setConstrainedFPCallAttr(II);
    return Insert(II, Name);
  }
  /// As above, without operand bundles.
  InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
                           BasicBlock *NormalDest, BasicBlock *UnwindDest,
                           ArrayRef<Value *> Args = None,
                           const Twine &Name = "") {
    InvokeInst *II =
        InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args);
    if (IsFPConstrained)
      setConstrainedFPCallAttr(II);
    return Insert(II, Name);
  }

  /// Convenience overload: derive callee type and value from a
  /// FunctionCallee, with operand bundles.
  InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest, ArrayRef<Value *> Args,
                           ArrayRef<OperandBundleDef> OpBundles,
                           const Twine &Name = "") {
    return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
                        NormalDest, UnwindDest, Args, OpBundles, Name);
  }

  /// Convenience overload: derive callee type and value from a
  /// FunctionCallee, without operand bundles.
  InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest,
                           ArrayRef<Value *> Args = None,
                           const Twine &Name = "") {
    return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
                        NormalDest, UnwindDest, Args, Name);
  }
1054 | |
  /// \brief Create a callbr instruction with an explicit callee type,
  /// default destination and list of indirect destinations.
  CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
                           BasicBlock *DefaultDest,
                           ArrayRef<BasicBlock *> IndirectDests,
                           ArrayRef<Value *> Args = None,
                           const Twine &Name = "") {
    return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests,
                                     Args), Name);
  }
  /// As above, additionally attaching the given operand bundles.
  CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
                           BasicBlock *DefaultDest,
                           ArrayRef<BasicBlock *> IndirectDests,
                           ArrayRef<Value *> Args,
                           ArrayRef<OperandBundleDef> OpBundles,
                           const Twine &Name = "") {
    return Insert(
        CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
                           OpBundles), Name);
  }

  /// Convenience overload: derive callee type and value from a
  /// FunctionCallee.
  CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
                           ArrayRef<BasicBlock *> IndirectDests,
                           ArrayRef<Value *> Args = None,
                           const Twine &Name = "") {
    return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
                        DefaultDest, IndirectDests, Args, Name);
  }
1082 | CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest, |
1083 | ArrayRef<BasicBlock *> IndirectDests, |
1084 | ArrayRef<Value *> Args, |
1085 | ArrayRef<OperandBundleDef> OpBundles, |
1086 | const Twine &Name = "") { |
1087 | return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(), |
1088 | DefaultDest, IndirectDests, Args, Name); |
1089 | } |
1090 | |
  /// Create a 'resume' instruction that re-raises the exception \p Exn.
  ResumeInst *CreateResume(Value *Exn) {
    return Insert(ResumeInst::Create(Exn));
  }

  /// Create a 'cleanupret' from \p CleanupPad, unwinding to \p UnwindBB
  /// (or to the caller when null).
  CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
                                      BasicBlock *UnwindBB = nullptr) {
    return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
  }

  /// Create a 'catchswitch' within \p ParentPad, reserving space for
  /// \p NumHandlers handlers.
  CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
                                     unsigned NumHandlers,
                                     const Twine &Name = "") {
    return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
                  Name);
  }

  /// Create a 'catchpad' within \p ParentPad with the given arguments.
  CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
                               const Twine &Name = "") {
    return Insert(CatchPadInst::Create(ParentPad, Args), Name);
  }

  /// Create a 'cleanuppad' within \p ParentPad with the given arguments.
  CleanupPadInst *CreateCleanupPad(Value *ParentPad,
                                   ArrayRef<Value *> Args = None,
                                   const Twine &Name = "") {
    return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
  }

  /// Create a 'catchret' from \p CatchPad continuing at \p BB.
  CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
    return Insert(CatchReturnInst::Create(CatchPad, BB));
  }

  /// Create an 'unreachable' instruction.
  UnreachableInst *CreateUnreachable() {
    return Insert(new UnreachableInst(Context));
  }
1125 | |
1126 | //===--------------------------------------------------------------------===// |
1127 | // Instruction creation methods: Binary Operators |
1128 | //===--------------------------------------------------------------------===// |
1129 | private: |
  /// Create and insert a binary operator with opcode \p Opc, then set the
  /// requested no-unsigned-wrap / no-signed-wrap flags on it.
  BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
                                          Value *LHS, Value *RHS,
                                          const Twine &Name,
                                          bool HasNUW, bool HasNSW) {
    BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
    if (HasNUW) BO->setHasNoUnsignedWrap();
    if (HasNSW) BO->setHasNoSignedWrap();
    return BO;
  }
1139 | |
  /// Apply fpmath metadata (explicit \p FPMD, or the builder's default tag
  /// when \p FPMD is null) and fast-math flags \p FMF to \p I, returning it.
  Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
                          FastMathFlags FMF) const {
    if (!FPMD)
      FPMD = DefaultFPMathTag;
    if (FPMD)
      I->setMetadata(LLVMContext::MD_fpmath, FPMD);
    I->setFastMathFlags(FMF);
    return I;
  }
1149 | |
1150 | Value *foldConstant(Instruction::BinaryOps Opc, Value *L, |
1151 | Value *R, const Twine &Name) const { |
1152 | auto *LC = dyn_cast<Constant>(L); |
1153 | auto *RC = dyn_cast<Constant>(R); |
1154 | return (LC && RC) ? Insert(Folder.CreateBinOp(Opc, LC, RC), Name) : nullptr; |
1155 | } |
1156 | |
1157 | Value *getConstrainedFPRounding(Optional<RoundingMode> Rounding) { |
1158 | RoundingMode UseRounding = DefaultConstrainedRounding; |
1159 | |
1160 | if (Rounding.hasValue()) |
1161 | UseRounding = Rounding.getValue(); |
1162 | |
1163 | Optional<StringRef> RoundingStr = convertRoundingModeToStr(UseRounding); |
1164 | assert(RoundingStr.hasValue() && "Garbage strict rounding mode!")(static_cast <bool> (RoundingStr.hasValue() && "Garbage strict rounding mode!" ) ? void (0) : __assert_fail ("RoundingStr.hasValue() && \"Garbage strict rounding mode!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1164, __extension__ __PRETTY_FUNCTION__ )); |
1165 | auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue()); |
1166 | |
1167 | return MetadataAsValue::get(Context, RoundingMDS); |
1168 | } |
1169 | |
1170 | Value *getConstrainedFPExcept(Optional<fp::ExceptionBehavior> Except) { |
1171 | fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept; |
1172 | |
1173 | if (Except.hasValue()) |
1174 | UseExcept = Except.getValue(); |
1175 | |
1176 | Optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(UseExcept); |
1177 | assert(ExceptStr.hasValue() && "Garbage strict exception behavior!")(static_cast <bool> (ExceptStr.hasValue() && "Garbage strict exception behavior!" ) ? void (0) : __assert_fail ("ExceptStr.hasValue() && \"Garbage strict exception behavior!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1177, __extension__ __PRETTY_FUNCTION__ )); |
1178 | auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue()); |
1179 | |
1180 | return MetadataAsValue::get(Context, ExceptMDS); |
1181 | } |
1182 | |
1183 | Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) { |
1184 | assert(CmpInst::isFPPredicate(Predicate) &&(static_cast <bool> (CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst ::FCMP_TRUE && "Invalid constrained FP comparison predicate!" ) ? void (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1187, __extension__ __PRETTY_FUNCTION__ )) |
1185 | Predicate != CmpInst::FCMP_FALSE &&(static_cast <bool> (CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst ::FCMP_TRUE && "Invalid constrained FP comparison predicate!" ) ? void (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1187, __extension__ __PRETTY_FUNCTION__ )) |
1186 | Predicate != CmpInst::FCMP_TRUE &&(static_cast <bool> (CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst ::FCMP_TRUE && "Invalid constrained FP comparison predicate!" ) ? void (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1187, __extension__ __PRETTY_FUNCTION__ )) |
1187 | "Invalid constrained FP comparison predicate!")(static_cast <bool> (CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst ::FCMP_TRUE && "Invalid constrained FP comparison predicate!" ) ? void (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1187, __extension__ __PRETTY_FUNCTION__ )); |
1188 | |
1189 | StringRef PredicateStr = CmpInst::getPredicateName(Predicate); |
1190 | auto *PredicateMDS = MDString::get(Context, PredicateStr); |
1191 | |
1192 | return MetadataAsValue::get(Context, PredicateMDS); |
1193 | } |
1194 | |
1195 | public: |
  /// Create an integer add; constant operands are folded by the folder.
  Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (auto *V = Folder.FoldAdd(LHS, RHS, HasNUW, HasNSW))
      return V;
    return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
                                   HasNUW, HasNSW);
  }

  /// Create an add with the no-signed-wrap flag set.
  Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateAdd(LHS, RHS, Name, false, true);
  }

  /// Create an add with the no-unsigned-wrap flag set.
  Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateAdd(LHS, RHS, Name, true, false);
  }
1211 | |
  /// Create an integer sub; constant operands are folded by the folder.
  Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (auto *LC = dyn_cast<Constant>(LHS))
      if (auto *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
    return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
                                   HasNUW, HasNSW);
  }

  /// Create a sub with the no-signed-wrap flag set.
  Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateSub(LHS, RHS, Name, false, true);
  }

  /// Create a sub with the no-unsigned-wrap flag set.
  Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateSub(LHS, RHS, Name, true, false);
  }

  /// Create an integer mul; constant operands are folded by the folder.
  Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (auto *LC = dyn_cast<Constant>(LHS))
      if (auto *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
    return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
                                   HasNUW, HasNSW);
  }

  /// Create a mul with the no-signed-wrap flag set.
  Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateMul(LHS, RHS, Name, false, true);
  }

  /// Create a mul with the no-unsigned-wrap flag set.
  Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateMul(LHS, RHS, Name, true, false);
  }
1245 | |
1246 | Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "", |
1247 | bool isExact = false) { |
1248 | if (auto *LC = dyn_cast<Constant>(LHS)) |
1249 | if (auto *RC = dyn_cast<Constant>(RHS)) |
1250 | return Insert(Folder.CreateUDiv(LC, RC, isExact), Name); |
1251 | if (!isExact) |
1252 | return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name); |
1253 | return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name); |
1254 | } |
1255 | |
1256 | Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") { |
1257 | return CreateUDiv(LHS, RHS, Name, true); |
1258 | } |
1259 | |
1260 | Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "", |
1261 | bool isExact = false) { |
1262 | if (auto *LC = dyn_cast<Constant>(LHS)) |
1263 | if (auto *RC = dyn_cast<Constant>(RHS)) |
1264 | return Insert(Folder.CreateSDiv(LC, RC, isExact), Name); |
1265 | if (!isExact) |
1266 | return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name); |
1267 | return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name); |
1268 | } |
1269 | |
1270 | Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") { |
1271 | return CreateSDiv(LHS, RHS, Name, true); |
1272 | } |
1273 | |
  /// Create an unsigned remainder; constant operands are folded.
  Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (Value *V = foldConstant(Instruction::URem, LHS, RHS, Name)) return V;
    return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
  }

  /// Create a signed remainder; constant operands are folded.
  Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (Value *V = foldConstant(Instruction::SRem, LHS, RHS, Name)) return V;
    return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
  }
1283 | |
  /// Create a left shift; constant operands are folded by the folder.
  Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (auto *LC = dyn_cast<Constant>(LHS))
      if (auto *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
    return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
                                   HasNUW, HasNSW);
  }

  /// Overload taking the shift amount as an APInt of LHS's type.
  Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
                     HasNUW, HasNSW);
  }

  /// Overload taking the shift amount as a plain integer.
  Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
                     HasNUW, HasNSW);
  }
1304 | |
  /// Create a logical right shift; constant operands are folded, and the
  /// 'exact' flag selects the exact form.
  Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
                    bool isExact = false) {
    if (auto *LC = dyn_cast<Constant>(LHS))
      if (auto *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
    if (!isExact)
      return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
    return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
  }

  /// Overload taking the shift amount as an APInt of LHS's type.
  Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
  }

  /// Overload taking the shift amount as a plain integer.
  Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
  }

  /// Create an arithmetic right shift; constant operands are folded, and
  /// the 'exact' flag selects the exact form.
  Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
                    bool isExact = false) {
    if (auto *LC = dyn_cast<Constant>(LHS))
      if (auto *RC = dyn_cast<Constant>(RHS))
        return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
    if (!isExact)
      return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
    return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
  }

  /// Overload taking the shift amount as an APInt of LHS's type.
  Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
  }

  /// Overload taking the shift amount as a plain integer.
  Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
  }
1344 | |
  /// Create a bitwise and; constant operands are folded by the folder.
  Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (auto *V = Folder.FoldAnd(LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
  }

  /// Overload taking the right operand as an APInt of LHS's type.
  Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
    return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  /// Overload taking the right operand as a plain integer.
  Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
    return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }
1358 | |
1359 | Value *CreateAnd(ArrayRef<Value*> Ops) { |
1360 | assert(!Ops.empty())(static_cast <bool> (!Ops.empty()) ? void (0) : __assert_fail ("!Ops.empty()", "llvm/include/llvm/IR/IRBuilder.h", 1360, __extension__ __PRETTY_FUNCTION__)); |
1361 | Value *Accum = Ops[0]; |
1362 | for (unsigned i = 1; i < Ops.size(); i++) |
1363 | Accum = CreateAnd(Accum, Ops[i]); |
1364 | return Accum; |
1365 | } |
1366 | |
  /// Create a bitwise or; constant operands are folded by the folder.
  Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (auto *V = Folder.FoldOr(LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
  }

  /// Overload taking the right operand as an APInt of LHS's type.
  Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
    return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  /// Overload taking the right operand as a plain integer.
  Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
    return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }
1380 | |
1381 | Value *CreateOr(ArrayRef<Value*> Ops) { |
1382 | assert(!Ops.empty())(static_cast <bool> (!Ops.empty()) ? void (0) : __assert_fail ("!Ops.empty()", "llvm/include/llvm/IR/IRBuilder.h", 1382, __extension__ __PRETTY_FUNCTION__)); |
1383 | Value *Accum = Ops[0]; |
1384 | for (unsigned i = 1; i < Ops.size(); i++) |
1385 | Accum = CreateOr(Accum, Ops[i]); |
1386 | return Accum; |
1387 | } |
1388 | |
  /// Create a bitwise xor; constant operands are folded.
  Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (Value *V = foldConstant(Instruction::Xor, LHS, RHS, Name)) return V;
    return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
  }

  /// Overload taking the right operand as an APInt of LHS's type.
  Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
    return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  /// Overload taking the right operand as a plain integer.
  Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
    return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }
1401 | |
  // Floating-point binary operators. Each creator follows the same pattern:
  // in constrained-FP mode it emits the corresponding
  // experimental.constrained.* intrinsic instead of a plain instruction;
  // otherwise it constant-folds when possible and tags the new instruction
  // with fpmath metadata and fast-math flags. The *FMF variants copy
  // fast-math flags from an existing instruction rather than using the
  // builder's defaults.
  Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
    return Insert(I, Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
                                      L, R, FMFSource, Name);

    if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr,
                                FMFSource->getFastMathFlags());
    return Insert(I, Name);
  }

  Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
    return Insert(I, Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
                                      L, R, FMFSource, Name);

    if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr,
                                FMFSource->getFastMathFlags());
    return Insert(I, Name);
  }

  Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
    return Insert(I, Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
                                      L, R, FMFSource, Name);

    if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr,
                                FMFSource->getFastMathFlags());
    return Insert(I, Name);
  }

  Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
    return Insert(I, Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
                                      L, R, FMFSource, Name);

    if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr,
                                FMFSource->getFastMathFlags());
    return Insert(I, Name);
  }

  Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
    return Insert(I, Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
                                      L, R, FMFSource, Name);

    if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr,
                                FMFSource->getFastMathFlags());
    return Insert(I, Name);
  }
1526 | |
  /// Create a binary operator with an arbitrary opcode; constant operands
  /// are folded, and FP opcodes additionally receive fpmath metadata and
  /// fast-math flags.
  Value *CreateBinOp(Instruction::BinaryOps Opc,
                     Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
    if (Value *V = foldConstant(Opc, LHS, RHS, Name)) return V;
    Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
    if (isa<FPMathOperator>(BinOp))
      setFPAttrs(BinOp, FPMathTag, FMF);
    return Insert(BinOp, Name);
  }
1536 | |
1537 | Value *CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name = "") { |
1538 | assert(Cond2->getType()->isIntOrIntVectorTy(1))(static_cast <bool> (Cond2->getType()->isIntOrIntVectorTy (1)) ? void (0) : __assert_fail ("Cond2->getType()->isIntOrIntVectorTy(1)" , "llvm/include/llvm/IR/IRBuilder.h", 1538, __extension__ __PRETTY_FUNCTION__ )); |
1539 | return CreateSelect(Cond1, Cond2, |
1540 | ConstantInt::getNullValue(Cond2->getType()), Name); |
1541 | } |
1542 | |
1543 | Value *CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name = "") { |
1544 | assert(Cond2->getType()->isIntOrIntVectorTy(1))(static_cast <bool> (Cond2->getType()->isIntOrIntVectorTy (1)) ? void (0) : __assert_fail ("Cond2->getType()->isIntOrIntVectorTy(1)" , "llvm/include/llvm/IR/IRBuilder.h", 1544, __extension__ __PRETTY_FUNCTION__ )); |
1545 | return CreateSelect(Cond1, ConstantInt::getAllOnesValue(Cond2->getType()), |
1546 | Cond2, Name); |
1547 | } |
1548 | |
1549 | // NOTE: this is sequential, non-commutative, ordered reduction! |
1550 | Value *CreateLogicalOr(ArrayRef<Value *> Ops) { |
1551 | assert(!Ops.empty())(static_cast <bool> (!Ops.empty()) ? void (0) : __assert_fail ("!Ops.empty()", "llvm/include/llvm/IR/IRBuilder.h", 1551, __extension__ __PRETTY_FUNCTION__)); |
1552 | Value *Accum = Ops[0]; |
1553 | for (unsigned i = 1; i < Ops.size(); i++) |
1554 | Accum = CreateLogicalOr(Accum, Ops[i]); |
1555 | return Accum; |
1556 | } |
1557 | |
  /// Create a constrained-FP binary intrinsic call (defined out of line).
  CallInst *CreateConstrainedFPBinOp(
      Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
      const Twine &Name = "", MDNode *FPMathTag = nullptr,
      Optional<RoundingMode> Rounding = None,
      Optional<fp::ExceptionBehavior> Except = None);

  /// Create an integer negation (0 - V); a constant operand is folded,
  /// otherwise the requested wrap flags are set on the new instruction.
  Value *CreateNeg(Value *V, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
    BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
    if (HasNUW) BO->setHasNoUnsignedWrap();
    if (HasNSW) BO->setHasNoSignedWrap();
    return BO;
  }

  /// Create a negation with the no-signed-wrap flag set.
  Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
    return CreateNeg(V, Name, false, true);
  }

  /// Create a negation with the no-unsigned-wrap flag set.
  Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
    return CreateNeg(V, Name, true, false);
  }
1581 | |
  /// Create a floating-point negation; a constant operand is folded,
  /// otherwise fpmath metadata and the builder's fast-math flags are applied.
  Value *CreateFNeg(Value *V, const Twine &Name = "",
                    MDNode *FPMathTag = nullptr) {
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateFNeg(VC), Name);
    return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
                  Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateFNeg(VC), Name);
    return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr,
                             FMFSource->getFastMathFlags()),
                  Name);
  }

  /// Create a bitwise not (xor with all-ones); a constant operand is folded.
  Value *CreateNot(Value *V, const Twine &Name = "") {
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateNot(VC), Name);
    return Insert(BinaryOperator::CreateNot(V), Name);
  }
1606 | |
  /// Create a unary operator with an arbitrary opcode; a constant operand is
  /// folded, and FP opcodes receive fpmath metadata and fast-math flags.
  Value *CreateUnOp(Instruction::UnaryOps Opc,
                    Value *V, const Twine &Name = "",
                    MDNode *FPMathTag = nullptr) {
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateUnOp(Opc, VC), Name);
    Instruction *UnOp = UnaryOperator::Create(Opc, V);
    if (isa<FPMathOperator>(UnOp))
      setFPAttrs(UnOp, FPMathTag, FMF);
    return Insert(UnOp, Name);
  }

  /// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
  /// Correct number of operands must be passed accordingly.
  Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                      const Twine &Name = "", MDNode *FPMathTag = nullptr);
1622 | |
1623 | //===--------------------------------------------------------------------===// |
1624 | // Instruction creation methods: Memory Instructions |
1625 | //===--------------------------------------------------------------------===// |
1626 | |
  /// Create an alloca of \p Ty in the given address space, using the
  /// DataLayout's preferred alignment for the type.
  ///
  /// NOTE(review): both overloads dereference BB to reach the DataLayout, so
  /// the builder presumably must have a valid insertion point here — a null
  /// BB (as in the analyzer report against this header) would crash; confirm
  /// callers always set an insertion point first.
  AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
                           Value *ArraySize = nullptr, const Twine &Name = "") {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    Align AllocaAlign = DL.getPrefTypeAlign(Ty);
    return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
  }

  /// As above, using the DataLayout's default alloca address space.
  AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
                           const Twine &Name = "") {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    Align AllocaAlign = DL.getPrefTypeAlign(Ty);
    unsigned AddrSpace = DL.getAllocaAddrSpace();
    return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
  }
1641 | |
  /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
  /// converting the string to 'bool' for the isVolatile parameter.
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
  }

  /// Create a load with no explicit alignment (ABI alignment is used).
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
  }

  /// Create a (possibly volatile) load with no explicit alignment.
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
                       const Twine &Name = "") {
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name);
  }

  /// Create a (possibly volatile) store with no explicit alignment.
  StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
    return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile);
  }
1660 | |
1661 | LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, |
1662 | const char *Name) { |
1663 | return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name); |
1664 | } |
1665 | |
1666 | LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, |
1667 | const Twine &Name = "") { |
1668 | return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name); |
1669 | } |
1670 | |
1671 | LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, |
1672 | bool isVolatile, const Twine &Name = "") { |
1673 | if (!Align) { |
1674 | const DataLayout &DL = BB->getModule()->getDataLayout(); |
1675 | Align = DL.getABITypeAlign(Ty); |
1676 | } |
1677 | return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name); |
1678 | } |
1679 | |
1680 | StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, |
1681 | bool isVolatile = false) { |
1682 | if (!Align) { |
1683 | const DataLayout &DL = BB->getModule()->getDataLayout(); |
1684 | Align = DL.getABITypeAlign(Val->getType()); |
1685 | } |
1686 | return Insert(new StoreInst(Val, Ptr, isVolatile, *Align)); |
1687 | } |
  /// Create a fence instruction with the given atomic ordering and
  /// synchronization scope.
  FenceInst *CreateFence(AtomicOrdering Ordering,
                         SyncScope::ID SSID = SyncScope::System,
                         const Twine &Name = "") {
    return Insert(new FenceInst(Context, Ordering, SSID), Name);
  }
1693 | |
1694 | AtomicCmpXchgInst * |
1695 | CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align, |
1696 | AtomicOrdering SuccessOrdering, |
1697 | AtomicOrdering FailureOrdering, |
1698 | SyncScope::ID SSID = SyncScope::System) { |
1699 | if (!Align) { |
1700 | const DataLayout &DL = BB->getModule()->getDataLayout(); |
1701 | Align = llvm::Align(DL.getTypeStoreSize(New->getType())); |
1702 | } |
1703 | |
1704 | return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, *Align, SuccessOrdering, |
1705 | FailureOrdering, SSID)); |
1706 | } |
1707 | |
1708 | AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, |
1709 | Value *Val, MaybeAlign Align, |
1710 | AtomicOrdering Ordering, |
1711 | SyncScope::ID SSID = SyncScope::System) { |
1712 | if (!Align) { |
1713 | const DataLayout &DL = BB->getModule()->getDataLayout(); |
1714 | Align = llvm::Align(DL.getTypeStoreSize(Val->getType())); |
1715 | } |
1716 | |
1717 | return Insert(new AtomicRMWInst(Op, Ptr, Val, *Align, Ordering, SSID)); |
1718 | } |
1719 | |
  /// Create a getelementptr over \p Ty at \p Ptr with index list \p IdxList,
  /// constant-folding through Folder when possible.
  Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
                   const Twine &Name = "") {
    if (auto *V = Folder.FoldGEP(Ty, Ptr, IdxList, /*IsInBounds=*/false))
      return V;
    return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
  }

  /// As CreateGEP, but the result is marked inbounds.
  Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
                           const Twine &Name = "") {
    if (auto *V = Folder.FoldGEP(Ty, Ptr, IdxList, /*IsInBounds=*/true))
      return V;
    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
  }

  /// Single-index GEP.
  Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
    if (auto *V = Folder.FoldGEP(Ty, Ptr, {Idx}, /*IsInBounds=*/false))
      return V;
    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
  }

  /// Single-index inbounds GEP.
  Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
                           const Twine &Name = "") {
    if (auto *V = Folder.FoldGEP(Ty, Ptr, {Idx}, /*IsInBounds=*/true))
      return V;
    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
  }

  /// GEP with a single constant i32 index.
  Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
                            const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/false))
      return V;

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
  }

  /// Inbounds GEP with a single constant i32 index.
  Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
                                    const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/true))
      return V;

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
  }

  /// GEP with two constant i32 indices.
  Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
                            const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt32Ty(Context), Idx0),
      ConstantInt::get(Type::getInt32Ty(Context), Idx1)
    };

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/false))
      return V;

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
  }

  /// Inbounds GEP with two constant i32 indices.
  Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
                                    unsigned Idx1, const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt32Ty(Context), Idx0),
      ConstantInt::get(Type::getInt32Ty(Context), Idx1)
    };

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/true))
      return V;

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
  }

  /// GEP with a single constant i64 index.
  Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
                            const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/false))
      return V;

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
  }

  /// Inbounds GEP with a single constant i64 index.
  Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
                                    const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/true))
      return V;

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
  }

  /// GEP with two constant i64 indices.
  Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
                            const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt64Ty(Context), Idx0),
      ConstantInt::get(Type::getInt64Ty(Context), Idx1)
    };

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/false))
      return V;

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
  }

  /// Inbounds GEP with two constant i64 indices.
  Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
                                    uint64_t Idx1, const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt64Ty(Context), Idx0),
      ConstantInt::get(Type::getInt64Ty(Context), Idx1)
    };

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/true))
      return V;

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
  }

  /// GEP addressing struct field \p Idx of \p Ty: indices {0, Idx}, inbounds.
  Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
                         const Twine &Name = "") {
    return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
  }
1843 | |
1844 | /// Same as CreateGlobalString, but return a pointer with "i8*" type |
1845 | /// instead of a pointer to array of i8. |
1846 | /// |
1847 | /// If no module is given via \p M, it is take from the insertion point basic |
1848 | /// block. |
1849 | Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "", |
1850 | unsigned AddressSpace = 0, |
1851 | Module *M = nullptr) { |
1852 | GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M); |
1853 | Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0); |
1854 | Constant *Indices[] = {Zero, Zero}; |
1855 | return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV, |
1856 | Indices); |
1857 | } |
1858 | |
1859 | //===--------------------------------------------------------------------===// |
1860 | // Instruction creation methods: Cast/Conversion Operators |
1861 | //===--------------------------------------------------------------------===// |
1862 | |
  /// Truncate integer \p V to the narrower integer type \p DestTy.
  Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
    return CreateCast(Instruction::Trunc, V, DestTy, Name);
  }

  /// Zero-extend integer \p V to the wider integer type \p DestTy.
  Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
    return CreateCast(Instruction::ZExt, V, DestTy, Name);
  }

  /// Sign-extend integer \p V to the wider integer type \p DestTy.
  Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
    return CreateCast(Instruction::SExt, V, DestTy, Name);
  }

  /// Create a ZExt or Trunc from the integer value V to DestTy. Return
  /// the value untouched if the type of V is already DestTy.
  Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
                           const Twine &Name = "") {
    assert(V->getType()->isIntOrIntVectorTy() &&
           DestTy->isIntOrIntVectorTy() &&
           "Can only zero extend/truncate integers!");
    Type *VTy = V->getType();
    if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
      return CreateZExt(V, DestTy, Name);
    if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
      return CreateTrunc(V, DestTy, Name);
    return V;
  }

  /// Create a SExt or Trunc from the integer value V to DestTy. Return
  /// the value untouched if the type of V is already DestTy.
  Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
                           const Twine &Name = "") {
    assert(V->getType()->isIntOrIntVectorTy() &&
           DestTy->isIntOrIntVectorTy() &&
           "Can only sign extend/truncate integers!");
    Type *VTy = V->getType();
    if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
      return CreateSExt(V, DestTy, Name);
    if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
      return CreateTrunc(V, DestTy, Name);
    return V;
  }

  /// FP -> unsigned int; emits the constrained intrinsic in strict-FP mode.
  Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::FPToUI, V, DestTy, Name);
  }

  /// FP -> signed int; emits the constrained intrinsic in strict-FP mode.
  Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::FPToSI, V, DestTy, Name);
  }

  /// Unsigned int -> FP; emits the constrained intrinsic in strict-FP mode.
  Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::UIToFP, V, DestTy, Name);
  }

  /// Signed int -> FP; emits the constrained intrinsic in strict-FP mode.
  Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::SIToFP, V, DestTy, Name);
  }

  /// FP truncation; emits the constrained intrinsic in strict-FP mode.
  Value *CreateFPTrunc(Value *V, Type *DestTy,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(
          Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
          Name);
    return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
  }

  /// FP extension; emits the constrained intrinsic in strict-FP mode.
  Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::FPExt, V, DestTy, Name);
  }

  // Thin wrappers around CreateCast for the remaining cast opcodes.

  Value *CreatePtrToInt(Value *V, Type *DestTy,
                        const Twine &Name = "") {
    return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
  }

  Value *CreateIntToPtr(Value *V, Type *DestTy,
                        const Twine &Name = "") {
    return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
  }

  Value *CreateBitCast(Value *V, Type *DestTy,
                       const Twine &Name = "") {
    return CreateCast(Instruction::BitCast, V, DestTy, Name);
  }

  Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
                             const Twine &Name = "") {
    return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
  }

  /// ZExt-or-bitcast via CastInst/Folder; no-op when the types match.
  Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
                             const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
    return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
  }

  /// SExt-or-bitcast via CastInst/Folder; no-op when the types match.
  Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
                             const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
    return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
  }

  /// Trunc-or-bitcast via CastInst/Folder; no-op when the types match.
  Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
                              const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
    return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
  }
1995 | |
1996 | Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, |
1997 | const Twine &Name = "") { |
1998 | if (V->getType() == DestTy) |
1999 | return V; |
2000 | if (auto *VC = dyn_cast<Constant>(V)) |
2001 | return Insert(Folder.CreateCast(Op, VC, DestTy), Name); |
2002 | return Insert(CastInst::Create(Op, V, DestTy), Name); |
2003 | } |
2004 | |
  /// Pointer cast, delegating to Folder/CastInst::CreatePointerCast; no-op
  /// when the types already match.
  Value *CreatePointerCast(Value *V, Type *DestTy,
                           const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
    return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
  }

  /// Pointer bitcast or address-space cast, delegating to the matching
  /// Folder/CastInst helper; no-op when the types already match.
  Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
                                             const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;

    if (auto *VC = dyn_cast<Constant>(V)) {
      return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
                    Name);
    }

    return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
                  Name);
  }

  /// Integer cast; \p isSigned selects sign- vs zero-extension when widening.
  Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
                       const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
    return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
  }
2036 | |
2037 | Value *CreateBitOrPointerCast(Value *V, Type *DestTy, |
2038 | const Twine &Name = "") { |
2039 | if (V->getType() == DestTy) |
2040 | return V; |
2041 | if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy()) |
2042 | return CreatePtrToInt(V, DestTy, Name); |
2043 | if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy()) |
2044 | return CreateIntToPtr(V, DestTy, Name); |
2045 | |
2046 | return CreateBitCast(V, DestTy, Name); |
2047 | } |
2048 | |
  /// Floating-point cast, delegating to Folder/CastInst::CreateFPCast;
  /// no-op when the types already match.
  Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateFPCast(VC, DestTy), Name);
    return Insert(CastInst::CreateFPCast(V, DestTy), Name);
  }

  /// Create a constrained-FP cast intrinsic call. (Implemented out of line.)
  CallInst *CreateConstrainedFPCast(
      Intrinsic::ID ID, Value *V, Type *DestTy,
      Instruction *FMFSource = nullptr, const Twine &Name = "",
      MDNode *FPMathTag = nullptr,
      Optional<RoundingMode> Rounding = None,
      Optional<fp::ExceptionBehavior> Except = None);

  // Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
  // compile time error, instead of converting the string to bool for the
  // isSigned parameter.
  Value *CreateIntCast(Value *, Type *, const char *) = delete;
2068 | |
2069 | //===--------------------------------------------------------------------===// |
2070 | // Instruction creation methods: Compare Instructions |
2071 | //===--------------------------------------------------------------------===// |
2072 | |
  // Convenience wrappers: one method per integer predicate, all delegating
  // to CreateICmp.

  Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
  }

  Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
  }

  Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
  }

  Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
  }

  Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
  }

  Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
  }

  Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
  }

  Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
  }

  Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
  }

  Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
  }

  // Convenience wrappers: one method per floating-point predicate, all
  // delegating to CreateFCmp (quiet comparisons).

  Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
  }

  Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
  }
2182 | |
2183 | Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, |
2184 | const Twine &Name = "") { |
2185 | if (auto *V = Folder.FoldICmp(P, LHS, RHS)) |
2186 | return V; |
2187 | return Insert(new ICmpInst(P, LHS, RHS), Name); |
2188 | } |
2189 | |
  // Create a quiet floating-point comparison (i.e. one that raises an FP
  // exception only in the case where an input is a signaling NaN).
  // Note that this differs from CreateFCmpS only if IsFPConstrained is true.
  Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
                    const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false);
  }

  /// Dispatch to CreateFCmp or CreateICmp based on the predicate kind.
  Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
                   const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    return CmpInst::isFPPredicate(Pred)
               ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag)
               : CreateICmp(Pred, LHS, RHS, Name);
  }

  // Create a signaling floating-point comparison (i.e. one that raises an FP
  // exception whenever an input is any NaN, signaling or quiet).
  // Note that this differs from CreateFCmp only if IsFPConstrained is true.
  Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
                     const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true);
  }

private:
  // Helper routine to create either a signaling or a quiet FP comparison.
  Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS,
                          const Twine &Name, MDNode *FPMathTag,
                          bool IsSignaling);

public:
  /// Create a constrained-FP compare intrinsic call. (Implemented out of
  /// line.)
  CallInst *CreateConstrainedFPCmp(
      Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
      const Twine &Name = "", Optional<fp::ExceptionBehavior> Except = None);
2223 | |
2224 | //===--------------------------------------------------------------------===// |
2225 | // Instruction creation methods: Other Instructions |
2226 | //===--------------------------------------------------------------------===// |
2227 | |
2228 | PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues, |
2229 | const Twine &Name = "") { |
2230 | PHINode *Phi = PHINode::Create(Ty, NumReservedValues); |
2231 | if (isa<FPMathOperator>(Phi)) |
2232 | setFPAttrs(Phi, nullptr /* MDNode* */, FMF); |
2233 | return Insert(Phi, Name); |
2234 | } |
2235 | |
2236 | CallInst *CreateCall(FunctionType *FTy, Value *Callee, |
2237 | ArrayRef<Value *> Args = None, const Twine &Name = "", |
2238 | MDNode *FPMathTag = nullptr) { |
2239 | CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles); |
2240 | if (IsFPConstrained) |
2241 | setConstrainedFPCallAttr(CI); |
2242 | if (isa<FPMathOperator>(CI)) |
2243 | setFPAttrs(CI, FPMathTag, FMF); |
2244 | return Insert(CI, Name); |
2245 | } |
2246 | |
  /// Create a call with explicit operand bundles, propagating constrained-FP
  /// state and fast-math flags.
  CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> OpBundles,
                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
    if (IsFPConstrained)
      setConstrainedFPCallAttr(CI);
    if (isa<FPMathOperator>(CI))
      setFPAttrs(CI, FPMathTag, FMF);
    return Insert(CI, Name);
  }

  /// Convenience overload taking a FunctionCallee.
  CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = None,
                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
                      FPMathTag);
  }

  /// Convenience overload taking a FunctionCallee and operand bundles.
  CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> OpBundles,
                       const Twine &Name = "", MDNode *FPMathTag = nullptr) {
    return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args,
                      OpBundles, Name, FPMathTag);
  }

  /// Create a constrained-FP call to \p Callee. (Implemented out of line.)
  CallInst *CreateConstrainedFPCall(
      Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
      Optional<RoundingMode> Rounding = None,
      Optional<fp::ExceptionBehavior> Except = None);

  /// Create a select of \p True/\p False on condition \p C. (Implemented out
  /// of line.)
  Value *CreateSelect(Value *C, Value *True, Value *False,
                      const Twine &Name = "", Instruction *MDFrom = nullptr);

  /// Create a va_arg reading a value of type \p Ty from \p List.
  VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
    return Insert(new VAArgInst(List, Ty), Name);
  }
2282 | |
  /// Extract the element at \p Idx from \p Vec, constant-folding when both
  /// operands are constants.
  Value *CreateExtractElement(Value *Vec, Value *Idx,
                              const Twine &Name = "") {
    if (auto *VC = dyn_cast<Constant>(Vec))
      if (auto *IC = dyn_cast<Constant>(Idx))
        return Insert(Folder.CreateExtractElement(VC, IC), Name);
    return Insert(ExtractElementInst::Create(Vec, Idx), Name);
  }

  /// Extract with an immediate index (wrapped as i64).
  Value *CreateExtractElement(Value *Vec, uint64_t Idx,
                              const Twine &Name = "") {
    return CreateExtractElement(Vec, getInt64(Idx), Name);
  }

  /// Insert \p NewElt into a fresh poison vector of type \p VecTy.
  Value *CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx,
                             const Twine &Name = "") {
    return CreateInsertElement(PoisonValue::get(VecTy), NewElt, Idx, Name);
  }

  /// As above, with an immediate index.
  Value *CreateInsertElement(Type *VecTy, Value *NewElt, uint64_t Idx,
                             const Twine &Name = "") {
    return CreateInsertElement(PoisonValue::get(VecTy), NewElt, Idx, Name);
  }

  /// Insert \p NewElt into \p Vec at \p Idx, constant-folding when all
  /// operands are constants.
  Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
                             const Twine &Name = "") {
    if (auto *VC = dyn_cast<Constant>(Vec))
      if (auto *NC = dyn_cast<Constant>(NewElt))
        if (auto *IC = dyn_cast<Constant>(Idx))
          return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
    return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
  }

  /// As above, with an immediate index.
  Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
                             const Twine &Name = "") {
    return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
  }

  /// Shuffle taking a constant \p Mask value; converts it to an integer mask
  /// and forwards to the ArrayRef overload.
  Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
                             const Twine &Name = "") {
    SmallVector<int, 16> IntMask;
    ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask);
    return CreateShuffleVector(V1, V2, IntMask, Name);
  }

  /// See class ShuffleVectorInst for a description of the mask representation.
  Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask,
                             const Twine &Name = "") {
    if (auto *V1C = dyn_cast<Constant>(V1))
      if (auto *V2C = dyn_cast<Constant>(V2))
        return Insert(Folder.CreateShuffleVector(V1C, V2C, Mask), Name);
    return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
  }

  /// Create a unary shuffle. The second vector operand of the IR instruction
  /// is poison.
  Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask,
                             const Twine &Name = "") {
    return CreateShuffleVector(V, PoisonValue::get(V->getType()), Mask, Name);
  }

  /// Extract the aggregate member at index path \p Idxs, constant-folding
  /// when the aggregate is constant.
  Value *CreateExtractValue(Value *Agg,
                            ArrayRef<unsigned> Idxs,
                            const Twine &Name = "") {
    if (auto *AggC = dyn_cast<Constant>(Agg))
      return Insert(Folder.CreateExtractValue(AggC, Idxs), Name);
    return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
  }

  /// Insert \p Val into aggregate \p Agg at index path \p Idxs,
  /// constant-folding when both are constants.
  Value *CreateInsertValue(Value *Agg, Value *Val,
                           ArrayRef<unsigned> Idxs,
                           const Twine &Name = "") {
    if (auto *AggC = dyn_cast<Constant>(Agg))
      if (auto *ValC = dyn_cast<Constant>(Val))
        return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name);
    return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
  }

  /// Create a landingpad of type \p Ty with room for \p NumClauses clauses.
  LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
                                   const Twine &Name = "") {
    return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
  }

  /// Create a freeze of \p V.
  Value *CreateFreeze(Value *V, const Twine &Name = "") {
    return Insert(new FreezeInst(V), Name);
  }
2368 | |
2369 | //===--------------------------------------------------------------------===// |
2370 | // Utility creation methods |
2371 | //===--------------------------------------------------------------------===// |
2372 | |
2373 | /// Return an i1 value testing if \p Arg is null. |
2374 | Value *CreateIsNull(Value *Arg, const Twine &Name = "") { |
2375 | return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()), |
2376 | Name); |
2377 | } |
2378 | |
2379 | /// Return an i1 value testing if \p Arg is not null. |
2380 | Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") { |
2381 | return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()), |
2382 | Name); |
2383 | } |
2384 | |
  /// Return the i64 difference between two pointer values, dividing out
  /// the size of the pointed-to objects.
  ///
  /// This is intended to implement C-style pointer subtraction. As such, the
  /// pointers must be appropriately aligned for their element types and
  /// pointing into the same object.
  Value *CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
                       const Twine &Name = "");

  /// Create a launder.invariant.group intrinsic call. If Ptr type is
  /// different from pointer to i8, it's casted to pointer to i8 in the same
  /// address space before call and casted back to Ptr type after call.
  Value *CreateLaunderInvariantGroup(Value *Ptr);

  /// Create a strip.invariant.group intrinsic call. If Ptr type is
  /// different from pointer to i8, it's casted to pointer to i8 in the same
  /// address space before call and casted back to Ptr type after call.
  Value *CreateStripInvariantGroup(Value *Ptr);

  /// Return a vector value that contains the vector V reversed.
  Value *CreateVectorReverse(Value *V, const Twine &Name = "");

  /// Return a vector splice intrinsic if using scalable vectors, otherwise
  /// return a shufflevector. If the immediate is positive, a vector is
  /// extracted from concat(V1, V2), starting at Imm. If the immediate
  /// is negative, we extract -Imm elements from V1 and the remaining
  /// elements from V2. Imm is a signed integer in the range
  /// -VL <= Imm < VL (where VL is the runtime vector length of the
  /// source/result vector).
  Value *CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                            const Twine &Name = "");

  /// Return a vector value that contains \arg V broadcasted to \p
  /// NumElts elements.
  Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");

  /// Return a vector value that contains \arg V broadcasted to \p
  /// EC elements.
  Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = "");

  /// Return a value that has been extracted from a larger integer type.
  Value *CreateExtractInteger(const DataLayout &DL, Value *From,
                              IntegerType *ExtractedTy, uint64_t Offset,
                              const Twine &Name);

  // Debug-info-preserving access-index helpers. NOTE(review): presumably these
  // lower to the llvm.preserve.*.access.index intrinsics (taking \p DbgInfo
  // metadata along) -- confirm against the out-of-line implementations.
  Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
                                        unsigned Dimension, unsigned LastIndex,
                                        MDNode *DbgInfo);

  Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
                                        MDNode *DbgInfo);

  Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
                                         unsigned Index, unsigned FieldIndex,
                                         MDNode *DbgInfo);

private:
  /// Helper function that creates an assume intrinsic call that
  /// represents an alignment assumption on the provided pointer \p PtrValue
  /// with offset \p OffsetValue and alignment value \p AlignValue.
  CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                            Value *PtrValue, Value *AlignValue,
                                            Value *OffsetValue);

public:
  /// Create an assume intrinsic call that represents an alignment
  /// assumption on the provided pointer.
  ///
  /// An optional offset can be provided, and if it is provided, the offset
  /// must be subtracted from the provided pointer to get the pointer with the
  /// specified alignment.
  CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                      unsigned Alignment,
                                      Value *OffsetValue = nullptr);

  /// Create an assume intrinsic call that represents an alignment
  /// assumption on the provided pointer.
  ///
  /// An optional offset can be provided, and if it is provided, the offset
  /// must be subtracted from the provided pointer to get the pointer with the
  /// specified alignment.
  ///
  /// This overload handles the condition where the Alignment is dependent
  /// on an existing value rather than a static value.
  CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                      Value *Alignment,
                                      Value *OffsetValue = nullptr);
2472 | }; |
2473 | |
/// This provides a uniform API for creating instructions and inserting
/// them into a basic block: either at the end of a BasicBlock, or at a specific
/// iterator location in a block.
///
/// Note that the builder does not expose the full generality of LLVM
/// instructions. For access to extra instruction properties, use the mutators
/// (e.g. setVolatile) on the instructions after they have been
/// created. Convenience state exists to specify fast-math flags and fp-math
/// tags.
///
/// The first template argument specifies a class to use for creating constants.
/// This defaults to creating minimally folded constants. The second template
/// argument allows clients to specify custom insertion hooks that are called on
/// every newly created insertion.
template <typename FolderTy = ConstantFolder,
          typename InserterTy = IRBuilderDefaultInserter>
class IRBuilder : public IRBuilderBase {
private:
  FolderTy Folder;
  InserterTy Inserter;

public:
  // NOTE: every constructor below passes `this->Folder` and `this->Inserter`
  // to the IRBuilderBase constructor *before* those members are initialized.
  // This is only well-defined because the base class is expected to capture
  // references (addresses) to them and not read the objects during
  // construction -- a deliberate idiom here, not a bug.

  /// Construct with an explicit folder and inserter; no insert point is set.
  IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(),
            MDNode *FPMathTag = nullptr,
            ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles),
        Folder(Folder), Inserter(Inserter) {}

  /// Construct with default folder/inserter; no insert point is set.
  explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}

  /// Construct with an explicit folder, inserting at the end of \p TheBB.
  explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
                     MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles), Folder(Folder) {
    SetInsertPoint(TheBB);
  }

  /// Insert at the end of \p TheBB with default folder/inserter.
  explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles) {
    SetInsertPoint(TheBB);
  }

  /// Insert immediately before instruction \p IP.
  explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles) {
    SetInsertPoint(IP);
  }

  /// Insert at iterator \p IP within \p TheBB, with an explicit folder.
  IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
            MDNode *FPMathTag = nullptr,
            ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles), Folder(Folder) {
    SetInsertPoint(TheBB, IP);
  }

  /// Insert at iterator \p IP within \p TheBB.
  IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
            MDNode *FPMathTag = nullptr,
            ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles) {
    SetInsertPoint(TheBB, IP);
  }

  /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
  /// or FastMathFlagGuard instead.
  IRBuilder(const IRBuilder &) = delete;

  InserterTy &getInserter() { return Inserter; }
};
2550 | |
2551 | // Create wrappers for C Binding types (see CBindingWrapping.h). |
2552 | DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)inline IRBuilder<> *unwrap(LLVMBuilderRef P) { return reinterpret_cast <IRBuilder<>*>(P); } inline LLVMBuilderRef wrap(const IRBuilder<> *P) { return reinterpret_cast<LLVMBuilderRef >(const_cast<IRBuilder<>*>(P)); } |
2553 | |
2554 | } // end namespace llvm |
2555 | |
2556 | #endif // LLVM_IR_IRBUILDER_H |
1 | //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file exposes the class definitions of all of the subclasses of the | |||
10 | // Instruction class. This is meant to be an easy way to get access to all | |||
11 | // instruction subclasses. | |||
12 | // | |||
13 | //===----------------------------------------------------------------------===// | |||
14 | ||||
15 | #ifndef LLVM_IR_INSTRUCTIONS_H | |||
16 | #define LLVM_IR_INSTRUCTIONS_H | |||
17 | ||||
18 | #include "llvm/ADT/ArrayRef.h" | |||
19 | #include "llvm/ADT/Bitfields.h" | |||
20 | #include "llvm/ADT/MapVector.h" | |||
21 | #include "llvm/ADT/None.h" | |||
22 | #include "llvm/ADT/STLExtras.h" | |||
23 | #include "llvm/ADT/SmallVector.h" | |||
24 | #include "llvm/ADT/Twine.h" | |||
25 | #include "llvm/ADT/iterator.h" | |||
26 | #include "llvm/ADT/iterator_range.h" | |||
27 | #include "llvm/IR/CFG.h" | |||
28 | #include "llvm/IR/Constant.h" | |||
29 | #include "llvm/IR/DerivedTypes.h" | |||
30 | #include "llvm/IR/InstrTypes.h" | |||
31 | #include "llvm/IR/Instruction.h" | |||
32 | #include "llvm/IR/OperandTraits.h" | |||
33 | #include "llvm/IR/Use.h" | |||
34 | #include "llvm/IR/User.h" | |||
35 | #include "llvm/Support/AtomicOrdering.h" | |||
36 | #include "llvm/Support/ErrorHandling.h" | |||
37 | #include <cassert> | |||
38 | #include <cstddef> | |||
39 | #include <cstdint> | |||
40 | #include <iterator> | |||
41 | ||||
42 | namespace llvm { | |||
43 | ||||
44 | class APFloat; | |||
45 | class APInt; | |||
46 | class BasicBlock; | |||
47 | class ConstantInt; | |||
48 | class DataLayout; | |||
49 | class StringRef; | |||
50 | class Type; | |||
51 | class Value; | |||
52 | ||||
53 | //===----------------------------------------------------------------------===// | |||
54 | // AllocaInst Class | |||
55 | //===----------------------------------------------------------------------===// | |||
56 | ||||
/// An instruction to allocate memory on the stack.
class AllocaInst : public UnaryInstruction {
  /// The type this alloca reserves space for; the array-size operand may
  /// multiply it (see getArraySize / isArrayAllocation).
  Type *AllocatedType;

  // Packed bitfield layout in Instruction's SubclassData:
  // log2(alignment), inalloca flag, swifterror flag.
  using AlignmentField = AlignmentBitfieldElementT<0>;
  using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
  using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
  static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
                                        SwiftErrorField>(),
                "Bitfields must be contiguous");

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AllocaInst *cloneImpl() const;

public:
  // Constructors taking an explicit array size.
  explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                      const Twine &Name, Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
             const Twine &Name, BasicBlock *InsertAtEnd);

  // Constructors allocating a single element.
  AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
             Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace,
             const Twine &Name, BasicBlock *InsertAtEnd);

  // Constructors taking an explicit alignment.
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name = "", Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name, BasicBlock *InsertAtEnd);

  /// Return true if there is an allocation size parameter to the allocation
  /// instruction that is not 1.
  bool isArrayAllocation() const;

  /// Get the number of elements allocated. For a simple allocation of a single
  /// element, this will return a constant 1 value.
  const Value *getArraySize() const { return getOperand(0); }
  Value *getArraySize() { return getOperand(0); }

  /// Overload to return most specific pointer type.
  PointerType *getType() const {
    return cast<PointerType>(Instruction::getType());
  }

  /// Return the address space for the allocation.
  unsigned getAddressSpace() const {
    return getType()->getAddressSpace();
  }

  /// Get allocation size in bits. Returns None if size can't be determined,
  /// e.g. in case of a VLA.
  Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;

  /// Return the type that is being allocated by the instruction.
  Type *getAllocatedType() const { return AllocatedType; }
  /// for use only in special circumstances that need to generically
  /// transform a whole instruction (eg: IR linking and vectorization).
  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction. (Stored as log2 in the bitfield; decoded here.)
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  // FIXME: Remove this one transition to Align is over.
  uint64_t getAlignment() const { return getAlign().value(); }

  /// Return true if this alloca is in the entry block of the function and is a
  /// constant size. If so, the code generator will fold it into the
  /// prolog/epilog code, so it is basically free.
  bool isStaticAlloca() const;

  /// Return true if this alloca is used as an inalloca argument to a call. Such
  /// allocas are never considered static even if they are in the entry block.
  bool isUsedWithInAlloca() const {
    return getSubclassData<UsedWithInAllocaField>();
  }

  /// Specify whether this alloca is used to represent the arguments to a call.
  void setUsedWithInAlloca(bool V) {
    setSubclassData<UsedWithInAllocaField>(V);
  }

  /// Return true if this alloca is used as a swifterror argument to a call.
  bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
  /// Specify whether this alloca is used to represent a swifterror.
  void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Alloca);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};
169 | ||||
170 | //===----------------------------------------------------------------------===// | |||
171 | // LoadInst Class | |||
172 | //===----------------------------------------------------------------------===// | |||
173 | ||||
/// An instruction for reading from memory. This uses the SubclassData field in
/// Value to store whether or not the load is volatile.
class LoadInst : public UnaryInstruction {
  // Packed bitfield layout in Instruction's SubclassData:
  // volatile flag, log2(alignment), atomic ordering.
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  // Internal consistency checking (implemented out of line).
  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LoadInst *cloneImpl() const;

public:
  // Constructors, from simplest (non-volatile, default alignment) through
  // fully-specified atomic loads with an ordering and sync scope.
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order,
           SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order, SyncScope::ID SSID,
           BasicBlock *InsertAtEnd);

  /// Return true if this is a load from a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile load or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return the alignment of the access that is being performed.
  /// FIXME: Remove this function once transition to Align is over.
  /// Use getAlign() instead.
  uint64_t getAlignment() const { return getAlign().value(); }

  /// Return the alignment of the access that is being performed.
  /// (Stored as log2 in the bitfield; decoded here.)
  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this load instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }
  /// Sets the ordering constraint of this load instruction. May not be Release
  /// or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this load instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this load instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this load
  /// instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// A "simple" load is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// An "unordered" load is non-volatile and at most Unordered atomic.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  // The (sole) pointer operand is operand 0.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Load;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this load instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
299 | ||||
300 | //===----------------------------------------------------------------------===// | |||
301 | // StoreInst Class | |||
302 | //===----------------------------------------------------------------------===// | |||
303 | ||||
/// An instruction for storing to memory.
class StoreInst : public Instruction {
  // Packed bitfield layout in Instruction's SubclassData:
  // volatile flag, log2(alignment), atomic ordering.
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  // Internal consistency checking (implemented out of line).
  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  StoreInst *cloneImpl() const;

public:
  // Constructors, from simplest (non-volatile, default alignment) through
  // fully-specified atomic stores with an ordering and sync scope.
  StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            BasicBlock *InsertAtEnd);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
            AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Return true if this is a store to a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile store or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Return the alignment of the access that is being performed
  /// FIXME: Remove this function once transition to Align is over.
  /// Use getAlign() instead.
  uint64_t getAlignment() const { return getAlign().value(); }

  /// Return the alignment of the access that is being performed.
  /// (Stored as log2 in the bitfield; decoded here.)
  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this store instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this store instruction. May not be
  /// Acquire or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this store instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this store instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this
  /// store instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// A "simple" store is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// An "unordered" store is non-volatile and at most Unordered atomic.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  // Operand 0 is the stored value; operand 1 is the destination pointer.
  Value *getValueOperand() { return getOperand(0); }
  const Value *getValueOperand() const { return getOperand(0); }

  Value *getPointerOperand() { return getOperand(1); }
  const Value *getPointerOperand() const { return getOperand(1); }
  static unsigned getPointerOperandIndex() { return 1U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Store;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this store instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
433 | ||||
// StoreInst has exactly two operands (value at index 0, pointer at index 1);
// wire up the fixed-operand traits and generate the transparent accessor
// definitions declared inside the class.
template <>
struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
439 | ||||
440 | //===----------------------------------------------------------------------===// | |||
441 | // FenceInst Class | |||
442 | //===----------------------------------------------------------------------===// | |||
443 | ||||
/// An instruction for ordering other memory operations.
class FenceInst : public Instruction {
  // The AtomicOrdering is packed into the instruction's subclass-data
  // bitfield starting at bit 0 (see getSubclassData/setSubclassData below).
  using OrderingField = AtomicOrderingBitfieldElementT<0>;

  void Init(AtomicOrdering Ordering, SyncScope::ID SSID);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  FenceInst *cloneImpl() const;

public:
  // Ordering may only be Acquire, Release, AcquireRelease, or
  // SequentiallyConsistent.
  FenceInst(LLVMContext &C, AtomicOrdering Ordering,
            SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
            BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S, 0); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Returns the ordering constraint of this fence instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this fence instruction. May only be
  /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this fence instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this fence instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Fence;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this fence instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
511 | ||||
512 | //===----------------------------------------------------------------------===// | |||
513 | // AtomicCmpXchgInst Class | |||
514 | //===----------------------------------------------------------------------===// | |||
515 | ||||
/// An instruction that atomically checks whether a
/// specified value is in a memory location, and, if it is, stores a new value
/// there. The value returned by this instruction is a pair containing the
/// original value as first element, and an i1 indicating success (true) or
/// failure (false) as second element.
///
class AtomicCmpXchgInst : public Instruction {
  void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
            AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
            SyncScope::ID SSID);

  // A 3-bit subclass-data bitfield slice holding an AtomicOrdering value.
  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicCmpXchgInst *cloneImpl() const;

public:
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    Instruction *InsertBefore = nullptr);
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    BasicBlock *InsertAtEnd);

  // allocate space for exactly three operands
  void *operator new(size_t S) { return User::operator new(S, 3); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  // Packed subclass-data layout: volatile bit, weak bit, success ordering,
  // failure ordering, then the log2 alignment. The static_assert below
  // verifies the fields neither overlap nor leave gaps.
  using VolatileField = BoolBitfieldElementT<0>;
  using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
  using SuccessOrderingField =
      AtomicOrderingBitfieldElementT<WeakField::NextBit>;
  using FailureOrderingField =
      AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
  using AlignmentField =
      AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
                              FailureOrderingField, AlignmentField>(),
      "Bitfields must be contiguous");

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  Align getAlign() const {
    // Only log2 of the alignment is stored; reconstitute the Align here.
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a cmpxchg from a volatile memory
  /// location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile cmpxchg.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return true if this cmpxchg may spuriously fail.
  bool isWeak() const { return getSubclassData<WeakField>(); }

  void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  // A success ordering must be a real atomic ordering (neither NotAtomic nor
  // Unordered).
  static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
    return Ordering != AtomicOrdering::NotAtomic &&
           Ordering != AtomicOrdering::Unordered;
  }

  // A failure ordering additionally may not carry release semantics: when
  // the compare fails there is no store, so nothing can be released (see
  // getStrongestFailureOrdering below).
  static bool isValidFailureOrdering(AtomicOrdering Ordering) {
    return Ordering != AtomicOrdering::NotAtomic &&
           Ordering != AtomicOrdering::Unordered &&
           Ordering != AtomicOrdering::AcquireRelease &&
           Ordering != AtomicOrdering::Release;
  }

  /// Returns the success ordering constraint of this cmpxchg instruction.
  AtomicOrdering getSuccessOrdering() const {
    return getSubclassData<SuccessOrderingField>();
  }

  /// Sets the success ordering constraint of this cmpxchg instruction.
  void setSuccessOrdering(AtomicOrdering Ordering) {
    assert(isValidSuccessOrdering(Ordering) &&
           "invalid CmpXchg success ordering");
    setSubclassData<SuccessOrderingField>(Ordering);
  }

  /// Returns the failure ordering constraint of this cmpxchg instruction.
  AtomicOrdering getFailureOrdering() const {
    return getSubclassData<FailureOrderingField>();
  }

  /// Sets the failure ordering constraint of this cmpxchg instruction.
  void setFailureOrdering(AtomicOrdering Ordering) {
    assert(isValidFailureOrdering(Ordering) &&
           "invalid CmpXchg failure ordering");
    setSubclassData<FailureOrderingField>(Ordering);
  }

  /// Returns a single ordering which is at least as strong as both the
  /// success and failure orderings for this cmpxchg.
  AtomicOrdering getMergedOrdering() const {
    // A SequentiallyConsistent failure ordering dominates any success
    // ordering.
    if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
      return AtomicOrdering::SequentiallyConsistent;
    // An Acquire failure ordering must be folded into success orderings that
    // lack acquire semantics (Monotonic -> Acquire, Release ->
    // AcquireRelease).
    if (getFailureOrdering() == AtomicOrdering::Acquire) {
      if (getSuccessOrdering() == AtomicOrdering::Monotonic)
        return AtomicOrdering::Acquire;
      if (getSuccessOrdering() == AtomicOrdering::Release)
        return AtomicOrdering::AcquireRelease;
    }
    // Otherwise the success ordering is already at least as strong as the
    // failure ordering.
    return getSuccessOrdering();
  }

  /// Returns the synchronization scope ID of this cmpxchg instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this cmpxchg instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  // Operand order is (pointer, compare value, new value).
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getCompareOperand() { return getOperand(1); }
  const Value *getCompareOperand() const { return getOperand(1); }

  Value *getNewValOperand() { return getOperand(2); }
  const Value *getNewValOperand() const { return getOperand(2); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the strongest permitted ordering on failure, given the
  /// desired ordering on success.
  ///
  /// If the comparison in a cmpxchg operation fails, there is no atomic store
  /// so release semantics cannot be provided. So this function drops explicit
  /// Release requests from the AtomicOrdering. A SequentiallyConsistent
  /// operation would remain SequentiallyConsistent.
  static AtomicOrdering
  getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
    switch (SuccessOrdering) {
    default:
      llvm_unreachable("invalid cmpxchg success ordering");
    case AtomicOrdering::Release:
    case AtomicOrdering::Monotonic:
      return AtomicOrdering::Monotonic;
    case AtomicOrdering::AcquireRelease:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicCmpXchg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this cmpxchg instruction. Not quite
  /// enough room in SubClassData for everything, so synchronization scope ID
  /// gets its own field.
  SyncScope::ID SSID;
};
711 | ||||
// AtomicCmpXchgInst always has exactly three operands: the pointer, the
// compare value, and the new value.
template <>
struct OperandTraits<AtomicCmpXchgInst> :
  public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
};

// Materializes the operand accessors (op_begin/op_end, getOperand/setOperand,
// getNumOperands) declared in the class by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
718 | ||||
719 | //===----------------------------------------------------------------------===// | |||
720 | // AtomicRMWInst Class | |||
721 | //===----------------------------------------------------------------------===// | |||
722 | ||||
/// an instruction that atomically reads a memory location,
/// combines it with another value, and then stores the result back. Returns
/// the old value.
///
class AtomicRMWInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicRMWInst *cloneImpl() const;

public:
  /// This enumeration lists the possible modifications atomicrmw can make. In
  /// the descriptions, 'p' is the pointer to the instruction's memory location,
  /// 'old' is the initial value of *p, and 'v' is the other value passed to the
  /// instruction. These instructions always return 'old'.
  enum BinOp : unsigned {
    /// *p = v
    Xchg,
    /// *p = old + v
    Add,
    /// *p = old - v
    Sub,
    /// *p = old & v
    And,
    /// *p = ~(old & v)
    Nand,
    /// *p = old | v
    Or,
    /// *p = old ^ v
    Xor,
    /// *p = old >signed v ? old : v
    Max,
    /// *p = old <signed v ? old : v
    Min,
    /// *p = old >unsigned v ? old : v
    UMax,
    /// *p = old <unsigned v ? old : v
    UMin,

    /// *p = old + v
    FAdd,

    /// *p = old - v
    FSub,

    FIRST_BINOP = Xchg,
    LAST_BINOP = FSub,
    BAD_BINOP
  };

private:
  // A 3-bit subclass-data bitfield slice holding an AtomicOrdering value.
  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

  // A 4-bit subclass-data bitfield slice holding a BinOp value.
  template <unsigned Offset>
  using BinOpBitfieldElement =
      typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;

public:
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                Instruction *InsertBefore = nullptr);
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  // Packed subclass-data layout: volatile bit, ordering, operation, then the
  // log2 alignment; the static_assert below verifies the fields are gap-free.
  using VolatileField = BoolBitfieldElementT<0>;
  using AtomicOrderingField =
      AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
  using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
  using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
  static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
                                        OperationField, AlignmentField>(),
                "Bitfields must be contiguous");

  BinOp getOperation() const { return getSubclassData<OperationField>(); }

  static StringRef getOperationName(BinOp Op);

  /// Returns true for the floating-point operations (FAdd, FSub).
  static bool isFPOperation(BinOp Op) {
    switch (Op) {
    case AtomicRMWInst::FAdd:
    case AtomicRMWInst::FSub:
      return true;
    default:
      return false;
    }
  }

  void setOperation(BinOp Operation) {
    setSubclassData<OperationField>(Operation);
  }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  Align getAlign() const {
    // Only log2 of the alignment is stored; reconstitute the Align here.
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a RMW on a volatile memory location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile RMW or not.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  /// Returns the ordering constraint of this rmw instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<AtomicOrderingField>();
  }

  /// Sets the ordering constraint of this rmw instruction.
  void setOrdering(AtomicOrdering Ordering) {
    assert(Ordering != AtomicOrdering::NotAtomic &&
           "atomicrmw instructions can only be atomic.");
    setSubclassData<AtomicOrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this rmw instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this rmw instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  // Operand order is (pointer, value).
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  Value *getValOperand() { return getOperand(1); }
  const Value *getValOperand() const { return getOperand(1); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  bool isFloatingPointOperation() const {
    return isFPOperation(getOperation());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicRMW;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
            AtomicOrdering Ordering, SyncScope::ID SSID);

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this rmw instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
906 | ||||
// AtomicRMWInst always has exactly two operands: the pointer and the value.
template <>
struct OperandTraits<AtomicRMWInst>
    : public FixedNumOperandTraits<AtomicRMWInst,2> {
};

// Materializes the operand accessors (op_begin/op_end, getOperand/setOperand,
// getNumOperands) declared in the class by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
913 | ||||
914 | //===----------------------------------------------------------------------===// | |||
915 | // GetElementPtrInst Class | |||
916 | //===----------------------------------------------------------------------===// | |||
917 | ||||
// checkGEPType - Simple wrapper function to give a better assertion failure
// message on bad indexes for a gep instruction.
//
// Returns its argument unchanged; in asserts builds it fires when index
// computation produced a null type.
inline Type *checkGEPType(Type *Ty) {
  assert(Ty && "Invalid GetElementPtrInst indices for type!");
  return Ty;
}
925 | ||||
926 | /// an instruction for type-safe pointer arithmetic to | |||
927 | /// access elements of arrays and structs | |||
928 | /// | |||
929 | class GetElementPtrInst : public Instruction { | |||
930 | Type *SourceElementType; | |||
931 | Type *ResultElementType; | |||
932 | ||||
933 | GetElementPtrInst(const GetElementPtrInst &GEPI); | |||
934 | ||||
935 | /// Constructors - Create a getelementptr instruction with a base pointer an | |||
936 | /// list of indices. The first ctor can optionally insert before an existing | |||
937 | /// instruction, the second appends the new instruction to the specified | |||
938 | /// BasicBlock. | |||
939 | inline GetElementPtrInst(Type *PointeeType, Value *Ptr, | |||
940 | ArrayRef<Value *> IdxList, unsigned Values, | |||
941 | const Twine &NameStr, Instruction *InsertBefore); | |||
942 | inline GetElementPtrInst(Type *PointeeType, Value *Ptr, | |||
943 | ArrayRef<Value *> IdxList, unsigned Values, | |||
944 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
945 | ||||
946 | void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr); | |||
947 | ||||
948 | protected: | |||
949 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
950 | friend class Instruction; | |||
951 | ||||
952 | GetElementPtrInst *cloneImpl() const; | |||
953 | ||||
954 | public: | |||
955 | static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr, | |||
956 | ArrayRef<Value *> IdxList, | |||
957 | const Twine &NameStr = "", | |||
958 | Instruction *InsertBefore = nullptr) { | |||
959 | unsigned Values = 1 + unsigned(IdxList.size()); | |||
960 | assert(PointeeType && "Must specify element type")(static_cast <bool> (PointeeType && "Must specify element type" ) ? void (0) : __assert_fail ("PointeeType && \"Must specify element type\"" , "llvm/include/llvm/IR/Instructions.h", 960, __extension__ __PRETTY_FUNCTION__ )); | |||
961 | assert(cast<PointerType>(Ptr->getType()->getScalarType())(static_cast <bool> (cast<PointerType>(Ptr->getType ()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType )) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)" , "llvm/include/llvm/IR/Instructions.h", 962, __extension__ __PRETTY_FUNCTION__ )) | |||
962 | ->isOpaqueOrPointeeTypeMatches(PointeeType))(static_cast <bool> (cast<PointerType>(Ptr->getType ()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType )) ? void (0) : __assert_fail ("cast<PointerType>(Ptr->getType()->getScalarType()) ->isOpaqueOrPointeeTypeMatches(PointeeType)" , "llvm/include/llvm/IR/Instructions.h", 962, __extension__ __PRETTY_FUNCTION__ )); | |||
963 | return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values, | |||
964 | NameStr, InsertBefore); | |||
965 | } | |||
966 | ||||
  /// Create a GEP with insert-at-end-of-block semantics.
  /// \p PointeeType is the source element type the indices step over,
  /// \p Ptr the base pointer, \p IdxList the index operands.
  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    // One operand for the base pointer plus one per index; this count also
    // sizes the placement-new hung-off-operand allocation below.
    unsigned Values = 1 + unsigned(IdxList.size());
    assert(PointeeType && "Must specify element type");
    // The declared source element type must match the pointee type of the
    // (possibly opaque) base pointer.
    assert(cast<PointerType>(Ptr->getType()->getScalarType())
               ->isOpaqueOrPointeeTypeMatches(PointeeType));
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertAtEnd);
  }
978 | ||||
979 | /// Create an "inbounds" getelementptr. See the documentation for the | |||
980 | /// "inbounds" flag in LangRef.html for details. | |||
981 | static GetElementPtrInst * | |||
982 | CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList, | |||
983 | const Twine &NameStr = "", | |||
984 | Instruction *InsertBefore = nullptr) { | |||
985 | GetElementPtrInst *GEP = | |||
986 | Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore); | |||
987 | GEP->setIsInBounds(true); | |||
988 | return GEP; | |||
989 | } | |||
990 | ||||
991 | static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr, | |||
992 | ArrayRef<Value *> IdxList, | |||
993 | const Twine &NameStr, | |||
994 | BasicBlock *InsertAtEnd) { | |||
995 | GetElementPtrInst *GEP = | |||
996 | Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd); | |||
997 | GEP->setIsInBounds(true); | |||
998 | return GEP; | |||
999 | } | |||
1000 | ||||
  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  /// The source element type: the type the first index steps over.
  Type *getSourceElementType() const { return SourceElementType; }

  void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
  void setResultElementType(Type *Ty) { ResultElementType = Ty; }

  /// The element type ultimately addressed by the fully-indexed GEP.
  Type *getResultElementType() const {
    // The cached result element type must agree with the (possibly opaque)
    // pointee type of this instruction's own pointer type.
    assert(cast<PointerType>(getType()->getScalarType())
               ->isOpaqueOrPointeeTypeMatches(ResultElementType));
    return ResultElementType;
  }

  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    // Note that this is always the same as the pointer operand's address space
    // and that is cheaper to compute, so cheat here.
    return getPointerAddressSpace();
  }

  /// Returns the result type of a getelementptr with the given source
  /// element type and indexes.
  ///
  /// Null is returned if the indices are invalid for the specified
  /// source element type.
  static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);

  /// Return the type of the element at the given index of an indexable
  /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
  ///
  /// Returns null if the type can't be indexed, or the given index is not
  /// legal for the given type.
  static Type *getTypeAtIndex(Type *Ty, Value *Idx);
  static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);

  // Index operands start right after the base pointer (operand 0).
  inline op_iterator idx_begin() { return op_begin()+1; }
  inline const_op_iterator idx_begin() const { return op_begin()+1; }
  inline op_iterator idx_end() { return op_end(); }
  inline const_op_iterator idx_end() const { return op_end(); }

  /// Range over the index operands only (base pointer excluded).
  inline iterator_range<op_iterator> indices() {
    return make_range(idx_begin(), idx_end());
  }

  inline iterator_range<const_op_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getPointerOperand() {
    return getOperand(0);
  }
  const Value *getPointerOperand() const {
    return getOperand(0);
  }
  static unsigned getPointerOperandIndex() {
    return 0U; // get index for modifying correct operand.
  }

  /// Method to return the pointer operand as a
  /// PointerType.
  Type *getPointerOperandType() const {
    return getPointerOperand()->getType();
  }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }
1072 | ||||
1073 | /// Returns the pointer type returned by the GEP | |||
1074 | /// instruction, which may be a vector of pointers. | |||
1075 | static Type *getGEPReturnType(Type *ElTy, Value *Ptr, | |||
1076 | ArrayRef<Value *> IdxList) { | |||
1077 | PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType()); | |||
1078 | unsigned AddrSpace = OrigPtrTy->getAddressSpace(); | |||
1079 | Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList)); | |||
1080 | Type *PtrTy = OrigPtrTy->isOpaque() | |||
1081 | ? PointerType::get(OrigPtrTy->getContext(), AddrSpace) | |||
1082 | : PointerType::get(ResultElemTy, AddrSpace); | |||
1083 | // Vector GEP | |||
1084 | if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) { | |||
1085 | ElementCount EltCount = PtrVTy->getElementCount(); | |||
1086 | return VectorType::get(PtrTy, EltCount); | |||
1087 | } | |||
1088 | for (Value *Index : IdxList) | |||
1089 | if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) { | |||
1090 | ElementCount EltCount = IndexVTy->getElementCount(); | |||
1091 | return VectorType::get(PtrTy, EltCount); | |||
1092 | } | |||
1093 | // Scalar GEP | |||
1094 | return PtrTy; | |||
1095 | } | |||
1096 | ||||
1097 | unsigned getNumIndices() const { // Note: always non-negative | |||
1098 | return getNumOperands() - 1; | |||
1099 | } | |||
1100 | ||||
1101 | bool hasIndices() const { | |||
1102 | return getNumOperands() > 1; | |||
1103 | } | |||
1104 | ||||
  /// Return true if all of the indices of this GEP are
  /// zeros. If so, the result pointer and the first operand have the same
  /// value, just potentially different types.
  bool hasAllZeroIndices() const;

  /// Return true if all of the indices of this GEP are
  /// constant integers. If so, the result pointer and the first operand have
  /// a constant offset between them.
  bool hasAllConstantIndices() const;

  /// Set or clear the inbounds flag on this GEP instruction.
  /// See LangRef.html for the meaning of inbounds on a getelementptr.
  void setIsInBounds(bool b = true);

  /// Determine whether the GEP has the inbounds flag.
  bool isInBounds() const;

  /// Accumulate the constant address offset of this GEP if possible.
  ///
  /// This routine accepts an APInt into which it will accumulate the constant
  /// offset of this GEP if the GEP is in fact constant. If the GEP is not
  /// all-constant, it returns false and the value of the offset APInt is
  /// undefined (it is *not* preserved!). The APInt passed into this routine
  /// must be at least as wide as the IntPtr type for the address space of
  /// the base GEP pointer.
  bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;

  /// Like accumulateConstantOffset, but additionally collects the variable
  /// (scaled) index contributions into \p VariableOffsets.
  bool collectOffset(const DataLayout &DL, unsigned BitWidth,
                     MapVector<Value *, APInt> &VariableOffsets,
                     APInt &ConstantOffset) const;

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::GetElementPtr);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1142 | ||||
// GetElementPtrInst has one fixed operand (the base pointer) plus a
// variable number of index operands, hence VariadicOperandTraits<..., 1>.
template <>
struct OperandTraits<GetElementPtrInst> :
  public VariadicOperandTraits<GetElementPtrInst, 1> {
};
1147 | ||||
// Insert-before-instruction constructor. The operand storage is hung off
// before `this` (op_end(this) - Values), which is why the allocation size
// was threaded through from Create().
GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     Instruction *InsertBefore)
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertBefore),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  // The computed result element type must agree with the pointee type of
  // the freshly computed GEP return type.
  assert(cast<PointerType>(getType()->getScalarType())
             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
  init(Ptr, IdxList, NameStr);
}
1161 | ||||
// Insert-at-end-of-block constructor; otherwise identical to the
// insert-before overload above.
GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     BasicBlock *InsertAtEnd)
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertAtEnd),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  // Same invariant as the insert-before constructor.
  assert(cast<PointerType>(getType()->getScalarType())
             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
  init(Ptr, IdxList, NameStr);
}
1175 | ||||
// Out-of-line definitions of the operand accessors declared inside the
// class (op_begin/op_end/getOperand/setOperand/getNumOperands/Op<>).
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1177 | ||||
1178 | //===----------------------------------------------------------------------===// | |||
1179 | // ICmpInst Class | |||
1180 | //===----------------------------------------------------------------------===// | |||
1181 | ||||
/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on integers or pointers. The operands
/// must be identical types.
/// Represent an integer comparison operator.
class ICmpInst: public CmpInst {
  // Debug-only invariant checks shared by every constructor.
  void AssertOK() {
    assert(isIntPredicate() &&
           "Invalid ICmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
           "Both operands to ICmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
            getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
           "Invalid operand types for ICmp instruction");
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ICmpInst
  ICmpInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  ICmpInst(
    Instruction *InsertBefore,  ///< Where to insert
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// Constructor with insert-at-end semantics.
  ICmpInst(
    BasicBlock &InsertAtEnd, ///< Block to insert into.
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              &InsertAtEnd) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// Constructor with no-insertion semantics
  // NOTE(review): the static analyzer reports "Called C++ object pointer is
  // null" on `LHS->getType()` below. All three constructors dereference LHS
  // unconditionally; they presumably rely on callers never passing a null
  // operand — confirm at the flagged call site rather than here.
  ICmpInst(
    Predicate pred, ///< The predicate to use for the comparison
    Value *LHS,     ///< The left-hand-side of the expression
    Value *RHS,     ///< The right-hand-side of the expression
    const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
  /// @returns the predicate that would be the result if the operand were
  /// regarded as signed.
  /// Return the signed version of the predicate
  Predicate getSignedPredicate() const {
    return getSignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// Return the signed version of the predicate.
  static Predicate getSignedPredicate(Predicate pred);

  /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
  /// @returns the predicate that would be the result if the operand were
  /// regarded as unsigned.
  /// Return the unsigned version of the predicate
  Predicate getUnsignedPredicate() const {
    return getUnsignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// Return the unsigned version of the predicate.
  static Predicate getUnsignedPredicate(Predicate pred);

  /// Return true if this predicate is either EQ or NE. This also
  /// tests for commutativity.
  static bool isEquality(Predicate P) {
    return P == ICMP_EQ || P == ICMP_NE;
  }

  /// Return true if this predicate is either EQ or NE. This also
  /// tests for commutativity.
  bool isEquality() const {
    return isEquality(getPredicate());
  }

  /// @returns true if the predicate of this ICmpInst is commutative
  /// Determine if this relation is commutative.
  bool isCommutative() const { return isEquality(); }

  /// Return true if the predicate is relational (not EQ or NE).
  ///
  bool isRelational() const {
    return !isEquality();
  }

  /// Return true if the predicate is relational (not EQ or NE).
  ///
  static bool isRelational(Predicate P) {
    return !isEquality(P);
  }

  /// Return true if the predicate is SGT or UGT.
  ///
  static bool isGT(Predicate P) {
    return P == ICMP_SGT || P == ICMP_UGT;
  }

  /// Return true if the predicate is SLT or ULT.
  ///
  static bool isLT(Predicate P) {
    return P == ICMP_SLT || P == ICMP_ULT;
  }

  /// Return true if the predicate is SGE or UGE.
  ///
  static bool isGE(Predicate P) {
    return P == ICMP_SGE || P == ICMP_UGE;
  }

  /// Return true if the predicate is SLE or ULE.
  ///
  static bool isLE(Predicate P) {
    return P == ICMP_SLE || P == ICMP_ULE;
  }

  /// Returns the sequence of all ICmp predicates.
  ///
  static auto predicates() { return ICmpPredicates(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
  void swapOperands() {
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  /// Return result of `LHS Pred RHS` comparison.
  static bool compare(const APInt &LHS, const APInt &RHS,
                      ICmpInst::Predicate Pred);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ICmp;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1351 | ||||
1352 | //===----------------------------------------------------------------------===// | |||
1353 | // FCmpInst Class | |||
1354 | //===----------------------------------------------------------------------===// | |||
1355 | ||||
/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on floating point values or packed
/// vectors of floating point values. The operands must be identical types.
/// Represents a floating point comparison operator.
class FCmpInst: public CmpInst {
  // Debug-only invariant checks shared by every constructor. Unlike
  // ICmpInst, these calls are not wrapped in #ifndef NDEBUG here; the
  // asserts themselves compile away in release builds.
  void AssertOK() {
    assert(isFPPredicate() && "Invalid FCmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
           "Both operands to FCmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
           "Invalid operand types for FCmp instruction");
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FCmpInst
  FCmpInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  FCmpInst(
    Instruction *InsertBefore, ///< Where to insert
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
    AssertOK();
  }

  /// Constructor with insert-at-end semantics.
  FCmpInst(
    BasicBlock &InsertAtEnd, ///< Block to insert into.
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              &InsertAtEnd) {
    AssertOK();
  }

  /// Constructor with no-insertion semantics. \p FlagsSource, if given,
  /// supplies the fast-math flags for the new comparison.
  FCmpInst(
    Predicate Pred, ///< The predicate to use for the comparison
    Value *LHS,     ///< The left-hand-side of the expression
    Value *RHS,     ///< The right-hand-side of the expression
    const Twine &NameStr = "", ///< Name of the instruction
    Instruction *FlagsSource = nullptr
  ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
              RHS, NameStr, nullptr, FlagsSource) {
    AssertOK();
  }

  /// @returns true if the predicate of this instruction is EQ or NE.
  /// Determine if this is an equality predicate.
  static bool isEquality(Predicate Pred) {
    return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
           Pred == FCMP_UNE;
  }

  /// @returns true if the predicate of this instruction is EQ or NE.
  /// Determine if this is an equality predicate.
  bool isEquality() const { return isEquality(getPredicate()); }

  /// @returns true if the predicate of this instruction is commutative.
  /// Determine if this is a commutative predicate.
  bool isCommutative() const {
    return isEquality() ||
           getPredicate() == FCMP_FALSE ||
           getPredicate() == FCMP_TRUE ||
           getPredicate() == FCMP_ORD ||
           getPredicate() == FCMP_UNO;
  }

  /// @returns true if the predicate is relational (not EQ or NE).
  /// Determine if this a relational predicate.
  bool isRelational() const { return !isEquality(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
  void swapOperands() {
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  /// Returns the sequence of all FCmp predicates.
  ///
  static auto predicates() { return FCmpPredicates(); }

  /// Return result of `LHS Pred RHS` comparison.
  static bool compare(const APFloat &LHS, const APFloat &RHS,
                      FCmpInst::Predicate Pred);

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::FCmp;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1467 | ||||
//===----------------------------------------------------------------------===//
/// This class represents a function call, abstracting a target
/// machine's calling convention. This class uses low bit of the SubClassData
/// field to indicate whether or not this is a tail call. The rest of the bits
/// hold the calling convention of the call.
///
class CallInst : public CallBase {
  // Copy construction is private; clones are made via cloneImpl()/Create.
  CallInst(const CallInst &CI);

  /// Construct a CallInst given a range of arguments and operand bundles,
  /// inserted before \p InsertBefore.
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  Instruction *InsertBefore);

  // Convenience delegating constructor: same as above with no bundles.
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  const Twine &NameStr, Instruction *InsertBefore)
      : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}

  /// Construct a CallInst given a range of arguments and operand bundles,
  /// appended at the end of \p InsertAtEnd.
  inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                  ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  // Zero-argument call constructors.
  explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
                    Instruction *InsertBefore);

  CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
           BasicBlock *InsertAtEnd);

  void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
            ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
  void init(FunctionType *FTy, Value *Func, const Twine &NameStr);

  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus the input operand
    // counts provided.
    return 1 + NumArgs + NumBundleInputs;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CallInst *cloneImpl() const;

public:
  // Factories taking an explicit FunctionType, inserting before an
  // instruction. The placement-new arguments reserve space for the operand
  // list (and, where applicable, the bundle descriptor bytes).
  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = None,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
  }

  // Factories taking an explicit FunctionType, appending at the end of a
  // basic block.
  static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return new (ComputeNumOperands(Args.size()))
        CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
  }

  static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    const int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
  }

  // Convenience overloads taking a FunctionCallee, which bundles the function
  // type with the callee value; these forward to the overloads above.
  static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles = None,
                          const Twine &NameStr = "",
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr,
                          Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertBefore);
  }

  static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
                          BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
                  InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
                  InsertAtEnd);
  }

  static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
                          ArrayRef<OperandBundleDef> Bundles,
                          const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
                  NameStr, InsertAtEnd);
  }

  /// Create a clone of \p CI with a different set of operand bundles and
  /// insert it before \p InsertPt.
  ///
  /// The returned call instruction is identical \p CI in every way except that
  /// the operand bundles for the new instruction are set to the operand bundles
  /// in \p Bundles.
  static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
                          Instruction *InsertPt = nullptr);

  /// Generate the IR for a call to malloc:
  /// 1. Compute the malloc call's argument as the specified type's size,
  ///    possibly multiplied by the array size if the array size is not
  ///    constant 1.
  /// 2. Call malloc with that argument.
  /// 3. Bitcast the result of the malloc call to the specified type.
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
                                   Type *AllocTy, Value *AllocSize,
                                   Value *ArraySize = nullptr,
                                   ArrayRef<OperandBundleDef> Bundles = None,
                                   Function *MallocF = nullptr,
                                   const Twine &Name = "");
  /// Generate the IR for a call to the builtin free function.
  static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 Instruction *InsertBefore);
  static Instruction *CreateFree(Value *Source,
                                 ArrayRef<OperandBundleDef> Bundles,
                                 BasicBlock *InsertAtEnd);

  // Note that 'musttail' implies 'tail'.
  enum TailCallKind : unsigned {
    TCK_None = 0,
    TCK_Tail = 1,
    TCK_MustTail = 2,
    TCK_NoTail = 3,
    TCK_LAST = TCK_NoTail
  };

  // The tail-call kind lives in the two low subclass-data bits; the
  // static_assert guarantees it sits flush against CallBase's
  // calling-convention field.
  using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
  static_assert(
      Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
      "Bitfields must be contiguous");

  TailCallKind getTailCallKind() const {
    return getSubclassData<TailCallKindField>();
  }

  bool isTailCall() const {
    // Both 'tail' and 'musttail' count as tail calls.
    TailCallKind Kind = getTailCallKind();
    return Kind == TCK_Tail || Kind == TCK_MustTail;
  }

  bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }

  bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }

  void setTailCallKind(TailCallKind TCK) {
    setSubclassData<TailCallKindField>(TCK);
  }

  void setTailCall(bool IsTc = true) {
    setTailCallKind(IsTc ? TCK_Tail : TCK_None);
  }

  /// Return true if the call can return twice
  bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
  void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Call;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Updates profile metadata by scaling it by \p S / \p T.
  void updateProfWeight(uint64_t S, uint64_t T);

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};
1708 | ||||
// Out-of-line definition of the bundle-taking constructor that appends at the
// end of a basic block. The first CallBase argument points at the start of the
// operand list: total operand count is args + bundle inputs + 1 for the
// callee (cf. ComputeNumOperands), counted back from op_end.
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertAtEnd) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1719 | ||||
// Out-of-line definition of the bundle-taking constructor that inserts before
// an existing instruction. Operand-pointer arithmetic mirrors the InsertAtEnd
// variant above: args + bundle inputs + 1 callee slot, counted back from
// op_end.
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertBefore) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1730 | ||||
//===----------------------------------------------------------------------===//
// SelectInst Class
//===----------------------------------------------------------------------===//

/// This class represents the LLVM 'select' instruction.
///
/// Picks one of two values based on a condition. The result type is that of
/// the true/false values (taken from S1 in the constructors).
class SelectInst : public Instruction {
  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             Instruction *InsertBefore)
    : Instruction(S1->getType(), Instruction::Select,
                  &Op<0>(), 3, InsertBefore) {
    init(C, S1, S2);
    setName(NameStr);
  }

  SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
             BasicBlock *InsertAtEnd)
    : Instruction(S1->getType(), Instruction::Select,
                  &Op<0>(), 3, InsertAtEnd) {
    init(C, S1, S2);
    setName(NameStr);
  }

  // Store the three operands; asserts that they form a valid select.
  void init(Value *C, Value *S1, Value *S2) {
    assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
    Op<0>() = C;
    Op<1>() = S1;
    Op<2>() = S2;
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  SelectInst *cloneImpl() const;

public:
  /// Create a select; optionally copy metadata from \p MDFrom onto the new
  /// instruction.
  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr,
                            Instruction *MDFrom = nullptr) {
    SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
    if (MDFrom)
      Sel->copyMetadata(*MDFrom);
    return Sel;
  }

  static SelectInst *Create(Value *C, Value *S1, Value *S2,
                            const Twine &NameStr,
                            BasicBlock *InsertAtEnd) {
    return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
  }

  // Operand 0 is the condition; operands 1 and 2 are the values produced when
  // the condition is true and false, respectively.
  const Value *getCondition() const { return Op<0>(); }
  const Value *getTrueValue() const { return Op<1>(); }
  const Value *getFalseValue() const { return Op<2>(); }
  Value *getCondition() { return Op<0>(); }
  Value *getTrueValue() { return Op<1>(); }
  Value *getFalseValue() { return Op<2>(); }

  void setCondition(Value *V) { Op<0>() = V; }
  void setTrueValue(Value *V) { Op<1>() = V; }
  void setFalseValue(Value *V) { Op<2>() = V; }

  /// Swap the true and false values of the select instruction.
  /// This doesn't swap prof metadata.
  void swapValues() { Op<1>().swap(Op<2>()); }

  /// Return a string if the specified operands are invalid
  /// for a select operation, otherwise return null.
  static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  OtherOps getOpcode() const {
    return static_cast<OtherOps>(Instruction::getOpcode());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Select;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1818 | ||||
// SelectInst always has exactly three operands (condition, true value,
// false value), so it uses the fixed-count operand traits.
template <>
struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
};
1822 | ||||
// Emit the definitions of the operand accessors declared inside SelectInst by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1824 | ||||
//===----------------------------------------------------------------------===//
// VAArgInst Class
//===----------------------------------------------------------------------===//

/// This class represents the va_arg llvm instruction, which returns
/// an argument of the specified type given a va_list and increments that list
///
class VAArgInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  VAArgInst *cloneImpl() const;

public:
  VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
            Instruction *InsertBefore = nullptr)
    : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
    setName(NameStr);
  }

  VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
            BasicBlock *InsertAtEnd)
    : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
    setName(NameStr);
  }

  // The single operand is the va_list pointer.
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == VAArg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1864 | ||||
//===----------------------------------------------------------------------===//
// ExtractElementInst Class
//===----------------------------------------------------------------------===//

/// This instruction extracts a single (scalar)
/// element from a VectorType value
///
class ExtractElementInst : public Instruction {
  // Constructors are private; use the Create factories, which reserve space
  // for the two operands.
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
                     Instruction *InsertBefore = nullptr);
  ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
                     BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ExtractElementInst *cloneImpl() const;

public:
  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
  }

  static ExtractElementInst *Create(Value *Vec, Value *Idx,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an extractelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *Idx);

  // Operand 0 is the source vector, operand 1 is the element index.
  Value *getVectorOperand() { return Op<0>(); }
  Value *getIndexOperand() { return Op<1>(); }
  const Value *getVectorOperand() const { return Op<0>(); }
  const Value *getIndexOperand() const { return Op<1>(); }

  VectorType *getVectorOperandType() const {
    return cast<VectorType>(getVectorOperand()->getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ExtractElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1921 | ||||
// ExtractElementInst always has exactly two operands (vector, index).
template <>
struct OperandTraits<ExtractElementInst> :
  public FixedNumOperandTraits<ExtractElementInst, 2> {
};
1926 | ||||
// Emit the definitions of the operand accessors declared inside
// ExtractElementInst by DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1928 | ||||
//===----------------------------------------------------------------------===//
// InsertElementInst Class
//===----------------------------------------------------------------------===//

/// This instruction inserts a single (scalar)
/// element into a VectorType value
///
class InsertElementInst : public Instruction {
  // Constructors are private; use the Create factories, which reserve space
  // for the three operands.
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
                    const Twine &NameStr = "",
                    Instruction *InsertBefore = nullptr);
  InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
                    BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  InsertElementInst *cloneImpl() const;

public:
  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
  }

  static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
  }

  /// Return true if an insertelement instruction can be
  /// formed with the specified operands.
  static bool isValidOperands(const Value *Vec, const Value *NewElt,
                              const Value *Idx);

  /// Overload to return most specific vector type.
  ///
  VectorType *getType() const {
    return cast<VectorType>(Instruction::getType());
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::InsertElement;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1984 | ||||
// InsertElementInst always has exactly three operands (vector, new element,
// index).
template <>
struct OperandTraits<InsertElementInst> :
  public FixedNumOperandTraits<InsertElementInst, 3> {
};
1989 | ||||
// Emit the definitions of the operand accessors declared inside
// InsertElementInst by DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
1991 | ||||
1992 | //===----------------------------------------------------------------------===// | |||
1993 | // ShuffleVectorInst Class | |||
1994 | //===----------------------------------------------------------------------===// | |||
1995 | ||||
/// Sentinel shuffle-mask element value: a lane whose result is undefined.
/// Matches the -1 lanes accepted by getShuffleMask/commuteShuffleMask below.
constexpr int UndefMaskElem = -1;
1997 | ||||
1998 | /// This instruction constructs a fixed permutation of two | |||
1999 | /// input vectors. | |||
2000 | /// | |||
2001 | /// For each element of the result vector, the shuffle mask selects an element | |||
2002 | /// from one of the input vectors to copy to the result. Non-negative elements | |||
2003 | /// in the mask represent an index into the concatenated pair of input vectors. | |||
2004 | /// UndefMaskElem (-1) specifies that the result element is undefined. | |||
2005 | /// | |||
2006 | /// For scalable vectors, all the elements of the mask must be 0 or -1. This | |||
2007 | /// requirement may be relaxed in the future. | |||
2008 | class ShuffleVectorInst : public Instruction { | |||
2009 | SmallVector<int, 4> ShuffleMask; | |||
2010 | Constant *ShuffleMaskForBitcode; | |||
2011 | ||||
2012 | protected: | |||
2013 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
2014 | friend class Instruction; | |||
2015 | ||||
2016 | ShuffleVectorInst *cloneImpl() const; | |||
2017 | ||||
2018 | public: | |||
2019 | ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "", | |||
2020 | Instruction *InsertBefore = nullptr); | |||
2021 | ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr, | |||
2022 | BasicBlock *InsertAtEnd); | |||
2023 | ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "", | |||
2024 | Instruction *InsertBefore = nullptr); | |||
2025 | ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr, | |||
2026 | BasicBlock *InsertAtEnd); | |||
2027 | ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, | |||
2028 | const Twine &NameStr = "", | |||
2029 | Instruction *InsertBefor = nullptr); | |||
2030 | ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, | |||
2031 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
2032 | ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, | |||
2033 | const Twine &NameStr = "", | |||
2034 | Instruction *InsertBefor = nullptr); | |||
2035 | ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, | |||
2036 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
2037 | ||||
2038 | void *operator new(size_t S) { return User::operator new(S, 2); } | |||
2039 | void operator delete(void *Ptr) { return User::operator delete(Ptr); } | |||
2040 | ||||
2041 | /// Swap the operands and adjust the mask to preserve the semantics | |||
2042 | /// of the instruction. | |||
2043 | void commute(); | |||
2044 | ||||
2045 | /// Return true if a shufflevector instruction can be | |||
2046 | /// formed with the specified operands. | |||
2047 | static bool isValidOperands(const Value *V1, const Value *V2, | |||
2048 | const Value *Mask); | |||
2049 | static bool isValidOperands(const Value *V1, const Value *V2, | |||
2050 | ArrayRef<int> Mask); | |||
2051 | ||||
2052 | /// Overload to return most specific vector type. | |||
2053 | /// | |||
2054 | VectorType *getType() const { | |||
2055 | return cast<VectorType>(Instruction::getType()); | |||
2056 | } | |||
2057 | ||||
2058 | /// Transparently provide more efficient getOperand methods. | |||
2059 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
2060 | ||||
2061 | /// Return the shuffle mask value of this instruction for the given element | |||
2062 | /// index. Return UndefMaskElem if the element is undef. | |||
2063 | int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; } | |||
2064 | ||||
2065 | /// Convert the input shuffle mask operand to a vector of integers. Undefined | |||
2066 | /// elements of the mask are returned as UndefMaskElem. | |||
2067 | static void getShuffleMask(const Constant *Mask, | |||
2068 | SmallVectorImpl<int> &Result); | |||
2069 | ||||
2070 | /// Return the mask for this instruction as a vector of integers. Undefined | |||
2071 | /// elements of the mask are returned as UndefMaskElem. | |||
2072 | void getShuffleMask(SmallVectorImpl<int> &Result) const { | |||
2073 | Result.assign(ShuffleMask.begin(), ShuffleMask.end()); | |||
2074 | } | |||
2075 | ||||
2076 | /// Return the mask for this instruction, for use in bitcode. | |||
2077 | /// | |||
2078 | /// TODO: This is temporary until we decide a new bitcode encoding for | |||
2079 | /// shufflevector. | |||
2080 | Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; } | |||
2081 | ||||
2082 | static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask, | |||
2083 | Type *ResultTy); | |||
2084 | ||||
2085 | void setShuffleMask(ArrayRef<int> Mask); | |||
2086 | ||||
2087 | ArrayRef<int> getShuffleMask() const { return ShuffleMask; } | |||
2088 | ||||
2089 | /// Return true if this shuffle returns a vector with a different number of | |||
2090 | /// elements than its source vectors. | |||
2091 | /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3> | |||
2092 | /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5> | |||
2093 | bool changesLength() const { | |||
2094 | unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) | |||
2095 | ->getElementCount() | |||
2096 | .getKnownMinValue(); | |||
2097 | unsigned NumMaskElts = ShuffleMask.size(); | |||
2098 | return NumSourceElts != NumMaskElts; | |||
2099 | } | |||
2100 | ||||
2101 | /// Return true if this shuffle returns a vector with a greater number of | |||
2102 | /// elements than its source vectors. | |||
2103 | /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3> | |||
2104 | bool increasesLength() const { | |||
2105 | unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) | |||
2106 | ->getElementCount() | |||
2107 | .getKnownMinValue(); | |||
2108 | unsigned NumMaskElts = ShuffleMask.size(); | |||
2109 | return NumSourceElts < NumMaskElts; | |||
2110 | } | |||
2111 | ||||
2112 | /// Return true if this shuffle mask chooses elements from exactly one source | |||
2113 | /// vector. | |||
2114 | /// Example: <7,5,undef,7> | |||
2115 | /// This assumes that vector operands are the same length as the mask. | |||
2116 | static bool isSingleSourceMask(ArrayRef<int> Mask); | |||
2117 | static bool isSingleSourceMask(const Constant *Mask) { | |||
2118 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2118, __extension__ __PRETTY_FUNCTION__ )); | |||
2119 | SmallVector<int, 16> MaskAsInts; | |||
2120 | getShuffleMask(Mask, MaskAsInts); | |||
2121 | return isSingleSourceMask(MaskAsInts); | |||
2122 | } | |||
2123 | ||||
2124 | /// Return true if this shuffle chooses elements from exactly one source | |||
2125 | /// vector without changing the length of that vector. | |||
2126 | /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3> | |||
2127 | /// TODO: Optionally allow length-changing shuffles. | |||
2128 | bool isSingleSource() const { | |||
2129 | return !changesLength() && isSingleSourceMask(ShuffleMask); | |||
2130 | } | |||
2131 | ||||
2132 | /// Return true if this shuffle mask chooses elements from exactly one source | |||
2133 | /// vector without lane crossings. A shuffle using this mask is not | |||
2134 | /// necessarily a no-op because it may change the number of elements from its | |||
2135 | /// input vectors or it may provide demanded bits knowledge via undef lanes. | |||
2136 | /// Example: <undef,undef,2,3> | |||
2137 | static bool isIdentityMask(ArrayRef<int> Mask); | |||
2138 | static bool isIdentityMask(const Constant *Mask) { | |||
2139 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2139, __extension__ __PRETTY_FUNCTION__ )); | |||
2140 | SmallVector<int, 16> MaskAsInts; | |||
2141 | getShuffleMask(Mask, MaskAsInts); | |||
2142 | return isIdentityMask(MaskAsInts); | |||
2143 | } | |||
2144 | ||||
2145 | /// Return true if this shuffle chooses elements from exactly one source | |||
2146 | /// vector without lane crossings and does not change the number of elements | |||
2147 | /// from its input vectors. | |||
2148 | /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef> | |||
2149 | bool isIdentity() const { | |||
2150 | return !changesLength() && isIdentityMask(ShuffleMask); | |||
2151 | } | |||
2152 | ||||
2153 | /// Return true if this shuffle lengthens exactly one source vector with | |||
2154 | /// undefs in the high elements. | |||
2155 | bool isIdentityWithPadding() const; | |||
2156 | ||||
2157 | /// Return true if this shuffle extracts the first N elements of exactly one | |||
2158 | /// source vector. | |||
2159 | bool isIdentityWithExtract() const; | |||
2160 | ||||
2161 | /// Return true if this shuffle concatenates its 2 source vectors. This | |||
2162 | /// returns false if either input is undefined. In that case, the shuffle is | |||
2163 | /// is better classified as an identity with padding operation. | |||
2164 | bool isConcat() const; | |||
2165 | ||||
2166 | /// Return true if this shuffle mask chooses elements from its source vectors | |||
2167 | /// without lane crossings. A shuffle using this mask would be | |||
2168 | /// equivalent to a vector select with a constant condition operand. | |||
2169 | /// Example: <4,1,6,undef> | |||
2170 | /// This returns false if the mask does not choose from both input vectors. | |||
2171 | /// In that case, the shuffle is better classified as an identity shuffle. | |||
2172 | /// This assumes that vector operands are the same length as the mask | |||
2173 | /// (a length-changing shuffle can never be equivalent to a vector select). | |||
2174 | static bool isSelectMask(ArrayRef<int> Mask); | |||
2175 | static bool isSelectMask(const Constant *Mask) { | |||
2176 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2176, __extension__ __PRETTY_FUNCTION__ )); | |||
2177 | SmallVector<int, 16> MaskAsInts; | |||
2178 | getShuffleMask(Mask, MaskAsInts); | |||
2179 | return isSelectMask(MaskAsInts); | |||
2180 | } | |||
2181 | ||||
2182 | /// Return true if this shuffle chooses elements from its source vectors | |||
2183 | /// without lane crossings and all operands have the same number of elements. | |||
2184 | /// In other words, this shuffle is equivalent to a vector select with a | |||
2185 | /// constant condition operand. | |||
2186 | /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3> | |||
2187 | /// This returns false if the mask does not choose from both input vectors. | |||
2188 | /// In that case, the shuffle is better classified as an identity shuffle. | |||
2189 | /// TODO: Optionally allow length-changing shuffles. | |||
2190 | bool isSelect() const { | |||
2191 | return !changesLength() && isSelectMask(ShuffleMask); | |||
2192 | } | |||
2193 | ||||
2194 | /// Return true if this shuffle mask swaps the order of elements from exactly | |||
2195 | /// one source vector. | |||
2196 | /// Example: <7,6,undef,4> | |||
2197 | /// This assumes that vector operands are the same length as the mask. | |||
2198 | static bool isReverseMask(ArrayRef<int> Mask); | |||
2199 | static bool isReverseMask(const Constant *Mask) { | |||
2200 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2200, __extension__ __PRETTY_FUNCTION__ )); | |||
2201 | SmallVector<int, 16> MaskAsInts; | |||
2202 | getShuffleMask(Mask, MaskAsInts); | |||
2203 | return isReverseMask(MaskAsInts); | |||
2204 | } | |||
2205 | ||||
2206 | /// Return true if this shuffle swaps the order of elements from exactly | |||
2207 | /// one source vector. | |||
2208 | /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef> | |||
2209 | /// TODO: Optionally allow length-changing shuffles. | |||
2210 | bool isReverse() const { | |||
2211 | return !changesLength() && isReverseMask(ShuffleMask); | |||
2212 | } | |||
2213 | ||||
2214 | /// Return true if this shuffle mask chooses all elements with the same value | |||
2215 | /// as the first element of exactly one source vector. | |||
2216 | /// Example: <4,undef,undef,4> | |||
2217 | /// This assumes that vector operands are the same length as the mask. | |||
2218 | static bool isZeroEltSplatMask(ArrayRef<int> Mask); | |||
2219 | static bool isZeroEltSplatMask(const Constant *Mask) { | |||
2220 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2220, __extension__ __PRETTY_FUNCTION__ )); | |||
2221 | SmallVector<int, 16> MaskAsInts; | |||
2222 | getShuffleMask(Mask, MaskAsInts); | |||
2223 | return isZeroEltSplatMask(MaskAsInts); | |||
2224 | } | |||
2225 | ||||
2226 | /// Return true if all elements of this shuffle are the same value as the | |||
2227 | /// first element of exactly one source vector without changing the length | |||
2228 | /// of that vector. | |||
2229 | /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0> | |||
2230 | /// TODO: Optionally allow length-changing shuffles. | |||
2231 | /// TODO: Optionally allow splats from other elements. | |||
2232 | bool isZeroEltSplat() const { | |||
2233 | return !changesLength() && isZeroEltSplatMask(ShuffleMask); | |||
2234 | } | |||
2235 | ||||
2236 | /// Return true if this shuffle mask is a transpose mask. | |||
2237 | /// Transpose vector masks transpose a 2xn matrix. They read corresponding | |||
2238 | /// even- or odd-numbered vector elements from two n-dimensional source | |||
2239 | /// vectors and write each result into consecutive elements of an | |||
2240 | /// n-dimensional destination vector. Two shuffles are necessary to complete | |||
2241 | /// the transpose, one for the even elements and another for the odd elements. | |||
2242 | /// This description closely follows how the TRN1 and TRN2 AArch64 | |||
2243 | /// instructions operate. | |||
2244 | /// | |||
2245 | /// For example, a simple 2x2 matrix can be transposed with: | |||
2246 | /// | |||
2247 | /// ; Original matrix | |||
2248 | /// m0 = < a, b > | |||
2249 | /// m1 = < c, d > | |||
2250 | /// | |||
2251 | /// ; Transposed matrix | |||
2252 | /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 > | |||
2253 | /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 > | |||
2254 | /// | |||
2255 | /// For matrices having greater than n columns, the resulting nx2 transposed | |||
2256 | /// matrix is stored in two result vectors such that one vector contains | |||
2257 | /// interleaved elements from all the even-numbered rows and the other vector | |||
2258 | /// contains interleaved elements from all the odd-numbered rows. For example, | |||
2259 | /// a 2x4 matrix can be transposed with: | |||
2260 | /// | |||
2261 | /// ; Original matrix | |||
2262 | /// m0 = < a, b, c, d > | |||
2263 | /// m1 = < e, f, g, h > | |||
2264 | /// | |||
2265 | /// ; Transposed matrix | |||
2266 | /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 > | |||
2267 | /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 > | |||
2268 | static bool isTransposeMask(ArrayRef<int> Mask); | |||
2269 | static bool isTransposeMask(const Constant *Mask) { | |||
2270 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2270, __extension__ __PRETTY_FUNCTION__ )); | |||
2271 | SmallVector<int, 16> MaskAsInts; | |||
2272 | getShuffleMask(Mask, MaskAsInts); | |||
2273 | return isTransposeMask(MaskAsInts); | |||
2274 | } | |||
2275 | ||||
2276 | /// Return true if this shuffle transposes the elements of its inputs without | |||
2277 | /// changing the length of the vectors. This operation may also be known as a | |||
2278 | /// merge or interleave. See the description for isTransposeMask() for the | |||
2279 | /// exact specification. | |||
2280 | /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6> | |||
2281 | bool isTranspose() const { | |||
2282 | return !changesLength() && isTransposeMask(ShuffleMask); | |||
2283 | } | |||
2284 | ||||
2285 | /// Return true if this shuffle mask is an extract subvector mask. | |||
2286 | /// A valid extract subvector mask returns a smaller vector from a single | |||
2287 | /// source operand. The base extraction index is returned as well. | |||
2288 | static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, | |||
2289 | int &Index); | |||
2290 | static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts, | |||
2291 | int &Index) { | |||
2292 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2292, __extension__ __PRETTY_FUNCTION__ )); | |||
2293 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2294 | // case. | |||
2295 | if (isa<ScalableVectorType>(Mask->getType())) | |||
2296 | return false; | |||
2297 | SmallVector<int, 16> MaskAsInts; | |||
2298 | getShuffleMask(Mask, MaskAsInts); | |||
2299 | return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index); | |||
2300 | } | |||
2301 | ||||
2302 | /// Return true if this shuffle mask is an extract subvector mask. | |||
2303 | bool isExtractSubvectorMask(int &Index) const { | |||
2304 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2305 | // case. | |||
2306 | if (isa<ScalableVectorType>(getType())) | |||
2307 | return false; | |||
2308 | ||||
2309 | int NumSrcElts = | |||
2310 | cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); | |||
2311 | return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index); | |||
2312 | } | |||
2313 | ||||
2314 | /// Return true if this shuffle mask is an insert subvector mask. | |||
2315 | /// A valid insert subvector mask inserts the lowest elements of a second | |||
2316 | /// source operand into an in-place first source operand operand. | |||
2317 | /// Both the sub vector width and the insertion index is returned. | |||
2318 | static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, | |||
2319 | int &NumSubElts, int &Index); | |||
2320 | static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts, | |||
2321 | int &NumSubElts, int &Index) { | |||
2322 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2322, __extension__ __PRETTY_FUNCTION__ )); | |||
2323 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2324 | // case. | |||
2325 | if (isa<ScalableVectorType>(Mask->getType())) | |||
2326 | return false; | |||
2327 | SmallVector<int, 16> MaskAsInts; | |||
2328 | getShuffleMask(Mask, MaskAsInts); | |||
2329 | return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index); | |||
2330 | } | |||
2331 | ||||
2332 | /// Return true if this shuffle mask is an insert subvector mask. | |||
2333 | bool isInsertSubvectorMask(int &NumSubElts, int &Index) const { | |||
2334 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2335 | // case. | |||
2336 | if (isa<ScalableVectorType>(getType())) | |||
2337 | return false; | |||
2338 | ||||
2339 | int NumSrcElts = | |||
2340 | cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); | |||
2341 | return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index); | |||
2342 | } | |||
2343 | ||||
2344 | /// Return true if this shuffle mask replicates each of the \p VF elements | |||
2345 | /// in a vector \p ReplicationFactor times. | |||
2346 | /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is: | |||
2347 | /// <0,0,0,1,1,1,2,2,2,3,3,3> | |||
2348 | static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor, | |||
2349 | int &VF); | |||
2350 | static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor, | |||
2351 | int &VF) { | |||
2352 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2352, __extension__ __PRETTY_FUNCTION__ )); | |||
2353 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2354 | // case. | |||
2355 | if (isa<ScalableVectorType>(Mask->getType())) | |||
2356 | return false; | |||
2357 | SmallVector<int, 16> MaskAsInts; | |||
2358 | getShuffleMask(Mask, MaskAsInts); | |||
2359 | return isReplicationMask(MaskAsInts, ReplicationFactor, VF); | |||
2360 | } | |||
2361 | ||||
2362 | /// Return true if this shuffle mask is a replication mask. | |||
2363 | bool isReplicationMask(int &ReplicationFactor, int &VF) const; | |||
2364 | ||||
2365 | /// Change values in a shuffle permute mask assuming the two vector operands | |||
2366 | /// of length InVecNumElts have swapped position. | |||
2367 | static void commuteShuffleMask(MutableArrayRef<int> Mask, | |||
2368 | unsigned InVecNumElts) { | |||
2369 | for (int &Idx : Mask) { | |||
2370 | if (Idx == -1) | |||
2371 | continue; | |||
2372 | Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts; | |||
2373 | assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&(static_cast <bool> (Idx >= 0 && Idx < (int )InVecNumElts * 2 && "shufflevector mask index out of range" ) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\"" , "llvm/include/llvm/IR/Instructions.h", 2374, __extension__ __PRETTY_FUNCTION__ )) | |||
2374 | "shufflevector mask index out of range")(static_cast <bool> (Idx >= 0 && Idx < (int )InVecNumElts * 2 && "shufflevector mask index out of range" ) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\"" , "llvm/include/llvm/IR/Instructions.h", 2374, __extension__ __PRETTY_FUNCTION__ )); | |||
2375 | } | |||
2376 | } | |||
2377 | ||||
2378 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
2379 | static bool classof(const Instruction *I) { | |||
2380 | return I->getOpcode() == Instruction::ShuffleVector; | |||
2381 | } | |||
2382 | static bool classof(const Value *V) { | |||
2383 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
2384 | } | |||
2385 | }; | |||
2386 | ||||
2387 | template <> | |||
2388 | struct OperandTraits<ShuffleVectorInst> | |||
2389 | : public FixedNumOperandTraits<ShuffleVectorInst, 2> {}; | |||
2390 | ||||
// Instantiates the out-of-line bodies for the accessors declared inside
// ShuffleVectorInst via DECLARE_TRANSPARENT_OPERAND_ACCESSORS: op_begin/
// op_end, range-checked getOperand/setOperand, getNumOperands, and Op<Idx>().
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2392 | ||||
2393 | //===----------------------------------------------------------------------===// | |||
2394 | // ExtractValueInst Class | |||
2395 | //===----------------------------------------------------------------------===// | |||
2396 | ||||
2397 | /// This instruction extracts a struct member or array | |||
2398 | /// element value from an aggregate value. | |||
2399 | /// | |||
2400 | class ExtractValueInst : public UnaryInstruction { | |||
2401 | SmallVector<unsigned, 4> Indices; | |||
2402 | ||||
2403 | ExtractValueInst(const ExtractValueInst &EVI); | |||
2404 | ||||
2405 | /// Constructors - Create a extractvalue instruction with a base aggregate | |||
2406 | /// value and a list of indices. The first ctor can optionally insert before | |||
2407 | /// an existing instruction, the second appends the new instruction to the | |||
2408 | /// specified BasicBlock. | |||
2409 | inline ExtractValueInst(Value *Agg, | |||
2410 | ArrayRef<unsigned> Idxs, | |||
2411 | const Twine &NameStr, | |||
2412 | Instruction *InsertBefore); | |||
2413 | inline ExtractValueInst(Value *Agg, | |||
2414 | ArrayRef<unsigned> Idxs, | |||
2415 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
2416 | ||||
2417 | void init(ArrayRef<unsigned> Idxs, const Twine &NameStr); | |||
2418 | ||||
2419 | protected: | |||
2420 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
2421 | friend class Instruction; | |||
2422 | ||||
2423 | ExtractValueInst *cloneImpl() const; | |||
2424 | ||||
2425 | public: | |||
2426 | static ExtractValueInst *Create(Value *Agg, | |||
2427 | ArrayRef<unsigned> Idxs, | |||
2428 | const Twine &NameStr = "", | |||
2429 | Instruction *InsertBefore = nullptr) { | |||
2430 | return new | |||
2431 | ExtractValueInst(Agg, Idxs, NameStr, InsertBefore); | |||
2432 | } | |||
2433 | ||||
2434 | static ExtractValueInst *Create(Value *Agg, | |||
2435 | ArrayRef<unsigned> Idxs, | |||
2436 | const Twine &NameStr, | |||
2437 | BasicBlock *InsertAtEnd) { | |||
2438 | return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd); | |||
2439 | } | |||
2440 | ||||
2441 | /// Returns the type of the element that would be extracted | |||
2442 | /// with an extractvalue instruction with the specified parameters. | |||
2443 | /// | |||
2444 | /// Null is returned if the indices are invalid for the specified type. | |||
2445 | static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs); | |||
2446 | ||||
2447 | using idx_iterator = const unsigned*; | |||
2448 | ||||
2449 | inline idx_iterator idx_begin() const { return Indices.begin(); } | |||
2450 | inline idx_iterator idx_end() const { return Indices.end(); } | |||
2451 | inline iterator_range<idx_iterator> indices() const { | |||
2452 | return make_range(idx_begin(), idx_end()); | |||
2453 | } | |||
2454 | ||||
2455 | Value *getAggregateOperand() { | |||
2456 | return getOperand(0); | |||
2457 | } | |||
2458 | const Value *getAggregateOperand() const { | |||
2459 | return getOperand(0); | |||
2460 | } | |||
2461 | static unsigned getAggregateOperandIndex() { | |||
2462 | return 0U; // get index for modifying correct operand | |||
2463 | } | |||
2464 | ||||
2465 | ArrayRef<unsigned> getIndices() const { | |||
2466 | return Indices; | |||
2467 | } | |||
2468 | ||||
2469 | unsigned getNumIndices() const { | |||
2470 | return (unsigned)Indices.size(); | |||
2471 | } | |||
2472 | ||||
2473 | bool hasIndices() const { | |||
2474 | return true; | |||
2475 | } | |||
2476 | ||||
2477 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
2478 | static bool classof(const Instruction *I) { | |||
2479 | return I->getOpcode() == Instruction::ExtractValue; | |||
2480 | } | |||
2481 | static bool classof(const Value *V) { | |||
2482 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
2483 | } | |||
2484 | }; | |||
2485 | ||||
2486 | ExtractValueInst::ExtractValueInst(Value *Agg, | |||
2487 | ArrayRef<unsigned> Idxs, | |||
2488 | const Twine &NameStr, | |||
2489 | Instruction *InsertBefore) | |||
2490 | : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), | |||
2491 | ExtractValue, Agg, InsertBefore) { | |||
2492 | init(Idxs, NameStr); | |||
2493 | } | |||
2494 | ||||
2495 | ExtractValueInst::ExtractValueInst(Value *Agg, | |||
2496 | ArrayRef<unsigned> Idxs, | |||
2497 | const Twine &NameStr, | |||
2498 | BasicBlock *InsertAtEnd) | |||
2499 | : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), | |||
2500 | ExtractValue, Agg, InsertAtEnd) { | |||
2501 | init(Idxs, NameStr); | |||
2502 | } | |||
2503 | ||||
2504 | //===----------------------------------------------------------------------===// | |||
2505 | // InsertValueInst Class | |||
2506 | //===----------------------------------------------------------------------===// | |||
2507 | ||||
2508 | /// This instruction inserts a struct field of array element | |||
2509 | /// value into an aggregate value. | |||
2510 | /// | |||
2511 | class InsertValueInst : public Instruction { | |||
2512 | SmallVector<unsigned, 4> Indices; | |||
2513 | ||||
2514 | InsertValueInst(const InsertValueInst &IVI); | |||
2515 | ||||
2516 | /// Constructors - Create a insertvalue instruction with a base aggregate | |||
2517 | /// value, a value to insert, and a list of indices. The first ctor can | |||
2518 | /// optionally insert before an existing instruction, the second appends | |||
2519 | /// the new instruction to the specified BasicBlock. | |||
2520 | inline InsertValueInst(Value *Agg, Value *Val, | |||
2521 | ArrayRef<unsigned> Idxs, | |||
2522 | const Twine &NameStr, | |||
2523 | Instruction *InsertBefore); | |||
2524 | inline InsertValueInst(Value *Agg, Value *Val, | |||
2525 | ArrayRef<unsigned> Idxs, | |||
2526 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
2527 | ||||
2528 | /// Constructors - These two constructors are convenience methods because one | |||
2529 | /// and two index insertvalue instructions are so common. | |||
2530 | InsertValueInst(Value *Agg, Value *Val, unsigned Idx, | |||
2531 | const Twine &NameStr = "", | |||
2532 | Instruction *InsertBefore = nullptr); | |||
2533 | InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr, | |||
2534 | BasicBlock *InsertAtEnd); | |||
2535 | ||||
2536 | void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, | |||
2537 | const Twine &NameStr); | |||
2538 | ||||
2539 | protected: | |||
2540 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
2541 | friend class Instruction; | |||
2542 | ||||
2543 | InsertValueInst *cloneImpl() const; | |||
2544 | ||||
2545 | public: | |||
2546 | // allocate space for exactly two operands | |||
2547 | void *operator new(size_t S) { return User::operator new(S, 2); } | |||
2548 | void operator delete(void *Ptr) { User::operator delete(Ptr); } | |||
2549 | ||||
2550 | static InsertValueInst *Create(Value *Agg, Value *Val, | |||
2551 | ArrayRef<unsigned> Idxs, | |||
2552 | const Twine &NameStr = "", | |||
2553 | Instruction *InsertBefore = nullptr) { | |||
2554 | return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore); | |||
2555 | } | |||
2556 | ||||
2557 | static InsertValueInst *Create(Value *Agg, Value *Val, | |||
2558 | ArrayRef<unsigned> Idxs, | |||
2559 | const Twine &NameStr, | |||
2560 | BasicBlock *InsertAtEnd) { | |||
2561 | return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd); | |||
2562 | } | |||
2563 | ||||
2564 | /// Transparently provide more efficient getOperand methods. | |||
2565 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
2566 | ||||
2567 | using idx_iterator = const unsigned*; | |||
2568 | ||||
2569 | inline idx_iterator idx_begin() const { return Indices.begin(); } | |||
2570 | inline idx_iterator idx_end() const { return Indices.end(); } | |||
2571 | inline iterator_range<idx_iterator> indices() const { | |||
2572 | return make_range(idx_begin(), idx_end()); | |||
2573 | } | |||
2574 | ||||
2575 | Value *getAggregateOperand() { | |||
2576 | return getOperand(0); | |||
2577 | } | |||
2578 | const Value *getAggregateOperand() const { | |||
2579 | return getOperand(0); | |||
2580 | } | |||
2581 | static unsigned getAggregateOperandIndex() { | |||
2582 | return 0U; // get index for modifying correct operand | |||
2583 | } | |||
2584 | ||||
2585 | Value *getInsertedValueOperand() { | |||
2586 | return getOperand(1); | |||
2587 | } | |||
2588 | const Value *getInsertedValueOperand() const { | |||
2589 | return getOperand(1); | |||
2590 | } | |||
2591 | static unsigned getInsertedValueOperandIndex() { | |||
2592 | return 1U; // get index for modifying correct operand | |||
2593 | } | |||
2594 | ||||
2595 | ArrayRef<unsigned> getIndices() const { | |||
2596 | return Indices; | |||
2597 | } | |||
2598 | ||||
2599 | unsigned getNumIndices() const { | |||
2600 | return (unsigned)Indices.size(); | |||
2601 | } | |||
2602 | ||||
2603 | bool hasIndices() const { | |||
2604 | return true; | |||
2605 | } | |||
2606 | ||||
2607 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
2608 | static bool classof(const Instruction *I) { | |||
2609 | return I->getOpcode() == Instruction::InsertValue; | |||
2610 | } | |||
2611 | static bool classof(const Value *V) { | |||
2612 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
2613 | } | |||
2614 | }; | |||
2615 | ||||
2616 | template <> | |||
2617 | struct OperandTraits<InsertValueInst> : | |||
2618 | public FixedNumOperandTraits<InsertValueInst, 2> { | |||
2619 | }; | |||
2620 | ||||
2621 | InsertValueInst::InsertValueInst(Value *Agg, | |||
2622 | Value *Val, | |||
2623 | ArrayRef<unsigned> Idxs, | |||
2624 | const Twine &NameStr, | |||
2625 | Instruction *InsertBefore) | |||
2626 | : Instruction(Agg->getType(), InsertValue, | |||
2627 | OperandTraits<InsertValueInst>::op_begin(this), | |||
2628 | 2, InsertBefore) { | |||
2629 | init(Agg, Val, Idxs, NameStr); | |||
2630 | } | |||
2631 | ||||
2632 | InsertValueInst::InsertValueInst(Value *Agg, | |||
2633 | Value *Val, | |||
2634 | ArrayRef<unsigned> Idxs, | |||
2635 | const Twine &NameStr, | |||
2636 | BasicBlock *InsertAtEnd) | |||
2637 | : Instruction(Agg->getType(), InsertValue, | |||
2638 | OperandTraits<InsertValueInst>::op_begin(this), | |||
2639 | 2, InsertAtEnd) { | |||
2640 | init(Agg, Val, Idxs, NameStr); | |||
2641 | } | |||
2642 | ||||
2643 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst ::const_op_iterator InsertValueInst::op_begin() const { return OperandTraits<InsertValueInst>::op_begin(const_cast< InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst ::op_end() { return OperandTraits<InsertValueInst>::op_end (this); } InsertValueInst::const_op_iterator InsertValueInst:: op_end() const { return OperandTraits<InsertValueInst>:: op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst ::getOperand(unsigned i_nocapture) const { (static_cast <bool > (i_nocapture < OperandTraits<InsertValueInst>:: operands(this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 2643, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<InsertValueInst >::op_begin(const_cast<InsertValueInst*>(this))[i_nocapture ].get()); } void InsertValueInst::setOperand(unsigned i_nocapture , Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<InsertValueInst>::operands(this) && "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 2643, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<InsertValueInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned InsertValueInst::getNumOperands () const { return OperandTraits<InsertValueInst>::operands (this); } template <int Idx_nocapture> Use &InsertValueInst ::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &InsertValueInst ::Op() const { return this->OpFrom<Idx_nocapture>(this ); } | |||
2644 | ||||
2645 | //===----------------------------------------------------------------------===// | |||
2646 | // PHINode Class | |||
2647 | //===----------------------------------------------------------------------===// | |||
2648 | ||||
2649 | // PHINode - The PHINode class is used to represent the magical mystical PHI | |||
2650 | // node, that can not exist in nature, but can be synthesized in a computer | |||
2651 | // scientist's overactive imagination. | |||
2652 | // | |||
2653 | class PHINode : public Instruction { | |||
2654 | /// The number of operands actually allocated. NumOperands is | |||
2655 | /// the number actually in use. | |||
2656 | unsigned ReservedSpace; | |||
2657 | ||||
2658 | PHINode(const PHINode &PN); | |||
2659 | ||||
2660 | explicit PHINode(Type *Ty, unsigned NumReservedValues, | |||
2661 | const Twine &NameStr = "", | |||
2662 | Instruction *InsertBefore = nullptr) | |||
2663 | : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore), | |||
2664 | ReservedSpace(NumReservedValues) { | |||
2665 | assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!" ) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\"" , "llvm/include/llvm/IR/Instructions.h", 2665, __extension__ __PRETTY_FUNCTION__ )); | |||
2666 | setName(NameStr); | |||
2667 | allocHungoffUses(ReservedSpace); | |||
2668 | } | |||
2669 | ||||
2670 | PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, | |||
2671 | BasicBlock *InsertAtEnd) | |||
2672 | : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd), | |||
2673 | ReservedSpace(NumReservedValues) { | |||
2674 | assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!" ) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\"" , "llvm/include/llvm/IR/Instructions.h", 2674, __extension__ __PRETTY_FUNCTION__ )); | |||
2675 | setName(NameStr); | |||
2676 | allocHungoffUses(ReservedSpace); | |||
2677 | } | |||
2678 | ||||
2679 | protected: | |||
2680 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
2681 | friend class Instruction; | |||
2682 | ||||
2683 | PHINode *cloneImpl() const; | |||
2684 | ||||
2685 | // allocHungoffUses - this is more complicated than the generic | |||
2686 | // User::allocHungoffUses, because we have to allocate Uses for the incoming | |||
2687 | // values and pointers to the incoming blocks, all in one allocation. | |||
2688 | void allocHungoffUses(unsigned N) { | |||
2689 | User::allocHungoffUses(N, /* IsPhi */ true); | |||
2690 | } | |||
2691 | ||||
2692 | public: | |||
2693 | /// Constructors - NumReservedValues is a hint for the number of incoming | |||
2694 | /// edges that this phi node will have (use 0 if you really have no idea). | |||
2695 | static PHINode *Create(Type *Ty, unsigned NumReservedValues, | |||
2696 | const Twine &NameStr = "", | |||
2697 | Instruction *InsertBefore = nullptr) { | |||
2698 | return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore); | |||
2699 | } | |||
2700 | ||||
2701 | static PHINode *Create(Type *Ty, unsigned NumReservedValues, | |||
2702 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
2703 | return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd); | |||
2704 | } | |||
2705 | ||||
2706 | /// Provide fast operand accessors | |||
2707 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
2708 | ||||
2709 | // Block iterator interface. This provides access to the list of incoming | |||
2710 | // basic blocks, which parallels the list of incoming values. | |||
2711 | ||||
2712 | using block_iterator = BasicBlock **; | |||
2713 | using const_block_iterator = BasicBlock * const *; | |||
2714 | ||||
2715 | block_iterator block_begin() { | |||
2716 | return reinterpret_cast<block_iterator>(op_begin() + ReservedSpace); | |||
2717 | } | |||
2718 | ||||
2719 | const_block_iterator block_begin() const { | |||
2720 | return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace); | |||
2721 | } | |||
2722 | ||||
2723 | block_iterator block_end() { | |||
2724 | return block_begin() + getNumOperands(); | |||
2725 | } | |||
2726 | ||||
2727 | const_block_iterator block_end() const { | |||
2728 | return block_begin() + getNumOperands(); | |||
2729 | } | |||
2730 | ||||
2731 | iterator_range<block_iterator> blocks() { | |||
2732 | return make_range(block_begin(), block_end()); | |||
2733 | } | |||
2734 | ||||
2735 | iterator_range<const_block_iterator> blocks() const { | |||
2736 | return make_range(block_begin(), block_end()); | |||
2737 | } | |||
2738 | ||||
2739 | op_range incoming_values() { return operands(); } | |||
2740 | ||||
2741 | const_op_range incoming_values() const { return operands(); } | |||
2742 | ||||
2743 | /// Return the number of incoming edges | |||
2744 | /// | |||
2745 | unsigned getNumIncomingValues() const { return getNumOperands(); } | |||
2746 | ||||
2747 | /// Return incoming value number x | |||
2748 | /// | |||
2749 | Value *getIncomingValue(unsigned i) const { | |||
2750 | return getOperand(i); | |||
2751 | } | |||
2752 | void setIncomingValue(unsigned i, Value *V) { | |||
2753 | assert(V && "PHI node got a null value!")(static_cast <bool> (V && "PHI node got a null value!" ) ? void (0) : __assert_fail ("V && \"PHI node got a null value!\"" , "llvm/include/llvm/IR/Instructions.h", 2753, __extension__ __PRETTY_FUNCTION__ )); | |||
2754 | assert(getType() == V->getType() &&(static_cast <bool> (getType() == V->getType() && "All operands to PHI node must be the same type as the PHI node!" ) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\"" , "llvm/include/llvm/IR/Instructions.h", 2755, __extension__ __PRETTY_FUNCTION__ )) | |||
2755 | "All operands to PHI node must be the same type as the PHI node!")(static_cast <bool> (getType() == V->getType() && "All operands to PHI node must be the same type as the PHI node!" ) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\"" , "llvm/include/llvm/IR/Instructions.h", 2755, __extension__ __PRETTY_FUNCTION__ )); | |||
2756 | setOperand(i, V); | |||
2757 | } | |||
2758 | ||||
2759 | static unsigned getOperandNumForIncomingValue(unsigned i) { | |||
2760 | return i; | |||
2761 | } | |||
2762 | ||||
2763 | static unsigned getIncomingValueNumForOperand(unsigned i) { | |||
2764 | return i; | |||
2765 | } | |||
2766 | ||||
2767 | /// Return incoming basic block number @p i. | |||
2768 | /// | |||
2769 | BasicBlock *getIncomingBlock(unsigned i) const { | |||
2770 | return block_begin()[i]; | |||
2771 | } | |||
2772 | ||||
2773 | /// Return incoming basic block corresponding | |||
2774 | /// to an operand of the PHI. | |||
2775 | /// | |||
2776 | BasicBlock *getIncomingBlock(const Use &U) const { | |||
2777 | assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")(static_cast <bool> (this == U.getUser() && "Iterator doesn't point to PHI's Uses?" ) ? void (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\"" , "llvm/include/llvm/IR/Instructions.h", 2777, __extension__ __PRETTY_FUNCTION__ )); | |||
2778 | return getIncomingBlock(unsigned(&U - op_begin())); | |||
2779 | } | |||
2780 | ||||
2781 | /// Return incoming basic block corresponding | |||
2782 | /// to value use iterator. | |||
2783 | /// | |||
2784 | BasicBlock *getIncomingBlock(Value::const_user_iterator I) const { | |||
2785 | return getIncomingBlock(I.getUse()); | |||
2786 | } | |||
2787 | ||||
2788 | void setIncomingBlock(unsigned i, BasicBlock *BB) { | |||
2789 | assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!" ) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\"" , "llvm/include/llvm/IR/Instructions.h", 2789, __extension__ __PRETTY_FUNCTION__ )); | |||
2790 | block_begin()[i] = BB; | |||
2791 | } | |||
2792 | ||||
2793 | /// Replace every incoming basic block \p Old to basic block \p New. | |||
2794 | void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) { | |||
2795 | assert(New && Old && "PHI node got a null basic block!")(static_cast <bool> (New && Old && "PHI node got a null basic block!" ) ? void (0) : __assert_fail ("New && Old && \"PHI node got a null basic block!\"" , "llvm/include/llvm/IR/Instructions.h", 2795, __extension__ __PRETTY_FUNCTION__ )); | |||
2796 | for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) | |||
2797 | if (getIncomingBlock(Op) == Old) | |||
2798 | setIncomingBlock(Op, New); | |||
2799 | } | |||
2800 | ||||
2801 | /// Add an incoming value to the end of the PHI list | |||
2802 | /// | |||
2803 | void addIncoming(Value *V, BasicBlock *BB) { | |||
2804 | if (getNumOperands() == ReservedSpace) | |||
2805 | growOperands(); // Get more space! | |||
2806 | // Initialize some new operands. | |||
2807 | setNumHungOffUseOperands(getNumOperands() + 1); | |||
2808 | setIncomingValue(getNumOperands() - 1, V); | |||
2809 | setIncomingBlock(getNumOperands() - 1, BB); | |||
2810 | } | |||
2811 | ||||
2812 | /// Remove an incoming value. This is useful if a | |||
2813 | /// predecessor basic block is deleted. The value removed is returned. | |||
2814 | /// | |||
2815 | /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty | |||
2816 | /// is true), the PHI node is destroyed and any uses of it are replaced with | |||
2817 | /// dummy values. The only time there should be zero incoming values to a PHI | |||
2818 | /// node is when the block is dead, so this strategy is sound. | |||
2819 | /// | |||
2820 | Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true); | |||
2821 | ||||
2822 | Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) { | |||
2823 | int Idx = getBasicBlockIndex(BB); | |||
2824 | assert(Idx >= 0 && "Invalid basic block argument to remove!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument to remove!" ) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\"" , "llvm/include/llvm/IR/Instructions.h", 2824, __extension__ __PRETTY_FUNCTION__ )); | |||
2825 | return removeIncomingValue(Idx, DeletePHIIfEmpty); | |||
2826 | } | |||
2827 | ||||
2828 | /// Return the first index of the specified basic | |||
2829 | /// block in the value list for this PHI. Returns -1 if no instance. | |||
2830 | /// | |||
2831 | int getBasicBlockIndex(const BasicBlock *BB) const { | |||
2832 | for (unsigned i = 0, e = getNumOperands(); i != e; ++i) | |||
2833 | if (block_begin()[i] == BB) | |||
2834 | return i; | |||
2835 | return -1; | |||
2836 | } | |||
2837 | ||||
2838 | Value *getIncomingValueForBlock(const BasicBlock *BB) const { | |||
2839 | int Idx = getBasicBlockIndex(BB); | |||
2840 | assert(Idx >= 0 && "Invalid basic block argument!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument!" ) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\"" , "llvm/include/llvm/IR/Instructions.h", 2840, __extension__ __PRETTY_FUNCTION__ )); | |||
2841 | return getIncomingValue(Idx); | |||
2842 | } | |||
2843 | ||||
2844 | /// Set every incoming value(s) for block \p BB to \p V. | |||
2845 | void setIncomingValueForBlock(const BasicBlock *BB, Value *V) { | |||
2846 | assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!" ) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\"" , "llvm/include/llvm/IR/Instructions.h", 2846, __extension__ __PRETTY_FUNCTION__ )); | |||
2847 | bool Found = false; | |||
2848 | for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) | |||
2849 | if (getIncomingBlock(Op) == BB) { | |||
2850 | Found = true; | |||
2851 | setIncomingValue(Op, V); | |||
2852 | } | |||
2853 | (void)Found; | |||
2854 | assert(Found && "Invalid basic block argument to set!")(static_cast <bool> (Found && "Invalid basic block argument to set!" ) ? void (0) : __assert_fail ("Found && \"Invalid basic block argument to set!\"" , "llvm/include/llvm/IR/Instructions.h", 2854, __extension__ __PRETTY_FUNCTION__ )); | |||
2855 | } | |||
2856 | ||||
2857 | /// If the specified PHI node always merges together the | |||
2858 | /// same value, return the value, otherwise return null. | |||
2859 | Value *hasConstantValue() const; | |||
2860 | ||||
2861 | /// Whether the specified PHI node always merges | |||
2862 | /// together the same value, assuming undefs are equal to a unique | |||
2863 | /// non-undef value. | |||
2864 | bool hasConstantOrUndefValue() const; | |||
2865 | ||||
2866 | /// If the PHI node is complete which means all of its parent's predecessors | |||
2867 | /// have incoming value in this PHI, return true, otherwise return false. | |||
2868 | bool isComplete() const { | |||
2869 | return llvm::all_of(predecessors(getParent()), | |||
2870 | [this](const BasicBlock *Pred) { | |||
2871 | return getBasicBlockIndex(Pred) >= 0; | |||
2872 | }); | |||
2873 | } | |||
2874 | ||||
2875 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
2876 | static bool classof(const Instruction *I) { | |||
2877 | return I->getOpcode() == Instruction::PHI; | |||
2878 | } | |||
2879 | static bool classof(const Value *V) { | |||
2880 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
2881 | } | |||
2882 | ||||
2883 | private: | |||
2884 | void growOperands(); | |||
2885 | }; | |||
2886 | ||||
2887 | template <> | |||
2888 | struct OperandTraits<PHINode> : public HungoffOperandTraits<2> { | |||
2889 | }; | |||
2890 | ||||
2891 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits <PHINode>::op_begin(this); } PHINode::const_op_iterator PHINode::op_begin() const { return OperandTraits<PHINode> ::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator PHINode::op_end() { return OperandTraits<PHINode>::op_end (this); } PHINode::const_op_iterator PHINode::op_end() const { return OperandTraits<PHINode>::op_end(const_cast<PHINode *>(this)); } Value *PHINode::getOperand(unsigned i_nocapture ) const { (static_cast <bool> (i_nocapture < OperandTraits <PHINode>::operands(this) && "getOperand() out of range!" ) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 2891, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<PHINode >::op_begin(const_cast<PHINode*>(this))[i_nocapture] .get()); } void PHINode::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<PHINode>::operands(this) && "setOperand() out of range!" ) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 2891, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<PHINode>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned PHINode::getNumOperands() const { return OperandTraits<PHINode>::operands(this); } template <int Idx_nocapture> Use &PHINode::Op() { return this ->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture > const Use &PHINode::Op() const { return this->OpFrom <Idx_nocapture>(this); } | |||
2892 | ||||
2893 | //===----------------------------------------------------------------------===// | |||
2894 | // LandingPadInst Class | |||
2895 | //===----------------------------------------------------------------------===// | |||
2896 | ||||
2897 | //===--------------------------------------------------------------------------- | |||
2898 | /// The landingpad instruction holds all of the information | |||
2899 | /// necessary to generate correct exception handling. The landingpad instruction | |||
2900 | /// cannot be moved from the top of a landing pad block, which itself is | |||
2901 | /// accessible only from the 'unwind' edge of an invoke. This uses the | |||
2902 | /// SubclassData field in Value to store whether or not the landingpad is a | |||
2903 | /// cleanup. | |||
2904 | /// | |||
class LandingPadInst : public Instruction {
  // Subclass-data bit 0 records whether this landingpad is a cleanup
  // (see isCleanup/setCleanup below).
  using CleanupField = BoolBitfieldElementT<0>;

  /// The number of operands actually allocated.  NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  LandingPadInst(const LandingPadInst &LP);

public:
  enum ClauseType { Catch, Filter };

private:
  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, Instruction *InsertBefore);
  explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues,
                          const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Allocate space for exactly zero operands.  Clause operands are stored
  // out-of-line ("hung off" the instruction) and grown on demand via
  // growOperands; see the HungoffOperandTraits specialization below.
  void *operator new(size_t S) { return User::operator new(S); }

  void growOperands(unsigned Size);
  void init(unsigned NumReservedValues, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LandingPadInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Constructors - NumReservedClauses is a hint for the number of incoming
  /// clauses that this landingpad will have (use 0 if you really have no idea).
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr = "",
                                Instruction *InsertBefore = nullptr);
  static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
                                const Twine &NameStr, BasicBlock *InsertAtEnd);

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  /// Return 'true' if this landingpad instruction is a
  /// cleanup. I.e., it should be run when unwinding even if its landing pad
  /// doesn't catch the exception.
  bool isCleanup() const { return getSubclassData<CleanupField>(); }

  /// Indicate that this landingpad instruction is a cleanup.
  void setCleanup(bool V) { setSubclassData<CleanupField>(V); }

  /// Add a catch or filter clause to the landing pad.
  void addClause(Constant *ClauseVal);

  /// Get the value of the clause at index Idx. Use isCatch/isFilter to
  /// determine what type of clause this is.
  Constant *getClause(unsigned Idx) const {
    return cast<Constant>(getOperandList()[Idx]);
  }

  /// Return 'true' if the clause and index Idx is a catch clause.
  // A filter clause is the only clause kind whose operand has array type,
  // so "not an array" identifies a catch clause.
  bool isCatch(unsigned Idx) const {
    return !isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Return 'true' if the clause and index Idx is a filter clause.
  bool isFilter(unsigned Idx) const {
    return isa<ArrayType>(getOperandList()[Idx]->getType());
  }

  /// Get the number of clauses for this landing pad.
  // Every operand of a landingpad is a clause, so the counts coincide.
  unsigned getNumClauses() const { return getNumOperands(); }

  /// Grow the size of the operand list to accommodate the new
  /// number of clauses.
  void reserveClauses(unsigned Size) { growOperands(Size); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::LandingPad;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
2991 | ||||
// LandingPadInst keeps its variable-length clause list out-of-line
// ("hung off" the instruction), initially reserving space for 1 operand.
template <>
struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
};

// Instantiate the operand accessors declared inside the class by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value).
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)
2997 | ||||
2998 | //===----------------------------------------------------------------------===// | |||
2999 | // ReturnInst Class | |||
3000 | //===----------------------------------------------------------------------===// | |||
3001 | ||||
3002 | //===--------------------------------------------------------------------------- | |||
3003 | /// Return a value (possibly void), from a function. Execution | |||
3004 | /// does not continue in this function any longer. | |||
3005 | /// | |||
class ReturnInst : public Instruction {
  ReturnInst(const ReturnInst &RI);

private:
  // ReturnInst constructors:
  // ReturnInst()                  - 'ret void' instruction
  // ReturnInst(    null)          - 'ret void' instruction
  // ReturnInst(Value* X)          - 'ret X'    instruction
  // ReturnInst(    null, Inst *I) - 'ret void' instruction, insert before I
  // ReturnInst(Value* X, Inst *I) - 'ret X'    instruction, insert before I
  // ReturnInst(    null, BB *B)   - 'ret void' instruction, insert @ end of B
  // ReturnInst(Value* X, BB *B)   - 'ret X'    instruction, insert @ end of B
  //
  // NOTE: If the Value* passed is of type void then the constructor behaves as
  // if it was passed NULL.
  explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
                      Instruction *InsertBefore = nullptr);
  ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
  explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ReturnInst *cloneImpl() const;

public:
  // `new(!!retVal)` allocates 1 operand slot when a return value is present,
  // 0 for 'ret void' (see VariadicOperandTraits below).
  static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
                            Instruction *InsertBefore = nullptr) {
    return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
  }

  static ReturnInst* Create(LLVMContext &C, Value *retVal,
                            BasicBlock *InsertAtEnd) {
    return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
  }

  static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
    return new(0) ReturnInst(C, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  /// Convenience accessor. Returns null if there is no return value.
  Value *getReturnValue() const {
    return getNumOperands() != 0 ? getOperand(0) : nullptr;
  }

  // 'ret' terminates the function; control never transfers to a successor.
  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Ret);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Private and unreachable: ReturnInst has no successors, but these members
  // must exist to satisfy the generic terminator accessors.
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("ReturnInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *B) {
    llvm_unreachable("ReturnInst has no successors!");
  }
};
3074 | ||||
// ReturnInst has a variable operand count (0 for 'ret void', 1 otherwise),
// chosen at allocation time by Create's placement-new argument.
template <>
struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
};

// Instantiate the operand accessors declared inside the class by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value).
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)
3080 | ||||
3081 | //===----------------------------------------------------------------------===// | |||
3082 | // BranchInst Class | |||
3083 | //===----------------------------------------------------------------------===// | |||
3084 | ||||
3085 | //===--------------------------------------------------------------------------- | |||
3086 | /// Conditional or Unconditional Branch instruction. | |||
3087 | /// | |||
class BranchInst : public Instruction {
  /// Ops list - Branches are strange.  The operands are ordered:
  ///  [Cond, FalseDest,] TrueDest.  This makes some accessors faster because
  /// they don't have to check for cond/uncond branchness. These are mostly
  /// accessed relative from op_end().
  BranchInst(const BranchInst &BI);
  // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
  // BranchInst(BB *B)                           - 'br B'
  // BranchInst(BB* T, BB *F, Value *C)          - 'br C, T, F'
  // BranchInst(BB* B, Inst *I)                  - 'br B'        insert before I
  // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
  // BranchInst(BB* B, BB *I)                    - 'br B'        insert at end
  // BranchInst(BB* T, BB *F, Value *C, BB *I)   - 'br C, T, F', insert at end
  explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             Instruction *InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             BasicBlock *InsertAtEnd);

  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  BranchInst *cloneImpl() const;

public:
  /// Iterator type that casts an operand to a basic block.
  ///
  /// This only makes sense because the successors are stored as adjacent
  /// operands for branch instructions.
  struct succ_op_iterator
      : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
                              std::random_access_iterator_tag, BasicBlock *,
                              ptrdiff_t, BasicBlock *, BasicBlock *> {
    explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}

    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    BasicBlock *operator->() const { return operator*(); }
  };

  /// The const version of `succ_op_iterator`.
  struct const_succ_op_iterator
      : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
                              std::random_access_iterator_tag,
                              const BasicBlock *, ptrdiff_t, const BasicBlock *,
                              const BasicBlock *> {
    explicit const_succ_op_iterator(const_value_op_iterator I)
        : iterator_adaptor_base(I) {}

    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    const BasicBlock *operator->() const { return operator*(); }
  };

  // `new(1)` / `new(3)` pick the operand count: 1 for an unconditional branch
  // ([TrueDest]), 3 for a conditional one ([Cond, FalseDest, TrueDest]).
  static BranchInst *Create(BasicBlock *IfTrue,
                            Instruction *InsertBefore = nullptr) {
    return new(1) BranchInst(IfTrue, InsertBefore);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond, Instruction *InsertBefore = nullptr) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
    return new(1) BranchInst(IfTrue, InsertAtEnd);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond, BasicBlock *InsertAtEnd) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  // Branch kind is encoded purely in the operand count (see Create above).
  bool isUnconditional() const { return getNumOperands() == 1; }
  bool isConditional()   const { return getNumOperands() == 3; }

  // Op<-3>() is the condition operand, counting back from op_end(); it only
  // exists for the 3-operand (conditional) form.
  Value *getCondition() const {
    assert(isConditional() && "Cannot get condition of an uncond branch!");
    return Op<-3>();
  }

  void setCondition(Value *V) {
    assert(isConditional() && "Cannot set condition of unconditional branch!");
    Op<-3>() = V;
  }

  unsigned getNumSuccessors() const { return 1+isConditional(); }

  // Successor 0 is the last operand (TrueDest); successor 1, when present,
  // is the one before it (FalseDest) — hence the negative relative indexing.
  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < getNumSuccessors() && "Successor # out of range for Branch!");
    return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for Branch!");
    *(&Op<-1>() - idx) = NewSucc;
  }

  /// Swap the successors of this branch instruction.
  ///
  /// Swaps the successors of the branch instruction. This also swaps any
  /// branch weight metadata associated with the instruction so that it
  /// continues to map correctly to each operand.
  void swapSuccessors();

  // Skips the condition operand (if any) so the range covers only the
  // successor blocks.
  iterator_range<succ_op_iterator> successors() {
    return make_range(
        succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
        succ_op_iterator(value_op_end()));
  }

  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(
                          std::next(value_op_begin(), isConditional() ? 1 : 0)),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Br);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
3218 | ||||
// BranchInst has a variable operand count (1 or 3) with a minimum of 1,
// chosen at allocation time by Create's placement-new argument.
template <>
struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
};

// Instantiate the operand accessors declared inside the class by
// DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value).
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)
3224 | ||||
3225 | //===----------------------------------------------------------------------===// | |||
3226 | // SwitchInst Class | |||
3227 | //===----------------------------------------------------------------------===// | |||
3228 | ||||
3229 | //===--------------------------------------------------------------------------- | |||
3230 | /// Multiway switch | |||
3231 | /// | |||
3232 | class SwitchInst : public Instruction { | |||
3233 | unsigned ReservedSpace; | |||
3234 | ||||
3235 | // Operand[0] = Value to switch on | |||
3236 | // Operand[1] = Default basic block destination | |||
3237 | // Operand[2n ] = Value to match | |||
3238 | // Operand[2n+1] = BasicBlock to go to on match | |||
3239 | SwitchInst(const SwitchInst &SI); | |||
3240 | ||||
3241 | /// Create a new switch instruction, specifying a value to switch on and a | |||
3242 | /// default destination. The number of additional cases can be specified here | |||
3243 | /// to make memory allocation more efficient. This constructor can also | |||
3244 | /// auto-insert before another instruction. | |||
3245 | SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, | |||
3246 | Instruction *InsertBefore); | |||
3247 | ||||
3248 | /// Create a new switch instruction, specifying a value to switch on and a | |||
3249 | /// default destination. The number of additional cases can be specified here | |||
3250 | /// to make memory allocation more efficient. This constructor also | |||
3251 | /// auto-inserts at the end of the specified BasicBlock. | |||
3252 | SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, | |||
3253 | BasicBlock *InsertAtEnd); | |||
3254 | ||||
3255 | // allocate space for exactly zero operands | |||
3256 | void *operator new(size_t S) { return User::operator new(S); } | |||
3257 | ||||
3258 | void init(Value *Value, BasicBlock *Default, unsigned NumReserved); | |||
3259 | void growOperands(); | |||
3260 | ||||
3261 | protected: | |||
3262 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
3263 | friend class Instruction; | |||
3264 | ||||
3265 | SwitchInst *cloneImpl() const; | |||
3266 | ||||
3267 | public: | |||
3268 | void operator delete(void *Ptr) { User::operator delete(Ptr); } | |||
3269 | ||||
3270 | // -2 | |||
3271 | static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1); | |||
3272 | ||||
3273 | template <typename CaseHandleT> class CaseIteratorImpl; | |||
3274 | ||||
  /// A handle to a particular switch case. It exposes a convenient interface
  /// to both the case value and the successor block.
  ///
  /// We define this as a template and instantiate it to form both a const and
  /// non-const handle.
  template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
  class CaseHandleImpl {
    // Directly befriend both const and non-const iterators.
    friend class SwitchInst::CaseIteratorImpl<
        CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;

  protected:
    // Expose the switch type we're parameterized with to the iterator.
    using SwitchInstType = SwitchInstT;

    // The switch this handle points into, and the case number within it.
    // Index == DefaultPseudoIndex (-2) denotes the default destination.
    SwitchInstT *SI;
    ptrdiff_t Index;

    CaseHandleImpl() = default;
    CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}

  public:
    /// Resolves case value for current case.
    // Case N's value lives at operand 2 + 2N (operands 0/1 are the switched
    // value and the default destination; see the layout comment above).
    ConstantIntT *getCaseValue() const {
      assert((unsigned)Index < SI->getNumCases() &&
             "Index out the number of cases.");
      return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
    }

    /// Resolves successor for current case.
    BasicBlockT *getCaseSuccessor() const {
      assert(((unsigned)Index < SI->getNumCases() ||
              (unsigned)Index == DefaultPseudoIndex) &&
             "Index out the number of cases.");
      return SI->getSuccessor(getSuccessorIndex());
    }

    /// Returns number of current case.
    unsigned getCaseIndex() const { return Index; }

    /// Returns successor index for current case successor.
    // Successor 0 is the default destination; case N maps to successor N + 1.
    unsigned getSuccessorIndex() const {
      assert(((unsigned)Index == DefaultPseudoIndex ||
              (unsigned)Index < SI->getNumCases()) &&
             "Index out the number of cases.");
      return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
    }

    bool operator==(const CaseHandleImpl &RHS) const {
      assert(SI == RHS.SI && "Incompatible operators.");
      return Index == RHS.Index;
    }
  };
3328 | ||||
3329 | using ConstCaseHandle = | |||
3330 | CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>; | |||
3331 | ||||
  // Mutable case handle: extends the generic implementation with setters
  // that rewrite the underlying switch operands.
  class CaseHandle
      : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
    friend class SwitchInst::CaseIteratorImpl<CaseHandle>;

  public:
    CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}

    /// Sets the new value for current case.
    // Writes operand 2 + 2*Index, mirroring getCaseValue's layout.
    void setValue(ConstantInt *V) const {
      assert((unsigned)Index < SI->getNumCases() &&
             "Index out the number of cases.");
      SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
    }

    /// Sets the new successor for current case.
    void setSuccessor(BasicBlock *S) const {
      SI->setSuccessor(getSuccessorIndex(), S);
    }
  };
3351 | ||||
  /// Random-access iterator over the cases of a SwitchInst, parameterized on
  /// the (const or mutable) case-handle type it yields.
  template <typename CaseHandleT>
  class CaseIteratorImpl
      : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>,
                                    std::random_access_iterator_tag,
                                    const CaseHandleT> {
    using SwitchInstT = typename CaseHandleT::SwitchInstType;

    // The handle doubles as the iterator's state: it carries the SwitchInst
    // pointer and the current case index.
    CaseHandleT Case;

  public:
    /// Default constructed iterator is in an invalid state until assigned to
    /// a case for a particular switch.
    CaseIteratorImpl() = default;

    /// Initializes case iterator for given SwitchInst and for given
    /// case number.
    CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}

    /// Initializes case iterator for given SwitchInst and for given
    /// successor index.
    static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
                                               unsigned SuccessorIndex) {
      assert(SuccessorIndex < SI->getNumSuccessors() &&
             "Successor index # out of range!");
      // Successor 0 is the default destination, which maps to the special
      // DefaultPseudoIndex; explicit case successors are biased down by one.
      return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
                                 : CaseIteratorImpl(SI, DefaultPseudoIndex);
    }

    /// Support converting to the const variant. This will be a no-op for const
    /// variant.
    operator CaseIteratorImpl<ConstCaseHandle>() const {
      return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
    }

    CaseIteratorImpl &operator+=(ptrdiff_t N) {
      // Check index correctness after addition.
      // Note: Index == getNumCases() means end().
      assert(Case.Index + N >= 0 &&
             (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&
             "Case.Index out the number of cases.");
      Case.Index += N;
      return *this;
    }
    CaseIteratorImpl &operator-=(ptrdiff_t N) {
      // Check index correctness after subtraction.
      // Note: Case.Index == getNumCases() means end().
      assert(Case.Index - N >= 0 &&
             (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&
             "Case.Index out the number of cases.");
      Case.Index -= N;
      return *this;
    }
    ptrdiff_t operator-(const CaseIteratorImpl &RHS) const {
      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
      return Case.Index - RHS.Case.Index;
    }
    bool operator==(const CaseIteratorImpl &RHS) const {
      return Case == RHS.Case;
    }
    bool operator<(const CaseIteratorImpl &RHS) const {
      assert(Case.SI == RHS.Case.SI && "Incompatible operators.");
      return Case.Index < RHS.Case.Index;
    }
    // iterator_facade_base synthesizes the remaining random-access operators
    // (++, --, +, -, !=, >, <=, >=) from the minimal set above.
    const CaseHandleT &operator*() const { return Case; }
  };
3417 | ||||
3418 | using CaseIt = CaseIteratorImpl<CaseHandle>; | |||
3419 | using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>; | |||
3420 | ||||
  /// Construct a switch on \p Value with default destination \p Default,
  /// reserving space for \p NumCases cases; optionally inserted before
  /// \p InsertBefore.
  static SwitchInst *Create(Value *Value, BasicBlock *Default,
                            unsigned NumCases,
                            Instruction *InsertBefore = nullptr) {
    return new SwitchInst(Value, Default, NumCases, InsertBefore);
  }

  /// As above, but appends the new instruction to the end of \p InsertAtEnd.
  static SwitchInst *Create(Value *Value, BasicBlock *Default,
                            unsigned NumCases, BasicBlock *InsertAtEnd) {
    return new SwitchInst(Value, Default, NumCases, InsertAtEnd);
  }
3431 | ||||
  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  // Accessor Methods for Switch stmt
  // Operand layout: [0] = condition, [1] = default destination, then a
  // (case value, case successor) pair per explicit case.
  Value *getCondition() const { return getOperand(0); }
  void setCondition(Value *V) { setOperand(0, V); }

  BasicBlock *getDefaultDest() const {
    return cast<BasicBlock>(getOperand(1));
  }

  void setDefaultDest(BasicBlock *DefaultCase) {
    // Cast style mirrors the other successor setters in this file.
    setOperand(1, reinterpret_cast<Value*>(DefaultCase));
  }

  /// Return the number of 'cases' in this switch instruction, excluding the
  /// default case.
  unsigned getNumCases() const {
    // Two operands per case, minus one for the (condition, default) pair.
    return getNumOperands()/2 - 1;
  }
3452 | ||||
  /// Returns a read/write iterator that points to the first case in the
  /// SwitchInst.
  CaseIt case_begin() {
    return CaseIt(this, 0);
  }

  /// Returns a read-only iterator that points to the first case in the
  /// SwitchInst.
  ConstCaseIt case_begin() const {
    return ConstCaseIt(this, 0);
  }

  /// Returns a read/write iterator that points one past the last in the
  /// SwitchInst. Index getNumCases() is the end sentinel.
  CaseIt case_end() {
    return CaseIt(this, getNumCases());
  }

  /// Returns a read-only iterator that points one past the last in the
  /// SwitchInst.
  ConstCaseIt case_end() const {
    return ConstCaseIt(this, getNumCases());
  }

  /// Iteration adapter for range-for loops.
  iterator_range<CaseIt> cases() {
    return make_range(case_begin(), case_end());
  }

  /// Constant iteration adapter for range-for loops.
  iterator_range<ConstCaseIt> cases() const {
    return make_range(case_begin(), case_end());
  }

  /// Returns an iterator that points to the default case.
  /// Note: this iterator allows to resolve successor only. Attempt
  /// to resolve case value causes an assertion.
  /// Also note, that increment and decrement also causes an assertion and
  /// makes iterator invalid.
  CaseIt case_default() {
    return CaseIt(this, DefaultPseudoIndex);
  }
  ConstCaseIt case_default() const {
    return ConstCaseIt(this, DefaultPseudoIndex);
  }
3497 | } | |||
3498 | ||||
  /// Search all of the case values for the specified constant. If it is
  /// explicitly handled, return the case iterator of it, otherwise return
  /// default case iterator to indicate that it is handled by the default
  /// handler.
  CaseIt findCaseValue(const ConstantInt *C) {
    // Delegate to the const overload, then rebuild a mutable iterator from
    // the found case's index.
    return CaseIt(
        this,
        const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex());
  }
3508 | ConstCaseIt findCaseValue(const ConstantInt *C) const { | |||
3509 | ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) { | |||
3510 | return Case.getCaseValue() == C; | |||
3511 | }); | |||
3512 | if (I != case_end()) | |||
3513 | return I; | |||
3514 | ||||
3515 | return case_default(); | |||
3516 | } | |||
3517 | ||||
3518 | /// Finds the unique case value for a given successor. Returns null if the | |||
3519 | /// successor is not found, not unique, or is the default case. | |||
3520 | ConstantInt *findCaseDest(BasicBlock *BB) { | |||
3521 | if (BB == getDefaultDest()) | |||
3522 | return nullptr; | |||
3523 | ||||
3524 | ConstantInt *CI = nullptr; | |||
3525 | for (auto Case : cases()) { | |||
3526 | if (Case.getCaseSuccessor() != BB) | |||
3527 | continue; | |||
3528 | ||||
3529 | if (CI) | |||
3530 | return nullptr; // Multiple cases lead to BB. | |||
3531 | ||||
3532 | CI = Case.getCaseValue(); | |||
3533 | } | |||
3534 | ||||
3535 | return CI; | |||
3536 | } | |||
3537 | ||||
  /// Add an entry to the switch instruction.
  /// Note:
  /// This action invalidates case_end(). Old case_end() iterator will
  /// point to the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest);

  /// This method removes the specified case and its successor from the switch
  /// instruction. Note that this operation may reorder the remaining cases at
  /// index idx and above.
  /// Note:
  /// This action invalidates iterators for all cases following the one removed,
  /// including the case_end() iterator. It returns an iterator for the next
  /// case.
  CaseIt removeCase(CaseIt I);

  // Successor 0 is the default destination (operand 1); successors of the
  // explicit cases follow at the odd operand slots.
  unsigned getNumSuccessors() const { return getNumOperands()/2; }
  BasicBlock *getSuccessor(unsigned idx) const {
    assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!");
    return cast<BasicBlock>(getOperand(idx*2+1));
  }
  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for switch!");
    setOperand(idx * 2 + 1, NewSucc);
  }
3562 | ||||
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  // A Value is a SwitchInst iff it is an Instruction with the Switch opcode.
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Switch;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
3570 | }; | |||
3571 | ||||
/// A wrapper class to simplify modification of SwitchInst cases along with
/// their prof branch_weights metadata.
class SwitchInstProfUpdateWrapper {
  SwitchInst &SI;
  // Branch weights mirrored from SI's prof metadata, if present.
  // NOTE(review): presumably populated by init() and kept in sync by the
  // mutating members below — confirm against Instructions.cpp.
  Optional<SmallVector<uint32_t, 8> > Weights = None;
  // When set, the destructor writes Weights back to SI's metadata.
  bool Changed = false;

protected:
  static MDNode *getProfBranchWeightsMD(const SwitchInst &SI);

  MDNode *buildProfBranchWeightsMD();

  void init();

public:
  using CaseWeightOpt = Optional<uint32_t>;
  // Smart-pointer-like forwarding so the wrapper can be used in place of the
  // underlying SwitchInst.
  SwitchInst *operator->() { return &SI; }
  SwitchInst &operator*() { return SI; }
  operator SwitchInst *() { return &SI; }

  SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); }

  ~SwitchInstProfUpdateWrapper() {
    // Flush accumulated weight changes back into the prof metadata.
    if (Changed)
      SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD());
  }

  /// Delegate the call to the underlying SwitchInst::removeCase() and remove
  /// correspondent branch weight.
  SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I);

  /// Delegate the call to the underlying SwitchInst::addCase() and set the
  /// specified branch weight for the added case.
  void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W);

  /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark
  /// this object to not touch the underlying SwitchInst in destructor.
  SymbolTableList<Instruction>::iterator eraseFromParent();

  void setSuccessorWeight(unsigned idx, CaseWeightOpt W);
  CaseWeightOpt getSuccessorWeight(unsigned idx);

  static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx);
};
3616 | ||||
// SwitchInst uses hung-off operand storage (operands allocated separately
// from the instruction) with a minimum of 2 operands: the condition and the
// default destination.
template <>
struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)
3622 | ||||
3623 | //===----------------------------------------------------------------------===// | |||
3624 | // IndirectBrInst Class | |||
3625 | //===----------------------------------------------------------------------===// | |||
3626 | ||||
3627 | //===--------------------------------------------------------------------------- | |||
/// Indirect Branch Instruction.
///
class IndirectBrInst : public Instruction {
  // Number of operand slots allocated; presumably may exceed the number in
  // use so destinations can be added without reallocating every time (see
  // growOperands) — confirm in Instructions.cpp.
  unsigned ReservedSpace;

  // Operand[0] = Address to jump to
  // Operand[n+1] = n-th destination
  IndirectBrInst(const IndirectBrInst &IBI);

  /// Create a new indirectbr instruction, specifying an
  /// Address to jump to. The number of expected destinations can be specified
  /// here to make memory allocation more efficient. This constructor can also
  /// autoinsert before another instruction.
  IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore);

  /// Create a new indirectbr instruction, specifying an
  /// Address to jump to. The number of expected destinations can be specified
  /// here to make memory allocation more efficient. This constructor also
  /// autoinserts at the end of the specified BasicBlock.
  IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *Address, unsigned NumDests);
  void growOperands();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  IndirectBrInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Iterator type that casts an operand to a basic block.
  ///
  /// This only makes sense because the successors are stored as adjacent
  /// operands for indirectbr instructions.
  struct succ_op_iterator
      : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
                              std::random_access_iterator_tag, BasicBlock *,
                              ptrdiff_t, BasicBlock *, BasicBlock *> {
    explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}

    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    BasicBlock *operator->() const { return operator*(); }
  };

  /// The const version of `succ_op_iterator`.
  struct const_succ_op_iterator
      : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
                              std::random_access_iterator_tag,
                              const BasicBlock *, ptrdiff_t, const BasicBlock *,
                              const BasicBlock *> {
    explicit const_succ_op_iterator(const_value_op_iterator I)
        : iterator_adaptor_base(I) {}

    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    const BasicBlock *operator->() const { return operator*(); }
  };

  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                Instruction *InsertBefore = nullptr) {
    return new IndirectBrInst(Address, NumDests, InsertBefore);
  }

  static IndirectBrInst *Create(Value *Address, unsigned NumDests,
                                BasicBlock *InsertAtEnd) {
    return new IndirectBrInst(Address, NumDests, InsertAtEnd);
  }

  /// Provide fast operand accessors.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  // Accessor Methods for IndirectBrInst instruction.
  Value *getAddress() { return getOperand(0); }
  const Value *getAddress() const { return getOperand(0); }
  void setAddress(Value *V) { setOperand(0, V); }

  /// return the number of possible destinations in this
  /// indirectbr instruction.
  unsigned getNumDestinations() const { return getNumOperands()-1; }

  /// Return the specified destination.
  BasicBlock *getDestination(unsigned i) { return getSuccessor(i); }
  const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); }

  /// Add a destination.
  ///
  void addDestination(BasicBlock *Dest);

  /// This method removes the specified successor from the
  /// indirectbr instruction.
  void removeDestination(unsigned i);

  // Destinations start at operand 1; operand 0 is the jump address.
  unsigned getNumSuccessors() const { return getNumOperands()-1; }
  BasicBlock *getSuccessor(unsigned i) const {
    return cast<BasicBlock>(getOperand(i+1));
  }
  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    setOperand(i + 1, NewSucc);
  }

  // std::next skips operand 0 (the address) so only successor blocks are
  // iterated.
  iterator_range<succ_op_iterator> successors() {
    return make_range(succ_op_iterator(std::next(value_op_begin())),
                      succ_op_iterator(value_op_end()));
  }

  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(std::next(value_op_begin())),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::IndirectBr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
3751 | ||||
// IndirectBrInst also uses hung-off operand storage, with a minimum of 1
// operand: the jump address.
template <>
struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> {
};

DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)
3757 | ||||
3758 | //===----------------------------------------------------------------------===// | |||
3759 | // InvokeInst Class | |||
3760 | //===----------------------------------------------------------------------===// | |||
3761 | ||||
3762 | /// Invoke instruction. The SubclassData field is used to hold the | |||
3763 | /// calling convention of the call. | |||
3764 | /// | |||
3765 | class InvokeInst : public CallBase { | |||
3766 | /// The number of operands for this call beyond the called function, | |||
3767 | /// arguments, and operand bundles. | |||
3768 | static constexpr int NumExtraOperands = 2; | |||
3769 | ||||
3770 | /// The index from the end of the operand array to the normal destination. | |||
3771 | static constexpr int NormalDestOpEndIdx = -3; | |||
3772 | ||||
3773 | /// The index from the end of the operand array to the unwind destination. | |||
3774 | static constexpr int UnwindDestOpEndIdx = -2; | |||
3775 | ||||
3776 | InvokeInst(const InvokeInst &BI); | |||
3777 | ||||
3778 | /// Construct an InvokeInst given a range of arguments. | |||
3779 | /// | |||
3780 | /// Construct an InvokeInst from a range of arguments | |||
3781 | inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, | |||
3782 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
3783 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
3784 | const Twine &NameStr, Instruction *InsertBefore); | |||
3785 | ||||
3786 | inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, | |||
3787 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
3788 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
3789 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
3790 | ||||
3791 | void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, | |||
3792 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
3793 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); | |||
3794 | ||||
  /// Compute the number of operands to allocate.
  static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
    // We need one operand for the called function, plus our extra operands and
    // the input operand counts provided.
    // NumExtraOperands covers the normal and unwind destinations.
    return 1 + NumExtraOperands + NumArgs + NumBundleInputs;
  }
3801 | ||||
3802 | protected: | |||
3803 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
3804 | friend class Instruction; | |||
3805 | ||||
3806 | InvokeInst *cloneImpl() const; | |||
3807 | ||||
3808 | public: | |||
  // Factory methods. The total operand count (callee + args + bundle inputs +
  // the two destinations) is computed up front because the operand list and
  // bundle descriptors are co-allocated with the instruction via placement
  // new.
  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    int NumOperands = ComputeNumOperands(Args.size());
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
                   NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = None,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
                   NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands = ComputeNumOperands(Args.size());
    return new (NumOperands)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, None, NumOperands,
                   NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    int NumOperands =
        ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
    unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);

    return new (NumOperands, DescriptorBytes)
        InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands,
                   NameStr, InsertAtEnd);
  }

  // FunctionCallee convenience overloads: unpack the callee's function type
  // and value, then forward to the overloads above.
  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr,
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, None, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles = None,
                            const Twine &NameStr = "",
                            Instruction *InsertBefore = nullptr) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertBefore);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, NameStr, InsertAtEnd);
  }

  static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal,
                            BasicBlock *IfException, ArrayRef<Value *> Args,
                            ArrayRef<OperandBundleDef> Bundles,
                            const Twine &NameStr, BasicBlock *InsertAtEnd) {
    return Create(Func.getFunctionType(), Func.getCallee(), IfNormal,
                  IfException, Args, Bundles, NameStr, InsertAtEnd);
  }
3886 | ||||
3887 | /// Create a clone of \p II with a different set of operand bundles and | |||
3888 | /// insert it before \p InsertPt. | |||
3889 | /// | |||
3890 | /// The returned invoke instruction is identical to \p II in every way except | |||
3891 | /// that the operand bundles for the new instruction are set to the operand | |||
3892 | /// bundles in \p Bundles. | |||
3893 | static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles, | |||
3894 | Instruction *InsertPt = nullptr); | |||
3895 | ||||
3896 | // get*Dest - Return the destination basic blocks... | |||
3897 | BasicBlock *getNormalDest() const { | |||
3898 | return cast<BasicBlock>(Op<NormalDestOpEndIdx>()); | |||
3899 | } | |||
  /// Return the successor executed when the callee unwinds (the landing pad
  /// block).
  BasicBlock *getUnwindDest() const {
    return cast<BasicBlock>(Op<UnwindDestOpEndIdx>());
  }
  /// Replace the normal-return successor.
  void setNormalDest(BasicBlock *B) {
    // reinterpret_cast rather than a checked cast — presumably because
    // BasicBlock may be incomplete at this point in the header; TODO confirm.
    Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B);
  }
  /// Replace the unwind (exceptional) successor.
  void setUnwindDest(BasicBlock *B) {
    Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B);
  }
3909 | ||||
  /// Get the landingpad instruction from the landing pad
  /// block (the unwind destination).
  LandingPadInst *getLandingPadInst() const;
3913 | ||||
  /// Return the i-th successor: 0 is the normal destination, 1 is the unwind
  /// destination.
  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < 2 && "Successor # out of range for invoke!");
    return i == 0 ? getNormalDest() : getUnwindDest();
  }
3918 | ||||
  /// Set the i-th successor: 0 is the normal destination, 1 is the unwind
  /// destination.
  void setSuccessor(unsigned i, BasicBlock *NewSucc) {
    assert(i < 2 && "Successor # out of range for invoke!");
    if (i == 0)
      setNormalDest(NewSucc);
    else
      setUnwindDest(NewSucc);
  }
3926 | ||||
3927 | unsigned getNumSuccessors() const { return 2; } | |||
3928 | ||||
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Invoke);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
3936 | ||||
private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
3944 | }; | |||
3945 | ||||
// Inline constructor (insert-before form). The operand array is co-allocated
// immediately before the instruction, so the first use is NumOperands slots
// before op_end(this); init() then fills in callee, args, bundles and the two
// destination blocks.
InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertBefore) {
  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}
3955 | ||||
// Inline constructor (insert-at-end form); identical to the insert-before
// form except for the insertion point passed to CallBase.
InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal,
                       BasicBlock *IfException, ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Invoke,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertAtEnd) {
  init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr);
}
3965 | ||||
3966 | //===----------------------------------------------------------------------===// | |||
3967 | // CallBrInst Class | |||
3968 | //===----------------------------------------------------------------------===// | |||
3969 | ||||
3970 | /// CallBr instruction, tracking function calls that may not return control but | |||
3971 | /// instead transfer it to a third location. The SubclassData field is used to | |||
3972 | /// hold the calling convention of the call. | |||
3973 | /// | |||
3974 | class CallBrInst : public CallBase { | |||
3975 | ||||
3976 | unsigned NumIndirectDests; | |||
3977 | ||||
3978 | CallBrInst(const CallBrInst &BI); | |||
3979 | ||||
3980 | /// Construct a CallBrInst given a range of arguments. | |||
3981 | /// | |||
3982 | /// Construct a CallBrInst from a range of arguments | |||
3983 | inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, | |||
3984 | ArrayRef<BasicBlock *> IndirectDests, | |||
3985 | ArrayRef<Value *> Args, | |||
3986 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
3987 | const Twine &NameStr, Instruction *InsertBefore); | |||
3988 | ||||
3989 | inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, | |||
3990 | ArrayRef<BasicBlock *> IndirectDests, | |||
3991 | ArrayRef<Value *> Args, | |||
3992 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
3993 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
3994 | ||||
3995 | void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest, | |||
3996 | ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, | |||
3997 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); | |||
3998 | ||||
3999 | /// Should the Indirect Destinations change, scan + update the Arg list. | |||
4000 | void updateArgBlockAddresses(unsigned i, BasicBlock *B); | |||
4001 | ||||
4002 | /// Compute the number of operands to allocate. | |||
4003 | static int ComputeNumOperands(int NumArgs, int NumIndirectDests, | |||
4004 | int NumBundleInputs = 0) { | |||
4005 | // We need one operand for the called function, plus our extra operands and | |||
4006 | // the input operand counts provided. | |||
4007 | return 2 + NumIndirectDests + NumArgs + NumBundleInputs; | |||
4008 | } | |||
4009 | ||||
4010 | protected: | |||
4011 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4012 | friend class Instruction; | |||
4013 | ||||
4014 | CallBrInst *cloneImpl() const; | |||
4015 | ||||
4016 | public: | |||
4017 | static CallBrInst *Create(FunctionType *Ty, Value *Func, | |||
4018 | BasicBlock *DefaultDest, | |||
4019 | ArrayRef<BasicBlock *> IndirectDests, | |||
4020 | ArrayRef<Value *> Args, const Twine &NameStr, | |||
4021 | Instruction *InsertBefore = nullptr) { | |||
4022 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); | |||
4023 | return new (NumOperands) | |||
4024 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, | |||
4025 | NumOperands, NameStr, InsertBefore); | |||
4026 | } | |||
4027 | ||||
4028 | static CallBrInst *Create(FunctionType *Ty, Value *Func, | |||
4029 | BasicBlock *DefaultDest, | |||
4030 | ArrayRef<BasicBlock *> IndirectDests, | |||
4031 | ArrayRef<Value *> Args, | |||
4032 | ArrayRef<OperandBundleDef> Bundles = None, | |||
4033 | const Twine &NameStr = "", | |||
4034 | Instruction *InsertBefore = nullptr) { | |||
4035 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), | |||
4036 | CountBundleInputs(Bundles)); | |||
4037 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); | |||
4038 | ||||
4039 | return new (NumOperands, DescriptorBytes) | |||
4040 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, | |||
4041 | NumOperands, NameStr, InsertBefore); | |||
4042 | } | |||
4043 | ||||
4044 | static CallBrInst *Create(FunctionType *Ty, Value *Func, | |||
4045 | BasicBlock *DefaultDest, | |||
4046 | ArrayRef<BasicBlock *> IndirectDests, | |||
4047 | ArrayRef<Value *> Args, const Twine &NameStr, | |||
4048 | BasicBlock *InsertAtEnd) { | |||
4049 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); | |||
4050 | return new (NumOperands) | |||
4051 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, None, | |||
4052 | NumOperands, NameStr, InsertAtEnd); | |||
4053 | } | |||
4054 | ||||
4055 | static CallBrInst *Create(FunctionType *Ty, Value *Func, | |||
4056 | BasicBlock *DefaultDest, | |||
4057 | ArrayRef<BasicBlock *> IndirectDests, | |||
4058 | ArrayRef<Value *> Args, | |||
4059 | ArrayRef<OperandBundleDef> Bundles, | |||
4060 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
4061 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), | |||
4062 | CountBundleInputs(Bundles)); | |||
4063 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); | |||
4064 | ||||
4065 | return new (NumOperands, DescriptorBytes) | |||
4066 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, | |||
4067 | NumOperands, NameStr, InsertAtEnd); | |||
4068 | } | |||
4069 | ||||
4070 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, | |||
4071 | ArrayRef<BasicBlock *> IndirectDests, | |||
4072 | ArrayRef<Value *> Args, const Twine &NameStr, | |||
4073 | Instruction *InsertBefore = nullptr) { | |||
4074 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, | |||
4075 | IndirectDests, Args, NameStr, InsertBefore); | |||
4076 | } | |||
4077 | ||||
4078 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, | |||
4079 | ArrayRef<BasicBlock *> IndirectDests, | |||
4080 | ArrayRef<Value *> Args, | |||
4081 | ArrayRef<OperandBundleDef> Bundles = None, | |||
4082 | const Twine &NameStr = "", | |||
4083 | Instruction *InsertBefore = nullptr) { | |||
4084 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, | |||
4085 | IndirectDests, Args, Bundles, NameStr, InsertBefore); | |||
4086 | } | |||
4087 | ||||
4088 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, | |||
4089 | ArrayRef<BasicBlock *> IndirectDests, | |||
4090 | ArrayRef<Value *> Args, const Twine &NameStr, | |||
4091 | BasicBlock *InsertAtEnd) { | |||
4092 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, | |||
4093 | IndirectDests, Args, NameStr, InsertAtEnd); | |||
4094 | } | |||
4095 | ||||
4096 | static CallBrInst *Create(FunctionCallee Func, | |||
4097 | BasicBlock *DefaultDest, | |||
4098 | ArrayRef<BasicBlock *> IndirectDests, | |||
4099 | ArrayRef<Value *> Args, | |||
4100 | ArrayRef<OperandBundleDef> Bundles, | |||
4101 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
4102 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, | |||
4103 | IndirectDests, Args, Bundles, NameStr, InsertAtEnd); | |||
4104 | } | |||
4105 | ||||
4106 | /// Create a clone of \p CBI with a different set of operand bundles and | |||
4107 | /// insert it before \p InsertPt. | |||
4108 | /// | |||
4109 | /// The returned callbr instruction is identical to \p CBI in every way | |||
4110 | /// except that the operand bundles for the new instruction are set to the | |||
4111 | /// operand bundles in \p Bundles. | |||
4112 | static CallBrInst *Create(CallBrInst *CBI, | |||
4113 | ArrayRef<OperandBundleDef> Bundles, | |||
4114 | Instruction *InsertPt = nullptr); | |||
4115 | ||||
4116 | /// Return the number of callbr indirect dest labels. | |||
4117 | /// | |||
4118 | unsigned getNumIndirectDests() const { return NumIndirectDests; } | |||
4119 | ||||
4120 | /// getIndirectDestLabel - Return the i-th indirect dest label. | |||
4121 | /// | |||
4122 | Value *getIndirectDestLabel(unsigned i) const { | |||
4123 | assert(i < getNumIndirectDests() && "Out of bounds!")(static_cast <bool> (i < getNumIndirectDests() && "Out of bounds!") ? void (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\"" , "llvm/include/llvm/IR/Instructions.h", 4123, __extension__ __PRETTY_FUNCTION__ )); | |||
4124 | return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1); | |||
4125 | } | |||
4126 | ||||
4127 | Value *getIndirectDestLabelUse(unsigned i) const { | |||
4128 | assert(i < getNumIndirectDests() && "Out of bounds!")(static_cast <bool> (i < getNumIndirectDests() && "Out of bounds!") ? void (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\"" , "llvm/include/llvm/IR/Instructions.h", 4128, __extension__ __PRETTY_FUNCTION__ )); | |||
4129 | return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1); | |||
4130 | } | |||
4131 | ||||
4132 | // Return the destination basic blocks... | |||
4133 | BasicBlock *getDefaultDest() const { | |||
4134 | return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); | |||
4135 | } | |||
4136 | BasicBlock *getIndirectDest(unsigned i) const { | |||
4137 | return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); | |||
4138 | } | |||
4139 | SmallVector<BasicBlock *, 16> getIndirectDests() const { | |||
4140 | SmallVector<BasicBlock *, 16> IndirectDests; | |||
4141 | for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) | |||
4142 | IndirectDests.push_back(getIndirectDest(i)); | |||
4143 | return IndirectDests; | |||
4144 | } | |||
4145 | void setDefaultDest(BasicBlock *B) { | |||
4146 | *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); | |||
4147 | } | |||
4148 | void setIndirectDest(unsigned i, BasicBlock *B) { | |||
4149 | updateArgBlockAddresses(i, B); | |||
4150 | *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); | |||
4151 | } | |||
4152 | ||||
4153 | BasicBlock *getSuccessor(unsigned i) const { | |||
4154 | assert(i < getNumSuccessors() + 1 &&(static_cast <bool> (i < getNumSuccessors() + 1 && "Successor # out of range for callbr!") ? void (0) : __assert_fail ("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\"" , "llvm/include/llvm/IR/Instructions.h", 4155, __extension__ __PRETTY_FUNCTION__ )) | |||
4155 | "Successor # out of range for callbr!")(static_cast <bool> (i < getNumSuccessors() + 1 && "Successor # out of range for callbr!") ? void (0) : __assert_fail ("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\"" , "llvm/include/llvm/IR/Instructions.h", 4155, __extension__ __PRETTY_FUNCTION__ )); | |||
4156 | return i == 0 ? getDefaultDest() : getIndirectDest(i - 1); | |||
4157 | } | |||
4158 | ||||
4159 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { | |||
4160 | assert(i < getNumIndirectDests() + 1 &&(static_cast <bool> (i < getNumIndirectDests() + 1 && "Successor # out of range for callbr!") ? void (0) : __assert_fail ("i < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\"" , "llvm/include/llvm/IR/Instructions.h", 4161, __extension__ __PRETTY_FUNCTION__ )) | |||
4161 | "Successor # out of range for callbr!")(static_cast <bool> (i < getNumIndirectDests() + 1 && "Successor # out of range for callbr!") ? void (0) : __assert_fail ("i < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\"" , "llvm/include/llvm/IR/Instructions.h", 4161, __extension__ __PRETTY_FUNCTION__ )); | |||
4162 | return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc); | |||
4163 | } | |||
4164 | ||||
4165 | unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } | |||
4166 | ||||
4167 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4168 | static bool classof(const Instruction *I) { | |||
4169 | return (I->getOpcode() == Instruction::CallBr); | |||
4170 | } | |||
4171 | static bool classof(const Value *V) { | |||
4172 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4173 | } | |||
4174 | ||||
4175 | private: | |||
4176 | // Shadow Instruction::setInstructionSubclassData with a private forwarding | |||
4177 | // method so that subclasses cannot accidentally use it. | |||
4178 | template <typename Bitfield> | |||
4179 | void setSubclassData(typename Bitfield::Type Value) { | |||
4180 | Instruction::setSubclassData<Bitfield>(Value); | |||
4181 | } | |||
4182 | }; | |||
4183 | ||||
// Inline constructor (insert-before form). As with other CallBase subclasses,
// the operand array is co-allocated immediately before the instruction, so
// the first use is NumOperands slots before op_end(this).
CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                       ArrayRef<BasicBlock *> IndirectDests,
                       ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertBefore) {
  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
}
4194 | ||||
// Inline constructor (insert-at-end form); identical to the insert-before
// form except for the insertion point passed to CallBase.
CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest,
                       ArrayRef<BasicBlock *> IndirectDests,
                       ArrayRef<Value *> Args,
                       ArrayRef<OperandBundleDef> Bundles, int NumOperands,
                       const Twine &NameStr, BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::CallBr,
               OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands,
               InsertAtEnd) {
  init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr);
}
4205 | ||||
4206 | //===----------------------------------------------------------------------===// | |||
4207 | // ResumeInst Class | |||
4208 | //===----------------------------------------------------------------------===// | |||
4209 | ||||
//===---------------------------------------------------------------------------
/// Resume the propagation of an exception.
///
class ResumeInst : public Instruction {
  ResumeInst(const ResumeInst &RI);

  explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr);
  ResumeInst(Value *Exn, BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ResumeInst *cloneImpl() const;

public:
  /// Create a resume of exception value \p Exn, inserted before
  /// \p InsertBefore. A ResumeInst has exactly one operand (see the
  /// FixedNumOperandTraits specialization below), hence new(1).
  static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) {
    return new(1) ResumeInst(Exn, InsertBefore);
  }

  /// Create a resume of exception value \p Exn, appended to \p InsertAtEnd.
  static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) {
    return new(1) ResumeInst(Exn, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  /// Convenience accessor.
  Value *getValue() const { return Op<0>(); }

  /// resume is a terminator with no successors.
  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Resume;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Terminator interface: resume never branches, so these must never be
  // called; kept private and unreachable.
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("ResumeInst has no successors!");
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    llvm_unreachable("ResumeInst has no successors!");
  }
};
4259 | ||||
// A ResumeInst always has exactly one operand: the exception value.
template <>
struct OperandTraits<ResumeInst> :
    public FixedNumOperandTraits<ResumeInst, 1> {
};
4264 | ||||
4265 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)ResumeInst::op_iterator ResumeInst::op_begin() { return OperandTraits <ResumeInst>::op_begin(this); } ResumeInst::const_op_iterator ResumeInst::op_begin() const { return OperandTraits<ResumeInst >::op_begin(const_cast<ResumeInst*>(this)); } ResumeInst ::op_iterator ResumeInst::op_end() { return OperandTraits< ResumeInst>::op_end(this); } ResumeInst::const_op_iterator ResumeInst::op_end() const { return OperandTraits<ResumeInst >::op_end(const_cast<ResumeInst*>(this)); } Value *ResumeInst ::getOperand(unsigned i_nocapture) const { (static_cast <bool > (i_nocapture < OperandTraits<ResumeInst>::operands (this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 4265, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<ResumeInst >::op_begin(const_cast<ResumeInst*>(this))[i_nocapture ].get()); } void ResumeInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<ResumeInst>::operands(this) && "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 4265, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<ResumeInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned ResumeInst::getNumOperands() const { return OperandTraits<ResumeInst>::operands(this); } template <int Idx_nocapture> Use &ResumeInst::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &ResumeInst::Op() const { return this->OpFrom<Idx_nocapture>(this); } | |||
4266 | ||||
4267 | //===----------------------------------------------------------------------===// | |||
4268 | // CatchSwitchInst Class | |||
4269 | //===----------------------------------------------------------------------===// | |||
class CatchSwitchInst : public Instruction {
  /// Bit 0 of the subclass data records whether this catchswitch carries an
  /// unwind destination operand.
  using UnwindDestField = BoolBitfieldElementT<0>;

  /// The number of operands actually allocated.  NumOperands is
  /// the number actually in use.
  unsigned ReservedSpace;

  // Operand[0] = Outer scope
  // Operand[1] = Unwind block destination
  // Operand[n] = BasicBlock to go to on match
  CatchSwitchInst(const CatchSwitchInst &CSI);

  /// Create a new switch instruction, specifying a
  /// default destination.  The number of additional handlers can be specified
  /// here to make memory allocation more efficient.
  /// This constructor can also autoinsert before another instruction.
  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                  unsigned NumHandlers, const Twine &NameStr,
                  Instruction *InsertBefore);

  /// Create a new switch instruction, specifying a
  /// default destination.  The number of additional handlers can be specified
  /// here to make memory allocation more efficient.
  /// This constructor also autoinserts at the end of the specified BasicBlock.
  CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest,
                  unsigned NumHandlers, const Twine &NameStr,
                  BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands (operands are hung off
  // separately and can grow via growOperands)
  void *operator new(size_t S) { return User::operator new(S); }

  void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved);
  void growOperands(unsigned Size);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CatchSwitchInst *cloneImpl() const;

public:
  void operator delete(void *Ptr) { return User::operator delete(Ptr); }

  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumHandlers,
                                 const Twine &NameStr = "",
                                 Instruction *InsertBefore = nullptr) {
    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
                               InsertBefore);
  }

  static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
                                 unsigned NumHandlers, const Twine &NameStr,
                                 BasicBlock *InsertAtEnd) {
    return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr,
                               InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);

  // Accessor Methods for CatchSwitch stmt
  Value *getParentPad() const { return getOperand(0); }
  void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); }

  // Accessor Methods for CatchSwitch stmt
  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
  bool unwindsToCaller() const { return !hasUnwindDest(); }
  BasicBlock *getUnwindDest() const {
    // Operand 1 holds the unwind destination only when one exists.
    if (hasUnwindDest())
      return cast<BasicBlock>(getOperand(1));
    return nullptr;
  }
  void setUnwindDest(BasicBlock *UnwindDest) {
    assert(UnwindDest);
    assert(hasUnwindDest());
    setOperand(1, UnwindDest);
  }

  /// return the number of 'handlers' in this catchswitch
  /// instruction, except the default handler
  unsigned getNumHandlers() const {
    // Non-handler operands: the parent pad, plus the unwind dest if present.
    if (hasUnwindDest())
      return getNumOperands() - 2;
    return getNumOperands() - 1;
  }

private:
  static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); }
  static const BasicBlock *handler_helper(const Value *V) {
    return cast<BasicBlock>(V);
  }

public:
  using DerefFnTy = BasicBlock *(*)(Value *);
  using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>;
  using handler_range = iterator_range<handler_iterator>;
  using ConstDerefFnTy = const BasicBlock *(*)(const Value *);
  using const_handler_iterator =
      mapped_iterator<const_op_iterator, ConstDerefFnTy>;
  using const_handler_range = iterator_range<const_handler_iterator>;

  /// Returns an iterator that points to the first handler in CatchSwitchInst.
  handler_iterator handler_begin() {
    // Skip the parent pad (operand 0) and, if present, the unwind dest.
    op_iterator It = op_begin() + 1;
    if (hasUnwindDest())
      ++It;
    return handler_iterator(It, DerefFnTy(handler_helper));
  }

  /// Returns an iterator that points to the first handler in the
  /// CatchSwitchInst.
  const_handler_iterator handler_begin() const {
    const_op_iterator It = op_begin() + 1;
    if (hasUnwindDest())
      ++It;
    return const_handler_iterator(It, ConstDerefFnTy(handler_helper));
  }

  /// Returns a read-only iterator that points one past the last
  /// handler in the CatchSwitchInst.
  handler_iterator handler_end() {
    return handler_iterator(op_end(), DerefFnTy(handler_helper));
  }

  /// Returns an iterator that points one past the last handler in the
  /// CatchSwitchInst.
  const_handler_iterator handler_end() const {
    return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper));
  }

  /// iteration adapter for range-for loops.
  handler_range handlers() {
    return make_range(handler_begin(), handler_end());
  }

  /// iteration adapter for range-for loops.
  const_handler_range handlers() const {
    return make_range(handler_begin(), handler_end());
  }

  /// Add an entry to the switch instruction...
  /// Note:
  /// This action invalidates handler_end(). Old handler_end() iterator will
  /// point to the added handler.
  void addHandler(BasicBlock *Dest);

  void removeHandler(handler_iterator HI);

  /// Every operand except the parent pad (operand 0) is a successor: the
  /// optional unwind dest plus all handlers.
  unsigned getNumSuccessors() const { return getNumOperands() - 1; }
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx < getNumSuccessors() &&
           "Successor # out of range for catchswitch!");
    return cast<BasicBlock>(getOperand(Idx + 1));
  }
  void setSuccessor(unsigned Idx, BasicBlock *NewSucc) {
    assert(Idx < getNumSuccessors() &&
           "Successor # out of range for catchswitch!");
    setOperand(Idx + 1, NewSucc);
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::CatchSwitch;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
4439 | ||||
// CatchSwitchInst uses hung-off operand storage (it can grow); reserve 2.
template <>
struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {};
4442 | ||||
4443 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)CatchSwitchInst::op_iterator CatchSwitchInst::op_begin() { return OperandTraits<CatchSwitchInst>::op_begin(this); } CatchSwitchInst ::const_op_iterator CatchSwitchInst::op_begin() const { return OperandTraits<CatchSwitchInst>::op_begin(const_cast< CatchSwitchInst*>(this)); } CatchSwitchInst::op_iterator CatchSwitchInst ::op_end() { return OperandTraits<CatchSwitchInst>::op_end (this); } CatchSwitchInst::const_op_iterator CatchSwitchInst:: op_end() const { return OperandTraits<CatchSwitchInst>:: op_end(const_cast<CatchSwitchInst*>(this)); } Value *CatchSwitchInst ::getOperand(unsigned i_nocapture) const { (static_cast <bool > (i_nocapture < OperandTraits<CatchSwitchInst>:: operands(this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 4443, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<CatchSwitchInst >::op_begin(const_cast<CatchSwitchInst*>(this))[i_nocapture ].get()); } void CatchSwitchInst::setOperand(unsigned i_nocapture , Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 4443, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<CatchSwitchInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned CatchSwitchInst::getNumOperands () const { return OperandTraits<CatchSwitchInst>::operands (this); } template <int Idx_nocapture> Use &CatchSwitchInst ::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &CatchSwitchInst ::Op() const { return this->OpFrom<Idx_nocapture>(this ); } | |||
4444 | ||||
4445 | //===----------------------------------------------------------------------===// | |||
4446 | // CleanupPadInst Class | |||
4447 | //===----------------------------------------------------------------------===// | |||
4448 | class CleanupPadInst : public FuncletPadInst { | |||
4449 | private: | |||
4450 | explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, | |||
4451 | unsigned Values, const Twine &NameStr, | |||
4452 | Instruction *InsertBefore) | |||
4453 | : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, | |||
4454 | NameStr, InsertBefore) {} | |||
4455 | explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, | |||
4456 | unsigned Values, const Twine &NameStr, | |||
4457 | BasicBlock *InsertAtEnd) | |||
4458 | : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, | |||
4459 | NameStr, InsertAtEnd) {} | |||
4460 | ||||
4461 | public: | |||
4462 | static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args = None, | |||
4463 | const Twine &NameStr = "", | |||
4464 | Instruction *InsertBefore = nullptr) { | |||
4465 | unsigned Values = 1 + Args.size(); | |||
4466 | return new (Values) | |||
4467 | CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore); | |||
4468 | } | |||
4469 | ||||
4470 | static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args, | |||
4471 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
4472 | unsigned Values = 1 + Args.size(); | |||
4473 | return new (Values) | |||
4474 | CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd); | |||
4475 | } | |||
4476 | ||||
4477 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4478 | static bool classof(const Instruction *I) { | |||
4479 | return I->getOpcode() == Instruction::CleanupPad; | |||
4480 | } | |||
4481 | static bool classof(const Value *V) { | |||
4482 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4483 | } | |||
4484 | }; | |||
4485 | ||||
4486 | //===----------------------------------------------------------------------===// | |||
4487 | // CatchPadInst Class | |||
4488 | //===----------------------------------------------------------------------===// | |||
4489 | class CatchPadInst : public FuncletPadInst { | |||
4490 | private: | |||
4491 | explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, | |||
4492 | unsigned Values, const Twine &NameStr, | |||
4493 | Instruction *InsertBefore) | |||
4494 | : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, | |||
4495 | NameStr, InsertBefore) {} | |||
4496 | explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, | |||
4497 | unsigned Values, const Twine &NameStr, | |||
4498 | BasicBlock *InsertAtEnd) | |||
4499 | : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, | |||
4500 | NameStr, InsertAtEnd) {} | |||
4501 | ||||
4502 | public: | |||
4503 | static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, | |||
4504 | const Twine &NameStr = "", | |||
4505 | Instruction *InsertBefore = nullptr) { | |||
4506 | unsigned Values = 1 + Args.size(); | |||
4507 | return new (Values) | |||
4508 | CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore); | |||
4509 | } | |||
4510 | ||||
4511 | static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, | |||
4512 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
4513 | unsigned Values = 1 + Args.size(); | |||
4514 | return new (Values) | |||
4515 | CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd); | |||
4516 | } | |||
4517 | ||||
4518 | /// Convenience accessors | |||
4519 | CatchSwitchInst *getCatchSwitch() const { | |||
4520 | return cast<CatchSwitchInst>(Op<-1>()); | |||
4521 | } | |||
4522 | void setCatchSwitch(Value *CatchSwitch) { | |||
4523 | assert(CatchSwitch)(static_cast <bool> (CatchSwitch) ? void (0) : __assert_fail ("CatchSwitch", "llvm/include/llvm/IR/Instructions.h", 4523, __extension__ __PRETTY_FUNCTION__)); | |||
4524 | Op<-1>() = CatchSwitch; | |||
4525 | } | |||
4526 | ||||
4527 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4528 | static bool classof(const Instruction *I) { | |||
4529 | return I->getOpcode() == Instruction::CatchPad; | |||
4530 | } | |||
4531 | static bool classof(const Value *V) { | |||
4532 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4533 | } | |||
4534 | }; | |||
4535 | ||||
4536 | //===----------------------------------------------------------------------===// | |||
4537 | // CatchReturnInst Class | |||
4538 | //===----------------------------------------------------------------------===// | |||
4539 | ||||
/// CatchReturnInst - "catchret" terminator: leaves a catch funclet and
/// transfers control to a single successor block.
/// Operand layout: Op<0> = owning catchpad, Op<1> = successor block.
class CatchReturnInst : public Instruction {
  CatchReturnInst(const CatchReturnInst &RI);
  CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore);
  CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd);

  void init(Value *CatchPad, BasicBlock *BB);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CatchReturnInst *cloneImpl() const;

public:
  // Always allocates exactly two operand slots (catchpad + successor).
  static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
                                 Instruction *InsertBefore = nullptr) {
    assert(CatchPad)(static_cast <bool> (CatchPad) ? void (0) : __assert_fail ("CatchPad", "llvm/include/llvm/IR/Instructions.h", 4556, __extension__ __PRETTY_FUNCTION__));
    assert(BB)(static_cast <bool> (BB) ? void (0) : __assert_fail ("BB" , "llvm/include/llvm/IR/Instructions.h", 4557, __extension__ __PRETTY_FUNCTION__ ));
    return new (2) CatchReturnInst(CatchPad, BB, InsertBefore);
  }

  // As above, but appends the new instruction to InsertAtEnd.
  static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB,
                                 BasicBlock *InsertAtEnd) {
    assert(CatchPad)(static_cast <bool> (CatchPad) ? void (0) : __assert_fail ("CatchPad", "llvm/include/llvm/IR/Instructions.h", 4563, __extension__ __PRETTY_FUNCTION__));
    assert(BB)(static_cast <bool> (BB) ? void (0) : __assert_fail ("BB" , "llvm/include/llvm/IR/Instructions.h", 4564, __extension__ __PRETTY_FUNCTION__ ));
    return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const;

  /// Convenience accessors.
  CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); }
  void setCatchPad(CatchPadInst *CatchPad) {
    assert(CatchPad)(static_cast <bool> (CatchPad) ? void (0) : __assert_fail ("CatchPad", "llvm/include/llvm/IR/Instructions.h", 4574, __extension__ __PRETTY_FUNCTION__));
    Op<0>() = CatchPad;
  }

  BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); }
  void setSuccessor(BasicBlock *NewSucc) {
    assert(NewSucc)(static_cast <bool> (NewSucc) ? void (0) : __assert_fail ("NewSucc", "llvm/include/llvm/IR/Instructions.h", 4580, __extension__ __PRETTY_FUNCTION__));
    Op<1>() = NewSucc;
  }
  // A catchret always has exactly one successor.
  unsigned getNumSuccessors() const { return 1; }

  /// Get the parentPad of this catchret's catchpad's catchswitch.
  /// The successor block is implicitly a member of this funclet.
  Value *getCatchSwitchParentPad() const {
    return getCatchPad()->getCatchSwitch()->getParentPad();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::CatchRet);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Indexed successor accessors used by the generic terminator interface;
  // Idx must be 0 since there is only one successor.
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")(static_cast <bool> (Idx < getNumSuccessors() && "Successor # out of range for catchret!") ? void (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchret!\"" , "llvm/include/llvm/IR/Instructions.h", 4601, __extension__ __PRETTY_FUNCTION__ ));
    return getSuccessor();
  }

  void setSuccessor(unsigned Idx, BasicBlock *B) {
    assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")(static_cast <bool> (Idx < getNumSuccessors() && "Successor # out of range for catchret!") ? void (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchret!\"" , "llvm/include/llvm/IR/Instructions.h", 4606, __extension__ __PRETTY_FUNCTION__ ));
    setSuccessor(B);
  }
};
4610 | ||||
// CatchReturnInst always has exactly two operands (catchpad + successor).
template <>
struct OperandTraits<CatchReturnInst>
    : public FixedNumOperandTraits<CatchReturnInst, 2> {};

// Macro-expanded out-of-line definitions of CatchReturnInst's operand
// accessors declared above via DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)CatchReturnInst::op_iterator CatchReturnInst::op_begin() { return OperandTraits<CatchReturnInst>::op_begin(this); } CatchReturnInst ::const_op_iterator CatchReturnInst::op_begin() const { return OperandTraits<CatchReturnInst>::op_begin(const_cast< CatchReturnInst*>(this)); } CatchReturnInst::op_iterator CatchReturnInst ::op_end() { return OperandTraits<CatchReturnInst>::op_end (this); } CatchReturnInst::const_op_iterator CatchReturnInst:: op_end() const { return OperandTraits<CatchReturnInst>:: op_end(const_cast<CatchReturnInst*>(this)); } Value *CatchReturnInst ::getOperand(unsigned i_nocapture) const { (static_cast <bool > (i_nocapture < OperandTraits<CatchReturnInst>:: operands(this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 4615, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<CatchReturnInst >::op_begin(const_cast<CatchReturnInst*>(this))[i_nocapture ].get()); } void CatchReturnInst::setOperand(unsigned i_nocapture , Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 4615, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<CatchReturnInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned CatchReturnInst::getNumOperands () const { return OperandTraits<CatchReturnInst>::operands (this); } template <int Idx_nocapture> Use &CatchReturnInst ::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &CatchReturnInst ::Op() const { return this->OpFrom<Idx_nocapture>(this ); }
4616 | ||||
4617 | //===----------------------------------------------------------------------===// | |||
4618 | // CleanupReturnInst Class | |||
4619 | //===----------------------------------------------------------------------===// | |||
4620 | ||||
/// CleanupReturnInst - "cleanupret" terminator: leaves a cleanup funclet and
/// either continues unwinding to an explicit destination block or unwinds to
/// the caller. Operand layout: Op<0> = cleanuppad, optional Op<1> = unwind
/// destination.
class CleanupReturnInst : public Instruction {
  // Bit 0 of the instruction's subclass data records whether an unwind
  // destination operand is present.
  using UnwindDestField = BoolBitfieldElementT<0>;

private:
  CleanupReturnInst(const CleanupReturnInst &RI);
  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
                    Instruction *InsertBefore = nullptr);
  CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values,
                    BasicBlock *InsertAtEnd);

  void init(Value *CleanupPad, BasicBlock *UnwindBB);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  CleanupReturnInst *cloneImpl() const;

public:
  // Allocates one operand for the cleanuppad, plus a second one only when an
  // unwind destination is supplied.
  static CleanupReturnInst *Create(Value *CleanupPad,
                                   BasicBlock *UnwindBB = nullptr,
                                   Instruction *InsertBefore = nullptr) {
    assert(CleanupPad)(static_cast <bool> (CleanupPad) ? void (0) : __assert_fail ("CleanupPad", "llvm/include/llvm/IR/Instructions.h", 4643, __extension__ __PRETTY_FUNCTION__));
    unsigned Values = 1;
    if (UnwindBB)
      ++Values;
    return new (Values)
        CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore);
  }

  // As above, but appends the new instruction to InsertAtEnd.
  static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB,
                                   BasicBlock *InsertAtEnd) {
    assert(CleanupPad)(static_cast <bool> (CleanupPad) ? void (0) : __assert_fail ("CleanupPad", "llvm/include/llvm/IR/Instructions.h", 4653, __extension__ __PRETTY_FUNCTION__));
    unsigned Values = 1;
    if (UnwindBB)
      ++Values;
    return new (Values)
        CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const;

  // True iff the optional unwind-destination operand is present.
  bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); }
  bool unwindsToCaller() const { return !hasUnwindDest(); }

  /// Convenience accessor.
  CleanupPadInst *getCleanupPad() const {
    return cast<CleanupPadInst>(Op<0>());
  }
  void setCleanupPad(CleanupPadInst *CleanupPad) {
    assert(CleanupPad)(static_cast <bool> (CleanupPad) ? void (0) : __assert_fail ("CleanupPad", "llvm/include/llvm/IR/Instructions.h", 4672, __extension__ __PRETTY_FUNCTION__));
    Op<0>() = CleanupPad;
  }

  unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; }

  // Returns nullptr when this cleanupret unwinds to the caller.
  BasicBlock *getUnwindDest() const {
    return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr;
  }
  // Only valid when an unwind destination operand already exists.
  void setUnwindDest(BasicBlock *NewDest) {
    assert(NewDest)(static_cast <bool> (NewDest) ? void (0) : __assert_fail ("NewDest", "llvm/include/llvm/IR/Instructions.h", 4682, __extension__ __PRETTY_FUNCTION__));
    assert(hasUnwindDest())(static_cast <bool> (hasUnwindDest()) ? void (0) : __assert_fail ("hasUnwindDest()", "llvm/include/llvm/IR/Instructions.h", 4683 , __extension__ __PRETTY_FUNCTION__));
    Op<1>() = NewDest;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::CleanupRet);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Indexed successor accessors used by the generic terminator interface.
  BasicBlock *getSuccessor(unsigned Idx) const {
    assert(Idx == 0)(static_cast <bool> (Idx == 0) ? void (0) : __assert_fail ("Idx == 0", "llvm/include/llvm/IR/Instructions.h", 4697, __extension__ __PRETTY_FUNCTION__));
    return getUnwindDest();
  }

  void setSuccessor(unsigned Idx, BasicBlock *B) {
    assert(Idx == 0)(static_cast <bool> (Idx == 0) ? void (0) : __assert_fail ("Idx == 0", "llvm/include/llvm/IR/Instructions.h", 4702, __extension__ __PRETTY_FUNCTION__));
    setUnwindDest(B);
  }

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};
4713 | ||||
// CleanupReturnInst has a variable operand count: at least 1 (the
// cleanuppad), plus an optional unwind destination.
template <>
struct OperandTraits<CleanupReturnInst>
    : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};

// Macro-expanded out-of-line definitions of CleanupReturnInst's operand
// accessors declared above via DECLARE_TRANSPARENT_OPERAND_ACCESSORS.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)CleanupReturnInst::op_iterator CleanupReturnInst::op_begin() { return OperandTraits<CleanupReturnInst>::op_begin(this ); } CleanupReturnInst::const_op_iterator CleanupReturnInst:: op_begin() const { return OperandTraits<CleanupReturnInst> ::op_begin(const_cast<CleanupReturnInst*>(this)); } CleanupReturnInst ::op_iterator CleanupReturnInst::op_end() { return OperandTraits <CleanupReturnInst>::op_end(this); } CleanupReturnInst:: const_op_iterator CleanupReturnInst::op_end() const { return OperandTraits <CleanupReturnInst>::op_end(const_cast<CleanupReturnInst *>(this)); } Value *CleanupReturnInst::getOperand(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 4718, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<CleanupReturnInst >::op_begin(const_cast<CleanupReturnInst*>(this))[i_nocapture ].get()); } void CleanupReturnInst::setOperand(unsigned i_nocapture , Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && "setOperand() out of range!") ? 
void (0) : __assert_fail ("i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 4718, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<CleanupReturnInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned CleanupReturnInst::getNumOperands () const { return OperandTraits<CleanupReturnInst>::operands (this); } template <int Idx_nocapture> Use &CleanupReturnInst ::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &CleanupReturnInst ::Op() const { return this->OpFrom<Idx_nocapture>(this ); }
4719 | ||||
4720 | //===----------------------------------------------------------------------===// | |||
4721 | // UnreachableInst Class | |||
4722 | //===----------------------------------------------------------------------===// | |||
4723 | ||||
4724 | //===--------------------------------------------------------------------------- | |||
4725 | /// This function has undefined behavior. In particular, the | |||
4726 | /// presence of this instruction indicates some higher level knowledge that the | |||
4727 | /// end of the block cannot be reached. | |||
4728 | /// | |||
class UnreachableInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  UnreachableInst *cloneImpl() const;

public:
  explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr);
  explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S, 0); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  // An unreachable terminator never transfers control anywhere.
  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Unreachable;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // These satisfy the generic terminator interface but must never be called;
  // any call aborts (there are no successors to index).
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!" , "llvm/include/llvm/IR/Instructions.h", 4756);
  }

  void setSuccessor(unsigned idx, BasicBlock *B) {
    llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!" , "llvm/include/llvm/IR/Instructions.h", 4760);
  }
};
4763 | ||||
4764 | //===----------------------------------------------------------------------===// | |||
4765 | // TruncInst Class | |||
4766 | //===----------------------------------------------------------------------===// | |||
4767 | ||||
4768 | /// This class represents a truncation of integer types. | |||
4769 | class TruncInst : public CastInst { | |||
4770 | protected: | |||
4771 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4772 | friend class Instruction; | |||
4773 | ||||
4774 | /// Clone an identical TruncInst | |||
4775 | TruncInst *cloneImpl() const; | |||
4776 | ||||
4777 | public: | |||
4778 | /// Constructor with insert-before-instruction semantics | |||
4779 | TruncInst( | |||
4780 | Value *S, ///< The value to be truncated | |||
4781 | Type *Ty, ///< The (smaller) type to truncate to | |||
4782 | const Twine &NameStr = "", ///< A name for the new instruction | |||
4783 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
4784 | ); | |||
4785 | ||||
4786 | /// Constructor with insert-at-end-of-block semantics | |||
4787 | TruncInst( | |||
4788 | Value *S, ///< The value to be truncated | |||
4789 | Type *Ty, ///< The (smaller) type to truncate to | |||
4790 | const Twine &NameStr, ///< A name for the new instruction | |||
4791 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
4792 | ); | |||
4793 | ||||
4794 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4795 | static bool classof(const Instruction *I) { | |||
4796 | return I->getOpcode() == Trunc; | |||
4797 | } | |||
4798 | static bool classof(const Value *V) { | |||
4799 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4800 | } | |||
4801 | }; | |||
4802 | ||||
4803 | //===----------------------------------------------------------------------===// | |||
4804 | // ZExtInst Class | |||
4805 | //===----------------------------------------------------------------------===// | |||
4806 | ||||
4807 | /// This class represents zero extension of integer types. | |||
4808 | class ZExtInst : public CastInst { | |||
4809 | protected: | |||
4810 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4811 | friend class Instruction; | |||
4812 | ||||
4813 | /// Clone an identical ZExtInst | |||
4814 | ZExtInst *cloneImpl() const; | |||
4815 | ||||
4816 | public: | |||
4817 | /// Constructor with insert-before-instruction semantics | |||
4818 | ZExtInst( | |||
4819 | Value *S, ///< The value to be zero extended | |||
4820 | Type *Ty, ///< The type to zero extend to | |||
4821 | const Twine &NameStr = "", ///< A name for the new instruction | |||
4822 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
4823 | ); | |||
4824 | ||||
4825 | /// Constructor with insert-at-end semantics. | |||
4826 | ZExtInst( | |||
4827 | Value *S, ///< The value to be zero extended | |||
4828 | Type *Ty, ///< The type to zero extend to | |||
4829 | const Twine &NameStr, ///< A name for the new instruction | |||
4830 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
4831 | ); | |||
4832 | ||||
4833 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4834 | static bool classof(const Instruction *I) { | |||
4835 | return I->getOpcode() == ZExt; | |||
4836 | } | |||
4837 | static bool classof(const Value *V) { | |||
4838 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4839 | } | |||
4840 | }; | |||
4841 | ||||
4842 | //===----------------------------------------------------------------------===// | |||
4843 | // SExtInst Class | |||
4844 | //===----------------------------------------------------------------------===// | |||
4845 | ||||
4846 | /// This class represents a sign extension of integer types. | |||
4847 | class SExtInst : public CastInst { | |||
4848 | protected: | |||
4849 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4850 | friend class Instruction; | |||
4851 | ||||
4852 | /// Clone an identical SExtInst | |||
4853 | SExtInst *cloneImpl() const; | |||
4854 | ||||
4855 | public: | |||
4856 | /// Constructor with insert-before-instruction semantics | |||
4857 | SExtInst( | |||
4858 | Value *S, ///< The value to be sign extended | |||
4859 | Type *Ty, ///< The type to sign extend to | |||
4860 | const Twine &NameStr = "", ///< A name for the new instruction | |||
4861 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
4862 | ); | |||
4863 | ||||
4864 | /// Constructor with insert-at-end-of-block semantics | |||
4865 | SExtInst( | |||
4866 | Value *S, ///< The value to be sign extended | |||
4867 | Type *Ty, ///< The type to sign extend to | |||
4868 | const Twine &NameStr, ///< A name for the new instruction | |||
4869 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
4870 | ); | |||
4871 | ||||
4872 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4873 | static bool classof(const Instruction *I) { | |||
4874 | return I->getOpcode() == SExt; | |||
4875 | } | |||
4876 | static bool classof(const Value *V) { | |||
4877 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4878 | } | |||
4879 | }; | |||
4880 | ||||
4881 | //===----------------------------------------------------------------------===// | |||
4882 | // FPTruncInst Class | |||
4883 | //===----------------------------------------------------------------------===// | |||
4884 | ||||
4885 | /// This class represents a truncation of floating point types. | |||
4886 | class FPTruncInst : public CastInst { | |||
4887 | protected: | |||
4888 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4889 | friend class Instruction; | |||
4890 | ||||
4891 | /// Clone an identical FPTruncInst | |||
4892 | FPTruncInst *cloneImpl() const; | |||
4893 | ||||
4894 | public: | |||
4895 | /// Constructor with insert-before-instruction semantics | |||
4896 | FPTruncInst( | |||
4897 | Value *S, ///< The value to be truncated | |||
4898 | Type *Ty, ///< The type to truncate to | |||
4899 | const Twine &NameStr = "", ///< A name for the new instruction | |||
4900 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
4901 | ); | |||
4902 | ||||
4903 | /// Constructor with insert-before-instruction semantics | |||
4904 | FPTruncInst( | |||
4905 | Value *S, ///< The value to be truncated | |||
4906 | Type *Ty, ///< The type to truncate to | |||
4907 | const Twine &NameStr, ///< A name for the new instruction | |||
4908 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
4909 | ); | |||
4910 | ||||
4911 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4912 | static bool classof(const Instruction *I) { | |||
4913 | return I->getOpcode() == FPTrunc; | |||
4914 | } | |||
4915 | static bool classof(const Value *V) { | |||
4916 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4917 | } | |||
4918 | }; | |||
4919 | ||||
4920 | //===----------------------------------------------------------------------===// | |||
4921 | // FPExtInst Class | |||
4922 | //===----------------------------------------------------------------------===// | |||
4923 | ||||
4924 | /// This class represents an extension of floating point types. | |||
4925 | class FPExtInst : public CastInst { | |||
4926 | protected: | |||
4927 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4928 | friend class Instruction; | |||
4929 | ||||
4930 | /// Clone an identical FPExtInst | |||
4931 | FPExtInst *cloneImpl() const; | |||
4932 | ||||
4933 | public: | |||
4934 | /// Constructor with insert-before-instruction semantics | |||
4935 | FPExtInst( | |||
4936 | Value *S, ///< The value to be extended | |||
4937 | Type *Ty, ///< The type to extend to | |||
4938 | const Twine &NameStr = "", ///< A name for the new instruction | |||
4939 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
4940 | ); | |||
4941 | ||||
4942 | /// Constructor with insert-at-end-of-block semantics | |||
4943 | FPExtInst( | |||
4944 | Value *S, ///< The value to be extended | |||
4945 | Type *Ty, ///< The type to extend to | |||
4946 | const Twine &NameStr, ///< A name for the new instruction | |||
4947 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
4948 | ); | |||
4949 | ||||
4950 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4951 | static bool classof(const Instruction *I) { | |||
4952 | return I->getOpcode() == FPExt; | |||
4953 | } | |||
4954 | static bool classof(const Value *V) { | |||
4955 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4956 | } | |||
4957 | }; | |||
4958 | ||||
4959 | //===----------------------------------------------------------------------===// | |||
4960 | // UIToFPInst Class | |||
4961 | //===----------------------------------------------------------------------===// | |||
4962 | ||||
4963 | /// This class represents a cast unsigned integer to floating point. | |||
4964 | class UIToFPInst : public CastInst { | |||
4965 | protected: | |||
4966 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4967 | friend class Instruction; | |||
4968 | ||||
4969 | /// Clone an identical UIToFPInst | |||
4970 | UIToFPInst *cloneImpl() const; | |||
4971 | ||||
4972 | public: | |||
4973 | /// Constructor with insert-before-instruction semantics | |||
4974 | UIToFPInst( | |||
4975 | Value *S, ///< The value to be converted | |||
4976 | Type *Ty, ///< The type to convert to | |||
4977 | const Twine &NameStr = "", ///< A name for the new instruction | |||
4978 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
4979 | ); | |||
4980 | ||||
4981 | /// Constructor with insert-at-end-of-block semantics | |||
4982 | UIToFPInst( | |||
4983 | Value *S, ///< The value to be converted | |||
4984 | Type *Ty, ///< The type to convert to | |||
4985 | const Twine &NameStr, ///< A name for the new instruction | |||
4986 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
4987 | ); | |||
4988 | ||||
4989 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4990 | static bool classof(const Instruction *I) { | |||
4991 | return I->getOpcode() == UIToFP; | |||
4992 | } | |||
4993 | static bool classof(const Value *V) { | |||
4994 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4995 | } | |||
4996 | }; | |||
4997 | ||||
4998 | //===----------------------------------------------------------------------===// | |||
4999 | // SIToFPInst Class | |||
5000 | //===----------------------------------------------------------------------===// | |||
5001 | ||||
5002 | /// This class represents a cast from signed integer to floating point. | |||
5003 | class SIToFPInst : public CastInst { | |||
5004 | protected: | |||
5005 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5006 | friend class Instruction; | |||
5007 | ||||
5008 | /// Clone an identical SIToFPInst | |||
5009 | SIToFPInst *cloneImpl() const; | |||
5010 | ||||
5011 | public: | |||
5012 | /// Constructor with insert-before-instruction semantics | |||
5013 | SIToFPInst( | |||
5014 | Value *S, ///< The value to be converted | |||
5015 | Type *Ty, ///< The type to convert to | |||
5016 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5017 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5018 | ); | |||
5019 | ||||
5020 | /// Constructor with insert-at-end-of-block semantics | |||
5021 | SIToFPInst( | |||
5022 | Value *S, ///< The value to be converted | |||
5023 | Type *Ty, ///< The type to convert to | |||
5024 | const Twine &NameStr, ///< A name for the new instruction | |||
5025 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
5026 | ); | |||
5027 | ||||
5028 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
5029 | static bool classof(const Instruction *I) { | |||
5030 | return I->getOpcode() == SIToFP; | |||
5031 | } | |||
5032 | static bool classof(const Value *V) { | |||
5033 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5034 | } | |||
5035 | }; | |||
5036 | ||||
5037 | //===----------------------------------------------------------------------===// | |||
5038 | // FPToUIInst Class | |||
5039 | //===----------------------------------------------------------------------===// | |||
5040 | ||||
5041 | /// This class represents a cast from floating point to unsigned integer | |||
5042 | class FPToUIInst : public CastInst { | |||
5043 | protected: | |||
5044 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5045 | friend class Instruction; | |||
5046 | ||||
5047 | /// Clone an identical FPToUIInst | |||
5048 | FPToUIInst *cloneImpl() const; | |||
5049 | ||||
5050 | public: | |||
5051 | /// Constructor with insert-before-instruction semantics | |||
5052 | FPToUIInst( | |||
5053 | Value *S, ///< The value to be converted | |||
5054 | Type *Ty, ///< The type to convert to | |||
5055 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5056 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5057 | ); | |||
5058 | ||||
5059 | /// Constructor with insert-at-end-of-block semantics | |||
5060 | FPToUIInst( | |||
5061 | Value *S, ///< The value to be converted | |||
5062 | Type *Ty, ///< The type to convert to | |||
5063 | const Twine &NameStr, ///< A name for the new instruction | |||
5064 | BasicBlock *InsertAtEnd ///< Where to insert the new instruction | |||
5065 | ); | |||
5066 | ||||
5067 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
5068 | static bool classof(const Instruction *I) { | |||
5069 | return I->getOpcode() == FPToUI; | |||
5070 | } | |||
5071 | static bool classof(const Value *V) { | |||
5072 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5073 | } | |||
5074 | }; | |||
5075 | ||||
5076 | //===----------------------------------------------------------------------===// | |||
5077 | // FPToSIInst Class | |||
5078 | //===----------------------------------------------------------------------===// | |||
5079 | ||||
5080 | /// This class represents a cast from floating point to signed integer. | |||
5081 | class FPToSIInst : public CastInst { | |||
5082 | protected: | |||
5083 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5084 | friend class Instruction; | |||
5085 | ||||
5086 | /// Clone an identical FPToSIInst | |||
5087 | FPToSIInst *cloneImpl() const; | |||
5088 | ||||
5089 | public: | |||
5090 | /// Constructor with insert-before-instruction semantics | |||
5091 | FPToSIInst( | |||
5092 | Value *S, ///< The value to be converted | |||
5093 | Type *Ty, ///< The type to convert to | |||
5094 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5095 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5096 | ); | |||
5097 | ||||
5098 | /// Constructor with insert-at-end-of-block semantics | |||
5099 | FPToSIInst( | |||
5100 | Value *S, ///< The value to be converted | |||
5101 | Type *Ty, ///< The type to convert to | |||
5102 | const Twine &NameStr, ///< A name for the new instruction | |||
5103 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
5104 | ); | |||
5105 | ||||
5106 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
5107 | static bool classof(const Instruction *I) { | |||
5108 | return I->getOpcode() == FPToSI; | |||
5109 | } | |||
5110 | static bool classof(const Value *V) { | |||
5111 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5112 | } | |||
5113 | }; | |||
5114 | ||||
5115 | //===----------------------------------------------------------------------===// | |||
5116 | // IntToPtrInst Class | |||
5117 | //===----------------------------------------------------------------------===// | |||
5118 | ||||
5119 | /// This class represents a cast from an integer to a pointer. | |||
5120 | class IntToPtrInst : public CastInst { | |||
5121 | public: | |||
5122 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5123 | friend class Instruction; | |||
5124 | ||||
5125 | /// Constructor with insert-before-instruction semantics | |||
5126 | IntToPtrInst( | |||
5127 | Value *S, ///< The value to be converted | |||
5128 | Type *Ty, ///< The type to convert to | |||
5129 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5130 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5131 | ); | |||
5132 | ||||
5133 | /// Constructor with insert-at-end-of-block semantics | |||
5134 | IntToPtrInst( | |||
5135 | Value *S, ///< The value to be converted | |||
5136 | Type *Ty, ///< The type to convert to | |||
5137 | const Twine &NameStr, ///< A name for the new instruction | |||
5138 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
5139 | ); | |||
5140 | ||||
5141 | /// Clone an identical IntToPtrInst. | |||
5142 | IntToPtrInst *cloneImpl() const; | |||
5143 | ||||
5144 | /// Returns the address space of this instruction's pointer type. | |||
5145 | unsigned getAddressSpace() const { | |||
5146 | return getType()->getPointerAddressSpace(); | |||
5147 | } | |||
5148 | ||||
5149 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
5150 | static bool classof(const Instruction *I) { | |||
5151 | return I->getOpcode() == IntToPtr; | |||
5152 | } | |||
5153 | static bool classof(const Value *V) { | |||
5154 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5155 | } | |||
5156 | }; | |||
5157 | ||||
5158 | //===----------------------------------------------------------------------===// | |||
5159 | // PtrToIntInst Class | |||
5160 | //===----------------------------------------------------------------------===// | |||
5161 | ||||
5162 | /// This class represents a cast from a pointer to an integer. | |||
5163 | class PtrToIntInst : public CastInst { | |||
5164 | protected: | |||
5165 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5166 | friend class Instruction; | |||
5167 | ||||
5168 | /// Clone an identical PtrToIntInst. | |||
5169 | PtrToIntInst *cloneImpl() const; | |||
5170 | ||||
5171 | public: | |||
5172 | /// Constructor with insert-before-instruction semantics | |||
5173 | PtrToIntInst( | |||
5174 | Value *S, ///< The value to be converted | |||
5175 | Type *Ty, ///< The type to convert to | |||
5176 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5177 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5178 | ); | |||
5179 | ||||
5180 | /// Constructor with insert-at-end-of-block semantics | |||
5181 | PtrToIntInst( | |||
5182 | Value *S, ///< The value to be converted | |||
5183 | Type *Ty, ///< The type to convert to | |||
5184 | const Twine &NameStr, ///< A name for the new instruction | |||
5185 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
5186 | ); | |||
5187 | ||||
5188 | /// Gets the pointer operand. | |||
5189 | Value *getPointerOperand() { return getOperand(0); } | |||
5190 | /// Gets the pointer operand. | |||
5191 | const Value *getPointerOperand() const { return getOperand(0); } | |||
5192 | /// Gets the operand index of the pointer operand. | |||
5193 | static unsigned getPointerOperandIndex() { return 0U; } | |||
5194 | ||||
5195 | /// Returns the address space of the pointer operand. | |||
5196 | unsigned getPointerAddressSpace() const { | |||
5197 | return getPointerOperand()->getType()->getPointerAddressSpace(); | |||
5198 | } | |||
5199 | ||||
5200 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
5201 | static bool classof(const Instruction *I) { | |||
5202 | return I->getOpcode() == PtrToInt; | |||
5203 | } | |||
5204 | static bool classof(const Value *V) { | |||
5205 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5206 | } | |||
5207 | }; | |||
5208 | ||||
5209 | //===----------------------------------------------------------------------===// | |||
5210 | // BitCastInst Class | |||
5211 | //===----------------------------------------------------------------------===// | |||
5212 | ||||
5213 | /// This class represents a no-op cast from one type to another. | |||
5214 | class BitCastInst : public CastInst { | |||
5215 | protected: | |||
5216 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5217 | friend class Instruction; | |||
5218 | ||||
5219 | /// Clone an identical BitCastInst. | |||
5220 | BitCastInst *cloneImpl() const; | |||
5221 | ||||
5222 | public: | |||
5223 | /// Constructor with insert-before-instruction semantics | |||
5224 | BitCastInst( | |||
5225 | Value *S, ///< The value to be casted | |||
5226 | Type *Ty, ///< The type to casted to | |||
5227 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5228 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5229 | ); | |||
5230 | ||||
5231 | /// Constructor with insert-at-end-of-block semantics | |||
5232 | BitCastInst( | |||
5233 | Value *S, ///< The value to be casted | |||
5234 | Type *Ty, ///< The type to casted to | |||
5235 | const Twine &NameStr, ///< A name for the new instruction | |||
5236 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
5237 | ); | |||
5238 | ||||
5239 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
5240 | static bool classof(const Instruction *I) { | |||
5241 | return I->getOpcode() == BitCast; | |||
5242 | } | |||
5243 | static bool classof(const Value *V) { | |||
5244 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5245 | } | |||
5246 | }; | |||
5247 | ||||
5248 | //===----------------------------------------------------------------------===// | |||
5249 | // AddrSpaceCastInst Class | |||
5250 | //===----------------------------------------------------------------------===// | |||
5251 | ||||
5252 | /// This class represents a conversion between pointers from one address space | |||
5253 | /// to another. | |||
5254 | class AddrSpaceCastInst : public CastInst { | |||
5255 | protected: | |||
5256 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5257 | friend class Instruction; | |||
5258 | ||||
5259 | /// Clone an identical AddrSpaceCastInst. | |||
5260 | AddrSpaceCastInst *cloneImpl() const; | |||
5261 | ||||
5262 | public: | |||
5263 | /// Constructor with insert-before-instruction semantics | |||
5264 | AddrSpaceCastInst( | |||
5265 | Value *S, ///< The value to be casted | |||
5266 | Type *Ty, ///< The type to casted to | |||
5267 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5268 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5269 | ); | |||
5270 | ||||
5271 | /// Constructor with insert-at-end-of-block semantics | |||
5272 | AddrSpaceCastInst( | |||
5273 | Value *S, ///< The value to be casted | |||
5274 | Type *Ty, ///< The type to casted to | |||
5275 | const Twine &NameStr, ///< A name for the new instruction | |||
5276 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
5277 | ); | |||
5278 | ||||
5279 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
5280 | static bool classof(const Instruction *I) { | |||
5281 | return I->getOpcode() == AddrSpaceCast; | |||
5282 | } | |||
5283 | static bool classof(const Value *V) { | |||
5284 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5285 | } | |||
5286 | ||||
5287 | /// Gets the pointer operand. | |||
5288 | Value *getPointerOperand() { | |||
5289 | return getOperand(0); | |||
5290 | } | |||
5291 | ||||
5292 | /// Gets the pointer operand. | |||
5293 | const Value *getPointerOperand() const { | |||
5294 | return getOperand(0); | |||
5295 | } | |||
5296 | ||||
5297 | /// Gets the operand index of the pointer operand. | |||
5298 | static unsigned getPointerOperandIndex() { | |||
5299 | return 0U; | |||
5300 | } | |||
5301 | ||||
5302 | /// Returns the address space of the pointer operand. | |||
5303 | unsigned getSrcAddressSpace() const { | |||
5304 | return getPointerOperand()->getType()->getPointerAddressSpace(); | |||
5305 | } | |||
5306 | ||||
5307 | /// Returns the address space of the result. | |||
5308 | unsigned getDestAddressSpace() const { | |||
5309 | return getType()->getPointerAddressSpace(); | |||
5310 | } | |||
5311 | }; | |||
5312 | ||||
5313 | //===----------------------------------------------------------------------===// | |||
5314 | // Helper functions | |||
5315 | //===----------------------------------------------------------------------===// | |||
5316 | ||||
5317 | /// A helper function that returns the pointer operand of a load or store | |||
5318 | /// instruction. Returns nullptr if not load or store. | |||
5319 | inline const Value *getLoadStorePointerOperand(const Value *V) { | |||
5320 | if (auto *Load = dyn_cast<LoadInst>(V)) | |||
5321 | return Load->getPointerOperand(); | |||
5322 | if (auto *Store = dyn_cast<StoreInst>(V)) | |||
5323 | return Store->getPointerOperand(); | |||
5324 | return nullptr; | |||
5325 | } | |||
5326 | inline Value *getLoadStorePointerOperand(Value *V) { | |||
5327 | return const_cast<Value *>( | |||
5328 | getLoadStorePointerOperand(static_cast<const Value *>(V))); | |||
5329 | } | |||
5330 | ||||
5331 | /// A helper function that returns the pointer operand of a load, store | |||
5332 | /// or GEP instruction. Returns nullptr if not load, store, or GEP. | |||
5333 | inline const Value *getPointerOperand(const Value *V) { | |||
5334 | if (auto *Ptr = getLoadStorePointerOperand(V)) | |||
5335 | return Ptr; | |||
5336 | if (auto *Gep = dyn_cast<GetElementPtrInst>(V)) | |||
5337 | return Gep->getPointerOperand(); | |||
5338 | return nullptr; | |||
5339 | } | |||
5340 | inline Value *getPointerOperand(Value *V) { | |||
5341 | return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V))); | |||
5342 | } | |||
5343 | ||||
5344 | /// A helper function that returns the alignment of load or store instruction. | |||
5345 | inline Align getLoadStoreAlignment(Value *I) { | |||
5346 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(static_cast <bool> ((isa<LoadInst>(I) || isa< StoreInst>(I)) && "Expected Load or Store instruction" ) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "llvm/include/llvm/IR/Instructions.h", 5347, __extension__ __PRETTY_FUNCTION__ )) | |||
5347 | "Expected Load or Store instruction")(static_cast <bool> ((isa<LoadInst>(I) || isa< StoreInst>(I)) && "Expected Load or Store instruction" ) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "llvm/include/llvm/IR/Instructions.h", 5347, __extension__ __PRETTY_FUNCTION__ )); | |||
5348 | if (auto *LI = dyn_cast<LoadInst>(I)) | |||
5349 | return LI->getAlign(); | |||
5350 | return cast<StoreInst>(I)->getAlign(); | |||
5351 | } | |||
5352 | ||||
5353 | /// A helper function that returns the address space of the pointer operand of | |||
5354 | /// load or store instruction. | |||
5355 | inline unsigned getLoadStoreAddressSpace(Value *I) { | |||
5356 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(static_cast <bool> ((isa<LoadInst>(I) || isa< StoreInst>(I)) && "Expected Load or Store instruction" ) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "llvm/include/llvm/IR/Instructions.h", 5357, __extension__ __PRETTY_FUNCTION__ )) | |||
5357 | "Expected Load or Store instruction")(static_cast <bool> ((isa<LoadInst>(I) || isa< StoreInst>(I)) && "Expected Load or Store instruction" ) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "llvm/include/llvm/IR/Instructions.h", 5357, __extension__ __PRETTY_FUNCTION__ )); | |||
5358 | if (auto *LI = dyn_cast<LoadInst>(I)) | |||
5359 | return LI->getPointerAddressSpace(); | |||
5360 | return cast<StoreInst>(I)->getPointerAddressSpace(); | |||
5361 | } | |||
5362 | ||||
5363 | /// A helper function that returns the type of a load or store instruction. | |||
5364 | inline Type *getLoadStoreType(Value *I) { | |||
5365 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(static_cast <bool> ((isa<LoadInst>(I) || isa< StoreInst>(I)) && "Expected Load or Store instruction" ) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "llvm/include/llvm/IR/Instructions.h", 5366, __extension__ __PRETTY_FUNCTION__ )) | |||
5366 | "Expected Load or Store instruction")(static_cast <bool> ((isa<LoadInst>(I) || isa< StoreInst>(I)) && "Expected Load or Store instruction" ) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "llvm/include/llvm/IR/Instructions.h", 5366, __extension__ __PRETTY_FUNCTION__ )); | |||
5367 | if (auto *LI = dyn_cast<LoadInst>(I)) | |||
5368 | return LI->getType(); | |||
5369 | return cast<StoreInst>(I)->getValueOperand()->getType(); | |||
5370 | } | |||
5371 | ||||
5372 | /// A helper function that returns an atomic operation's sync scope; returns | |||
5373 | /// None if it is not an atomic operation. | |||
5374 | inline Optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) { | |||
5375 | if (!I->isAtomic()) | |||
5376 | return None; | |||
5377 | if (auto *AI = dyn_cast<LoadInst>(I)) | |||
5378 | return AI->getSyncScopeID(); | |||
5379 | if (auto *AI = dyn_cast<StoreInst>(I)) | |||
5380 | return AI->getSyncScopeID(); | |||
5381 | if (auto *AI = dyn_cast<FenceInst>(I)) | |||
5382 | return AI->getSyncScopeID(); | |||
5383 | if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) | |||
5384 | return AI->getSyncScopeID(); | |||
5385 | if (auto *AI = dyn_cast<AtomicRMWInst>(I)) | |||
5386 | return AI->getSyncScopeID(); | |||
5387 | llvm_unreachable("unhandled atomic operation")::llvm::llvm_unreachable_internal("unhandled atomic operation" , "llvm/include/llvm/IR/Instructions.h", 5387); | |||
5388 | } | |||
5389 | ||||
5390 | //===----------------------------------------------------------------------===// | |||
5391 | // FreezeInst Class | |||
5392 | //===----------------------------------------------------------------------===// | |||
5393 | ||||
5394 | /// This class represents a freeze function that returns random concrete | |||
5395 | /// value if an operand is either a poison value or an undef value | |||
5396 | class FreezeInst : public UnaryInstruction { | |||
5397 | protected: | |||
5398 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5399 | friend class Instruction; | |||
5400 | ||||
5401 | /// Clone an identical FreezeInst | |||
5402 | FreezeInst *cloneImpl() const; | |||
5403 | ||||
5404 | public: | |||
5405 | explicit FreezeInst(Value *S, | |||
5406 | const Twine &NameStr = "", | |||
5407 | Instruction *InsertBefore = nullptr); | |||
5408 | FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
5409 | ||||
5410 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
5411 | static inline bool classof(const Instruction *I) { | |||
5412 | return I->getOpcode() == Freeze; | |||
5413 | } | |||
5414 | static inline bool classof(const Value *V) { | |||
5415 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5416 | } | |||
5417 | }; | |||
5418 | ||||
5419 | } // end namespace llvm | |||
5420 | ||||
5421 | #endif // LLVM_IR_INSTRUCTIONS_H |