File: | build/source/llvm/include/llvm/IR/Instructions.h |
Warning: | line 1253, column 33 Called C++ object pointer is null |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===------- LoopBoundSplit.cpp - Split Loop Bound --------------*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | ||||
9 | #include "llvm/Transforms/Scalar/LoopBoundSplit.h" | |||
10 | #include "llvm/ADT/Sequence.h" | |||
11 | #include "llvm/Analysis/LoopAnalysisManager.h" | |||
12 | #include "llvm/Analysis/LoopInfo.h" | |||
13 | #include "llvm/Analysis/ScalarEvolution.h" | |||
14 | #include "llvm/Analysis/ScalarEvolutionExpressions.h" | |||
15 | #include "llvm/IR/PatternMatch.h" | |||
16 | #include "llvm/Transforms/Scalar/LoopPassManager.h" | |||
17 | #include "llvm/Transforms/Utils/BasicBlockUtils.h" | |||
18 | #include "llvm/Transforms/Utils/Cloning.h" | |||
19 | #include "llvm/Transforms/Utils/LoopSimplify.h" | |||
20 | #include "llvm/Transforms/Utils/ScalarEvolutionExpander.h" | |||
21 | ||||
#define DEBUG_TYPE "loop-bound-split"
23 | ||||
24 | namespace llvm { | |||
25 | ||||
26 | using namespace PatternMatch; | |||
27 | ||||
28 | namespace { | |||
29 | struct ConditionInfo { | |||
30 | /// Branch instruction with this condition | |||
31 | BranchInst *BI = nullptr; | |||
32 | /// ICmp instruction with this condition | |||
33 | ICmpInst *ICmp = nullptr; | |||
34 | /// Preciate info | |||
35 | ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE; | |||
36 | /// AddRec llvm value | |||
37 | Value *AddRecValue = nullptr; | |||
38 | /// Non PHI AddRec llvm value | |||
39 | Value *NonPHIAddRecValue; | |||
40 | /// Bound llvm value | |||
41 | Value *BoundValue = nullptr; | |||
42 | /// AddRec SCEV | |||
43 | const SCEVAddRecExpr *AddRecSCEV = nullptr; | |||
44 | /// Bound SCEV | |||
45 | const SCEV *BoundSCEV = nullptr; | |||
46 | ||||
47 | ConditionInfo() = default; | |||
48 | }; | |||
49 | } // namespace | |||
50 | ||||
// Analyze \p ICmp and populate \p Cond with its predicate, AddRec value/SCEV
// and bound value/SCEV, canonicalized so the AddRec side ends up on the LHS.
// Fields are only filled in when the compare matches "icmp pred X, Y";
// callers must therefore null-check Cond.AddRecSCEV / Cond.BoundSCEV.
static void analyzeICmp(ScalarEvolution &SE, ICmpInst *ICmp,
                        ConditionInfo &Cond, const Loop &L) {
  Cond.ICmp = ICmp;
  if (match(ICmp, m_ICmp(Cond.Pred, m_Value(Cond.AddRecValue),
                         m_Value(Cond.BoundValue)))) {
    const SCEV *AddRecSCEV = SE.getSCEV(Cond.AddRecValue);
    const SCEV *BoundSCEV = SE.getSCEV(Cond.BoundValue);
    const SCEVAddRecExpr *LHSAddRecSCEV = dyn_cast<SCEVAddRecExpr>(AddRecSCEV);
    const SCEVAddRecExpr *RHSAddRecSCEV = dyn_cast<SCEVAddRecExpr>(BoundSCEV);
    // Locate AddRec in LHSSCEV and Bound in RHSSCEV. If the AddRec is on the
    // RHS only, swap operands and the predicate so the AddRec becomes LHS.
    if (!LHSAddRecSCEV && RHSAddRecSCEV) {
      std::swap(Cond.AddRecValue, Cond.BoundValue);
      std::swap(AddRecSCEV, BoundSCEV);
      Cond.Pred = ICmpInst::getSwappedPredicate(Cond.Pred);
    }

    // Note: after the swap AddRecSCEV may still not be an AddRec (e.g.
    // neither side recurs), in which case Cond.AddRecSCEV stays null.
    Cond.AddRecSCEV = dyn_cast<SCEVAddRecExpr>(AddRecSCEV);
    Cond.BoundSCEV = BoundSCEV;
    Cond.NonPHIAddRecValue = Cond.AddRecValue;

    // If the Cond.AddRecValue is PHI node, update Cond.NonPHIAddRecValue with
    // value from backedge.
    if (Cond.AddRecSCEV && isa<PHINode>(Cond.AddRecValue)) {
      PHINode *PN = cast<PHINode>(Cond.AddRecValue);
      Cond.NonPHIAddRecValue = PN->getIncomingValueForBlock(L.getLoopLatch());
    }
  }
}
79 | ||||
// Compute the effective upper bound for \p Cond and store it in
// Cond.BoundSCEV. For the exiting condition, take SCEV's exit count directly.
// For a non-exit condition, keep an existing LT predicate as-is, or convert
// LE into LT by adding one to the bound when that provably cannot overflow.
// Returns false when no usable upper bound can be established.
static bool calculateUpperBound(const Loop &L, ScalarEvolution &SE,
                                ConditionInfo &Cond, bool IsExitCond) {
  if (IsExitCond) {
    const SCEV *ExitCount = SE.getExitCount(&L, Cond.ICmp->getParent());
    if (isa<SCEVCouldNotCompute>(ExitCount))
      return false;

    Cond.BoundSCEV = ExitCount;
    return true;
  }

  // For non-exit condition, if pred is LT, keep existing bound.
  if (Cond.Pred == ICmpInst::ICMP_SLT || Cond.Pred == ICmpInst::ICMP_ULT)
    return true;

  // For non-exit condition, if pred is LE, try to convert it to LT:
  //   AddRec <= Bound  -->  AddRec < Bound + 1
  // (valid only when Bound + 1 does not wrap; checked below).
  if (Cond.Pred != ICmpInst::ICMP_ULE && Cond.Pred != ICmpInst::ICMP_SLE)
    return false;

  if (IntegerType *BoundSCEVIntType =
          dyn_cast<IntegerType>(Cond.BoundSCEV->getType())) {
    unsigned BitWidth = BoundSCEVIntType->getBitWidth();
    APInt Max = ICmpInst::isSigned(Cond.Pred)
                    ? APInt::getSignedMaxValue(BitWidth)
                    : APInt::getMaxValue(BitWidth);
    const SCEV *MaxSCEV = SE.getConstant(Max);
    // Check Bound < INT_MAX, i.e. Bound + 1 cannot overflow.
    ICmpInst::Predicate Pred =
        ICmpInst::isSigned(Cond.Pred) ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
    if (SE.isKnownPredicate(Pred, Cond.BoundSCEV, MaxSCEV)) {
      const SCEV *BoundPlusOneSCEV =
          SE.getAddExpr(Cond.BoundSCEV, SE.getOne(BoundSCEVIntType));
      Cond.BoundSCEV = BoundPlusOneSCEV;
      Cond.Pred = Pred;
      return true;
    }
  }

  // ToDo: Support ICMP_NE/EQ.

  return false;
}
124 | ||||
125 | static bool hasProcessableCondition(const Loop &L, ScalarEvolution &SE, | |||
126 | ICmpInst *ICmp, ConditionInfo &Cond, | |||
127 | bool IsExitCond) { | |||
128 | analyzeICmp(SE, ICmp, Cond, L); | |||
129 | ||||
130 | // The BoundSCEV should be evaluated at loop entry. | |||
131 | if (!SE.isAvailableAtLoopEntry(Cond.BoundSCEV, &L)) | |||
132 | return false; | |||
133 | ||||
134 | // Allowed AddRec as induction variable. | |||
135 | if (!Cond.AddRecSCEV) | |||
136 | return false; | |||
137 | ||||
138 | if (!Cond.AddRecSCEV->isAffine()) | |||
139 | return false; | |||
140 | ||||
141 | const SCEV *StepRecSCEV = Cond.AddRecSCEV->getStepRecurrence(SE); | |||
142 | // Allowed constant step. | |||
143 | if (!isa<SCEVConstant>(StepRecSCEV)) | |||
144 | return false; | |||
145 | ||||
146 | ConstantInt *StepCI = cast<SCEVConstant>(StepRecSCEV)->getValue(); | |||
147 | // Allowed positive step for now. | |||
148 | // TODO: Support negative step. | |||
149 | if (StepCI->isNegative() || StepCI->isZero()) | |||
150 | return false; | |||
151 | ||||
152 | // Calculate upper bound. | |||
153 | if (!calculateUpperBound(L, SE, Cond, IsExitCond)) | |||
154 | return false; | |||
155 | ||||
156 | return true; | |||
157 | } | |||
158 | ||||
159 | static bool isProcessableCondBI(const ScalarEvolution &SE, | |||
160 | const BranchInst *BI) { | |||
161 | BasicBlock *TrueSucc = nullptr; | |||
162 | BasicBlock *FalseSucc = nullptr; | |||
163 | ICmpInst::Predicate Pred; | |||
164 | Value *LHS, *RHS; | |||
165 | if (!match(BI, m_Br(m_ICmp(Pred, m_Value(LHS), m_Value(RHS)), | |||
166 | m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc)))) | |||
167 | return false; | |||
168 | ||||
169 | if (!SE.isSCEVable(LHS->getType())) | |||
170 | return false; | |||
171 | assert(SE.isSCEVable(RHS->getType()) && "Expected RHS's type is SCEVable")(static_cast <bool> (SE.isSCEVable(RHS->getType()) && "Expected RHS's type is SCEVable") ? void (0) : __assert_fail ("SE.isSCEVable(RHS->getType()) && \"Expected RHS's type is SCEVable\"" , "llvm/lib/Transforms/Scalar/LoopBoundSplit.cpp", 171, __extension__ __PRETTY_FUNCTION__)); | |||
172 | ||||
173 | if (TrueSucc == FalseSucc) | |||
174 | return false; | |||
175 | ||||
176 | return true; | |||
177 | } | |||
178 | ||||
179 | static bool canSplitLoopBound(const Loop &L, const DominatorTree &DT, | |||
180 | ScalarEvolution &SE, ConditionInfo &Cond) { | |||
181 | // Skip function with optsize. | |||
182 | if (L.getHeader()->getParent()->hasOptSize()) | |||
183 | return false; | |||
184 | ||||
185 | // Split only innermost loop. | |||
186 | if (!L.isInnermost()) | |||
187 | return false; | |||
188 | ||||
189 | // Check loop is in simplified form. | |||
190 | if (!L.isLoopSimplifyForm()) | |||
191 | return false; | |||
192 | ||||
193 | // Check loop is in LCSSA form. | |||
194 | if (!L.isLCSSAForm(DT)) | |||
195 | return false; | |||
196 | ||||
197 | // Skip loop that cannot be cloned. | |||
198 | if (!L.isSafeToClone()) | |||
199 | return false; | |||
200 | ||||
201 | BasicBlock *ExitingBB = L.getExitingBlock(); | |||
202 | // Assumed only one exiting block. | |||
203 | if (!ExitingBB) | |||
204 | return false; | |||
205 | ||||
206 | BranchInst *ExitingBI = dyn_cast<BranchInst>(ExitingBB->getTerminator()); | |||
207 | if (!ExitingBI) | |||
208 | return false; | |||
209 | ||||
210 | // Allowed only conditional branch with ICmp. | |||
211 | if (!isProcessableCondBI(SE, ExitingBI)) | |||
212 | return false; | |||
213 | ||||
214 | // Check the condition is processable. | |||
215 | ICmpInst *ICmp = cast<ICmpInst>(ExitingBI->getCondition()); | |||
216 | if (!hasProcessableCondition(L, SE, ICmp, Cond, /*IsExitCond*/ true)) | |||
217 | return false; | |||
218 | ||||
219 | Cond.BI = ExitingBI; | |||
220 | return true; | |||
221 | } | |||
222 | ||||
223 | static bool isProfitableToTransform(const Loop &L, const BranchInst *BI) { | |||
224 | // If the conditional branch splits a loop into two halves, we could | |||
225 | // generally say it is profitable. | |||
226 | // | |||
227 | // ToDo: Add more profitable cases here. | |||
228 | ||||
229 | // Check this branch causes diamond CFG. | |||
230 | BasicBlock *Succ0 = BI->getSuccessor(0); | |||
231 | BasicBlock *Succ1 = BI->getSuccessor(1); | |||
232 | ||||
233 | BasicBlock *Succ0Succ = Succ0->getSingleSuccessor(); | |||
234 | BasicBlock *Succ1Succ = Succ1->getSingleSuccessor(); | |||
235 | if (!Succ0Succ || !Succ1Succ || Succ0Succ != Succ1Succ) | |||
236 | return false; | |||
237 | ||||
238 | // ToDo: Calculate each successor's instruction cost. | |||
239 | ||||
240 | return true; | |||
241 | } | |||
242 | ||||
// Scan the loop body for a conditional branch (other than the latch's) whose
// ICmp compares an affine AddRec against a loop-invariant bound of the same
// type as the exit bound, and whose condition is guaranteed true on loop
// entry. That branch becomes the split candidate: the pre-loop will iterate
// only while its condition holds. Fills \p SplitCandidateCond and returns the
// branch, or returns nullptr when no candidate exists.
static BranchInst *findSplitCandidate(const Loop &L, ScalarEvolution &SE,
                                      ConditionInfo &ExitingCond,
                                      ConditionInfo &SplitCandidateCond) {
  for (auto *BB : L.blocks()) {
    // Skip condition of backedge.
    if (L.getLoopLatch() == BB)
      continue;

    auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
    if (!BI)
      continue;

    // Check conditional branch with ICmp.
    if (!isProcessableCondBI(SE, BI))
      continue;

    // Skip loop invariant condition.
    if (L.isLoopInvariant(BI->getCondition()))
      continue;

    // Check the condition is processable.
    ICmpInst *ICmp = cast<ICmpInst>(BI->getCondition());
    if (!hasProcessableCondition(L, SE, ICmp, SplitCandidateCond,
                                 /*IsExitCond*/ false))
      continue;

    // The new pre-loop bound is a min of both bounds, so their types must
    // agree.
    if (ExitingCond.BoundSCEV->getType() !=
        SplitCandidateCond.BoundSCEV->getType())
      continue;

    // After transformation, we assume the split condition of the pre-loop is
    // always true. In order to guarantee it, we need to check the start value
    // of the split cond AddRec satisfies the split condition.
    if (!SE.isLoopEntryGuardedByCond(&L, SplitCandidateCond.Pred,
                                     SplitCandidateCond.AddRecSCEV->getStart(),
                                     SplitCandidateCond.BoundSCEV))
      continue;

    SplitCandidateCond.BI = BI;
    return BI;
  }

  return nullptr;
}
287 | ||||
288 | static bool splitLoopBound(Loop &L, DominatorTree &DT, LoopInfo &LI, | |||
289 | ScalarEvolution &SE, LPMUpdater &U) { | |||
290 | ConditionInfo SplitCandidateCond; | |||
291 | ConditionInfo ExitingCond; | |||
292 | ||||
293 | // Check we can split this loop's bound. | |||
294 | if (!canSplitLoopBound(L, DT, SE, ExitingCond)) | |||
295 | return false; | |||
296 | ||||
297 | if (!findSplitCandidate(L, SE, ExitingCond, SplitCandidateCond)) | |||
298 | return false; | |||
299 | ||||
300 | if (!isProfitableToTransform(L, SplitCandidateCond.BI)) | |||
301 | return false; | |||
302 | ||||
303 | // Now, we have a split candidate. Let's build a form as below. | |||
304 | // +--------------------+ | |||
305 | // | preheader | | |||
306 | // | set up newbound | | |||
307 | // +--------------------+ | |||
308 | // | /----------------\ | |||
309 | // +--------v----v------+ | | |||
310 | // | header |---\ | | |||
311 | // | with true condition| | | | |||
312 | // +--------------------+ | | | |||
313 | // | | | | |||
314 | // +--------v-----------+ | | | |||
315 | // | if.then.BB | | | | |||
316 | // +--------------------+ | | | |||
317 | // | | | | |||
318 | // +--------v-----------<---/ | | |||
319 | // | latch >----------/ | |||
320 | // | with newbound | | |||
321 | // +--------------------+ | |||
322 | // | | |||
323 | // +--------v-----------+ | |||
324 | // | preheader2 |--------------\ | |||
325 | // | if (AddRec i != | | | |||
326 | // | org bound) | | | |||
327 | // +--------------------+ | | |||
328 | // | /----------------\ | | |||
329 | // +--------v----v------+ | | | |||
330 | // | header2 |---\ | | | |||
331 | // | conditional branch | | | | | |||
332 | // |with false condition| | | | | |||
333 | // +--------------------+ | | | | |||
334 | // | | | | | |||
335 | // +--------v-----------+ | | | | |||
336 | // | if.then.BB2 | | | | | |||
337 | // +--------------------+ | | | | |||
338 | // | | | | | |||
339 | // +--------v-----------<---/ | | | |||
340 | // | latch2 >----------/ | | |||
341 | // | with org bound | | | |||
342 | // +--------v-----------+ | | |||
343 | // | | | |||
344 | // | +---------------+ | | |||
345 | // +--> exit <-------/ | |||
346 | // +---------------+ | |||
347 | ||||
348 | // Let's create post loop. | |||
349 | SmallVector<BasicBlock *, 8> PostLoopBlocks; | |||
350 | Loop *PostLoop; | |||
351 | ValueToValueMapTy VMap; | |||
352 | BasicBlock *PreHeader = L.getLoopPreheader(); | |||
353 | BasicBlock *SplitLoopPH = SplitEdge(PreHeader, L.getHeader(), &DT, &LI); | |||
354 | PostLoop = cloneLoopWithPreheader(L.getExitBlock(), SplitLoopPH, &L, VMap, | |||
355 | ".split", &LI, &DT, PostLoopBlocks); | |||
356 | remapInstructionsInBlocks(PostLoopBlocks, VMap); | |||
357 | ||||
358 | BasicBlock *PostLoopPreHeader = PostLoop->getLoopPreheader(); | |||
359 | IRBuilder<> Builder(&PostLoopPreHeader->front()); | |||
360 | ||||
361 | // Update phi nodes in header of post-loop. | |||
362 | bool isExitingLatch = | |||
363 | (L.getExitingBlock() == L.getLoopLatch()) ? true : false; | |||
364 | Value *ExitingCondLCSSAPhi = nullptr; | |||
365 | for (PHINode &PN : L.getHeader()->phis()) { | |||
366 | // Create LCSSA phi node in preheader of post-loop. | |||
367 | PHINode *LCSSAPhi = | |||
368 | Builder.CreatePHI(PN.getType(), 1, PN.getName() + ".lcssa"); | |||
369 | LCSSAPhi->setDebugLoc(PN.getDebugLoc()); | |||
370 | // If the exiting block is loop latch, the phi does not have the update at | |||
371 | // last iteration. In this case, update lcssa phi with value from backedge. | |||
372 | LCSSAPhi->addIncoming( | |||
373 | isExitingLatch ? PN.getIncomingValueForBlock(L.getLoopLatch()) : &PN, | |||
374 | L.getExitingBlock()); | |||
375 | ||||
376 | // Update the start value of phi node in post-loop with the LCSSA phi node. | |||
377 | PHINode *PostLoopPN = cast<PHINode>(VMap[&PN]); | |||
378 | PostLoopPN->setIncomingValueForBlock(PostLoopPreHeader, LCSSAPhi); | |||
379 | ||||
380 | // Find PHI with exiting condition from pre-loop. The PHI should be | |||
381 | // SCEVAddRecExpr and have same incoming value from backedge with | |||
382 | // ExitingCond. | |||
383 | if (!SE.isSCEVable(PN.getType())) | |||
384 | continue; | |||
385 | ||||
386 | const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN)); | |||
387 | if (PhiSCEV && ExitingCond.NonPHIAddRecValue == | |||
388 | PN.getIncomingValueForBlock(L.getLoopLatch())) | |||
389 | ExitingCondLCSSAPhi = LCSSAPhi; | |||
390 | } | |||
391 | ||||
392 | // Add conditional branch to check we can skip post-loop in its preheader. | |||
393 | Instruction *OrigBI = PostLoopPreHeader->getTerminator(); | |||
394 | ICmpInst::Predicate Pred = ICmpInst::ICMP_NE; | |||
395 | Value *Cond = | |||
396 | Builder.CreateICmp(Pred, ExitingCondLCSSAPhi, ExitingCond.BoundValue); | |||
397 | Builder.CreateCondBr(Cond, PostLoop->getHeader(), PostLoop->getExitBlock()); | |||
398 | OrigBI->eraseFromParent(); | |||
399 | ||||
400 | // Create new loop bound and add it into preheader of pre-loop. | |||
401 | const SCEV *NewBoundSCEV = ExitingCond.BoundSCEV; | |||
402 | const SCEV *SplitBoundSCEV = SplitCandidateCond.BoundSCEV; | |||
403 | NewBoundSCEV = ICmpInst::isSigned(ExitingCond.Pred) | |||
404 | ? SE.getSMinExpr(NewBoundSCEV, SplitBoundSCEV) | |||
405 | : SE.getUMinExpr(NewBoundSCEV, SplitBoundSCEV); | |||
406 | ||||
407 | SCEVExpander Expander( | |||
408 | SE, L.getHeader()->getParent()->getParent()->getDataLayout(), "split"); | |||
409 | Instruction *InsertPt = SplitLoopPH->getTerminator(); | |||
410 | Value *NewBoundValue = | |||
411 | Expander.expandCodeFor(NewBoundSCEV, NewBoundSCEV->getType(), InsertPt); | |||
412 | NewBoundValue->setName("new.bound"); | |||
413 | ||||
414 | // Replace exiting bound value of pre-loop NewBound. | |||
415 | ExitingCond.ICmp->setOperand(1, NewBoundValue); | |||
416 | ||||
417 | // Replace SplitCandidateCond.BI's condition of pre-loop by True. | |||
418 | LLVMContext &Context = PreHeader->getContext(); | |||
419 | SplitCandidateCond.BI->setCondition(ConstantInt::getTrue(Context)); | |||
420 | ||||
421 | // Replace cloned SplitCandidateCond.BI's condition in post-loop by False. | |||
422 | BranchInst *ClonedSplitCandidateBI = | |||
423 | cast<BranchInst>(VMap[SplitCandidateCond.BI]); | |||
424 | ClonedSplitCandidateBI->setCondition(ConstantInt::getFalse(Context)); | |||
425 | ||||
426 | // Replace exit branch target of pre-loop by post-loop's preheader. | |||
427 | if (L.getExitBlock() == ExitingCond.BI->getSuccessor(0)) | |||
428 | ExitingCond.BI->setSuccessor(0, PostLoopPreHeader); | |||
429 | else | |||
430 | ExitingCond.BI->setSuccessor(1, PostLoopPreHeader); | |||
431 | ||||
432 | // Update phi node in exit block of post-loop. | |||
433 | Builder.SetInsertPoint(&PostLoopPreHeader->front()); | |||
434 | for (PHINode &PN : PostLoop->getExitBlock()->phis()) { | |||
435 | for (auto i : seq<int>(0, PN.getNumOperands())) { | |||
436 | // Check incoming block is pre-loop's exiting block. | |||
437 | if (PN.getIncomingBlock(i) == L.getExitingBlock()) { | |||
438 | Value *IncomingValue = PN.getIncomingValue(i); | |||
439 | ||||
440 | // Create LCSSA phi node for incoming value. | |||
441 | PHINode *LCSSAPhi = | |||
442 | Builder.CreatePHI(PN.getType(), 1, PN.getName() + ".lcssa"); | |||
443 | LCSSAPhi->setDebugLoc(PN.getDebugLoc()); | |||
444 | LCSSAPhi->addIncoming(IncomingValue, PN.getIncomingBlock(i)); | |||
445 | ||||
446 | // Replace pre-loop's exiting block by post-loop's preheader. | |||
447 | PN.setIncomingBlock(i, PostLoopPreHeader); | |||
448 | // Replace incoming value by LCSSAPhi. | |||
449 | PN.setIncomingValue(i, LCSSAPhi); | |||
450 | // Add a new incoming value with post-loop's exiting block. | |||
451 | PN.addIncoming(VMap[IncomingValue], PostLoop->getExitingBlock()); | |||
452 | } | |||
453 | } | |||
454 | } | |||
455 | ||||
456 | // Update dominator tree. | |||
457 | DT.changeImmediateDominator(PostLoopPreHeader, L.getExitingBlock()); | |||
458 | DT.changeImmediateDominator(PostLoop->getExitBlock(), PostLoopPreHeader); | |||
459 | ||||
460 | // Invalidate cached SE information. | |||
461 | SE.forgetLoop(&L); | |||
462 | ||||
463 | // Canonicalize loops. | |||
464 | simplifyLoop(&L, &DT, &LI, &SE, nullptr, nullptr, true); | |||
465 | simplifyLoop(PostLoop, &DT, &LI, &SE, nullptr, nullptr, true); | |||
466 | ||||
467 | // Add new post-loop to loop pass manager. | |||
468 | U.addSiblingLoops(PostLoop); | |||
469 | ||||
470 | return true; | |||
471 | } | |||
472 | ||||
473 | PreservedAnalyses LoopBoundSplitPass::run(Loop &L, LoopAnalysisManager &AM, | |||
474 | LoopStandardAnalysisResults &AR, | |||
475 | LPMUpdater &U) { | |||
476 | Function &F = *L.getHeader()->getParent(); | |||
477 | (void)F; | |||
478 | ||||
479 | LLVM_DEBUG(dbgs() << "Spliting bound of loop in " << F.getName() << ": " << Ldo { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-bound-split")) { dbgs() << "Spliting bound of loop in " << F.getName() << ": " << L << "\n"; } } while (false) | |||
| ||||
480 | << "\n")do { if (::llvm::DebugFlag && ::llvm::isCurrentDebugType ("loop-bound-split")) { dbgs() << "Spliting bound of loop in " << F.getName() << ": " << L << "\n"; } } while (false); | |||
481 | ||||
482 | if (!splitLoopBound(L, AR.DT, AR.LI, AR.SE, U)) | |||
483 | return PreservedAnalyses::all(); | |||
484 | ||||
485 | assert(AR.DT.verify(DominatorTree::VerificationLevel::Fast))(static_cast <bool> (AR.DT.verify(DominatorTree::VerificationLevel ::Fast)) ? void (0) : __assert_fail ("AR.DT.verify(DominatorTree::VerificationLevel::Fast)" , "llvm/lib/Transforms/Scalar/LoopBoundSplit.cpp", 485, __extension__ __PRETTY_FUNCTION__)); | |||
486 | AR.LI.verify(AR.DT); | |||
487 | ||||
488 | return getLoopPassPreservedAnalyses(); | |||
489 | } | |||
490 | ||||
491 | } // end namespace llvm |
1 | //===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // This file defines the IRBuilder class, which is used as a convenient way |
10 | // to create LLVM instructions with a consistent and simplified interface. |
11 | // |
12 | //===----------------------------------------------------------------------===// |
13 | |
14 | #ifndef LLVM_IR_IRBUILDER_H |
15 | #define LLVM_IR_IRBUILDER_H |
16 | |
17 | #include "llvm-c/Types.h" |
18 | #include "llvm/ADT/ArrayRef.h" |
19 | #include "llvm/ADT/STLExtras.h" |
20 | #include "llvm/ADT/StringRef.h" |
21 | #include "llvm/ADT/Twine.h" |
22 | #include "llvm/IR/BasicBlock.h" |
23 | #include "llvm/IR/Constant.h" |
24 | #include "llvm/IR/ConstantFolder.h" |
25 | #include "llvm/IR/Constants.h" |
26 | #include "llvm/IR/DataLayout.h" |
27 | #include "llvm/IR/DebugLoc.h" |
28 | #include "llvm/IR/DerivedTypes.h" |
29 | #include "llvm/IR/FPEnv.h" |
30 | #include "llvm/IR/Function.h" |
31 | #include "llvm/IR/GlobalVariable.h" |
32 | #include "llvm/IR/InstrTypes.h" |
33 | #include "llvm/IR/Instruction.h" |
34 | #include "llvm/IR/Instructions.h" |
35 | #include "llvm/IR/Intrinsics.h" |
36 | #include "llvm/IR/LLVMContext.h" |
37 | #include "llvm/IR/Module.h" |
38 | #include "llvm/IR/Operator.h" |
39 | #include "llvm/IR/Type.h" |
40 | #include "llvm/IR/Value.h" |
41 | #include "llvm/IR/ValueHandle.h" |
42 | #include "llvm/Support/AtomicOrdering.h" |
43 | #include "llvm/Support/CBindingWrapping.h" |
44 | #include "llvm/Support/Casting.h" |
45 | #include <cassert> |
46 | #include <cstdint> |
47 | #include <functional> |
48 | #include <optional> |
49 | #include <utility> |
50 | |
51 | namespace llvm { |
52 | |
53 | class APInt; |
54 | class Use; |
55 | |
56 | /// This provides the default implementation of the IRBuilder |
57 | /// 'InsertHelper' method that is called whenever an instruction is created by |
58 | /// IRBuilder and needs to be inserted. |
59 | /// |
60 | /// By default, this inserts the instruction at the insertion point. |
class IRBuilderDefaultInserter {
public:
  virtual ~IRBuilderDefaultInserter();

  /// Insert \p I at \p InsertPt in \p BB (skipping the block insertion when
  /// \p BB is null, so the instruction is merely named), then set its name to
  /// \p Name.
  virtual void InsertHelper(Instruction *I, const Twine &Name,
                            BasicBlock *BB,
                            BasicBlock::iterator InsertPt) const {
    if (BB)
      I->insertInto(BB, InsertPt);
    I->setName(Name);
  }
};
73 | |
74 | /// Provides an 'InsertHelper' that calls a user-provided callback after |
75 | /// performing the default insertion. |
class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
  /// Invoked with each instruction after it has been inserted and named.
  std::function<void(Instruction *)> Callback;

public:
  ~IRBuilderCallbackInserter() override;

  IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
      : Callback(std::move(Callback)) {}

  /// Perform the default insertion, then hand \p I to the user callback.
  void InsertHelper(Instruction *I, const Twine &Name,
                    BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const override {
    IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
    Callback(I);
  }
};
92 | |
93 | /// Common base class shared among various IRBuilders. |
94 | class IRBuilderBase { |
95 | /// Pairs of (metadata kind, MDNode *) that should be added to all newly |
96 | /// created instructions, like !dbg metadata. |
97 | SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy; |
98 | |
99 | /// Add or update the an entry (Kind, MD) to MetadataToCopy, if \p MD is not |
100 | /// null. If \p MD is null, remove the entry with \p Kind. |
101 | void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) { |
102 | if (!MD) { |
103 | erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) { |
104 | return KV.first == Kind; |
105 | }); |
106 | return; |
107 | } |
108 | |
109 | for (auto &KV : MetadataToCopy) |
110 | if (KV.first == Kind) { |
111 | KV.second = MD; |
112 | return; |
113 | } |
114 | |
115 | MetadataToCopy.emplace_back(Kind, MD); |
116 | } |
117 | |
118 | protected: |
119 | BasicBlock *BB; |
120 | BasicBlock::iterator InsertPt; |
121 | LLVMContext &Context; |
122 | const IRBuilderFolder &Folder; |
123 | const IRBuilderDefaultInserter &Inserter; |
124 | |
125 | MDNode *DefaultFPMathTag; |
126 | FastMathFlags FMF; |
127 | |
128 | bool IsFPConstrained = false; |
129 | fp::ExceptionBehavior DefaultConstrainedExcept = fp::ebStrict; |
130 | RoundingMode DefaultConstrainedRounding = RoundingMode::Dynamic; |
131 | |
132 | ArrayRef<OperandBundleDef> DefaultOperandBundles; |
133 | |
134 | public: |
135 | IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder, |
136 | const IRBuilderDefaultInserter &Inserter, MDNode *FPMathTag, |
137 | ArrayRef<OperandBundleDef> OpBundles) |
138 | : Context(context), Folder(Folder), Inserter(Inserter), |
139 | DefaultFPMathTag(FPMathTag), DefaultOperandBundles(OpBundles) { |
140 | ClearInsertionPoint(); |
141 | } |
142 | |
143 | /// Insert and return the specified instruction. |
144 | template<typename InstTy> |
145 | InstTy *Insert(InstTy *I, const Twine &Name = "") const { |
146 | Inserter.InsertHelper(I, Name, BB, InsertPt); |
147 | AddMetadataToInst(I); |
148 | return I; |
149 | } |
150 | |
151 | /// No-op overload to handle constants. |
152 | Constant *Insert(Constant *C, const Twine& = "") const { |
153 | return C; |
154 | } |
155 | |
156 | Value *Insert(Value *V, const Twine &Name = "") const { |
157 | if (Instruction *I = dyn_cast<Instruction>(V)) |
158 | return Insert(I, Name); |
159 | assert(isa<Constant>(V))(static_cast <bool> (isa<Constant>(V)) ? void (0) : __assert_fail ("isa<Constant>(V)", "llvm/include/llvm/IR/IRBuilder.h" , 159, __extension__ __PRETTY_FUNCTION__)); |
160 | return V; |
161 | } |
162 | |
163 | //===--------------------------------------------------------------------===// |
164 | // Builder configuration methods |
165 | //===--------------------------------------------------------------------===// |
166 | |
167 | /// Clear the insertion point: created instructions will not be |
168 | /// inserted into a block. |
169 | void ClearInsertionPoint() { |
170 | BB = nullptr; |
171 | InsertPt = BasicBlock::iterator(); |
172 | } |
173 | |
174 | BasicBlock *GetInsertBlock() const { return BB; } |
175 | BasicBlock::iterator GetInsertPoint() const { return InsertPt; } |
176 | LLVMContext &getContext() const { return Context; } |
177 | |
178 | /// This specifies that created instructions should be appended to the |
179 | /// end of the specified block. |
180 | void SetInsertPoint(BasicBlock *TheBB) { |
181 | BB = TheBB; |
182 | InsertPt = BB->end(); |
183 | } |
184 | |
185 | /// This specifies that created instructions should be inserted before |
186 | /// the specified instruction. |
187 | void SetInsertPoint(Instruction *I) { |
188 | BB = I->getParent(); |
189 | InsertPt = I->getIterator(); |
190 | assert(InsertPt != BB->end() && "Can't read debug loc from end()")(static_cast <bool> (InsertPt != BB->end() && "Can't read debug loc from end()") ? void (0) : __assert_fail ("InsertPt != BB->end() && \"Can't read debug loc from end()\"" , "llvm/include/llvm/IR/IRBuilder.h", 190, __extension__ __PRETTY_FUNCTION__ )); |
191 | SetCurrentDebugLocation(I->getDebugLoc()); |
192 | } |
193 | |
194 | /// This specifies that created instructions should be inserted at the |
195 | /// specified point. |
196 | void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) { |
197 | BB = TheBB; |
198 | InsertPt = IP; |
199 | if (IP != TheBB->end()) |
200 | SetCurrentDebugLocation(IP->getDebugLoc()); |
201 | } |
202 | |
203 | /// This specifies that created instructions should inserted at the beginning |
204 | /// end of the specified function, but after already existing static alloca |
205 | /// instructions that are at the start. |
206 | void SetInsertPointPastAllocas(Function *F) { |
207 | BB = &F->getEntryBlock(); |
208 | InsertPt = BB->getFirstNonPHIOrDbgOrAlloca(); |
209 | } |
210 | |
211 | /// Set location information used by debugging information. |
212 | void SetCurrentDebugLocation(DebugLoc L) { |
213 | AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode()); |
214 | } |
215 | |
216 | /// Collect metadata with IDs \p MetadataKinds from \p Src which should be |
217 | /// added to all created instructions. Entries present in MedataDataToCopy but |
218 | /// not on \p Src will be dropped from MetadataToCopy. |
219 | void CollectMetadataToCopy(Instruction *Src, |
220 | ArrayRef<unsigned> MetadataKinds) { |
221 | for (unsigned K : MetadataKinds) |
222 | AddOrRemoveMetadataToCopy(K, Src->getMetadata(K)); |
223 | } |
224 | |
225 | /// Get location information used by debugging information. |
226 | DebugLoc getCurrentDebugLocation() const; |
227 | |
228 | /// If this builder has a current debug location, set it on the |
229 | /// specified instruction. |
230 | void SetInstDebugLocation(Instruction *I) const; |
231 | |
232 | /// Add all entries in MetadataToCopy to \p I. |
233 | void AddMetadataToInst(Instruction *I) const { |
234 | for (const auto &KV : MetadataToCopy) |
235 | I->setMetadata(KV.first, KV.second); |
236 | } |
237 | |
238 | /// Get the return type of the current function that we're emitting |
239 | /// into. |
240 | Type *getCurrentFunctionReturnType() const; |
241 | |
242 | /// InsertPoint - A saved insertion point. |
243 | class InsertPoint { |
244 | BasicBlock *Block = nullptr; |
245 | BasicBlock::iterator Point; |
246 | |
247 | public: |
248 | /// Creates a new insertion point which doesn't point to anything. |
249 | InsertPoint() = default; |
250 | |
251 | /// Creates a new insertion point at the given location. |
252 | InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint) |
253 | : Block(InsertBlock), Point(InsertPoint) {} |
254 | |
255 | /// Returns true if this insert point is set. |
256 | bool isSet() const { return (Block != nullptr); } |
257 | |
258 | BasicBlock *getBlock() const { return Block; } |
259 | BasicBlock::iterator getPoint() const { return Point; } |
260 | }; |
261 | |
262 | /// Returns the current insert point. |
263 | InsertPoint saveIP() const { |
264 | return InsertPoint(GetInsertBlock(), GetInsertPoint()); |
265 | } |
266 | |
267 | /// Returns the current insert point, clearing it in the process. |
268 | InsertPoint saveAndClearIP() { |
269 | InsertPoint IP(GetInsertBlock(), GetInsertPoint()); |
270 | ClearInsertionPoint(); |
271 | return IP; |
272 | } |
273 | |
274 | /// Sets the current insert point to a previously-saved location. |
275 | void restoreIP(InsertPoint IP) { |
276 | if (IP.isSet()) |
277 | SetInsertPoint(IP.getBlock(), IP.getPoint()); |
278 | else |
279 | ClearInsertionPoint(); |
280 | } |
281 | |
  /// Get the floating point math metadata being used.
  MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }

  /// Get the flags to be applied to created floating point ops
  FastMathFlags getFastMathFlags() const { return FMF; }

  /// Mutable access to the fast-math flags, allowing individual flags to be
  /// adjusted in place.
  FastMathFlags &getFastMathFlags() { return FMF; }

  /// Clear the fast-math flags.
  void clearFastMathFlags() { FMF.clear(); }

  /// Set the floating point math metadata to be used.
  void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }

  /// Set the fast-math flags to be used with generated fp-math operators
  void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }

  /// Enable/Disable use of constrained floating point math. When
  /// enabled the CreateF<op>() calls instead create constrained
  /// floating point intrinsic calls. Fast math flags are unaffected
  /// by this setting.
  void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }

  /// Query for the use of constrained floating point math
  // NOTE(review): could be marked const; left as-is to preserve the interface.
  bool getIsFPConstrained() { return IsFPConstrained; }
307 | |
308 | /// Set the exception handling to be used with constrained floating point |
309 | void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) { |
310 | #ifndef NDEBUG |
311 | std::optional<StringRef> ExceptStr = |
312 | convertExceptionBehaviorToStr(NewExcept); |
313 | assert(ExceptStr && "Garbage strict exception behavior!")(static_cast <bool> (ExceptStr && "Garbage strict exception behavior!" ) ? void (0) : __assert_fail ("ExceptStr && \"Garbage strict exception behavior!\"" , "llvm/include/llvm/IR/IRBuilder.h", 313, __extension__ __PRETTY_FUNCTION__ )); |
314 | #endif |
315 | DefaultConstrainedExcept = NewExcept; |
316 | } |
317 | |
318 | /// Set the rounding mode handling to be used with constrained floating point |
319 | void setDefaultConstrainedRounding(RoundingMode NewRounding) { |
320 | #ifndef NDEBUG |
321 | std::optional<StringRef> RoundingStr = |
322 | convertRoundingModeToStr(NewRounding); |
323 | assert(RoundingStr && "Garbage strict rounding mode!")(static_cast <bool> (RoundingStr && "Garbage strict rounding mode!" ) ? void (0) : __assert_fail ("RoundingStr && \"Garbage strict rounding mode!\"" , "llvm/include/llvm/IR/IRBuilder.h", 323, __extension__ __PRETTY_FUNCTION__ )); |
324 | #endif |
325 | DefaultConstrainedRounding = NewRounding; |
326 | } |
327 | |
  /// Get the exception handling used with constrained floating point
  // NOTE(review): could be marked const; left as-is to preserve the interface.
  fp::ExceptionBehavior getDefaultConstrainedExcept() {
    return DefaultConstrainedExcept;
  }

  /// Get the rounding mode handling used with constrained floating point
  RoundingMode getDefaultConstrainedRounding() {
    return DefaultConstrainedRounding;
  }
337 | |
338 | void setConstrainedFPFunctionAttr() { |
339 | assert(BB && "Must have a basic block to set any function attributes!")(static_cast <bool> (BB && "Must have a basic block to set any function attributes!" ) ? void (0) : __assert_fail ("BB && \"Must have a basic block to set any function attributes!\"" , "llvm/include/llvm/IR/IRBuilder.h", 339, __extension__ __PRETTY_FUNCTION__ )); |
340 | |
341 | Function *F = BB->getParent(); |
342 | if (!F->hasFnAttribute(Attribute::StrictFP)) { |
343 | F->addFnAttr(Attribute::StrictFP); |
344 | } |
345 | } |
346 | |
  /// Mark the call/invoke \p I as using strict floating point semantics.
  void setConstrainedFPCallAttr(CallBase *I) {
    I->addFnAttr(Attribute::StrictFP);
  }

  /// Set the operand bundles attached to calls created by this builder.
  // NOTE(review): this stores the ArrayRef itself, not a copy — the caller
  // must keep the underlying bundle storage alive while it is in effect.
  void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
    DefaultOperandBundles = OpBundles;
  }
354 | |
355 | //===--------------------------------------------------------------------===// |
356 | // RAII helpers. |
357 | //===--------------------------------------------------------------------===// |
358 | |
359 | // RAII object that stores the current insertion point and restores it |
360 | // when the object is destroyed. This includes the debug location. |
361 | class InsertPointGuard { |
362 | IRBuilderBase &Builder; |
363 | AssertingVH<BasicBlock> Block; |
364 | BasicBlock::iterator Point; |
365 | DebugLoc DbgLoc; |
366 | |
367 | public: |
368 | InsertPointGuard(IRBuilderBase &B) |
369 | : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()), |
370 | DbgLoc(B.getCurrentDebugLocation()) {} |
371 | |
372 | InsertPointGuard(const InsertPointGuard &) = delete; |
373 | InsertPointGuard &operator=(const InsertPointGuard &) = delete; |
374 | |
375 | ~InsertPointGuard() { |
376 | Builder.restoreIP(InsertPoint(Block, Point)); |
377 | Builder.SetCurrentDebugLocation(DbgLoc); |
378 | } |
379 | }; |
380 | |
381 | // RAII object that stores the current fast math settings and restores |
382 | // them when the object is destroyed. |
383 | class FastMathFlagGuard { |
384 | IRBuilderBase &Builder; |
385 | FastMathFlags FMF; |
386 | MDNode *FPMathTag; |
387 | bool IsFPConstrained; |
388 | fp::ExceptionBehavior DefaultConstrainedExcept; |
389 | RoundingMode DefaultConstrainedRounding; |
390 | |
391 | public: |
392 | FastMathFlagGuard(IRBuilderBase &B) |
393 | : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag), |
394 | IsFPConstrained(B.IsFPConstrained), |
395 | DefaultConstrainedExcept(B.DefaultConstrainedExcept), |
396 | DefaultConstrainedRounding(B.DefaultConstrainedRounding) {} |
397 | |
398 | FastMathFlagGuard(const FastMathFlagGuard &) = delete; |
399 | FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete; |
400 | |
401 | ~FastMathFlagGuard() { |
402 | Builder.FMF = FMF; |
403 | Builder.DefaultFPMathTag = FPMathTag; |
404 | Builder.IsFPConstrained = IsFPConstrained; |
405 | Builder.DefaultConstrainedExcept = DefaultConstrainedExcept; |
406 | Builder.DefaultConstrainedRounding = DefaultConstrainedRounding; |
407 | } |
408 | }; |
409 | |
410 | // RAII object that stores the current default operand bundles and restores |
411 | // them when the object is destroyed. |
412 | class OperandBundlesGuard { |
413 | IRBuilderBase &Builder; |
414 | ArrayRef<OperandBundleDef> DefaultOperandBundles; |
415 | |
416 | public: |
417 | OperandBundlesGuard(IRBuilderBase &B) |
418 | : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {} |
419 | |
420 | OperandBundlesGuard(const OperandBundlesGuard &) = delete; |
421 | OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete; |
422 | |
423 | ~OperandBundlesGuard() { |
424 | Builder.DefaultOperandBundles = DefaultOperandBundles; |
425 | } |
426 | }; |
427 | |
428 | |
429 | //===--------------------------------------------------------------------===// |
430 | // Miscellaneous creation methods. |
431 | //===--------------------------------------------------------------------===// |
432 | |
  /// Make a new global variable with initializer type i8*
  ///
  /// Make a new global variable with an initializer that has array of i8 type
  /// filled in with the null terminated string value specified. The new global
  /// variable will be marked mergable with any others of the same contents. If
  /// Name is specified, it is the name of the global variable created.
  ///
  /// If no module is given via \p M, it is taken from the insertion point basic
  /// block.
  GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
                                     unsigned AddressSpace = 0,
                                     Module *M = nullptr);
445 | |
  /// Get a constant value representing either true or false.
  ConstantInt *getInt1(bool V) {
    return ConstantInt::get(getInt1Ty(), V);
  }

  /// Get the constant value for i1 true.
  ConstantInt *getTrue() {
    return ConstantInt::getTrue(Context);
  }

  /// Get the constant value for i1 false.
  ConstantInt *getFalse() {
    return ConstantInt::getFalse(Context);
  }

  /// Get a constant 8-bit value.
  ConstantInt *getInt8(uint8_t C) {
    return ConstantInt::get(getInt8Ty(), C);
  }

  /// Get a constant 16-bit value.
  ConstantInt *getInt16(uint16_t C) {
    return ConstantInt::get(getInt16Ty(), C);
  }

  /// Get a constant 32-bit value.
  ConstantInt *getInt32(uint32_t C) {
    return ConstantInt::get(getInt32Ty(), C);
  }

  /// Get a constant 64-bit value.
  ConstantInt *getInt64(uint64_t C) {
    return ConstantInt::get(getInt64Ty(), C);
  }

  /// Get a constant N-bit value, zero extended or truncated from
  /// a 64-bit value.
  ConstantInt *getIntN(unsigned N, uint64_t C) {
    return ConstantInt::get(getIntNTy(N), C);
  }

  /// Get a constant integer value of the same bit width as \p AI.
  ConstantInt *getInt(const APInt &AI) {
    return ConstantInt::get(Context, AI);
  }
491 | |
492 | //===--------------------------------------------------------------------===// |
493 | // Type creation methods |
494 | //===--------------------------------------------------------------------===// |
495 | |
  /// Fetch the type representing a single bit
  IntegerType *getInt1Ty() {
    return Type::getInt1Ty(Context);
  }

  /// Fetch the type representing an 8-bit integer.
  IntegerType *getInt8Ty() {
    return Type::getInt8Ty(Context);
  }

  /// Fetch the type representing a 16-bit integer.
  IntegerType *getInt16Ty() {
    return Type::getInt16Ty(Context);
  }

  /// Fetch the type representing a 32-bit integer.
  IntegerType *getInt32Ty() {
    return Type::getInt32Ty(Context);
  }

  /// Fetch the type representing a 64-bit integer.
  IntegerType *getInt64Ty() {
    return Type::getInt64Ty(Context);
  }

  /// Fetch the type representing a 128-bit integer.
  IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }

  /// Fetch the type representing an N-bit integer.
  IntegerType *getIntNTy(unsigned N) {
    return Type::getIntNTy(Context, N);
  }

  /// Fetch the type representing a 16-bit floating point value.
  Type *getHalfTy() {
    return Type::getHalfTy(Context);
  }

  /// Fetch the type representing a 16-bit brain floating point value.
  Type *getBFloatTy() {
    return Type::getBFloatTy(Context);
  }

  /// Fetch the type representing a 32-bit floating point value.
  Type *getFloatTy() {
    return Type::getFloatTy(Context);
  }

  /// Fetch the type representing a 64-bit floating point value.
  Type *getDoubleTy() {
    return Type::getDoubleTy(Context);
  }

  /// Fetch the type representing void.
  Type *getVoidTy() {
    return Type::getVoidTy(Context);
  }

  /// Fetch the type representing a pointer.
  PointerType *getPtrTy(unsigned AddrSpace = 0) {
    return PointerType::get(Context, AddrSpace);
  }

  /// Fetch the type representing a pointer to an 8-bit integer value.
  // NOTE(review): with opaque pointers getPtrTy is the preferred form —
  // confirm project policy before adding new callers.
  PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
    return Type::getInt8PtrTy(Context, AddrSpace);
  }

  /// Fetch the type of an integer with size at least as big as that of a
  /// pointer in the given address space.
  IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
    return DL.getIntPtrType(Context, AddrSpace);
  }

  /// Fetch the type of an integer that should be used to index GEP operations
  /// within AddressSpace.
  IntegerType *getIndexTy(const DataLayout &DL, unsigned AddrSpace) {
    return DL.getIndexType(Context, AddrSpace);
  }
575 | |
576 | //===--------------------------------------------------------------------===// |
577 | // Intrinsic creation methods |
578 | //===--------------------------------------------------------------------===// |
579 | |
  /// Create and insert a memset to the specified pointer and the
  /// specified value.
  ///
  /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
  CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size,
                         MaybeAlign Align, bool isVolatile = false,
                         MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr) {
    // Convenience overload: wraps the constant size in an i64 ConstantInt and
    // forwards to the Value-sized form below.
    return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
                        TBAATag, ScopeTag, NoAliasTag);
  }

  /// Overload taking the store size as an arbitrary \p Size value.
  CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align,
                         bool isVolatile = false, MDNode *TBAATag = nullptr,
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr);

  /// Create a memset variant that must be expanded inline (no libcall).
  // NOTE(review): semantics inferred from the name — confirm in the
  // out-of-line definition.
  CallInst *CreateMemSetInline(Value *Dst, MaybeAlign DstAlign, Value *Val,
                               Value *Size, bool IsVolatile = false,
                               MDNode *TBAATag = nullptr,
                               MDNode *ScopeTag = nullptr,
                               MDNode *NoAliasTag = nullptr);
604 | |
605 | /// Create and insert an element unordered-atomic memset of the region of |
606 | /// memory starting at the given pointer to the given value. |
607 | /// |
608 | /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is |
609 | /// specified, it will be added to the instruction. Likewise with alias.scope |
610 | /// and noalias tags. |
611 | CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val, |
612 | uint64_t Size, Align Alignment, |
613 | uint32_t ElementSize, |
614 | MDNode *TBAATag = nullptr, |
615 | MDNode *ScopeTag = nullptr, |
616 | MDNode *NoAliasTag = nullptr) { |
617 | return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size), |
618 | Align(Alignment), ElementSize, |
619 | TBAATag, ScopeTag, NoAliasTag); |
620 | } |
621 | |
  /// Overload taking the total size as an arbitrary \p Size value.
  CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
                                               Value *Size, Align Alignment,
                                               uint32_t ElementSize,
                                               MDNode *TBAATag = nullptr,
                                               MDNode *ScopeTag = nullptr,
                                               MDNode *NoAliasTag = nullptr);
628 | |
  /// Create and insert a memcpy between the specified pointers.
  ///
  /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
  CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
                         MaybeAlign SrcAlign, uint64_t Size,
                         bool isVolatile = false, MDNode *TBAATag = nullptr,
                         MDNode *TBAAStructTag = nullptr,
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr) {
    // Convenience overload: wraps the constant size in an i64 ConstantInt.
    return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
                        isVolatile, TBAATag, TBAAStructTag, ScopeTag,
                        NoAliasTag);
  }

  /// Shared worker for the memory-transfer intrinsics; \p IntrID selects
  /// which intrinsic (e.g. memcpy) is emitted.
  CallInst *CreateMemTransferInst(
      Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
      MaybeAlign SrcAlign, Value *Size, bool isVolatile = false,
      MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
      MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
650 | |
  /// Overload taking the copy size as an arbitrary \p Size value; forwards to
  /// CreateMemTransferInst with Intrinsic::memcpy.
  CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
                         MaybeAlign SrcAlign, Value *Size,
                         bool isVolatile = false, MDNode *TBAATag = nullptr,
                         MDNode *TBAAStructTag = nullptr,
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr) {
    return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src,
                                 SrcAlign, Size, isVolatile, TBAATag,
                                 TBAAStructTag, ScopeTag, NoAliasTag);
  }

  /// Create a memcpy variant that must be expanded inline (no libcall).
  // NOTE(review): semantics inferred from the name — confirm in the
  // out-of-line definition.
  CallInst *
  CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src,
                     MaybeAlign SrcAlign, Value *Size, bool IsVolatile = false,
                     MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
                     MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
667 | |
  /// Create and insert an element unordered-atomic memcpy between the
  /// specified pointers.
  ///
  /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively.
  ///
  /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
  CallInst *CreateElementUnorderedAtomicMemCpy(
      Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
      uint32_t ElementSize, MDNode *TBAATag = nullptr,
      MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
      MDNode *NoAliasTag = nullptr);

  /// Create and insert a memmove between the specified pointers, with a
  /// constant size. Wraps the size in an i64 ConstantInt and forwards to the
  /// Value-sized overload below.
  CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
                          MaybeAlign SrcAlign, uint64_t Size,
                          bool isVolatile = false, MDNode *TBAATag = nullptr,
                          MDNode *ScopeTag = nullptr,
                          MDNode *NoAliasTag = nullptr) {
    return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
                         isVolatile, TBAATag, ScopeTag, NoAliasTag);
  }

  /// Overload taking the move size as an arbitrary \p Size value.
  CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
                          MaybeAlign SrcAlign, Value *Size,
                          bool isVolatile = false, MDNode *TBAATag = nullptr,
                          MDNode *ScopeTag = nullptr,
                          MDNode *NoAliasTag = nullptr);

  /// \brief Create and insert an element unordered-atomic memmove between the
  /// specified pointers.
  ///
  /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers,
  /// respectively.
  ///
  /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
  /// specified, it will be added to the instruction. Likewise with alias.scope
  /// and noalias tags.
  CallInst *CreateElementUnorderedAtomicMemMove(
      Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
      uint32_t ElementSize, MDNode *TBAATag = nullptr,
      MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
      MDNode *NoAliasTag = nullptr);
711 | |
private:
  /// Helper used by the Create*Reduce methods below; builds a call to the
  /// reduction intrinsic \p ID over the vector operand \p Src.
  CallInst *getReductionIntrinsic(Intrinsic::ID ID, Value *Src);

public:
  /// Create a sequential vector fadd reduction intrinsic of the source vector.
  /// The first parameter is a scalar accumulator value. An unordered reduction
  /// can be created by adding the reassoc fast-math flag to the resulting
  /// sequential reduction.
  CallInst *CreateFAddReduce(Value *Acc, Value *Src);

  /// Create a sequential vector fmul reduction intrinsic of the source vector.
  /// The first parameter is a scalar accumulator value. An unordered reduction
  /// can be created by adding the reassoc fast-math flag to the resulting
  /// sequential reduction.
  CallInst *CreateFMulReduce(Value *Acc, Value *Src);

  /// Create a vector int add reduction intrinsic of the source vector.
  CallInst *CreateAddReduce(Value *Src);

  /// Create a vector int mul reduction intrinsic of the source vector.
  CallInst *CreateMulReduce(Value *Src);

  /// Create a vector int AND reduction intrinsic of the source vector.
  CallInst *CreateAndReduce(Value *Src);

  /// Create a vector int OR reduction intrinsic of the source vector.
  CallInst *CreateOrReduce(Value *Src);

  /// Create a vector int XOR reduction intrinsic of the source vector.
  CallInst *CreateXorReduce(Value *Src);

  /// Create a vector integer max reduction intrinsic of the source
  /// vector; \p IsSigned selects smax vs. umax semantics.
  CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false);

  /// Create a vector integer min reduction intrinsic of the source
  /// vector; \p IsSigned selects smin vs. umin semantics.
  CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false);

  /// Create a vector float max reduction intrinsic of the source
  /// vector.
  CallInst *CreateFPMaxReduce(Value *Src);

  /// Create a vector float min reduction intrinsic of the source
  /// vector.
  CallInst *CreateFPMinReduce(Value *Src);
758 | |
  /// Create a lifetime.start intrinsic.
  ///
  /// If the pointer isn't i8* it will be converted.
  // NOTE(review): behavior when \p Size is null (presumably "whole object")
  // is defined out of line — confirm there.
  CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);

  /// Create a lifetime.end intrinsic.
  ///
  /// If the pointer isn't i8* it will be converted.
  CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);

  /// Create a call to invariant.start intrinsic.
  ///
  /// If the pointer isn't i8* it will be converted.
  CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);

  /// Create a call to llvm.threadlocal.address intrinsic.
  CallInst *CreateThreadLocalAddress(Value *Ptr);

  /// Create a call to Masked Load intrinsic
  CallInst *CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask,
                             Value *PassThru = nullptr, const Twine &Name = "");

  /// Create a call to Masked Store intrinsic
  CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
                              Value *Mask);

  /// Create a call to Masked Gather intrinsic
  CallInst *CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment,
                               Value *Mask = nullptr, Value *PassThru = nullptr,
                               const Twine &Name = "");

  /// Create a call to Masked Scatter intrinsic
  CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
                                Value *Mask = nullptr);

  /// Create a call to Masked Expand Load intrinsic
  CallInst *CreateMaskedExpandLoad(Type *Ty, Value *Ptr, Value *Mask = nullptr,
                                   Value *PassThru = nullptr,
                                   const Twine &Name = "");

  /// Create a call to Masked Compress Store intrinsic
  CallInst *CreateMaskedCompressStore(Value *Val, Value *Ptr,
                                      Value *Mask = nullptr);
802 | |
  /// Create an assume intrinsic call that allows the optimizer to
  /// assume that the provided condition will be true.
  ///
  /// The optional argument \p OpBundles specifies operand bundles that are
  /// added to the call instruction.
  CallInst *
  CreateAssumption(Value *Cond,
                   ArrayRef<OperandBundleDef> OpBundles = std::nullopt);
811 | |
812 | /// Create a llvm.experimental.noalias.scope.decl intrinsic call. |
813 | Instruction *CreateNoAliasScopeDeclaration(Value *Scope); |
814 | Instruction *CreateNoAliasScopeDeclaration(MDNode *ScopeTag) { |
815 | return CreateNoAliasScopeDeclaration( |
816 | MetadataAsValue::get(Context, ScopeTag)); |
817 | } |
818 | |
  /// Create a call to the experimental.gc.statepoint intrinsic to
  /// start a new statepoint sequence.
  CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
                                   FunctionCallee ActualCallee,
                                   ArrayRef<Value *> CallArgs,
                                   std::optional<ArrayRef<Value *>> DeoptArgs,
                                   ArrayRef<Value *> GCArgs,
                                   const Twine &Name = "");

  /// Create a call to the experimental.gc.statepoint intrinsic to
  /// start a new statepoint sequence. This overload additionally takes
  /// statepoint \p Flags and GC-transition arguments.
  CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
                                   FunctionCallee ActualCallee, uint32_t Flags,
                                   ArrayRef<Value *> CallArgs,
                                   std::optional<ArrayRef<Use>> TransitionArgs,
                                   std::optional<ArrayRef<Use>> DeoptArgs,
                                   ArrayRef<Value *> GCArgs,
                                   const Twine &Name = "");

  /// Convenience function for the common case when CallArgs are filled
  /// in using ArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be
  /// .get()'ed to get the Value pointer.
  CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
                                   FunctionCallee ActualCallee,
                                   ArrayRef<Use> CallArgs,
                                   std::optional<ArrayRef<Value *>> DeoptArgs,
                                   ArrayRef<Value *> GCArgs,
                                   const Twine &Name = "");

  /// Create an invoke to the experimental.gc.statepoint intrinsic to
  /// start a new statepoint sequence.
  InvokeInst *
  CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
                           FunctionCallee ActualInvokee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
                           std::optional<ArrayRef<Value *>> DeoptArgs,
                           ArrayRef<Value *> GCArgs, const Twine &Name = "");

  /// Create an invoke to the experimental.gc.statepoint intrinsic to
  /// start a new statepoint sequence. This overload additionally takes
  /// statepoint \p Flags and GC-transition arguments.
  InvokeInst *CreateGCStatepointInvoke(
      uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
      BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
      ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Use>> TransitionArgs,
      std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
      const Twine &Name = "");

  // Convenience function for the common case when CallArgs are filled in using
  // ArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
  // get the Value *.
  InvokeInst *
  CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
                           FunctionCallee ActualInvokee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
                           std::optional<ArrayRef<Value *>> DeoptArgs,
                           ArrayRef<Value *> GCArgs, const Twine &Name = "");

  /// Create a call to the experimental.gc.result intrinsic to extract
  /// the result from a call wrapped in a statepoint.
  CallInst *CreateGCResult(Instruction *Statepoint,
                           Type *ResultType,
                           const Twine &Name = "");

  /// Create a call to the experimental.gc.relocate intrinsics to
  /// project the relocated value of one pointer from the statepoint.
  CallInst *CreateGCRelocate(Instruction *Statepoint,
                             int BaseOffset,
                             int DerivedOffset,
                             Type *ResultType,
                             const Twine &Name = "");

  /// Create a call to the experimental.gc.pointer.base intrinsic to get the
  /// base pointer for the specified derived pointer.
  CallInst *CreateGCGetPointerBase(Value *DerivedPtr, const Twine &Name = "");

  /// Create a call to the experimental.gc.get.pointer.offset intrinsic to get
  /// the offset of the specified derived pointer from its base.
  CallInst *CreateGCGetPointerOffset(Value *DerivedPtr, const Twine &Name = "");
897 | |
  /// Create a call to llvm.vscale, multiplied by \p Scaling. The type of VScale
  /// will be the same type as that of \p Scaling.
  Value *CreateVScale(Constant *Scaling, const Twine &Name = "");

  /// Create an expression which evaluates to the number of elements in \p EC
  /// at runtime.
  Value *CreateElementCount(Type *DstType, ElementCount EC);

  /// Create an expression which evaluates to the number of units in \p Size
  /// at runtime. This works for both units of bits and bytes.
  Value *CreateTypeSize(Type *DstType, TypeSize Size);

  /// Creates a vector of type \p DstType with the linear sequence <0, 1, ...>
  Value *CreateStepVector(Type *DstType, const Twine &Name = "");

  /// Create a call to intrinsic \p ID with 1 operand which is mangled on its
  /// type.
  CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
                                 Instruction *FMFSource = nullptr,
                                 const Twine &Name = "");

  /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
  /// first type.
  CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
                                  Instruction *FMFSource = nullptr,
                                  const Twine &Name = "");

  /// Create a call to intrinsic \p ID with \p Args, mangled using \p Types. If
  /// \p FMFSource is provided, copy fast-math-flags from that instruction to
  /// the intrinsic.
  CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
                            ArrayRef<Value *> Args,
                            Instruction *FMFSource = nullptr,
                            const Twine &Name = "");

  /// Create a call to intrinsic \p ID with \p RetTy and \p Args. If
  /// \p FMFSource is provided, copy fast-math-flags from that instruction to
  /// the intrinsic.
  CallInst *CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
                            ArrayRef<Value *> Args,
                            Instruction *FMFSource = nullptr,
                            const Twine &Name = "");
940 | |
  /// Create call to the minnum intrinsic.
  CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name);
  }

  /// Create call to the maxnum intrinsic.
  CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name);
  }

  /// Create call to the minimum intrinsic.
  CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
  }

  /// Create call to the maximum intrinsic.
  CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
  }

  /// Create call to the copysign intrinsic. Unlike the wrappers above, this
  /// one also forwards an optional \p FMFSource for fast-math flags.
  CallInst *CreateCopySign(Value *LHS, Value *RHS,
                           Instruction *FMFSource = nullptr,
                           const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::copysign, LHS, RHS, FMFSource,
                                 Name);
  }
968 | |
  /// Create a call to the arithmetic_fence intrinsic.
  CallInst *CreateArithmeticFence(Value *Val, Type *DstType,
                                  const Twine &Name = "") {
    return CreateIntrinsic(Intrinsic::arithmetic_fence, DstType, Val, nullptr,
                           Name);
  }

  /// Create a call to the vector.extract intrinsic. Extracts a subvector of
  /// type \p DstType from \p SrcVec starting at index \p Idx.
  CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx,
                                const Twine &Name = "") {
    return CreateIntrinsic(Intrinsic::vector_extract,
                           {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr,
                           Name);
  }

  /// Create a call to the vector.insert intrinsic. Inserts \p SubVec into
  /// \p SrcVec at index \p Idx, producing a value of type \p DstType.
  CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec,
                               Value *Idx, const Twine &Name = "") {
    return CreateIntrinsic(Intrinsic::vector_insert,
                           {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx},
                           nullptr, Name);
  }
991 | |
private:
  /// Create a call to a masked intrinsic with given Id.
  CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
                                  ArrayRef<Type *> OverloadedTypes,
                                  const Twine &Name = "");

  /// Cast \p Ptr to an i8* if needed.
  // NOTE(review): semantics inferred from the name — confirm in the
  // out-of-line definition.
  Value *getCastedInt8PtrValue(Value *Ptr);
999 | |
1000 | //===--------------------------------------------------------------------===// |
1001 | // Instruction creation methods: Terminators |
1002 | //===--------------------------------------------------------------------===// |
1003 | |
1004 | private: |
1005 | /// Helper to add branch weight and unpredictable metadata onto an |
1006 | /// instruction. |
1007 | /// \returns The annotated instruction. |
  /// Helper to add branch weight and unpredictable metadata onto an
  /// instruction.
  /// Either argument may be null, in which case the corresponding metadata
  /// kind is simply not attached.
  /// \returns The annotated instruction.
  template <typename InstTy>
  InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
    if (Weights)
      I->setMetadata(LLVMContext::MD_prof, Weights);
    if (Unpredictable)
      I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
    return I;
  }
1016 | |
1017 | public: |
  /// Create a 'ret void' instruction.
  ReturnInst *CreateRetVoid() {
    return Insert(ReturnInst::Create(Context));
  }
1022 | |
  /// Create a 'ret <val>' instruction.
  ReturnInst *CreateRet(Value *V) {
    return Insert(ReturnInst::Create(Context, V));
  }
1027 | |
  /// Create a sequence of N insertvalue instructions,
  /// with one Value from the retVals array each, that build a aggregate
  /// return value one value at a time, and a ret instruction to return
  /// the resulting aggregate value.
  ///
  /// This is a convenience function for code that uses aggregate return values
  /// as a vehicle for having multiple return values.
  ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
    // Start from poison so unwritten fields are explicitly undefined.
    Value *V = PoisonValue::get(getCurrentFunctionReturnType());
    for (unsigned i = 0; i != N; ++i)
      V = CreateInsertValue(V, retVals[i], i, "mrv");
    return Insert(ReturnInst::Create(Context, V));
  }
1041 | |
  /// Create an unconditional 'br label X' instruction.
  BranchInst *CreateBr(BasicBlock *Dest) {
    return Insert(BranchInst::Create(Dest));
  }
1046 | |
  /// Create a conditional 'br Cond, TrueDest, FalseDest'
  /// instruction.
  /// \p BranchWeights / \p Unpredictable, when non-null, are attached as
  /// !prof / !unpredictable metadata via addBranchMetadata.
  BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
                           MDNode *BranchWeights = nullptr,
                           MDNode *Unpredictable = nullptr) {
    return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
                                    BranchWeights, Unpredictable));
  }
1055 | |
1056 | /// Create a conditional 'br Cond, TrueDest, FalseDest' |
1057 | /// instruction. Copy branch meta data if available. |
1058 | BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, |
1059 | Instruction *MDSrc) { |
1060 | BranchInst *Br = BranchInst::Create(True, False, Cond); |
1061 | if (MDSrc) { |
1062 | unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable, |
1063 | LLVMContext::MD_make_implicit, LLVMContext::MD_dbg}; |
1064 | Br->copyMetadata(*MDSrc, WL); |
1065 | } |
1066 | return Insert(Br); |
1067 | } |
1068 | |
  /// Create a switch instruction with the specified value, default dest,
  /// and with a hint for the number of cases that will be added (for efficient
  /// allocation).
  /// Branch-weight / unpredictable metadata is attached when provided.
  SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
                           MDNode *BranchWeights = nullptr,
                           MDNode *Unpredictable = nullptr) {
    return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
                                    BranchWeights, Unpredictable));
  }
1078 | |
  /// Create an indirect branch instruction with the specified address
  /// operand, with an optional hint for the number of destinations that will be
  /// added (for efficient allocation).
  IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
    return Insert(IndirectBrInst::Create(Addr, NumDests));
  }
1085 | |
  /// Create an invoke instruction.
  /// Primary overload: explicit function type, callee value, landing
  /// destinations, arguments and operand bundles. When the builder is in
  /// strict-FP mode, the constrained-FP call attribute is applied.
  InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
                           BasicBlock *NormalDest, BasicBlock *UnwindDest,
                           ArrayRef<Value *> Args,
                           ArrayRef<OperandBundleDef> OpBundles,
                           const Twine &Name = "") {
    InvokeInst *II =
        InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles);
    if (IsFPConstrained)
      setConstrainedFPCallAttr(II);
    return Insert(II, Name);
  }
  /// As above, without operand bundles.
  InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
                           BasicBlock *NormalDest, BasicBlock *UnwindDest,
                           ArrayRef<Value *> Args = std::nullopt,
                           const Twine &Name = "") {
    InvokeInst *II =
        InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args);
    if (IsFPConstrained)
      setConstrainedFPCallAttr(II);
    return Insert(II, Name);
  }

  /// FunctionCallee convenience overload (with operand bundles); forwards
  /// to the FunctionType/Value overload.
  InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest, ArrayRef<Value *> Args,
                           ArrayRef<OperandBundleDef> OpBundles,
                           const Twine &Name = "") {
    return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
                        NormalDest, UnwindDest, Args, OpBundles, Name);
  }

  /// FunctionCallee convenience overload (no operand bundles).
  InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest,
                           ArrayRef<Value *> Args = std::nullopt,
                           const Twine &Name = "") {
    return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
                        NormalDest, UnwindDest, Args, Name);
  }
1124 | |
  /// \brief Create a callbr instruction.
  /// Primary overloads: explicit function type and callee value, with the
  /// fallthrough destination plus the list of possible indirect targets.
  CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
                           BasicBlock *DefaultDest,
                           ArrayRef<BasicBlock *> IndirectDests,
                           ArrayRef<Value *> Args = std::nullopt,
                           const Twine &Name = "") {
    return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests,
                                     Args), Name);
  }
  /// As above, additionally attaching operand bundles.
  CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
                           BasicBlock *DefaultDest,
                           ArrayRef<BasicBlock *> IndirectDests,
                           ArrayRef<Value *> Args,
                           ArrayRef<OperandBundleDef> OpBundles,
                           const Twine &Name = "") {
    return Insert(
        CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
                           OpBundles), Name);
  }
1144 | |
  /// FunctionCallee convenience overload (no operand bundles); forwards to
  /// the FunctionType/Value overload.
  CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
                           ArrayRef<BasicBlock *> IndirectDests,
                           ArrayRef<Value *> Args = std::nullopt,
                           const Twine &Name = "") {
    return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
                        DefaultDest, IndirectDests, Args, Name);
  }
1152 | CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest, |
1153 | ArrayRef<BasicBlock *> IndirectDests, |
1154 | ArrayRef<Value *> Args, |
1155 | ArrayRef<OperandBundleDef> OpBundles, |
1156 | const Twine &Name = "") { |
1157 | return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(), |
1158 | DefaultDest, IndirectDests, Args, Name); |
1159 | } |
1160 | |
  /// Create a 'resume' instruction to re-raise the in-flight exception.
  ResumeInst *CreateResume(Value *Exn) {
    return Insert(ResumeInst::Create(Exn));
  }

  /// Create a 'cleanupret' from \p CleanupPad; unwinds to \p UnwindBB, or to
  /// the caller when it is null.
  CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
                                      BasicBlock *UnwindBB = nullptr) {
    return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
  }

  /// Create a 'catchswitch' within \p ParentPad; \p NumHandlers is an
  /// allocation hint for the handler list.
  CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
                                     unsigned NumHandlers,
                                     const Twine &Name = "") {
    return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
                  Name);
  }

  /// Create a 'catchpad' within \p ParentPad with the given arguments.
  CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
                               const Twine &Name = "") {
    return Insert(CatchPadInst::Create(ParentPad, Args), Name);
  }

  /// Create a 'cleanuppad' within \p ParentPad with the given arguments.
  CleanupPadInst *CreateCleanupPad(Value *ParentPad,
                                   ArrayRef<Value *> Args = std::nullopt,
                                   const Twine &Name = "") {
    return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
  }

  /// Create a 'catchret' from \p CatchPad to \p BB.
  CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
    return Insert(CatchReturnInst::Create(CatchPad, BB));
  }

  /// Create an 'unreachable' instruction.
  UnreachableInst *CreateUnreachable() {
    return Insert(new UnreachableInst(Context));
  }
1195 | |
1196 | //===--------------------------------------------------------------------===// |
1197 | // Instruction creation methods: Binary Operators |
1198 | //===--------------------------------------------------------------------===// |
private:
  /// Insert a binary operator of kind \p Opc and tag it with the requested
  /// no-unsigned-wrap / no-signed-wrap flags. Shared by CreateAdd/Sub/Mul/Shl.
  BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
                                          Value *LHS, Value *RHS,
                                          const Twine &Name,
                                          bool HasNUW, bool HasNSW) {
    BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
    if (HasNUW) BO->setHasNoUnsignedWrap();
    if (HasNSW) BO->setHasNoSignedWrap();
    return BO;
  }
1209 | |
  /// Attach FP math metadata and fast-math flags to \p I.
  /// Falls back to the builder's DefaultFPMathTag when \p FPMD is null; the
  /// metadata is only attached when a tag is actually available.
  Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
                          FastMathFlags FMF) const {
    if (!FPMD)
      FPMD = DefaultFPMathTag;
    if (FPMD)
      I->setMetadata(LLVMContext::MD_fpmath, FPMD);
    I->setFastMathFlags(FMF);
    return I;
  }
1219 | |
  /// Convert a (possibly absent) rounding mode into the metadata-as-value
  /// operand used by constrained FP intrinsics; absent means the builder's
  /// default constrained rounding.
  Value *getConstrainedFPRounding(std::optional<RoundingMode> Rounding) {
    RoundingMode UseRounding = DefaultConstrainedRounding;

    if (Rounding)
      UseRounding = *Rounding;

    std::optional<StringRef> RoundingStr =
        convertRoundingModeToStr(UseRounding);
    assert(RoundingStr && "Garbage strict rounding mode!")(static_cast <bool> (RoundingStr && "Garbage strict rounding mode!" ) ? void (0) : __assert_fail ("RoundingStr && \"Garbage strict rounding mode!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1228, __extension__ __PRETTY_FUNCTION__ ));
    auto *RoundingMDS = MDString::get(Context, *RoundingStr);

    return MetadataAsValue::get(Context, RoundingMDS);
  }
1233 | |
  /// Convert a (possibly absent) exception behavior into the metadata-as-value
  /// operand used by constrained FP intrinsics; absent means the builder's
  /// default constrained exception behavior.
  Value *getConstrainedFPExcept(std::optional<fp::ExceptionBehavior> Except) {
    std::optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(
        Except.value_or(DefaultConstrainedExcept));
    assert(ExceptStr && "Garbage strict exception behavior!")(static_cast <bool> (ExceptStr && "Garbage strict exception behavior!" ) ? void (0) : __assert_fail ("ExceptStr && \"Garbage strict exception behavior!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1237, __extension__ __PRETTY_FUNCTION__ ));
    auto *ExceptMDS = MDString::get(Context, *ExceptStr);

    return MetadataAsValue::get(Context, ExceptMDS);
  }
1242 | |
  /// Convert an FP comparison predicate into the metadata-as-value operand
  /// used by the constrained fcmp intrinsics. FCMP_FALSE/FCMP_TRUE are
  /// rejected because they have no constrained-intrinsic representation.
  Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) {
    assert(CmpInst::isFPPredicate(Predicate) &&(static_cast <bool> (CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst ::FCMP_TRUE && "Invalid constrained FP comparison predicate!" ) ? void (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1247, __extension__ __PRETTY_FUNCTION__ ))
           Predicate != CmpInst::FCMP_FALSE &&(static_cast <bool> (CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst ::FCMP_TRUE && "Invalid constrained FP comparison predicate!" ) ? void (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1247, __extension__ __PRETTY_FUNCTION__ ))
           Predicate != CmpInst::FCMP_TRUE &&(static_cast <bool> (CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst ::FCMP_TRUE && "Invalid constrained FP comparison predicate!" ) ? void (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1247, __extension__ __PRETTY_FUNCTION__ ))
           "Invalid constrained FP comparison predicate!")(static_cast <bool> (CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst ::FCMP_TRUE && "Invalid constrained FP comparison predicate!" ) ? void (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1247, __extension__ __PRETTY_FUNCTION__ ));

    StringRef PredicateStr = CmpInst::getPredicateName(Predicate);
    auto *PredicateMDS = MDString::get(Context, PredicateStr);

    return MetadataAsValue::get(Context, PredicateMDS);
  }
1254 | |
1255 | public: |
  /// Create an 'add' with optional nuw/nsw flags; constant-folds through the
  /// builder's Folder when possible.
  Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (Value *V =
            Folder.FoldNoWrapBinOp(Instruction::Add, LHS, RHS, HasNUW, HasNSW))
      return V;
    return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name, HasNUW,
                                   HasNSW);
  }

  /// Shorthand for CreateAdd with the nsw flag set.
  Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateAdd(LHS, RHS, Name, false, true);
  }

  /// Shorthand for CreateAdd with the nuw flag set.
  Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateAdd(LHS, RHS, Name, true, false);
  }
1272 | |
  /// Create a 'sub' with optional nuw/nsw flags; constant-folds through the
  /// builder's Folder when possible.
  Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (Value *V =
            Folder.FoldNoWrapBinOp(Instruction::Sub, LHS, RHS, HasNUW, HasNSW))
      return V;
    return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name, HasNUW,
                                   HasNSW);
  }

  /// Shorthand for CreateSub with the nsw flag set.
  Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateSub(LHS, RHS, Name, false, true);
  }

  /// Shorthand for CreateSub with the nuw flag set.
  Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateSub(LHS, RHS, Name, true, false);
  }
1289 | |
  /// Create a 'mul' with optional nuw/nsw flags; constant-folds through the
  /// builder's Folder when possible.
  Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (Value *V =
            Folder.FoldNoWrapBinOp(Instruction::Mul, LHS, RHS, HasNUW, HasNSW))
      return V;
    return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name, HasNUW,
                                   HasNSW);
  }

  /// Shorthand for CreateMul with the nsw flag set.
  Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateMul(LHS, RHS, Name, false, true);
  }

  /// Shorthand for CreateMul with the nuw flag set.
  Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateMul(LHS, RHS, Name, true, false);
  }
1306 | |
1307 | Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "", |
1308 | bool isExact = false) { |
1309 | if (Value *V = Folder.FoldExactBinOp(Instruction::UDiv, LHS, RHS, isExact)) |
1310 | return V; |
1311 | if (!isExact) |
1312 | return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name); |
1313 | return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name); |
1314 | } |
1315 | |
1316 | Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") { |
1317 | return CreateUDiv(LHS, RHS, Name, true); |
1318 | } |
1319 | |
1320 | Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "", |
1321 | bool isExact = false) { |
1322 | if (Value *V = Folder.FoldExactBinOp(Instruction::SDiv, LHS, RHS, isExact)) |
1323 | return V; |
1324 | if (!isExact) |
1325 | return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name); |
1326 | return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name); |
1327 | } |
1328 | |
1329 | Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") { |
1330 | return CreateSDiv(LHS, RHS, Name, true); |
1331 | } |
1332 | |
  /// Create a 'urem'; constant-folds through the builder's Folder when
  /// possible.
  Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (Value *V = Folder.FoldBinOp(Instruction::URem, LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
  }

  /// Create an 'srem'; constant-folds through the builder's Folder when
  /// possible.
  Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (Value *V = Folder.FoldBinOp(Instruction::SRem, LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
  }
1344 | |
  /// Create a 'shl' with optional nuw/nsw flags; constant-folds through the
  /// builder's Folder when possible.
  Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (Value *V =
            Folder.FoldNoWrapBinOp(Instruction::Shl, LHS, RHS, HasNUW, HasNSW))
      return V;
    return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
                                   HasNUW, HasNSW);
  }

  /// Convenience overload: shift amount as an APInt constant of LHS's type.
  Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
                     HasNUW, HasNSW);
  }

  /// Convenience overload: shift amount as a plain integer constant.
  Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
                     HasNUW, HasNSW);
  }
1365 | |
  /// Create an 'lshr' (optionally 'exact'); constant-folds through the
  /// builder's Folder when possible.
  Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
                    bool isExact = false) {
    if (Value *V = Folder.FoldExactBinOp(Instruction::LShr, LHS, RHS, isExact))
      return V;
    if (!isExact)
      return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
    return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
  }

  /// Convenience overload: shift amount as an APInt constant of LHS's type.
  Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
  }

  /// Convenience overload: shift amount as a plain integer constant.
  Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
  }
1384 | |
  /// Create an 'ashr' (optionally 'exact'); constant-folds through the
  /// builder's Folder when possible.
  Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
                    bool isExact = false) {
    if (Value *V = Folder.FoldExactBinOp(Instruction::AShr, LHS, RHS, isExact))
      return V;
    if (!isExact)
      return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
    return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
  }

  /// Convenience overload: shift amount as an APInt constant of LHS's type.
  Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
  }

  /// Convenience overload: shift amount as a plain integer constant.
  Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
  }
1403 | |
  /// Create an 'and'; constant-folds through the builder's Folder when
  /// possible.
  Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (auto *V = Folder.FoldBinOp(Instruction::And, LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
  }

  /// Convenience overload: RHS as an APInt constant of LHS's type.
  Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
    return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  /// Convenience overload: RHS as a plain integer constant.
  Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
    return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  /// Left-fold 'and' over a non-empty operand list.
  Value *CreateAnd(ArrayRef<Value*> Ops) {
    assert(!Ops.empty())(static_cast <bool> (!Ops.empty()) ? void (0) : __assert_fail ("!Ops.empty()", "llvm/include/llvm/IR/IRBuilder.h", 1419, __extension__ __PRETTY_FUNCTION__));
    Value *Accum = Ops[0];
    for (unsigned i = 1; i < Ops.size(); i++)
      Accum = CreateAnd(Accum, Ops[i]);
    return Accum;
  }
1425 | |
  /// Create an 'or'; constant-folds through the builder's Folder when
  /// possible.
  Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (auto *V = Folder.FoldBinOp(Instruction::Or, LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
  }

  /// Convenience overload: RHS as an APInt constant of LHS's type.
  Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
    return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  /// Convenience overload: RHS as a plain integer constant.
  Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
    return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  /// Left-fold 'or' over a non-empty operand list.
  Value *CreateOr(ArrayRef<Value*> Ops) {
    assert(!Ops.empty())(static_cast <bool> (!Ops.empty()) ? void (0) : __assert_fail ("!Ops.empty()", "llvm/include/llvm/IR/IRBuilder.h", 1441, __extension__ __PRETTY_FUNCTION__));
    Value *Accum = Ops[0];
    for (unsigned i = 1; i < Ops.size(); i++)
      Accum = CreateOr(Accum, Ops[i]);
    return Accum;
  }
1447 | |
  /// Create an 'xor'; constant-folds through the builder's Folder when
  /// possible.
  Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (Value *V = Folder.FoldBinOp(Instruction::Xor, LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
  }

  /// Convenience overload: RHS as an APInt constant of LHS's type.
  Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
    return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  /// Convenience overload: RHS as a plain integer constant.
  Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
    return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }
1461 | |
  /// Create an 'fadd' using the builder's default fast-math flags.
  /// In strict-FP mode this lowers to the constrained fadd intrinsic;
  /// otherwise it constant-folds through the Folder when possible.
  Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = Folder.FoldBinOpFMF(Instruction::FAdd, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
    return Insert(I, Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
                                      L, R, FMFSource, Name);

    // Local FMF shadows the builder member of the same name.
    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *V = Folder.FoldBinOpFMF(Instruction::FAdd, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr, FMF);
    return Insert(I, Name);
  }
1488 | |
  /// Create an 'fsub' using the builder's default fast-math flags.
  /// In strict-FP mode this lowers to the constrained fsub intrinsic;
  /// otherwise it constant-folds through the Folder when possible.
  Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = Folder.FoldBinOpFMF(Instruction::FSub, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
    return Insert(I, Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
                                      L, R, FMFSource, Name);

    // Local FMF shadows the builder member of the same name.
    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *V = Folder.FoldBinOpFMF(Instruction::FSub, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr, FMF);
    return Insert(I, Name);
  }
1515 | |
  /// Create an 'fmul' using the builder's default fast-math flags.
  /// In strict-FP mode this lowers to the constrained fmul intrinsic;
  /// otherwise it constant-folds through the Folder when possible.
  Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = Folder.FoldBinOpFMF(Instruction::FMul, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
    return Insert(I, Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
                                      L, R, FMFSource, Name);

    // Local FMF shadows the builder member of the same name.
    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *V = Folder.FoldBinOpFMF(Instruction::FMul, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr, FMF);
    return Insert(I, Name);
  }
1542 | |
  /// Create an 'fdiv' using the builder's default fast-math flags.
  /// In strict-FP mode this lowers to the constrained fdiv intrinsic;
  /// otherwise it constant-folds through the Folder when possible.
  Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = Folder.FoldBinOpFMF(Instruction::FDiv, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
    return Insert(I, Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
                                      L, R, FMFSource, Name);

    // Local FMF shadows the builder member of the same name.
    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *V = Folder.FoldBinOpFMF(Instruction::FDiv, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr, FMF);
    return Insert(I, Name);
  }
1569 | |
  /// Create an 'frem' using the builder's default fast-math flags.
  /// In strict-FP mode this lowers to the constrained frem intrinsic;
  /// otherwise it constant-folds through the Folder when possible.
  Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = Folder.FoldBinOpFMF(Instruction::FRem, L, R, FMF)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
    return Insert(I, Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
                                      L, R, FMFSource, Name);

    // Local FMF shadows the builder member of the same name.
    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *V = Folder.FoldBinOpFMF(Instruction::FRem, L, R, FMF)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr, FMF);
    return Insert(I, Name);
  }
1594 | |
  /// Create a binary operator of arbitrary opcode \p Opc.
  /// Constant-folds when possible; FP math attributes are applied only when
  /// the created instruction is an FPMathOperator.
  Value *CreateBinOp(Instruction::BinaryOps Opc,
                     Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
    if (Value *V = Folder.FoldBinOp(Opc, LHS, RHS)) return V;
    Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
    if (isa<FPMathOperator>(BinOp))
      setFPAttrs(BinOp, FPMathTag, FMF);
    return Insert(BinOp, Name);
  }
1604 | |
  /// Create a short-circuiting logical AND as a select:
  /// 'select Cond1, Cond2, false'. Cond2 must be i1 (or a vector of i1).
  Value *CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name = "") {
    assert(Cond2->getType()->isIntOrIntVectorTy(1))(static_cast <bool> (Cond2->getType()->isIntOrIntVectorTy (1)) ? void (0) : __assert_fail ("Cond2->getType()->isIntOrIntVectorTy(1)" , "llvm/include/llvm/IR/IRBuilder.h", 1606, __extension__ __PRETTY_FUNCTION__ ));
    return CreateSelect(Cond1, Cond2,
                        ConstantInt::getNullValue(Cond2->getType()), Name);
  }

  /// Create a short-circuiting logical OR as a select:
  /// 'select Cond1, true, Cond2'. Cond2 must be i1 (or a vector of i1).
  Value *CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name = "") {
    assert(Cond2->getType()->isIntOrIntVectorTy(1))(static_cast <bool> (Cond2->getType()->isIntOrIntVectorTy (1)) ? void (0) : __assert_fail ("Cond2->getType()->isIntOrIntVectorTy(1)" , "llvm/include/llvm/IR/IRBuilder.h", 1612, __extension__ __PRETTY_FUNCTION__ ));
    return CreateSelect(Cond1, ConstantInt::getAllOnesValue(Cond2->getType()),
                        Cond2, Name);
  }
1616 | |
  /// Dispatch to CreateLogicalAnd/CreateLogicalOr based on \p Opc;
  /// any other opcode is a programming error (unreachable).
  Value *CreateLogicalOp(Instruction::BinaryOps Opc, Value *Cond1, Value *Cond2,
                         const Twine &Name = "") {
    switch (Opc) {
    case Instruction::And:
      return CreateLogicalAnd(Cond1, Cond2, Name);
    case Instruction::Or:
      return CreateLogicalOr(Cond1, Cond2, Name);
    default:
      break;
    }
    llvm_unreachable("Not a logical operation.")::llvm::llvm_unreachable_internal("Not a logical operation.", "llvm/include/llvm/IR/IRBuilder.h", 1627);
  }
1629 | |
  // NOTE: this is sequential, non-commutative, ordered reduction!
  /// Left-fold CreateLogicalOr over a non-empty operand list.
  Value *CreateLogicalOr(ArrayRef<Value *> Ops) {
    assert(!Ops.empty())(static_cast <bool> (!Ops.empty()) ? void (0) : __assert_fail ("!Ops.empty()", "llvm/include/llvm/IR/IRBuilder.h", 1632, __extension__ __PRETTY_FUNCTION__))
    Value *Accum = Ops[0];
    for (unsigned i = 1; i < Ops.size(); i++)
      Accum = CreateLogicalOr(Accum, Ops[i]);
    return Accum;
  }
1638 | |
  /// Create a constrained-FP binary intrinsic call (out-of-line definition).
  /// Rounding/exception default to the builder's constrained-FP settings
  /// when not supplied.
  CallInst *CreateConstrainedFPBinOp(
      Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
      const Twine &Name = "", MDNode *FPMathTag = nullptr,
      std::optional<RoundingMode> Rounding = std::nullopt,
      std::optional<fp::ExceptionBehavior> Except = std::nullopt);
1644 | |
  /// Create an integer negation as '0 - V', with optional nuw/nsw flags.
  Value *CreateNeg(Value *V, const Twine &Name = "", bool HasNUW = false,
                   bool HasNSW = false) {
    return CreateSub(Constant::getNullValue(V->getType()), V, Name, HasNUW,
                     HasNSW);
  }

  /// Shorthand for CreateNeg with the nsw flag set.
  Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
    return CreateNeg(V, Name, false, true);
  }

  /// Shorthand for CreateNeg with the nuw flag set.
  Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
    return CreateNeg(V, Name, true, false);
  }
1658 | |
  /// Create an 'fneg' using the builder's default fast-math flags;
  /// constant-folds through the Folder when possible.
  Value *CreateFNeg(Value *V, const Twine &Name = "",
                    MDNode *FPMathTag = nullptr) {
    if (Value *Res = Folder.FoldUnOpFMF(Instruction::FNeg, V, FMF))
      return Res;
    return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
                  Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
                       const Twine &Name = "") {
    // Local FMF shadows the builder member of the same name.
    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *Res = Folder.FoldUnOpFMF(Instruction::FNeg, V, FMF))
      return Res;
    return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr, FMF),
                  Name);
  }
1677 | |
  /// Create a bitwise NOT as 'xor V, -1'.
  Value *CreateNot(Value *V, const Twine &Name = "") {
    return CreateXor(V, Constant::getAllOnesValue(V->getType()), Name);
  }
1681 | |
1682 | Value *CreateUnOp(Instruction::UnaryOps Opc, |
1683 | Value *V, const Twine &Name = "", |
1684 | MDNode *FPMathTag = nullptr) { |
1685 | if (Value *Res = Folder.FoldUnOpFMF(Opc, V, FMF)) |
1686 | return Res; |
1687 | Instruction *UnOp = UnaryOperator::Create(Opc, V); |
1688 | if (isa<FPMathOperator>(UnOp)) |
1689 | setFPAttrs(UnOp, FPMathTag, FMF); |
1690 | return Insert(UnOp, Name); |
1691 | } |
1692 | |
1693 | /// Create either a UnaryOperator or BinaryOperator depending on \p Opc. |
1694 | /// Correct number of operands must be passed accordingly. |
1695 | Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops, |
1696 | const Twine &Name = "", MDNode *FPMathTag = nullptr); |
1697 | |
1698 | //===--------------------------------------------------------------------===// |
1699 | // Instruction creation methods: Memory Instructions |
1700 | //===--------------------------------------------------------------------===// |
1701 | |
1702 | AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace, |
1703 | Value *ArraySize = nullptr, const Twine &Name = "") { |
1704 | const DataLayout &DL = BB->getModule()->getDataLayout(); |
1705 | Align AllocaAlign = DL.getPrefTypeAlign(Ty); |
1706 | return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name); |
1707 | } |
1708 | |
1709 | AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr, |
1710 | const Twine &Name = "") { |
1711 | const DataLayout &DL = BB->getModule()->getDataLayout(); |
1712 | Align AllocaAlign = DL.getPrefTypeAlign(Ty); |
1713 | unsigned AddrSpace = DL.getAllocaAddrSpace(); |
1714 | return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name); |
1715 | } |
1716 | |
1717 | /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of |
1718 | /// converting the string to 'bool' for the isVolatile parameter. |
1719 | LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) { |
1720 | return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name); |
1721 | } |
1722 | |
1723 | LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") { |
1724 | return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name); |
1725 | } |
1726 | |
1727 | LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile, |
1728 | const Twine &Name = "") { |
1729 | return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name); |
1730 | } |
1731 | |
1732 | StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) { |
1733 | return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile); |
1734 | } |
1735 | |
1736 | LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, |
1737 | const char *Name) { |
1738 | return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name); |
1739 | } |
1740 | |
1741 | LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, |
1742 | const Twine &Name = "") { |
1743 | return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name); |
1744 | } |
1745 | |
1746 | LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align, |
1747 | bool isVolatile, const Twine &Name = "") { |
1748 | if (!Align) { |
1749 | const DataLayout &DL = BB->getModule()->getDataLayout(); |
1750 | Align = DL.getABITypeAlign(Ty); |
1751 | } |
1752 | return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name); |
1753 | } |
1754 | |
1755 | StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align, |
1756 | bool isVolatile = false) { |
1757 | if (!Align) { |
1758 | const DataLayout &DL = BB->getModule()->getDataLayout(); |
1759 | Align = DL.getABITypeAlign(Val->getType()); |
1760 | } |
1761 | return Insert(new StoreInst(Val, Ptr, isVolatile, *Align)); |
1762 | } |
1763 | FenceInst *CreateFence(AtomicOrdering Ordering, |
1764 | SyncScope::ID SSID = SyncScope::System, |
1765 | const Twine &Name = "") { |
1766 | return Insert(new FenceInst(Context, Ordering, SSID), Name); |
1767 | } |
1768 | |
1769 | AtomicCmpXchgInst * |
1770 | CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align, |
1771 | AtomicOrdering SuccessOrdering, |
1772 | AtomicOrdering FailureOrdering, |
1773 | SyncScope::ID SSID = SyncScope::System) { |
1774 | if (!Align) { |
1775 | const DataLayout &DL = BB->getModule()->getDataLayout(); |
1776 | Align = llvm::Align(DL.getTypeStoreSize(New->getType())); |
1777 | } |
1778 | |
1779 | return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, *Align, SuccessOrdering, |
1780 | FailureOrdering, SSID)); |
1781 | } |
1782 | |
1783 | AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr, |
1784 | Value *Val, MaybeAlign Align, |
1785 | AtomicOrdering Ordering, |
1786 | SyncScope::ID SSID = SyncScope::System) { |
1787 | if (!Align) { |
1788 | const DataLayout &DL = BB->getModule()->getDataLayout(); |
1789 | Align = llvm::Align(DL.getTypeStoreSize(Val->getType())); |
1790 | } |
1791 | |
1792 | return Insert(new AtomicRMWInst(Op, Ptr, Val, *Align, Ordering, SSID)); |
1793 | } |
1794 | |
1795 | Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList, |
1796 | const Twine &Name = "", bool IsInBounds = false) { |
1797 | if (auto *V = Folder.FoldGEP(Ty, Ptr, IdxList, IsInBounds)) |
1798 | return V; |
1799 | return Insert(IsInBounds |
1800 | ? GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList) |
1801 | : GetElementPtrInst::Create(Ty, Ptr, IdxList), |
1802 | Name); |
1803 | } |
1804 | |
1805 | Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList, |
1806 | const Twine &Name = "") { |
1807 | return CreateGEP(Ty, Ptr, IdxList, Name, /* IsInBounds */ true); |
1808 | } |
1809 | |
1810 | Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, |
1811 | const Twine &Name = "") { |
1812 | Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0); |
1813 | |
1814 | if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/false)) |
1815 | return V; |
1816 | |
1817 | return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name); |
1818 | } |
1819 | |
1820 | Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0, |
1821 | const Twine &Name = "") { |
1822 | Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0); |
1823 | |
1824 | if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/true)) |
1825 | return V; |
1826 | |
1827 | return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name); |
1828 | } |
1829 | |
1830 | Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1, |
1831 | const Twine &Name = "") { |
1832 | Value *Idxs[] = { |
1833 | ConstantInt::get(Type::getInt32Ty(Context), Idx0), |
1834 | ConstantInt::get(Type::getInt32Ty(Context), Idx1) |
1835 | }; |
1836 | |
1837 | if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/false)) |
1838 | return V; |
1839 | |
1840 | return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name); |
1841 | } |
1842 | |
1843 | Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, |
1844 | unsigned Idx1, const Twine &Name = "") { |
1845 | Value *Idxs[] = { |
1846 | ConstantInt::get(Type::getInt32Ty(Context), Idx0), |
1847 | ConstantInt::get(Type::getInt32Ty(Context), Idx1) |
1848 | }; |
1849 | |
1850 | if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/true)) |
1851 | return V; |
1852 | |
1853 | return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name); |
1854 | } |
1855 | |
1856 | Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0, |
1857 | const Twine &Name = "") { |
1858 | Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0); |
1859 | |
1860 | if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/false)) |
1861 | return V; |
1862 | |
1863 | return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name); |
1864 | } |
1865 | |
1866 | Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0, |
1867 | const Twine &Name = "") { |
1868 | Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0); |
1869 | |
1870 | if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/true)) |
1871 | return V; |
1872 | |
1873 | return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name); |
1874 | } |
1875 | |
1876 | Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1, |
1877 | const Twine &Name = "") { |
1878 | Value *Idxs[] = { |
1879 | ConstantInt::get(Type::getInt64Ty(Context), Idx0), |
1880 | ConstantInt::get(Type::getInt64Ty(Context), Idx1) |
1881 | }; |
1882 | |
1883 | if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/false)) |
1884 | return V; |
1885 | |
1886 | return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name); |
1887 | } |
1888 | |
1889 | Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, |
1890 | uint64_t Idx1, const Twine &Name = "") { |
1891 | Value *Idxs[] = { |
1892 | ConstantInt::get(Type::getInt64Ty(Context), Idx0), |
1893 | ConstantInt::get(Type::getInt64Ty(Context), Idx1) |
1894 | }; |
1895 | |
1896 | if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/true)) |
1897 | return V; |
1898 | |
1899 | return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name); |
1900 | } |
1901 | |
1902 | Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx, |
1903 | const Twine &Name = "") { |
1904 | return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name); |
1905 | } |
1906 | |
1907 | /// Same as CreateGlobalString, but return a pointer with "i8*" type |
1908 | /// instead of a pointer to array of i8. |
1909 | /// |
1910 | /// If no module is given via \p M, it is take from the insertion point basic |
1911 | /// block. |
1912 | Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "", |
1913 | unsigned AddressSpace = 0, |
1914 | Module *M = nullptr) { |
1915 | GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M); |
1916 | Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0); |
1917 | Constant *Indices[] = {Zero, Zero}; |
1918 | return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV, |
1919 | Indices); |
1920 | } |
1921 | |
1922 | //===--------------------------------------------------------------------===// |
1923 | // Instruction creation methods: Cast/Conversion Operators |
1924 | //===--------------------------------------------------------------------===// |
1925 | |
1926 | Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") { |
1927 | return CreateCast(Instruction::Trunc, V, DestTy, Name); |
1928 | } |
1929 | |
1930 | Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") { |
1931 | return CreateCast(Instruction::ZExt, V, DestTy, Name); |
1932 | } |
1933 | |
1934 | Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") { |
1935 | return CreateCast(Instruction::SExt, V, DestTy, Name); |
1936 | } |
1937 | |
1938 | /// Create a ZExt or Trunc from the integer value V to DestTy. Return |
1939 | /// the value untouched if the type of V is already DestTy. |
1940 | Value *CreateZExtOrTrunc(Value *V, Type *DestTy, |
1941 | const Twine &Name = "") { |
1942 | assert(V->getType()->isIntOrIntVectorTy() &&(static_cast <bool> (V->getType()->isIntOrIntVectorTy () && DestTy->isIntOrIntVectorTy() && "Can only zero extend/truncate integers!" ) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only zero extend/truncate integers!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1944, __extension__ __PRETTY_FUNCTION__ )) |
1943 | DestTy->isIntOrIntVectorTy() &&(static_cast <bool> (V->getType()->isIntOrIntVectorTy () && DestTy->isIntOrIntVectorTy() && "Can only zero extend/truncate integers!" ) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only zero extend/truncate integers!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1944, __extension__ __PRETTY_FUNCTION__ )) |
1944 | "Can only zero extend/truncate integers!")(static_cast <bool> (V->getType()->isIntOrIntVectorTy () && DestTy->isIntOrIntVectorTy() && "Can only zero extend/truncate integers!" ) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only zero extend/truncate integers!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1944, __extension__ __PRETTY_FUNCTION__ )); |
1945 | Type *VTy = V->getType(); |
1946 | if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits()) |
1947 | return CreateZExt(V, DestTy, Name); |
1948 | if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits()) |
1949 | return CreateTrunc(V, DestTy, Name); |
1950 | return V; |
1951 | } |
1952 | |
1953 | /// Create a SExt or Trunc from the integer value V to DestTy. Return |
1954 | /// the value untouched if the type of V is already DestTy. |
1955 | Value *CreateSExtOrTrunc(Value *V, Type *DestTy, |
1956 | const Twine &Name = "") { |
1957 | assert(V->getType()->isIntOrIntVectorTy() &&(static_cast <bool> (V->getType()->isIntOrIntVectorTy () && DestTy->isIntOrIntVectorTy() && "Can only sign extend/truncate integers!" ) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only sign extend/truncate integers!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1959, __extension__ __PRETTY_FUNCTION__ )) |
1958 | DestTy->isIntOrIntVectorTy() &&(static_cast <bool> (V->getType()->isIntOrIntVectorTy () && DestTy->isIntOrIntVectorTy() && "Can only sign extend/truncate integers!" ) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only sign extend/truncate integers!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1959, __extension__ __PRETTY_FUNCTION__ )) |
1959 | "Can only sign extend/truncate integers!")(static_cast <bool> (V->getType()->isIntOrIntVectorTy () && DestTy->isIntOrIntVectorTy() && "Can only sign extend/truncate integers!" ) ? void (0) : __assert_fail ("V->getType()->isIntOrIntVectorTy() && DestTy->isIntOrIntVectorTy() && \"Can only sign extend/truncate integers!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1959, __extension__ __PRETTY_FUNCTION__ )); |
1960 | Type *VTy = V->getType(); |
1961 | if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits()) |
1962 | return CreateSExt(V, DestTy, Name); |
1963 | if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits()) |
1964 | return CreateTrunc(V, DestTy, Name); |
1965 | return V; |
1966 | } |
1967 | |
1968 | Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") { |
1969 | if (IsFPConstrained) |
1970 | return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui, |
1971 | V, DestTy, nullptr, Name); |
1972 | return CreateCast(Instruction::FPToUI, V, DestTy, Name); |
1973 | } |
1974 | |
1975 | Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") { |
1976 | if (IsFPConstrained) |
1977 | return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi, |
1978 | V, DestTy, nullptr, Name); |
1979 | return CreateCast(Instruction::FPToSI, V, DestTy, Name); |
1980 | } |
1981 | |
1982 | Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){ |
1983 | if (IsFPConstrained) |
1984 | return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp, |
1985 | V, DestTy, nullptr, Name); |
1986 | return CreateCast(Instruction::UIToFP, V, DestTy, Name); |
1987 | } |
1988 | |
1989 | Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){ |
1990 | if (IsFPConstrained) |
1991 | return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp, |
1992 | V, DestTy, nullptr, Name); |
1993 | return CreateCast(Instruction::SIToFP, V, DestTy, Name); |
1994 | } |
1995 | |
1996 | Value *CreateFPTrunc(Value *V, Type *DestTy, |
1997 | const Twine &Name = "") { |
1998 | if (IsFPConstrained) |
1999 | return CreateConstrainedFPCast( |
2000 | Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr, |
2001 | Name); |
2002 | return CreateCast(Instruction::FPTrunc, V, DestTy, Name); |
2003 | } |
2004 | |
2005 | Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") { |
2006 | if (IsFPConstrained) |
2007 | return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext, |
2008 | V, DestTy, nullptr, Name); |
2009 | return CreateCast(Instruction::FPExt, V, DestTy, Name); |
2010 | } |
2011 | |
2012 | Value *CreatePtrToInt(Value *V, Type *DestTy, |
2013 | const Twine &Name = "") { |
2014 | return CreateCast(Instruction::PtrToInt, V, DestTy, Name); |
2015 | } |
2016 | |
2017 | Value *CreateIntToPtr(Value *V, Type *DestTy, |
2018 | const Twine &Name = "") { |
2019 | return CreateCast(Instruction::IntToPtr, V, DestTy, Name); |
2020 | } |
2021 | |
2022 | Value *CreateBitCast(Value *V, Type *DestTy, |
2023 | const Twine &Name = "") { |
2024 | return CreateCast(Instruction::BitCast, V, DestTy, Name); |
2025 | } |
2026 | |
2027 | Value *CreateAddrSpaceCast(Value *V, Type *DestTy, |
2028 | const Twine &Name = "") { |
2029 | return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name); |
2030 | } |
2031 | |
2032 | Value *CreateZExtOrBitCast(Value *V, Type *DestTy, |
2033 | const Twine &Name = "") { |
2034 | if (V->getType() == DestTy) |
2035 | return V; |
2036 | if (auto *VC = dyn_cast<Constant>(V)) |
2037 | return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name); |
2038 | return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name); |
2039 | } |
2040 | |
2041 | Value *CreateSExtOrBitCast(Value *V, Type *DestTy, |
2042 | const Twine &Name = "") { |
2043 | if (V->getType() == DestTy) |
2044 | return V; |
2045 | if (auto *VC = dyn_cast<Constant>(V)) |
2046 | return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name); |
2047 | return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name); |
2048 | } |
2049 | |
2050 | Value *CreateTruncOrBitCast(Value *V, Type *DestTy, |
2051 | const Twine &Name = "") { |
2052 | if (V->getType() == DestTy) |
2053 | return V; |
2054 | if (auto *VC = dyn_cast<Constant>(V)) |
2055 | return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name); |
2056 | return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name); |
2057 | } |
2058 | |
2059 | Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy, |
2060 | const Twine &Name = "") { |
2061 | if (V->getType() == DestTy) |
2062 | return V; |
2063 | if (auto *VC = dyn_cast<Constant>(V)) |
2064 | return Insert(Folder.CreateCast(Op, VC, DestTy), Name); |
2065 | return Insert(CastInst::Create(Op, V, DestTy), Name); |
2066 | } |
2067 | |
2068 | Value *CreatePointerCast(Value *V, Type *DestTy, |
2069 | const Twine &Name = "") { |
2070 | if (V->getType() == DestTy) |
2071 | return V; |
2072 | if (auto *VC = dyn_cast<Constant>(V)) |
2073 | return Insert(Folder.CreatePointerCast(VC, DestTy), Name); |
2074 | return Insert(CastInst::CreatePointerCast(V, DestTy), Name); |
2075 | } |
2076 | |
2077 | Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy, |
2078 | const Twine &Name = "") { |
2079 | if (V->getType() == DestTy) |
2080 | return V; |
2081 | |
2082 | if (auto *VC = dyn_cast<Constant>(V)) { |
2083 | return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy), |
2084 | Name); |
2085 | } |
2086 | |
2087 | return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy), |
2088 | Name); |
2089 | } |
2090 | |
2091 | Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned, |
2092 | const Twine &Name = "") { |
2093 | if (V->getType() == DestTy) |
2094 | return V; |
2095 | if (auto *VC = dyn_cast<Constant>(V)) |
2096 | return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name); |
2097 | return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name); |
2098 | } |
2099 | |
2100 | Value *CreateBitOrPointerCast(Value *V, Type *DestTy, |
2101 | const Twine &Name = "") { |
2102 | if (V->getType() == DestTy) |
2103 | return V; |
2104 | if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy()) |
2105 | return CreatePtrToInt(V, DestTy, Name); |
2106 | if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy()) |
2107 | return CreateIntToPtr(V, DestTy, Name); |
2108 | |
2109 | return CreateBitCast(V, DestTy, Name); |
2110 | } |
2111 | |
2112 | Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") { |
2113 | if (V->getType() == DestTy) |
2114 | return V; |
2115 | if (auto *VC = dyn_cast<Constant>(V)) |
2116 | return Insert(Folder.CreateFPCast(VC, DestTy), Name); |
2117 | return Insert(CastInst::CreateFPCast(V, DestTy), Name); |
2118 | } |
2119 | |
2120 | CallInst *CreateConstrainedFPCast( |
2121 | Intrinsic::ID ID, Value *V, Type *DestTy, |
2122 | Instruction *FMFSource = nullptr, const Twine &Name = "", |
2123 | MDNode *FPMathTag = nullptr, |
2124 | std::optional<RoundingMode> Rounding = std::nullopt, |
2125 | std::optional<fp::ExceptionBehavior> Except = std::nullopt); |
2126 | |
2127 | // Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a |
2128 | // compile time error, instead of converting the string to bool for the |
2129 | // isSigned parameter. |
2130 | Value *CreateIntCast(Value *, Type *, const char *) = delete; |
2131 | |
2132 | //===--------------------------------------------------------------------===// |
2133 | // Instruction creation methods: Compare Instructions |
2134 | //===--------------------------------------------------------------------===// |
2135 | |
2136 | Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") { |
2137 | return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name); |
2138 | } |
2139 | |
2140 | Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") { |
2141 | return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name); |
2142 | } |
2143 | |
2144 | Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") { |
2145 | return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name); |
2146 | } |
2147 | |
2148 | Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") { |
2149 | return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name); |
2150 | } |
2151 | |
2152 | Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") { |
2153 | return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name); |
2154 | } |
2155 | |
2156 | Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") { |
2157 | return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name); |
2158 | } |
2159 | |
2160 | Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") { |
2161 | return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name); |
2162 | } |
2163 | |
2164 | Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") { |
2165 | return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name); |
2166 | } |
2167 | |
2168 | Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") { |
2169 | return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name); |
2170 | } |
2171 | |
2172 | Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") { |
2173 | return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name); |
2174 | } |
2175 | |
2176 | Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "", |
2177 | MDNode *FPMathTag = nullptr) { |
2178 | return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag); |
2179 | } |
2180 | |
2181 | Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "", |
2182 | MDNode *FPMathTag = nullptr) { |
2183 | return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag); |
2184 | } |
2185 | |
2186 | Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "", |
2187 | MDNode *FPMathTag = nullptr) { |
2188 | return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag); |
2189 | } |
2190 | |
2191 | Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "", |
2192 | MDNode *FPMathTag = nullptr) { |
2193 | return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag); |
2194 | } |
2195 | |
2196 | Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "", |
2197 | MDNode *FPMathTag = nullptr) { |
2198 | return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag); |
2199 | } |
2200 | |
2201 | Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "", |
2202 | MDNode *FPMathTag = nullptr) { |
2203 | return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag); |
2204 | } |
2205 | |
2206 | Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "", |
2207 | MDNode *FPMathTag = nullptr) { |
2208 | return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag); |
2209 | } |
2210 | |
2211 | Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "", |
2212 | MDNode *FPMathTag = nullptr) { |
2213 | return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag); |
2214 | } |
2215 | |
2216 | Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "", |
2217 | MDNode *FPMathTag = nullptr) { |
2218 | return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag); |
2219 | } |
2220 | |
2221 | Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "", |
2222 | MDNode *FPMathTag = nullptr) { |
2223 | return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag); |
2224 | } |
2225 | |
2226 | Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "", |
2227 | MDNode *FPMathTag = nullptr) { |
2228 | return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag); |
2229 | } |
2230 | |
2231 | Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "", |
2232 | MDNode *FPMathTag = nullptr) { |
2233 | return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag); |
2234 | } |
2235 | |
2236 | Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "", |
2237 | MDNode *FPMathTag = nullptr) { |
2238 | return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag); |
2239 | } |
2240 | |
2241 | Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "", |
2242 | MDNode *FPMathTag = nullptr) { |
2243 | return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag); |
2244 | } |
2245 | |
2246 | Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS, |
2247 | const Twine &Name = "") { |
2248 | if (auto *V = Folder.FoldICmp(P, LHS, RHS)) |
2249 | return V; |
2250 | return Insert(new ICmpInst(P, LHS, RHS), Name); |
2251 | } |
2252 | |
2253 | // Create a quiet floating-point comparison (i.e. one that raises an FP |
2254 | // exception only in the case where an input is a signaling NaN). |
2255 | // Note that this differs from CreateFCmpS only if IsFPConstrained is true. |
2256 | Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS, |
2257 | const Twine &Name = "", MDNode *FPMathTag = nullptr) { |
2258 | return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false); |
2259 | } |
2260 | |
2261 | Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, |
2262 | const Twine &Name = "", MDNode *FPMathTag = nullptr) { |
2263 | return CmpInst::isFPPredicate(Pred) |
2264 | ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag) |
2265 | : CreateICmp(Pred, LHS, RHS, Name); |
2266 | } |
2267 | |
2268 | // Create a signaling floating-point comparison (i.e. one that raises an FP |
2269 | // exception whenever an input is any NaN, signaling or quiet). |
2270 | // Note that this differs from CreateFCmp only if IsFPConstrained is true. |
2271 | Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS, |
2272 | const Twine &Name = "", MDNode *FPMathTag = nullptr) { |
2273 | return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true); |
2274 | } |
2275 | |
2276 | private: |
2277 | // Helper routine to create either a signaling or a quiet FP comparison. |
2278 | Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS, |
2279 | const Twine &Name, MDNode *FPMathTag, |
2280 | bool IsSignaling); |
2281 | |
2282 | public: |
2283 | CallInst *CreateConstrainedFPCmp( |
2284 | Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R, |
2285 | const Twine &Name = "", |
2286 | std::optional<fp::ExceptionBehavior> Except = std::nullopt); |
2287 | |
2288 | //===--------------------------------------------------------------------===// |
2289 | // Instruction creation methods: Other Instructions |
2290 | //===--------------------------------------------------------------------===// |
2291 | |
2292 | PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues, |
2293 | const Twine &Name = "") { |
2294 | PHINode *Phi = PHINode::Create(Ty, NumReservedValues); |
2295 | if (isa<FPMathOperator>(Phi)) |
2296 | setFPAttrs(Phi, nullptr /* MDNode* */, FMF); |
2297 | return Insert(Phi, Name); |
2298 | } |
2299 | |
2300 | private: |
2301 | CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops, |
2302 | const Twine &Name = "", |
2303 | Instruction *FMFSource = nullptr, |
2304 | ArrayRef<OperandBundleDef> OpBundles = {}); |
2305 | |
2306 | public: |
2307 | CallInst *CreateCall(FunctionType *FTy, Value *Callee, |
2308 | ArrayRef<Value *> Args = std::nullopt, |
2309 | const Twine &Name = "", MDNode *FPMathTag = nullptr) { |
2310 | CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles); |
2311 | if (IsFPConstrained) |
2312 | setConstrainedFPCallAttr(CI); |
2313 | if (isa<FPMathOperator>(CI)) |
2314 | setFPAttrs(CI, FPMathTag, FMF); |
2315 | return Insert(CI, Name); |
2316 | } |
2317 | |
2318 | CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args, |
2319 | ArrayRef<OperandBundleDef> OpBundles, |
2320 | const Twine &Name = "", MDNode *FPMathTag = nullptr) { |
2321 | CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles); |
2322 | if (IsFPConstrained) |
2323 | setConstrainedFPCallAttr(CI); |
2324 | if (isa<FPMathOperator>(CI)) |
2325 | setFPAttrs(CI, FPMathTag, FMF); |
2326 | return Insert(CI, Name); |
2327 | } |
2328 | |
2329 | CallInst *CreateCall(FunctionCallee Callee, |
2330 | ArrayRef<Value *> Args = std::nullopt, |
2331 | const Twine &Name = "", MDNode *FPMathTag = nullptr) { |
2332 | return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name, |
2333 | FPMathTag); |
2334 | } |
2335 | |
2336 | CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args, |
2337 | ArrayRef<OperandBundleDef> OpBundles, |
2338 | const Twine &Name = "", MDNode *FPMathTag = nullptr) { |
2339 | return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, |
2340 | OpBundles, Name, FPMathTag); |
2341 | } |
2342 | |
2343 | CallInst *CreateConstrainedFPCall( |
2344 | Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "", |
2345 | std::optional<RoundingMode> Rounding = std::nullopt, |
2346 | std::optional<fp::ExceptionBehavior> Except = std::nullopt); |
2347 | |
2348 | Value *CreateSelect(Value *C, Value *True, Value *False, |
2349 | const Twine &Name = "", Instruction *MDFrom = nullptr); |
2350 | |
2351 | VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") { |
2352 | return Insert(new VAArgInst(List, Ty), Name); |
2353 | } |
2354 | |
2355 | Value *CreateExtractElement(Value *Vec, Value *Idx, |
2356 | const Twine &Name = "") { |
2357 | if (Value *V = Folder.FoldExtractElement(Vec, Idx)) |
2358 | return V; |
2359 | return Insert(ExtractElementInst::Create(Vec, Idx), Name); |
2360 | } |
2361 | |
2362 | Value *CreateExtractElement(Value *Vec, uint64_t Idx, |
2363 | const Twine &Name = "") { |
2364 | return CreateExtractElement(Vec, getInt64(Idx), Name); |
2365 | } |
2366 | |
2367 | Value *CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, |
2368 | const Twine &Name = "") { |
2369 | return CreateInsertElement(PoisonValue::get(VecTy), NewElt, Idx, Name); |
2370 | } |
2371 | |
2372 | Value *CreateInsertElement(Type *VecTy, Value *NewElt, uint64_t Idx, |
2373 | const Twine &Name = "") { |
2374 | return CreateInsertElement(PoisonValue::get(VecTy), NewElt, Idx, Name); |
2375 | } |
2376 | |
2377 | Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx, |
2378 | const Twine &Name = "") { |
2379 | if (Value *V = Folder.FoldInsertElement(Vec, NewElt, Idx)) |
2380 | return V; |
2381 | return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name); |
2382 | } |
2383 | |
2384 | Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx, |
2385 | const Twine &Name = "") { |
2386 | return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name); |
2387 | } |
2388 | |
2389 | Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask, |
2390 | const Twine &Name = "") { |
2391 | SmallVector<int, 16> IntMask; |
2392 | ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask); |
2393 | return CreateShuffleVector(V1, V2, IntMask, Name); |
2394 | } |
2395 | |
2396 | /// See class ShuffleVectorInst for a description of the mask representation. |
2397 | Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask, |
2398 | const Twine &Name = "") { |
2399 | if (Value *V = Folder.FoldShuffleVector(V1, V2, Mask)) |
2400 | return V; |
2401 | return Insert(new ShuffleVectorInst(V1, V2, Mask), Name); |
2402 | } |
2403 | |
2404 | /// Create a unary shuffle. The second vector operand of the IR instruction |
2405 | /// is poison. |
2406 | Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask, |
2407 | const Twine &Name = "") { |
2408 | return CreateShuffleVector(V, PoisonValue::get(V->getType()), Mask, Name); |
2409 | } |
2410 | |
2411 | Value *CreateExtractValue(Value *Agg, ArrayRef<unsigned> Idxs, |
2412 | const Twine &Name = "") { |
2413 | if (auto *V = Folder.FoldExtractValue(Agg, Idxs)) |
2414 | return V; |
2415 | return Insert(ExtractValueInst::Create(Agg, Idxs), Name); |
2416 | } |
2417 | |
2418 | Value *CreateInsertValue(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, |
2419 | const Twine &Name = "") { |
2420 | if (auto *V = Folder.FoldInsertValue(Agg, Val, Idxs)) |
2421 | return V; |
2422 | return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name); |
2423 | } |
2424 | |
2425 | LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses, |
2426 | const Twine &Name = "") { |
2427 | return Insert(LandingPadInst::Create(Ty, NumClauses), Name); |
2428 | } |
2429 | |
2430 | Value *CreateFreeze(Value *V, const Twine &Name = "") { |
2431 | return Insert(new FreezeInst(V), Name); |
2432 | } |
2433 | |
2434 | //===--------------------------------------------------------------------===// |
2435 | // Utility creation methods |
2436 | //===--------------------------------------------------------------------===// |
2437 | |
2438 | /// Return a boolean value testing if \p Arg == 0. |
2439 | Value *CreateIsNull(Value *Arg, const Twine &Name = "") { |
2440 | return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()), Name); |
2441 | } |
2442 | |
2443 | /// Return a boolean value testing if \p Arg != 0. |
2444 | Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") { |
2445 | return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()), Name); |
2446 | } |
2447 | |
2448 | /// Return a boolean value testing if \p Arg < 0. |
2449 | Value *CreateIsNeg(Value *Arg, const Twine &Name = "") { |
2450 | return CreateICmpSLT(Arg, ConstantInt::getNullValue(Arg->getType()), Name); |
2451 | } |
2452 | |
2453 | /// Return a boolean value testing if \p Arg > -1. |
2454 | Value *CreateIsNotNeg(Value *Arg, const Twine &Name = "") { |
2455 | return CreateICmpSGT(Arg, ConstantInt::getAllOnesValue(Arg->getType()), |
2456 | Name); |
2457 | } |
2458 | |
  /// Return the i64 difference between two pointer values, dividing out
  /// the size of the pointed-to objects.
  ///
  /// This is intended to implement C-style pointer subtraction. As such, the
  /// pointers must be appropriately aligned for their element types and
  /// pointing into the same object.
  Value *CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
                       const Twine &Name = "");

  /// Create a launder.invariant.group intrinsic call. If Ptr type is
  /// different from pointer to i8, it's casted to pointer to i8 in the same
  /// address space before call and casted back to Ptr type after call.
  Value *CreateLaunderInvariantGroup(Value *Ptr);

  /// \brief Create a strip.invariant.group intrinsic call. If Ptr type is
  /// different from pointer to i8, it's casted to pointer to i8 in the same
  /// address space before call and casted back to Ptr type after call.
  Value *CreateStripInvariantGroup(Value *Ptr);

  /// Return a vector value that contains the vector V reversed
  Value *CreateVectorReverse(Value *V, const Twine &Name = "");

  /// Return a vector splice intrinsic if using scalable vectors, otherwise
  /// return a shufflevector. If the immediate is positive, a vector is
  /// extracted from concat(V1, V2), starting at Imm. If the immediate
  /// is negative, we extract -Imm elements from V1 and the remaining
  /// elements from V2. Imm is a signed integer in the range
  /// -VL <= Imm < VL (where VL is the runtime vector length of the
  /// source/result vector)
  Value *CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                            const Twine &Name = "");

  /// Return a vector value that contains \arg V broadcasted to \p
  /// NumElts elements.
  Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");

  /// Return a vector value that contains \arg V broadcasted to \p
  /// EC elements.
  Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = "");

  /// Return a value that has been extracted from a larger integer type.
  Value *CreateExtractInteger(const DataLayout &DL, Value *From,
                              IntegerType *ExtractedTy, uint64_t Offset,
                              const Twine &Name);

  // NOTE(review): the three CreatePreserve*AccessIndex entry points below
  // appear to emit the preserve.*.access.index family of intrinsics used for
  // relocatable field access (e.g. BPF CO-RE) -- confirm against the
  // out-of-line definitions before relying on this.
  Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
                                        unsigned Dimension, unsigned LastIndex,
                                        MDNode *DbgInfo);

  Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
                                        MDNode *DbgInfo);

  Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
                                         unsigned Index, unsigned FieldIndex,
                                         MDNode *DbgInfo);

private:
  /// Helper function that creates an assume intrinsic call that
  /// represents an alignment assumption on the provided pointer \p PtrValue
  /// with offset \p OffsetValue and alignment value \p AlignValue.
  CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                            Value *PtrValue, Value *AlignValue,
                                            Value *OffsetValue);

public:
  /// Create an assume intrinsic call that represents an alignment
  /// assumption on the provided pointer.
  ///
  /// An optional offset can be provided, and if it is provided, the offset
  /// must be subtracted from the provided pointer to get the pointer with the
  /// specified alignment.
  CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                      unsigned Alignment,
                                      Value *OffsetValue = nullptr);

  /// Create an assume intrinsic call that represents an alignment
  /// assumption on the provided pointer.
  ///
  /// An optional offset can be provided, and if it is provided, the offset
  /// must be subtracted from the provided pointer to get the pointer with the
  /// specified alignment.
  ///
  /// This overload handles the condition where the Alignment is dependent
  /// on an existing value rather than a static value.
  CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                      Value *Alignment,
                                      Value *OffsetValue = nullptr);
};
2547 | |
2548 | /// This provides a uniform API for creating instructions and inserting |
2549 | /// them into a basic block: either at the end of a BasicBlock, or at a specific |
2550 | /// iterator location in a block. |
2551 | /// |
2552 | /// Note that the builder does not expose the full generality of LLVM |
2553 | /// instructions. For access to extra instruction properties, use the mutators |
2554 | /// (e.g. setVolatile) on the instructions after they have been |
2555 | /// created. Convenience state exists to specify fast-math flags and fp-math |
2556 | /// tags. |
2557 | /// |
2558 | /// The first template argument specifies a class to use for creating constants. |
2559 | /// This defaults to creating minimally folded constants. The second template |
2560 | /// argument allows clients to specify custom insertion hooks that are called on |
2561 | /// every newly created insertion. |
template <typename FolderTy = ConstantFolder,
          typename InserterTy = IRBuilderDefaultInserter>
class IRBuilder : public IRBuilderBase {
private:
  // Concrete folder/inserter instances; the base class refers to them.
  FolderTy Folder;
  InserterTy Inserter;

public:
  // NOTE(review): every constructor passes this->Folder / this->Inserter to
  // the IRBuilderBase constructor before those members have been initialized.
  // This is only safe if the base captures them by reference (taking their
  // addresses, not reading them) -- confirm against IRBuilderBase's
  // declaration, which is outside this view.
  IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(),
            MDNode *FPMathTag = nullptr,
            ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
      : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles),
        Folder(Folder), Inserter(Inserter) {}

  // Context-only constructor: default-constructed folder and inserter.
  explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
      : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}

  // Insert at the end of \p TheBB with a caller-supplied folder.
  explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
                     MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles),
        Folder(Folder) {
    SetInsertPoint(TheBB);
  }

  // Insert at the end of \p TheBB with a default folder.
  explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles) {
    SetInsertPoint(TheBB);
  }

  // Insert immediately before instruction \p IP.
  explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
      : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter, FPMathTag,
                      OpBundles) {
    SetInsertPoint(IP);
  }

  // Insert at iterator \p IP within \p TheBB, caller-supplied folder.
  IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
            MDNode *FPMathTag = nullptr,
            ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles),
        Folder(Folder) {
    SetInsertPoint(TheBB, IP);
  }

  // Insert at iterator \p IP within \p TheBB, default folder.
  IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
            MDNode *FPMathTag = nullptr,
            ArrayRef<OperandBundleDef> OpBundles = std::nullopt)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles) {
    SetInsertPoint(TheBB, IP);
  }

  /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
  /// or FastMathFlagGuard instead.
  IRBuilder(const IRBuilder &) = delete;

  /// Access the concrete inserter (e.g. to inspect hook state).
  InserterTy &getInserter() { return Inserter; }
};
2626 | |
// Class template argument deduction guides: let constructor arguments select
// the Folder/Inserter template parameters, and default both to IRBuilder<>
// when only context / insert-point arguments are supplied.
template <typename FolderTy, typename InserterTy>
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *,
          ArrayRef<OperandBundleDef>) -> IRBuilder<FolderTy, InserterTy>;
IRBuilder(LLVMContext &, MDNode *, ArrayRef<OperandBundleDef>) -> IRBuilder<>;
template <typename FolderTy>
IRBuilder(BasicBlock *, FolderTy, MDNode *, ArrayRef<OperandBundleDef>)
    -> IRBuilder<FolderTy>;
IRBuilder(BasicBlock *, MDNode *, ArrayRef<OperandBundleDef>) -> IRBuilder<>;
IRBuilder(Instruction *, MDNode *, ArrayRef<OperandBundleDef>) -> IRBuilder<>;
template <typename FolderTy>
IRBuilder(BasicBlock *, BasicBlock::iterator, FolderTy, MDNode *,
          ArrayRef<OperandBundleDef>) -> IRBuilder<FolderTy>;
IRBuilder(BasicBlock *, BasicBlock::iterator, MDNode *,
          ArrayRef<OperandBundleDef>) -> IRBuilder<>;
2641 | |
2642 | |
// Create wrappers for C Binding types (see CBindingWrapping.h): generates
// unwrap()/wrap() converting between the C API's LLVMBuilderRef and the C++
// IRBuilder<>* (expanded text below is the analyzer's render of the macro).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)inline IRBuilder<> *unwrap(LLVMBuilderRef P) { return reinterpret_cast <IRBuilder<>*>(P); } inline LLVMBuilderRef wrap(const IRBuilder<> *P) { return reinterpret_cast<LLVMBuilderRef >(const_cast<IRBuilder<>*>(P)); }
2645 | |
2646 | } // end namespace llvm |
2647 | |
2648 | #endif // LLVM_IR_IRBUILDER_H |
1 | //===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file exposes the class definitions of all of the subclasses of the | |||
10 | // Instruction class. This is meant to be an easy way to get access to all | |||
11 | // instruction subclasses. | |||
12 | // | |||
13 | //===----------------------------------------------------------------------===// | |||
14 | ||||
15 | #ifndef LLVM_IR_INSTRUCTIONS_H | |||
16 | #define LLVM_IR_INSTRUCTIONS_H | |||
17 | ||||
18 | #include "llvm/ADT/ArrayRef.h" | |||
19 | #include "llvm/ADT/Bitfields.h" | |||
20 | #include "llvm/ADT/MapVector.h" | |||
21 | #include "llvm/ADT/STLExtras.h" | |||
22 | #include "llvm/ADT/SmallVector.h" | |||
23 | #include "llvm/ADT/Twine.h" | |||
24 | #include "llvm/ADT/iterator.h" | |||
25 | #include "llvm/ADT/iterator_range.h" | |||
26 | #include "llvm/IR/CFG.h" | |||
27 | #include "llvm/IR/Constant.h" | |||
28 | #include "llvm/IR/DerivedTypes.h" | |||
29 | #include "llvm/IR/InstrTypes.h" | |||
30 | #include "llvm/IR/Instruction.h" | |||
31 | #include "llvm/IR/OperandTraits.h" | |||
32 | #include "llvm/IR/Use.h" | |||
33 | #include "llvm/IR/User.h" | |||
34 | #include "llvm/Support/AtomicOrdering.h" | |||
35 | #include "llvm/Support/ErrorHandling.h" | |||
36 | #include <cassert> | |||
37 | #include <cstddef> | |||
38 | #include <cstdint> | |||
39 | #include <iterator> | |||
40 | #include <optional> | |||
41 | ||||
42 | namespace llvm { | |||
43 | ||||
44 | class APFloat; | |||
45 | class APInt; | |||
46 | class BasicBlock; | |||
47 | class ConstantInt; | |||
48 | class DataLayout; | |||
49 | class StringRef; | |||
50 | class Type; | |||
51 | class Value; | |||
52 | ||||
53 | //===----------------------------------------------------------------------===// | |||
54 | // AllocaInst Class | |||
55 | //===----------------------------------------------------------------------===// | |||
56 | ||||
/// An instruction to allocate memory on the stack.
class AllocaInst : public UnaryInstruction {
  // Element type this alloca reserves storage for (the pointee type).
  Type *AllocatedType;

  // Packed subclass-data bitfields: alignment (stored as log2), the
  // "used with inalloca" flag, and the swifterror flag, in that order.
  using AlignmentField = AlignmentBitfieldElementT<0>;
  using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
  using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
  static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
                                        SwiftErrorField>(),
                "Bitfields must be contiguous");

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AllocaInst *cloneImpl() const;

public:
  explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
                      const Twine &Name, Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
             Instruction *InsertBefore);
  AllocaInst(Type *Ty, unsigned AddrSpace,
             const Twine &Name, BasicBlock *InsertAtEnd);

  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name = "", Instruction *InsertBefore = nullptr);
  AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
             const Twine &Name, BasicBlock *InsertAtEnd);

  /// Return true if there is an allocation size parameter to the allocation
  /// instruction that is not 1.
  bool isArrayAllocation() const;

  /// Get the number of elements allocated. For a simple allocation of a single
  /// element, this will return a constant 1 value.
  const Value *getArraySize() const { return getOperand(0); }
  Value *getArraySize() { return getOperand(0); }

  /// Overload to return most specific pointer type.
  PointerType *getType() const {
    return cast<PointerType>(Instruction::getType());
  }

  /// Return the address space for the allocation.
  unsigned getAddressSpace() const {
    return getType()->getAddressSpace();
  }

  /// Get allocation size in bytes. Returns std::nullopt if size can't be
  /// determined, e.g. in case of a VLA.
  std::optional<TypeSize> getAllocationSize(const DataLayout &DL) const;

  /// Get allocation size in bits. Returns std::nullopt if size can't be
  /// determined, e.g. in case of a VLA.
  std::optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;

  /// Return the type that is being allocated by the instruction.
  Type *getAllocatedType() const { return AllocatedType; }
  /// for use only in special circumstances that need to generically
  /// transform a whole instruction (eg: IR linking and vectorization).
  void setAllocatedType(Type *Ty) { AllocatedType = Ty; }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction. Decodes the log2 value stored in the bitfield.
  Align getAlign() const {
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  /// Set the alignment; only the log2 of \p Align is stored.
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this alloca is in the entry block of the function and is a
  /// constant size. If so, the code generator will fold it into the
  /// prolog/epilog code, so it is basically free.
  bool isStaticAlloca() const;

  /// Return true if this alloca is used as an inalloca argument to a call. Such
  /// allocas are never considered static even if they are in the entry block.
  bool isUsedWithInAlloca() const {
    return getSubclassData<UsedWithInAllocaField>();
  }

  /// Specify whether this alloca is used to represent the arguments to a call.
  void setUsedWithInAlloca(bool V) {
    setSubclassData<UsedWithInAllocaField>(V);
  }

  /// Return true if this alloca is used as a swifterror argument to a call.
  bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
  /// Specify whether this alloca is used to represent a swifterror.
  void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Alloca);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }
};
170 | ||||
171 | //===----------------------------------------------------------------------===// | |||
172 | // LoadInst Class | |||
173 | //===----------------------------------------------------------------------===// | |||
174 | ||||
/// An instruction for reading from memory. This uses the SubclassData field in
/// Value to store whether or not the load is volatile.
class LoadInst : public UnaryInstruction {
  // Packed subclass-data bitfields: volatile flag, alignment (stored as
  // log2), and atomic ordering, in that order.
  using VolatileField = BoolBitfieldElementT<0>;
  using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
  using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
      "Bitfields must be contiguous");

  // Validates construction invariants (defined out of line; presumably
  // assertion-based -- confirm in Instructions.cpp).
  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  LoadInst *cloneImpl() const;

public:
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Instruction *InsertBefore);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, BasicBlock *InsertAtEnd);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order,
           SyncScope::ID SSID = SyncScope::System,
           Instruction *InsertBefore = nullptr);
  LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
           Align Align, AtomicOrdering Order, SyncScope::ID SSID,
           BasicBlock *InsertAtEnd);

  /// Return true if this is a load from a volatile memory location.
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile load or not.
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return the alignment of the access that is being performed.
  /// Decodes the log2 value stored in the bitfield.
  Align getAlign() const {
    return Align(1ULL << (getSubclassData<AlignmentField>()));
  }

  /// Set the alignment; only the log2 of \p Align is stored.
  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Returns the ordering constraint of this load instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }
  /// Sets the ordering constraint of this load instruction. May not be Release
  /// or AcquireRelease.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this load instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this load instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// Sets the ordering constraint and the synchronization scope ID of this load
  /// instruction.
  void setAtomic(AtomicOrdering Ordering,
                 SyncScope::ID SSID = SyncScope::System) {
    setOrdering(Ordering);
    setSyncScopeID(SSID);
  }

  /// A "simple" load is neither atomic nor volatile.
  bool isSimple() const { return !isAtomic() && !isVolatile(); }

  /// An "unordered" load is non-volatile with at most Unordered ordering.
  bool isUnordered() const {
    return (getOrdering() == AtomicOrdering::NotAtomic ||
            getOrdering() == AtomicOrdering::Unordered) &&
           !isVolatile();
  }

  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }
  Type *getPointerOperandType() const { return getPointerOperand()->getType(); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Load;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this load instruction. Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
295 | ||||
296 | //===----------------------------------------------------------------------===// | |||
297 | // StoreInst Class | |||
298 | //===----------------------------------------------------------------------===// | |||
299 | ||||
300 | /// An instruction for storing to memory. | |||
301 | class StoreInst : public Instruction { | |||
302 | using VolatileField = BoolBitfieldElementT<0>; | |||
303 | using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>; | |||
304 | using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>; | |||
305 | static_assert( | |||
306 | Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(), | |||
307 | "Bitfields must be contiguous"); | |||
308 | ||||
309 | void AssertOK(); | |||
310 | ||||
311 | protected: | |||
312 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
313 | friend class Instruction; | |||
314 | ||||
315 | StoreInst *cloneImpl() const; | |||
316 | ||||
317 | public: | |||
318 | StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore); | |||
319 | StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd); | |||
320 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore); | |||
321 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd); | |||
322 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, | |||
323 | Instruction *InsertBefore = nullptr); | |||
324 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, | |||
325 | BasicBlock *InsertAtEnd); | |||
326 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, | |||
327 | AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System, | |||
328 | Instruction *InsertBefore = nullptr); | |||
329 | StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align, | |||
330 | AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd); | |||
331 | ||||
332 | // allocate space for exactly two operands | |||
333 | void *operator new(size_t S) { return User::operator new(S, 2); } | |||
334 | void operator delete(void *Ptr) { User::operator delete(Ptr); } | |||
335 | ||||
336 | /// Return true if this is a store to a volatile memory location. | |||
337 | bool isVolatile() const { return getSubclassData<VolatileField>(); } | |||
338 | ||||
339 | /// Specify whether this is a volatile store or not. | |||
340 | void setVolatile(bool V) { setSubclassData<VolatileField>(V); } | |||
341 | ||||
342 | /// Transparently provide more efficient getOperand methods. | |||
343 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
344 | ||||
345 | Align getAlign() const { | |||
346 | return Align(1ULL << (getSubclassData<AlignmentField>())); | |||
347 | } | |||
348 | ||||
349 | void setAlignment(Align Align) { | |||
350 | setSubclassData<AlignmentField>(Log2(Align)); | |||
351 | } | |||
352 | ||||
353 | /// Returns the ordering constraint of this store instruction. | |||
354 | AtomicOrdering getOrdering() const { | |||
355 | return getSubclassData<OrderingField>(); | |||
356 | } | |||
357 | ||||
358 | /// Sets the ordering constraint of this store instruction. May not be | |||
359 | /// Acquire or AcquireRelease. | |||
360 | void setOrdering(AtomicOrdering Ordering) { | |||
361 | setSubclassData<OrderingField>(Ordering); | |||
362 | } | |||
363 | ||||
364 | /// Returns the synchronization scope ID of this store instruction. | |||
365 | SyncScope::ID getSyncScopeID() const { | |||
366 | return SSID; | |||
367 | } | |||
368 | ||||
369 | /// Sets the synchronization scope ID of this store instruction. | |||
370 | void setSyncScopeID(SyncScope::ID SSID) { | |||
371 | this->SSID = SSID; | |||
372 | } | |||
373 | ||||
374 | /// Sets the ordering constraint and the synchronization scope ID of this | |||
375 | /// store instruction. | |||
376 | void setAtomic(AtomicOrdering Ordering, | |||
377 | SyncScope::ID SSID = SyncScope::System) { | |||
378 | setOrdering(Ordering); | |||
379 | setSyncScopeID(SSID); | |||
380 | } | |||
381 | ||||
382 | bool isSimple() const { return !isAtomic() && !isVolatile(); } | |||
383 | ||||
384 | bool isUnordered() const { | |||
385 | return (getOrdering() == AtomicOrdering::NotAtomic || | |||
386 | getOrdering() == AtomicOrdering::Unordered) && | |||
387 | !isVolatile(); | |||
388 | } | |||
389 | ||||
390 | Value *getValueOperand() { return getOperand(0); } | |||
391 | const Value *getValueOperand() const { return getOperand(0); } | |||
392 | ||||
393 | Value *getPointerOperand() { return getOperand(1); } | |||
394 | const Value *getPointerOperand() const { return getOperand(1); } | |||
395 | static unsigned getPointerOperandIndex() { return 1U; } | |||
396 | Type *getPointerOperandType() const { return getPointerOperand()->getType(); } | |||
397 | ||||
398 | /// Returns the address space of the pointer operand. | |||
399 | unsigned getPointerAddressSpace() const { | |||
400 | return getPointerOperandType()->getPointerAddressSpace(); | |||
401 | } | |||
402 | ||||
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Store;
  }
  static bool classof(const Value *V) {
    // A Value is a StoreInst iff it is an Instruction with the Store opcode.
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
410 | ||||
private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this store instruction.  Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
423 | }; | |||
424 | ||||
425 | template <> | |||
426 | struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> { | |||
427 | }; | |||
428 | ||||
429 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)StoreInst::op_iterator StoreInst::op_begin() { return OperandTraits <StoreInst>::op_begin(this); } StoreInst::const_op_iterator StoreInst::op_begin() const { return OperandTraits<StoreInst >::op_begin(const_cast<StoreInst*>(this)); } StoreInst ::op_iterator StoreInst::op_end() { return OperandTraits<StoreInst >::op_end(this); } StoreInst::const_op_iterator StoreInst:: op_end() const { return OperandTraits<StoreInst>::op_end (const_cast<StoreInst*>(this)); } Value *StoreInst::getOperand (unsigned i_nocapture) const { (static_cast <bool> (i_nocapture < OperandTraits<StoreInst>::operands(this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 429, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<StoreInst >::op_begin(const_cast<StoreInst*>(this))[i_nocapture ].get()); } void StoreInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<StoreInst>::operands(this) && "setOperand() out of range!" ) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<StoreInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 429, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<StoreInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned StoreInst::getNumOperands() const { return OperandTraits<StoreInst>::operands(this); } template <int Idx_nocapture> Use &StoreInst::Op() { return this ->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture > const Use &StoreInst::Op() const { return this->OpFrom <Idx_nocapture>(this); } | |||
430 | ||||
431 | //===----------------------------------------------------------------------===// | |||
432 | // FenceInst Class | |||
433 | //===----------------------------------------------------------------------===// | |||
434 | ||||
/// An instruction for ordering other memory operations.
class FenceInst : public Instruction {
  // The ordering occupies the low bits of the Instruction subclass data.
  using OrderingField = AtomicOrderingBitfieldElementT<0>;

  void Init(AtomicOrdering Ordering, SyncScope::ID SSID);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  FenceInst *cloneImpl() const;

public:
  // Ordering may only be Acquire, Release, AcquireRelease, or
  // SequentiallyConsistent.
  FenceInst(LLVMContext &C, AtomicOrdering Ordering,
            SyncScope::ID SSID = SyncScope::System,
            Instruction *InsertBefore = nullptr);
  FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
            BasicBlock *InsertAtEnd);

  // allocate space for exactly zero operands
  void *operator new(size_t S) { return User::operator new(S, 0); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  /// Returns the ordering constraint of this fence instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<OrderingField>();
  }

  /// Sets the ordering constraint of this fence instruction.  May only be
  /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
  void setOrdering(AtomicOrdering Ordering) {
    setSubclassData<OrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this fence instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this fence instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::Fence;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this fence instruction.  Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
502 | ||||
503 | //===----------------------------------------------------------------------===// | |||
504 | // AtomicCmpXchgInst Class | |||
505 | //===----------------------------------------------------------------------===// | |||
506 | ||||
/// An instruction that atomically checks whether a
/// specified value is in a memory location, and, if it is, stores a new value
/// there. The value returned by this instruction is a pair containing the
/// original value as first element, and an i1 indicating success (true) or
/// failure (false) as second element.
///
class AtomicCmpXchgInst : public Instruction {
  void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
            AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
            SyncScope::ID SSID);

  // Helper alias: a 3-bit ordering field starting at bit \p Offset.
  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicCmpXchgInst *cloneImpl() const;

public:
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    Instruction *InsertBefore = nullptr);
  AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
                    AtomicOrdering SuccessOrdering,
                    AtomicOrdering FailureOrdering, SyncScope::ID SSID,
                    BasicBlock *InsertAtEnd);

  // allocate space for exactly three operands
  void *operator new(size_t S) { return User::operator new(S, 3); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  // Layout of the subclass-data bitfield; areContiguous() below verifies it.
  using VolatileField = BoolBitfieldElementT<0>;
  using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
  using SuccessOrderingField =
      AtomicOrderingBitfieldElementT<WeakField::NextBit>;
  using FailureOrderingField =
      AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
  using AlignmentField =
      AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
  static_assert(
      Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
                              FailureOrderingField, AlignmentField>(),
      "Bitfields must be contiguous");

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  Align getAlign() const {
    // Stored as log2 in the bitfield; decode back to a power of two.
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a cmpxchg from a volatile memory
  /// location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile cmpxchg.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Return true if this cmpxchg may spuriously fail.
  bool isWeak() const { return getSubclassData<WeakField>(); }

  /// Specify whether this cmpxchg may spuriously fail.
  void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  /// A success ordering must be a real atomic ordering.
  static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
    return Ordering != AtomicOrdering::NotAtomic &&
           Ordering != AtomicOrdering::Unordered;
  }

  /// A failure ordering additionally may not carry release semantics, since
  /// no store happens on failure.
  static bool isValidFailureOrdering(AtomicOrdering Ordering) {
    return Ordering != AtomicOrdering::NotAtomic &&
           Ordering != AtomicOrdering::Unordered &&
           Ordering != AtomicOrdering::AcquireRelease &&
           Ordering != AtomicOrdering::Release;
  }

  /// Returns the success ordering constraint of this cmpxchg instruction.
  AtomicOrdering getSuccessOrdering() const {
    return getSubclassData<SuccessOrderingField>();
  }

  /// Sets the success ordering constraint of this cmpxchg instruction.
  void setSuccessOrdering(AtomicOrdering Ordering) {
    assert(isValidSuccessOrdering(Ordering) &&
           "invalid CmpXchg success ordering");
    setSubclassData<SuccessOrderingField>(Ordering);
  }

  /// Returns the failure ordering constraint of this cmpxchg instruction.
  AtomicOrdering getFailureOrdering() const {
    return getSubclassData<FailureOrderingField>();
  }

  /// Sets the failure ordering constraint of this cmpxchg instruction.
  void setFailureOrdering(AtomicOrdering Ordering) {
    assert(isValidFailureOrdering(Ordering) &&
           "invalid CmpXchg failure ordering");
    setSubclassData<FailureOrderingField>(Ordering);
  }

  /// Returns a single ordering which is at least as strong as both the
  /// success and failure orderings for this cmpxchg.
  AtomicOrdering getMergedOrdering() const {
    if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
      return AtomicOrdering::SequentiallyConsistent;
    if (getFailureOrdering() == AtomicOrdering::Acquire) {
      // Strengthen the success ordering just enough to cover Acquire.
      if (getSuccessOrdering() == AtomicOrdering::Monotonic)
        return AtomicOrdering::Acquire;
      if (getSuccessOrdering() == AtomicOrdering::Release)
        return AtomicOrdering::AcquireRelease;
    }
    // Otherwise the success ordering already subsumes the failure ordering.
    return getSuccessOrdering();
  }

  /// Returns the synchronization scope ID of this cmpxchg instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this cmpxchg instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// The memory address operated on (operand 0).
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  /// The value compared against the memory contents (operand 1).
  Value *getCompareOperand() { return getOperand(1); }
  const Value *getCompareOperand() const { return getOperand(1); }

  /// The replacement value stored on success (operand 2).
  Value *getNewValOperand() { return getOperand(2); }
  const Value *getNewValOperand() const { return getOperand(2); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the strongest permitted ordering on failure, given the
  /// desired ordering on success.
  ///
  /// If the comparison in a cmpxchg operation fails, there is no atomic store
  /// so release semantics cannot be provided. So this function drops explicit
  /// Release requests from the AtomicOrdering. A SequentiallyConsistent
  /// operation would remain SequentiallyConsistent.
  static AtomicOrdering
  getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
    switch (SuccessOrdering) {
    default:
      llvm_unreachable("invalid cmpxchg success ordering");
    case AtomicOrdering::Release:
    case AtomicOrdering::Monotonic:
      return AtomicOrdering::Monotonic;
    case AtomicOrdering::AcquireRelease:
    case AtomicOrdering::Acquire:
      return AtomicOrdering::Acquire;
    case AtomicOrdering::SequentiallyConsistent:
      return AtomicOrdering::SequentiallyConsistent;
    }
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicCmpXchg;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this cmpxchg instruction.  Not quite
  /// enough room in SubClassData for everything, so synchronization scope ID
  /// gets its own field.
  SyncScope::ID SSID;
};
702 | ||||
703 | template <> | |||
704 | struct OperandTraits<AtomicCmpXchgInst> : | |||
705 | public FixedNumOperandTraits<AtomicCmpXchgInst, 3> { | |||
706 | }; | |||
707 | ||||
708 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)AtomicCmpXchgInst::op_iterator AtomicCmpXchgInst::op_begin() { return OperandTraits<AtomicCmpXchgInst>::op_begin(this ); } AtomicCmpXchgInst::const_op_iterator AtomicCmpXchgInst:: op_begin() const { return OperandTraits<AtomicCmpXchgInst> ::op_begin(const_cast<AtomicCmpXchgInst*>(this)); } AtomicCmpXchgInst ::op_iterator AtomicCmpXchgInst::op_end() { return OperandTraits <AtomicCmpXchgInst>::op_end(this); } AtomicCmpXchgInst:: const_op_iterator AtomicCmpXchgInst::op_end() const { return OperandTraits <AtomicCmpXchgInst>::op_end(const_cast<AtomicCmpXchgInst *>(this)); } Value *AtomicCmpXchgInst::getOperand(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 708, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<AtomicCmpXchgInst >::op_begin(const_cast<AtomicCmpXchgInst*>(this))[i_nocapture ].get()); } void AtomicCmpXchgInst::setOperand(unsigned i_nocapture , Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && "setOperand() out of range!") ? 
void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicCmpXchgInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 708, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<AtomicCmpXchgInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned AtomicCmpXchgInst::getNumOperands () const { return OperandTraits<AtomicCmpXchgInst>::operands (this); } template <int Idx_nocapture> Use &AtomicCmpXchgInst ::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &AtomicCmpXchgInst ::Op() const { return this->OpFrom<Idx_nocapture>(this ); } | |||
709 | ||||
710 | //===----------------------------------------------------------------------===// | |||
711 | // AtomicRMWInst Class | |||
712 | //===----------------------------------------------------------------------===// | |||
713 | ||||
/// an instruction that atomically reads a memory location,
/// combines it with another value, and then stores the result back. Returns
/// the old value.
///
class AtomicRMWInst : public Instruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  AtomicRMWInst *cloneImpl() const;

public:
  /// This enumeration lists the possible modifications atomicrmw can make. In
  /// the descriptions, 'p' is the pointer to the instruction's memory location,
  /// 'old' is the initial value of *p, and 'v' is the other value passed to the
  /// instruction. These instructions always return 'old'.
  enum BinOp : unsigned {
    /// *p = v
    Xchg,
    /// *p = old + v
    Add,
    /// *p = old - v
    Sub,
    /// *p = old & v
    And,
    /// *p = ~(old & v)
    Nand,
    /// *p = old | v
    Or,
    /// *p = old ^ v
    Xor,
    /// *p = old >signed v ? old : v
    Max,
    /// *p = old <signed v ? old : v
    Min,
    /// *p = old >unsigned v ? old : v
    UMax,
    /// *p = old <unsigned v ? old : v
    UMin,

    /// *p = old + v
    FAdd,

    /// *p = old - v
    FSub,

    /// *p = maxnum(old, v)
    /// \p maxnum matches the behavior of \p llvm.maxnum.*.
    FMax,

    /// *p = minnum(old, v)
    /// \p minnum matches the behavior of \p llvm.minnum.*.
    FMin,

    /// Increment one up to a maximum value.
    /// *p = (old u>= v) ? 0 : (old + 1)
    UIncWrap,

    /// Decrement one until a minimum value or zero.
    /// *p = ((old == 0) || (old u> v)) ? v : (old - 1)
    UDecWrap,

    FIRST_BINOP = Xchg,
    LAST_BINOP = UDecWrap,
    BAD_BINOP
  };

private:
  // Helper alias: a 3-bit ordering field starting at bit \p Offset.
  template <unsigned Offset>
  using AtomicOrderingBitfieldElement =
      typename Bitfield::Element<AtomicOrdering, Offset, 3,
                                 AtomicOrdering::LAST>;

  // Helper alias: a 5-bit BinOp field starting at bit \p Offset.
  template <unsigned Offset>
  using BinOpBitfieldElement =
      typename Bitfield::Element<BinOp, Offset, 5, BinOp::LAST_BINOP>;

public:
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                Instruction *InsertBefore = nullptr);
  AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
                AtomicOrdering Ordering, SyncScope::ID SSID,
                BasicBlock *InsertAtEnd);

  // allocate space for exactly two operands
  void *operator new(size_t S) { return User::operator new(S, 2); }
  void operator delete(void *Ptr) { User::operator delete(Ptr); }

  // Layout of the subclass-data bitfield; areContiguous() below verifies it.
  using VolatileField = BoolBitfieldElementT<0>;
  using AtomicOrderingField =
      AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
  using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
  using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
  static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
                                        OperationField, AlignmentField>(),
                "Bitfields must be contiguous");

  /// Returns the read-modify-write operation this instruction performs.
  BinOp getOperation() const { return getSubclassData<OperationField>(); }

  /// Returns the printable name of the given operation (e.g. for diagnostics).
  static StringRef getOperationName(BinOp Op);

  /// True for the floating-point operations (FAdd, FSub, FMax, FMin).
  static bool isFPOperation(BinOp Op) {
    switch (Op) {
    case AtomicRMWInst::FAdd:
    case AtomicRMWInst::FSub:
    case AtomicRMWInst::FMax:
    case AtomicRMWInst::FMin:
      return true;
    default:
      return false;
    }
  }

  /// Sets the read-modify-write operation this instruction performs.
  void setOperation(BinOp Operation) {
    setSubclassData<OperationField>(Operation);
  }

  /// Return the alignment of the memory that is being allocated by the
  /// instruction.
  Align getAlign() const {
    // Stored as log2 in the bitfield; decode back to a power of two.
    return Align(1ULL << getSubclassData<AlignmentField>());
  }

  void setAlignment(Align Align) {
    setSubclassData<AlignmentField>(Log2(Align));
  }

  /// Return true if this is a RMW on a volatile memory location.
  ///
  bool isVolatile() const { return getSubclassData<VolatileField>(); }

  /// Specify whether this is a volatile RMW or not.
  ///
  void setVolatile(bool V) { setSubclassData<VolatileField>(V); }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  /// Returns the ordering constraint of this rmw instruction.
  AtomicOrdering getOrdering() const {
    return getSubclassData<AtomicOrderingField>();
  }

  /// Sets the ordering constraint of this rmw instruction.  atomicrmw is
  /// always atomic, so NotAtomic and Unordered are rejected.
  void setOrdering(AtomicOrdering Ordering) {
    assert(Ordering != AtomicOrdering::NotAtomic &&
           "atomicrmw instructions can only be atomic.");
    assert(Ordering != AtomicOrdering::Unordered &&
           "atomicrmw instructions cannot be unordered.");
    setSubclassData<AtomicOrderingField>(Ordering);
  }

  /// Returns the synchronization scope ID of this rmw instruction.
  SyncScope::ID getSyncScopeID() const {
    return SSID;
  }

  /// Sets the synchronization scope ID of this rmw instruction.
  void setSyncScopeID(SyncScope::ID SSID) {
    this->SSID = SSID;
  }

  /// The memory address operated on (operand 0).
  Value *getPointerOperand() { return getOperand(0); }
  const Value *getPointerOperand() const { return getOperand(0); }
  static unsigned getPointerOperandIndex() { return 0U; }

  /// The value combined with the memory contents (operand 1).
  Value *getValOperand() { return getOperand(1); }
  const Value *getValOperand() const { return getOperand(1); }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// True if this instruction's operation is one of the FP operations.
  bool isFloatingPointOperation() const {
    return isFPOperation(getOperation());
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::AtomicRMW;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
            AtomicOrdering Ordering, SyncScope::ID SSID);

  // Shadow Instruction::setInstructionSubclassData with a private forwarding
  // method so that subclasses cannot accidentally use it.
  template <typename Bitfield>
  void setSubclassData(typename Bitfield::Type Value) {
    Instruction::setSubclassData<Bitfield>(Value);
  }

  /// The synchronization scope ID of this rmw instruction.  Not quite enough
  /// room in SubClassData for everything, so synchronization scope ID gets its
  /// own field.
  SyncScope::ID SSID;
};
917 | ||||
918 | template <> | |||
919 | struct OperandTraits<AtomicRMWInst> | |||
920 | : public FixedNumOperandTraits<AtomicRMWInst,2> { | |||
921 | }; | |||
922 | ||||
923 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)AtomicRMWInst::op_iterator AtomicRMWInst::op_begin() { return OperandTraits<AtomicRMWInst>::op_begin(this); } AtomicRMWInst ::const_op_iterator AtomicRMWInst::op_begin() const { return OperandTraits <AtomicRMWInst>::op_begin(const_cast<AtomicRMWInst*> (this)); } AtomicRMWInst::op_iterator AtomicRMWInst::op_end() { return OperandTraits<AtomicRMWInst>::op_end(this); } AtomicRMWInst::const_op_iterator AtomicRMWInst::op_end() const { return OperandTraits<AtomicRMWInst>::op_end(const_cast <AtomicRMWInst*>(this)); } Value *AtomicRMWInst::getOperand (unsigned i_nocapture) const { (static_cast <bool> (i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 923, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<AtomicRMWInst >::op_begin(const_cast<AtomicRMWInst*>(this))[i_nocapture ].get()); } void AtomicRMWInst::setOperand(unsigned i_nocapture , Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<AtomicRMWInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 923, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<AtomicRMWInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned AtomicRMWInst::getNumOperands() const { return OperandTraits<AtomicRMWInst>::operands( this); } template <int Idx_nocapture> Use &AtomicRMWInst ::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &AtomicRMWInst ::Op() const { return this->OpFrom<Idx_nocapture>(this ); } | |||
924 | ||||
925 | //===----------------------------------------------------------------------===// | |||
926 | // GetElementPtrInst Class | |||
927 | //===----------------------------------------------------------------------===// | |||
928 | ||||
// checkGEPType - Simple wrapper function to give a better assertion failure
// message on bad indexes for a gep instruction.
//
inline Type *checkGEPType(Type *Ty) {
  // GEP index-type computation yields null for invalid index sequences;
  // surface that here with a readable assertion instead of a null deref later.
  assert(Ty && "Invalid GetElementPtrInst indices for type!");
  return Ty;
}
936 | ||||
/// an instruction for type-safe pointer arithmetic to
/// access elements of arrays and structs
///
class GetElementPtrInst : public Instruction {
  Type *SourceElementType;   // Type the GEP indexes into (the "pointee").
  Type *ResultElementType;   // Cached getIndexedType(SourceElementType, indices).

  GetElementPtrInst(const GetElementPtrInst &GEPI);

  /// Constructors - Create a getelementptr instruction with a base pointer an
  /// list of indices. The first ctor can optionally insert before an existing
  /// instruction, the second appends the new instruction to the specified
  /// BasicBlock.
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, Instruction *InsertBefore);
  inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
                           ArrayRef<Value *> IdxList, unsigned Values,
                           const Twine &NameStr, BasicBlock *InsertAtEnd);

  void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  GetElementPtrInst *cloneImpl() const;

public:
  /// Create a GEP; optionally inserted before \p InsertBefore.
  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr = "",
                                   Instruction *InsertBefore = nullptr) {
    // One operand for the base pointer plus one per index; also the count
    // passed to the hung-off-uses placement new below.
    unsigned Values = 1 + unsigned(IdxList.size());
    assert(PointeeType && "Must specify element type");
    assert(cast<PointerType>(Ptr->getType()->getScalarType())
               ->isOpaqueOrPointeeTypeMatches(PointeeType));
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertBefore);
  }

  /// Create a GEP appended to the end of \p InsertAtEnd.
  static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
                                   ArrayRef<Value *> IdxList,
                                   const Twine &NameStr,
                                   BasicBlock *InsertAtEnd) {
    unsigned Values = 1 + unsigned(IdxList.size());
    assert(PointeeType && "Must specify element type");
    assert(cast<PointerType>(Ptr->getType()->getScalarType())
               ->isOpaqueOrPointeeTypeMatches(PointeeType));
    return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
                                          NameStr, InsertAtEnd);
  }

  /// Create an "inbounds" getelementptr. See the documentation for the
  /// "inbounds" flag in LangRef.html for details.
  static GetElementPtrInst *
  CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
                 const Twine &NameStr = "",
                 Instruction *InsertBefore = nullptr) {
    GetElementPtrInst *GEP =
        Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
    GEP->setIsInBounds(true);
    return GEP;
  }

  static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
                                           ArrayRef<Value *> IdxList,
                                           const Twine &NameStr,
                                           BasicBlock *InsertAtEnd) {
    GetElementPtrInst *GEP =
        Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
    GEP->setIsInBounds(true);
    return GEP;
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)

  Type *getSourceElementType() const { return SourceElementType; }

  void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
  void setResultElementType(Type *Ty) { ResultElementType = Ty; }

  Type *getResultElementType() const {
    assert(cast<PointerType>(getType()->getScalarType())
               ->isOpaqueOrPointeeTypeMatches(ResultElementType));
    return ResultElementType;
  }

  /// Returns the address space of this instruction's pointer type.
  unsigned getAddressSpace() const {
    // Note that this is always the same as the pointer operand's address space
    // and that is cheaper to compute, so cheat here.
    return getPointerAddressSpace();
  }

  /// Returns the result type of a getelementptr with the given source
  /// element type and indexes.
  ///
  /// Null is returned if the indices are invalid for the specified
  /// source element type.
  static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
  static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);

  /// Return the type of the element at the given index of an indexable
  /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
  ///
  /// Returns null if the type can't be indexed, or the given index is not
  /// legal for the given type.
  static Type *getTypeAtIndex(Type *Ty, Value *Idx);
  static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);

  // Index operands start after the single pointer operand (operand 0).
  inline op_iterator idx_begin() { return op_begin()+1; }
  inline const_op_iterator idx_begin() const { return op_begin()+1; }
  inline op_iterator idx_end() { return op_end(); }
  inline const_op_iterator idx_end() const { return op_end(); }

  inline iterator_range<op_iterator> indices() {
    return make_range(idx_begin(), idx_end());
  }

  inline iterator_range<const_op_iterator> indices() const {
    return make_range(idx_begin(), idx_end());
  }

  Value *getPointerOperand() {
    return getOperand(0);
  }
  const Value *getPointerOperand() const {
    return getOperand(0);
  }
  static unsigned getPointerOperandIndex() {
    return 0U;    // get index for modifying correct operand.
  }

  /// Method to return the pointer operand as a
  /// PointerType.
  Type *getPointerOperandType() const {
    return getPointerOperand()->getType();
  }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperandType()->getPointerAddressSpace();
  }

  /// Returns the pointer type returned by the GEP
  /// instruction, which may be a vector of pointers.
  static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
                                ArrayRef<Value *> IdxList) {
    PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
    unsigned AddrSpace = OrigPtrTy->getAddressSpace();
    Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
    // Opaque pointers carry no element type; typed pointers point at the
    // indexed result element type.
    Type *PtrTy = OrigPtrTy->isOpaque()
      ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
      : PointerType::get(ResultElemTy, AddrSpace);
    // Vector GEP
    if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
      ElementCount EltCount = PtrVTy->getElementCount();
      return VectorType::get(PtrTy, EltCount);
    }
    // A scalar base pointer with any vector index also yields a vector of
    // pointers with that index's element count.
    for (Value *Index : IdxList)
      if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
        ElementCount EltCount = IndexVTy->getElementCount();
        return VectorType::get(PtrTy, EltCount);
      }
    // Scalar GEP
    return PtrTy;
  }

  unsigned getNumIndices() const {  // Note: always non-negative
    return getNumOperands() - 1;
  }

  bool hasIndices() const {
    return getNumOperands() > 1;
  }

  /// Return true if all of the indices of this GEP are
  /// zeros. If so, the result pointer and the first operand have the same
  /// value, just potentially different types.
  bool hasAllZeroIndices() const;

  /// Return true if all of the indices of this GEP are
  /// constant integers. If so, the result pointer and the first operand have
  /// a constant offset between them.
  bool hasAllConstantIndices() const;

  /// Set or clear the inbounds flag on this GEP instruction.
  /// See LangRef.html for the meaning of inbounds on a getelementptr.
  void setIsInBounds(bool b = true);

  /// Determine whether the GEP has the inbounds flag.
  bool isInBounds() const;

  /// Accumulate the constant address offset of this GEP if possible.
  ///
  /// This routine accepts an APInt into which it will accumulate the constant
  /// offset of this GEP if the GEP is in fact constant. If the GEP is not
  /// all-constant, it returns false and the value of the offset APInt is
  /// undefined (it is *not* preserved!). The APInt passed into this routine
  /// must be at least as wide as the IntPtr type for the address space of
  /// the base GEP pointer.
  bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
  bool collectOffset(const DataLayout &DL, unsigned BitWidth,
                     MapVector<Value *, APInt> &VariableOffsets,
                     APInt &ConstantOffset) const;
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::GetElementPtr);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1153 | ||||
// GEPs have a variable number of operands: the base pointer plus at least
// one index (hence the minimum arity of 1).
template <>
struct OperandTraits<GetElementPtrInst> :
  public VariadicOperandTraits<GetElementPtrInst, 1> {
};
1158 | ||||
// Out-of-line inline definition: construct a GEP and (optionally) insert it
// before an existing instruction. Operands are hung off before `this`, so
// op_end(this) - Values locates the operand array.
GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     Instruction *InsertBefore)
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertBefore),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  assert(cast<PointerType>(getType()->getScalarType())
             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
  init(Ptr, IdxList, NameStr);
}
1172 | ||||
// Out-of-line inline definition: construct a GEP appended to the end of the
// given basic block. Mirrors the insert-before constructor above.
GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
                                     ArrayRef<Value *> IdxList, unsigned Values,
                                     const Twine &NameStr,
                                     BasicBlock *InsertAtEnd)
    : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
                  OperandTraits<GetElementPtrInst>::op_end(this) - Values,
                  Values, InsertAtEnd),
      SourceElementType(PointeeType),
      ResultElementType(getIndexedType(PointeeType, IdxList)) {
  assert(cast<PointerType>(getType()->getScalarType())
             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
  init(Ptr, IdxList, NameStr);
}
1186 | ||||
// Materialize the transparent operand accessors declared inside
// GetElementPtrInst via DECLARE_TRANSPARENT_OPERAND_ACCESSORS above.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1188 | ||||
1189 | //===----------------------------------------------------------------------===// | |||
1190 | // ICmpInst Class | |||
1191 | //===----------------------------------------------------------------------===// | |||
1192 | ||||
/// This instruction compares its operands according to the predicate given
/// to the constructor. It only operates on integers or pointers. The operands
/// must be identical types.
/// Represent an integer comparison operator.
class ICmpInst: public CmpInst {
  // Debug-only invariant checks shared by all constructors: the predicate
  // must be an integer predicate, and both operands must have the same
  // int/int-vector or pointer/pointer-vector type.
  void AssertOK() {
    assert(isIntPredicate() &&
           "Invalid ICmp predicate value");
    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
          "Both operands to ICmp instruction are not of the same type!");
    // Check that the operands are the right type
    assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
            getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
           "Invalid operand types for ICmp instruction");
  }

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical ICmpInst
  ICmpInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  ICmpInst(
    Instruction *InsertBefore,  ///< Where to insert
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              InsertBefore) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// Constructor with insert-at-end semantics.
  ICmpInst(
    BasicBlock &InsertAtEnd, ///< Block to insert into.
    Predicate pred,  ///< The predicate to use for the comparison
    Value *LHS,      ///< The left-hand-side of the expression
    Value *RHS,      ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr,
              &InsertAtEnd) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// Constructor with no-insertion semantics
  // NOTE(review): the static analyzer flags "Called C++ object pointer is
  // null" on the LHS->getType() call below (reported via a caller in
  // LoopBoundSplit). LHS is dereferenced in the mem-initializer list, i.e.
  // before AssertOK() could run, so every caller must guarantee a non-null
  // LHS — TODO confirm the flagged call site cannot pass null.
  ICmpInst(
    Predicate pred, ///< The predicate to use for the comparison
    Value *LHS,     ///< The left-hand-side of the expression
    Value *RHS,     ///< The right-hand-side of the expression
    const Twine &NameStr = "" ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::ICmp, pred, LHS, RHS, NameStr) {
#ifndef NDEBUG
  AssertOK();
#endif
  }

  /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
  /// @returns the predicate that would be the result if the operand were
  /// regarded as signed.
  /// Return the signed version of the predicate
  Predicate getSignedPredicate() const {
    return getSignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// Return the signed version of the predicate.
  static Predicate getSignedPredicate(Predicate pred);

  /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
  /// @returns the predicate that would be the result if the operand were
  /// regarded as unsigned.
  /// Return the unsigned version of the predicate
  Predicate getUnsignedPredicate() const {
    return getUnsignedPredicate(getPredicate());
  }

  /// This is a static version that you can use without an instruction.
  /// Return the unsigned version of the predicate.
  static Predicate getUnsignedPredicate(Predicate pred);

  /// Return true if this predicate is either EQ or NE. This also
  /// tests for commutativity.
  static bool isEquality(Predicate P) {
    return P == ICMP_EQ || P == ICMP_NE;
  }

  /// Return true if this predicate is either EQ or NE. This also
  /// tests for commutativity.
  bool isEquality() const {
    return isEquality(getPredicate());
  }

  /// @returns true if the predicate of this ICmpInst is commutative
  /// Determine if this relation is commutative.
  bool isCommutative() const { return isEquality(); }

  /// Return true if the predicate is relational (not EQ or NE).
  ///
  bool isRelational() const {
    return !isEquality();
  }

  /// Return true if the predicate is relational (not EQ or NE).
  ///
  static bool isRelational(Predicate P) {
    return !isEquality(P);
  }

  /// Return true if the predicate is SGT or UGT.
  ///
  static bool isGT(Predicate P) {
    return P == ICMP_SGT || P == ICMP_UGT;
  }

  /// Return true if the predicate is SLT or ULT.
  ///
  static bool isLT(Predicate P) {
    return P == ICMP_SLT || P == ICMP_ULT;
  }

  /// Return true if the predicate is SGE or UGE.
  ///
  static bool isGE(Predicate P) {
    return P == ICMP_SGE || P == ICMP_UGE;
  }

  /// Return true if the predicate is SLE or ULE.
  ///
  static bool isLE(Predicate P) {
    return P == ICMP_SLE || P == ICMP_ULE;
  }

  /// Returns the sequence of all ICmp predicates.
  ///
  static auto predicates() { return ICmpPredicates(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
  void swapOperands() {
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  /// Return result of `LHS Pred RHS` comparison.
  static bool compare(const APInt &LHS, const APInt &RHS,
                      ICmpInst::Predicate Pred);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::ICmp;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
1362 | ||||
1363 | //===----------------------------------------------------------------------===// | |||
1364 | // FCmpInst Class | |||
1365 | //===----------------------------------------------------------------------===// | |||
1366 | ||||
1367 | /// This instruction compares its operands according to the predicate given | |||
1368 | /// to the constructor. It only operates on floating point values or packed | |||
1369 | /// vectors of floating point values. The operands must be identical types. | |||
1370 | /// Represents a floating point comparison operator. | |||
1371 | class FCmpInst: public CmpInst { | |||
1372 | void AssertOK() { | |||
1373 | assert(isFPPredicate() && "Invalid FCmp predicate value")(static_cast <bool> (isFPPredicate() && "Invalid FCmp predicate value" ) ? void (0) : __assert_fail ("isFPPredicate() && \"Invalid FCmp predicate value\"" , "llvm/include/llvm/IR/Instructions.h", 1373, __extension__ __PRETTY_FUNCTION__ )); | |||
1374 | assert(getOperand(0)->getType() == getOperand(1)->getType() &&(static_cast <bool> (getOperand(0)->getType() == getOperand (1)->getType() && "Both operands to FCmp instruction are not of the same type!" ) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\"" , "llvm/include/llvm/IR/Instructions.h", 1375, __extension__ __PRETTY_FUNCTION__ )) | |||
1375 | "Both operands to FCmp instruction are not of the same type!")(static_cast <bool> (getOperand(0)->getType() == getOperand (1)->getType() && "Both operands to FCmp instruction are not of the same type!" ) ? void (0) : __assert_fail ("getOperand(0)->getType() == getOperand(1)->getType() && \"Both operands to FCmp instruction are not of the same type!\"" , "llvm/include/llvm/IR/Instructions.h", 1375, __extension__ __PRETTY_FUNCTION__ )); | |||
1376 | // Check that the operands are the right type | |||
1377 | assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy () && "Invalid operand types for FCmp instruction") ? void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\"" , "llvm/include/llvm/IR/Instructions.h", 1378, __extension__ __PRETTY_FUNCTION__ )) | |||
1378 | "Invalid operand types for FCmp instruction")(static_cast <bool> (getOperand(0)->getType()->isFPOrFPVectorTy () && "Invalid operand types for FCmp instruction") ? void (0) : __assert_fail ("getOperand(0)->getType()->isFPOrFPVectorTy() && \"Invalid operand types for FCmp instruction\"" , "llvm/include/llvm/IR/Instructions.h", 1378, __extension__ __PRETTY_FUNCTION__ )); | |||
1379 | } | |||
1380 | ||||
1381 | protected: | |||
1382 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
1383 | friend class Instruction; | |||
1384 | ||||
1385 | /// Clone an identical FCmpInst | |||
1386 | FCmpInst *cloneImpl() const; | |||
1387 | ||||
1388 | public: | |||
1389 | /// Constructor with insert-before-instruction semantics. | |||
1390 | FCmpInst( | |||
1391 | Instruction *InsertBefore, ///< Where to insert | |||
1392 | Predicate pred, ///< The predicate to use for the comparison | |||
1393 | Value *LHS, ///< The left-hand-side of the expression | |||
1394 | Value *RHS, ///< The right-hand-side of the expression | |||
1395 | const Twine &NameStr = "" ///< Name of the instruction | |||
1396 | ) : CmpInst(makeCmpResultType(LHS->getType()), | |||
1397 | Instruction::FCmp, pred, LHS, RHS, NameStr, | |||
1398 | InsertBefore) { | |||
1399 | AssertOK(); | |||
1400 | } | |||
1401 | ||||
  /// Constructor with insert-at-end semantics.
  FCmpInst(
    BasicBlock &InsertAtEnd,   ///< Block to insert into.
    Predicate pred,            ///< The predicate to use for the comparison
    Value *LHS,                ///< The left-hand-side of the expression
    Value *RHS,                ///< The right-hand-side of the expression
    const Twine &NameStr = ""  ///< Name of the instruction
  ) : CmpInst(makeCmpResultType(LHS->getType()),
              Instruction::FCmp, pred, LHS, RHS, NameStr,
              &InsertAtEnd) {
    // In +Asserts builds, verify both operands have the same FP or
    // FP-vector type.
    AssertOK();
  }
1414 | ||||
  /// Constructor with no-insertion semantics; the instruction is created
  /// detached from any basic block.
  FCmpInst(
    Predicate Pred, ///< The predicate to use for the comparison
    Value *LHS,     ///< The left-hand-side of the expression
    Value *RHS,     ///< The right-hand-side of the expression
    const Twine &NameStr = "", ///< Name of the instruction
    Instruction *FlagsSource = nullptr ///< Forwarded to CmpInst; presumably the instruction to copy flags from — confirm in CmpInst
  ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
              RHS, NameStr, nullptr, FlagsSource) {
    // In +Asserts builds, verify both operands have the same FP or
    // FP-vector type.
    AssertOK();
  }
1426 | ||||
1427 | /// @returns true if the predicate of this instruction is EQ or NE. | |||
1428 | /// Determine if this is an equality predicate. | |||
1429 | static bool isEquality(Predicate Pred) { | |||
1430 | return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ || | |||
1431 | Pred == FCMP_UNE; | |||
1432 | } | |||
1433 | ||||
  /// @returns true if the predicate of this instruction is EQ or NE.
  /// Determine if this is an equality predicate.
  /// Forwards to the static overload using this instruction's predicate.
  bool isEquality() const { return isEquality(getPredicate()); }
1437 | ||||
1438 | /// @returns true if the predicate of this instruction is commutative. | |||
1439 | /// Determine if this is a commutative predicate. | |||
1440 | bool isCommutative() const { | |||
1441 | return isEquality() || | |||
1442 | getPredicate() == FCMP_FALSE || | |||
1443 | getPredicate() == FCMP_TRUE || | |||
1444 | getPredicate() == FCMP_ORD || | |||
1445 | getPredicate() == FCMP_UNO; | |||
1446 | } | |||
1447 | ||||
  /// @returns true if the predicate is relational (not EQ or NE).
  /// Determine if this a relational predicate.
  bool isRelational() const { return !isEquality(); }

  /// Exchange the two operands to this instruction in such a way that it does
  /// not modify the semantics of the instruction. The predicate value may be
  /// changed to retain the same result if the predicate is order dependent
  /// (e.g. ult).
  /// Swap operands and adjust predicate.
  void swapOperands() {
    // Replace the predicate with its operand-swapped mirror first, then
    // exchange the two operand Uses in place.
    setPredicate(getSwappedPredicate());
    Op<0>().swap(Op<1>());
  }

  /// Returns the sequence of all FCmp predicates.
  ///
  static auto predicates() { return FCmpPredicates(); }

  /// Return result of `LHS Pred RHS` comparison.
  static bool compare(const APFloat &LHS, const APFloat &RHS,
                      FCmpInst::Predicate Pred);
1469 | ||||
  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::FCmp;
  }
  static bool classof(const Value *V) {
    // A Value is an FCmpInst iff it is an Instruction with the FCmp opcode.
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
1477 | }; | |||
1478 | ||||
1479 | //===----------------------------------------------------------------------===// | |||
1480 | /// This class represents a function call, abstracting a target | |||
1481 | /// machine's calling convention. This class uses low bit of the SubClassData | |||
1482 | /// field to indicate whether or not this is a tail call. The rest of the bits | |||
1483 | /// hold the calling convention of the call. | |||
1484 | /// | |||
1485 | class CallInst : public CallBase { | |||
1486 | CallInst(const CallInst &CI); | |||
1487 | ||||
1488 | /// Construct a CallInst given a range of arguments. | |||
1489 | /// Construct a CallInst from a range of arguments | |||
1490 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, | |||
1491 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, | |||
1492 | Instruction *InsertBefore); | |||
1493 | ||||
1494 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, | |||
1495 | const Twine &NameStr, Instruction *InsertBefore) | |||
1496 | : CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore) {} | |||
1497 | ||||
1498 | /// Construct a CallInst given a range of arguments. | |||
1499 | /// Construct a CallInst from a range of arguments | |||
1500 | inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, | |||
1501 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr, | |||
1502 | BasicBlock *InsertAtEnd); | |||
1503 | ||||
1504 | explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr, | |||
1505 | Instruction *InsertBefore); | |||
1506 | ||||
1507 | CallInst(FunctionType *ty, Value *F, const Twine &NameStr, | |||
1508 | BasicBlock *InsertAtEnd); | |||
1509 | ||||
1510 | void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args, | |||
1511 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); | |||
1512 | void init(FunctionType *FTy, Value *Func, const Twine &NameStr); | |||
1513 | ||||
1514 | /// Compute the number of operands to allocate. | |||
1515 | static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { | |||
1516 | // We need one operand for the called function, plus the input operand | |||
1517 | // counts provided. | |||
1518 | return 1 + NumArgs + NumBundleInputs; | |||
1519 | } | |||
1520 | ||||
1521 | protected: | |||
1522 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
1523 | friend class Instruction; | |||
1524 | ||||
1525 | CallInst *cloneImpl() const; | |||
1526 | ||||
1527 | public: | |||
1528 | static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "", | |||
1529 | Instruction *InsertBefore = nullptr) { | |||
1530 | return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore); | |||
1531 | } | |||
1532 | ||||
1533 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, | |||
1534 | const Twine &NameStr, | |||
1535 | Instruction *InsertBefore = nullptr) { | |||
1536 | return new (ComputeNumOperands(Args.size())) | |||
1537 | CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertBefore); | |||
1538 | } | |||
1539 | ||||
1540 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, | |||
1541 | ArrayRef<OperandBundleDef> Bundles = std::nullopt, | |||
1542 | const Twine &NameStr = "", | |||
1543 | Instruction *InsertBefore = nullptr) { | |||
1544 | const int NumOperands = | |||
1545 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); | |||
1546 | const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); | |||
1547 | ||||
1548 | return new (NumOperands, DescriptorBytes) | |||
1549 | CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore); | |||
1550 | } | |||
1551 | ||||
1552 | static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr, | |||
1553 | BasicBlock *InsertAtEnd) { | |||
1554 | return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd); | |||
1555 | } | |||
1556 | ||||
1557 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, | |||
1558 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
1559 | return new (ComputeNumOperands(Args.size())) | |||
1560 | CallInst(Ty, Func, Args, std::nullopt, NameStr, InsertAtEnd); | |||
1561 | } | |||
1562 | ||||
1563 | static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args, | |||
1564 | ArrayRef<OperandBundleDef> Bundles, | |||
1565 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
1566 | const int NumOperands = | |||
1567 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); | |||
1568 | const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); | |||
1569 | ||||
1570 | return new (NumOperands, DescriptorBytes) | |||
1571 | CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd); | |||
1572 | } | |||
1573 | ||||
1574 | static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "", | |||
1575 | Instruction *InsertBefore = nullptr) { | |||
1576 | return Create(Func.getFunctionType(), Func.getCallee(), NameStr, | |||
1577 | InsertBefore); | |||
1578 | } | |||
1579 | ||||
1580 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, | |||
1581 | ArrayRef<OperandBundleDef> Bundles = std::nullopt, | |||
1582 | const Twine &NameStr = "", | |||
1583 | Instruction *InsertBefore = nullptr) { | |||
1584 | return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, | |||
1585 | NameStr, InsertBefore); | |||
1586 | } | |||
1587 | ||||
1588 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, | |||
1589 | const Twine &NameStr, | |||
1590 | Instruction *InsertBefore = nullptr) { | |||
1591 | return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, | |||
1592 | InsertBefore); | |||
1593 | } | |||
1594 | ||||
1595 | static CallInst *Create(FunctionCallee Func, const Twine &NameStr, | |||
1596 | BasicBlock *InsertAtEnd) { | |||
1597 | return Create(Func.getFunctionType(), Func.getCallee(), NameStr, | |||
1598 | InsertAtEnd); | |||
1599 | } | |||
1600 | ||||
1601 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, | |||
1602 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
1603 | return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr, | |||
1604 | InsertAtEnd); | |||
1605 | } | |||
1606 | ||||
1607 | static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args, | |||
1608 | ArrayRef<OperandBundleDef> Bundles, | |||
1609 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
1610 | return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles, | |||
1611 | NameStr, InsertAtEnd); | |||
1612 | } | |||
1613 | ||||
1614 | /// Create a clone of \p CI with a different set of operand bundles and | |||
1615 | /// insert it before \p InsertPt. | |||
1616 | /// | |||
1617 | /// The returned call instruction is identical \p CI in every way except that | |||
1618 | /// the operand bundles for the new instruction are set to the operand bundles | |||
1619 | /// in \p Bundles. | |||
1620 | static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles, | |||
1621 | Instruction *InsertPt = nullptr); | |||
1622 | ||||
1623 | /// Generate the IR for a call to malloc: | |||
1624 | /// 1. Compute the malloc call's argument as the specified type's size, | |||
1625 | /// possibly multiplied by the array size if the array size is not | |||
1626 | /// constant 1. | |||
1627 | /// 2. Call malloc with that argument. | |||
1628 | /// 3. Bitcast the result of the malloc call to the specified type. | |||
1629 | static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, | |||
1630 | Type *AllocTy, Value *AllocSize, | |||
1631 | Value *ArraySize = nullptr, | |||
1632 | Function *MallocF = nullptr, | |||
1633 | const Twine &Name = ""); | |||
1634 | static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, | |||
1635 | Type *AllocTy, Value *AllocSize, | |||
1636 | Value *ArraySize = nullptr, | |||
1637 | Function *MallocF = nullptr, | |||
1638 | const Twine &Name = ""); | |||
1639 | static Instruction * | |||
1640 | CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy, Type *AllocTy, | |||
1641 | Value *AllocSize, Value *ArraySize = nullptr, | |||
1642 | ArrayRef<OperandBundleDef> Bundles = std::nullopt, | |||
1643 | Function *MallocF = nullptr, const Twine &Name = ""); | |||
1644 | static Instruction * | |||
1645 | CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy, Type *AllocTy, | |||
1646 | Value *AllocSize, Value *ArraySize = nullptr, | |||
1647 | ArrayRef<OperandBundleDef> Bundles = std::nullopt, | |||
1648 | Function *MallocF = nullptr, const Twine &Name = ""); | |||
1649 | /// Generate the IR for a call to the builtin free function. | |||
1650 | static Instruction *CreateFree(Value *Source, Instruction *InsertBefore); | |||
1651 | static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd); | |||
1652 | static Instruction *CreateFree(Value *Source, | |||
1653 | ArrayRef<OperandBundleDef> Bundles, | |||
1654 | Instruction *InsertBefore); | |||
1655 | static Instruction *CreateFree(Value *Source, | |||
1656 | ArrayRef<OperandBundleDef> Bundles, | |||
1657 | BasicBlock *InsertAtEnd); | |||
1658 | ||||
1659 | // Note that 'musttail' implies 'tail'. | |||
1660 | enum TailCallKind : unsigned { | |||
1661 | TCK_None = 0, | |||
1662 | TCK_Tail = 1, | |||
1663 | TCK_MustTail = 2, | |||
1664 | TCK_NoTail = 3, | |||
1665 | TCK_LAST = TCK_NoTail | |||
1666 | }; | |||
1667 | ||||
1668 | using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>; | |||
1669 | static_assert( | |||
1670 | Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(), | |||
1671 | "Bitfields must be contiguous"); | |||
1672 | ||||
1673 | TailCallKind getTailCallKind() const { | |||
1674 | return getSubclassData<TailCallKindField>(); | |||
1675 | } | |||
1676 | ||||
1677 | bool isTailCall() const { | |||
1678 | TailCallKind Kind = getTailCallKind(); | |||
1679 | return Kind == TCK_Tail || Kind == TCK_MustTail; | |||
1680 | } | |||
1681 | ||||
1682 | bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; } | |||
1683 | ||||
1684 | bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; } | |||
1685 | ||||
1686 | void setTailCallKind(TailCallKind TCK) { | |||
1687 | setSubclassData<TailCallKindField>(TCK); | |||
1688 | } | |||
1689 | ||||
1690 | void setTailCall(bool IsTc = true) { | |||
1691 | setTailCallKind(IsTc ? TCK_Tail : TCK_None); | |||
1692 | } | |||
1693 | ||||
1694 | /// Return true if the call can return twice | |||
1695 | bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); } | |||
1696 | void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); } | |||
1697 | ||||
1698 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
1699 | static bool classof(const Instruction *I) { | |||
1700 | return I->getOpcode() == Instruction::Call; | |||
1701 | } | |||
1702 | static bool classof(const Value *V) { | |||
1703 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
1704 | } | |||
1705 | ||||
1706 | /// Updates profile metadata by scaling it by \p S / \p T. | |||
1707 | void updateProfWeight(uint64_t S, uint64_t T); | |||
1708 | ||||
1709 | private: | |||
1710 | // Shadow Instruction::setInstructionSubclassData with a private forwarding | |||
1711 | // method so that subclasses cannot accidentally use it. | |||
1712 | template <typename Bitfield> | |||
1713 | void setSubclassData(typename Bitfield::Type Value) { | |||
1714 | Instruction::setSubclassData<Bitfield>(Value); | |||
1715 | } | |||
1716 | }; | |||
1717 | ||||
// Out-of-line constructor, insert-at-end form.  The operand count is
// arguments + bundle inputs + 1 (the called function); the operand list's
// start is computed back from op_end() by that same count.
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   BasicBlock *InsertAtEnd)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertAtEnd) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1728 | ||||
// Out-of-line constructor, insert-before form.  Same operand-list layout as
// the insert-at-end form: arguments + bundle inputs + 1 callee operand,
// located by walking back from op_end().
CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
                   ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
                   Instruction *InsertBefore)
    : CallBase(Ty->getReturnType(), Instruction::Call,
               OperandTraits<CallBase>::op_end(this) -
                   (Args.size() + CountBundleInputs(Bundles) + 1),
               unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
               InsertBefore) {
  init(Ty, Func, Args, Bundles, NameStr);
}
1739 | ||||
1740 | //===----------------------------------------------------------------------===// | |||
1741 | // SelectInst Class | |||
1742 | //===----------------------------------------------------------------------===// | |||
1743 | ||||
1744 | /// This class represents the LLVM 'select' instruction. | |||
1745 | /// | |||
1746 | class SelectInst : public Instruction { | |||
1747 | SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, | |||
1748 | Instruction *InsertBefore) | |||
1749 | : Instruction(S1->getType(), Instruction::Select, | |||
1750 | &Op<0>(), 3, InsertBefore) { | |||
1751 | init(C, S1, S2); | |||
1752 | setName(NameStr); | |||
1753 | } | |||
1754 | ||||
1755 | SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr, | |||
1756 | BasicBlock *InsertAtEnd) | |||
1757 | : Instruction(S1->getType(), Instruction::Select, | |||
1758 | &Op<0>(), 3, InsertAtEnd) { | |||
1759 | init(C, S1, S2); | |||
1760 | setName(NameStr); | |||
1761 | } | |||
1762 | ||||
1763 | void init(Value *C, Value *S1, Value *S2) { | |||
1764 | assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select")(static_cast <bool> (!areInvalidOperands(C, S1, S2) && "Invalid operands for select") ? void (0) : __assert_fail ("!areInvalidOperands(C, S1, S2) && \"Invalid operands for select\"" , "llvm/include/llvm/IR/Instructions.h", 1764, __extension__ __PRETTY_FUNCTION__ )); | |||
1765 | Op<0>() = C; | |||
1766 | Op<1>() = S1; | |||
1767 | Op<2>() = S2; | |||
1768 | } | |||
1769 | ||||
1770 | protected: | |||
1771 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
1772 | friend class Instruction; | |||
1773 | ||||
1774 | SelectInst *cloneImpl() const; | |||
1775 | ||||
1776 | public: | |||
1777 | static SelectInst *Create(Value *C, Value *S1, Value *S2, | |||
1778 | const Twine &NameStr = "", | |||
1779 | Instruction *InsertBefore = nullptr, | |||
1780 | Instruction *MDFrom = nullptr) { | |||
1781 | SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore); | |||
1782 | if (MDFrom) | |||
1783 | Sel->copyMetadata(*MDFrom); | |||
1784 | return Sel; | |||
1785 | } | |||
1786 | ||||
1787 | static SelectInst *Create(Value *C, Value *S1, Value *S2, | |||
1788 | const Twine &NameStr, | |||
1789 | BasicBlock *InsertAtEnd) { | |||
1790 | return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd); | |||
1791 | } | |||
1792 | ||||
1793 | const Value *getCondition() const { return Op<0>(); } | |||
1794 | const Value *getTrueValue() const { return Op<1>(); } | |||
1795 | const Value *getFalseValue() const { return Op<2>(); } | |||
1796 | Value *getCondition() { return Op<0>(); } | |||
1797 | Value *getTrueValue() { return Op<1>(); } | |||
1798 | Value *getFalseValue() { return Op<2>(); } | |||
1799 | ||||
1800 | void setCondition(Value *V) { Op<0>() = V; } | |||
1801 | void setTrueValue(Value *V) { Op<1>() = V; } | |||
1802 | void setFalseValue(Value *V) { Op<2>() = V; } | |||
1803 | ||||
1804 | /// Swap the true and false values of the select instruction. | |||
1805 | /// This doesn't swap prof metadata. | |||
1806 | void swapValues() { Op<1>().swap(Op<2>()); } | |||
1807 | ||||
1808 | /// Return a string if the specified operands are invalid | |||
1809 | /// for a select operation, otherwise return null. | |||
1810 | static const char *areInvalidOperands(Value *Cond, Value *True, Value *False); | |||
1811 | ||||
1812 | /// Transparently provide more efficient getOperand methods. | |||
1813 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
1814 | ||||
1815 | OtherOps getOpcode() const { | |||
1816 | return static_cast<OtherOps>(Instruction::getOpcode()); | |||
1817 | } | |||
1818 | ||||
1819 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
1820 | static bool classof(const Instruction *I) { | |||
1821 | return I->getOpcode() == Instruction::Select; | |||
1822 | } | |||
1823 | static bool classof(const Value *V) { | |||
1824 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
1825 | } | |||
1826 | }; | |||
1827 | ||||
// SelectInst always has exactly three operands (condition, true, false).
template <>
struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
};

// Macro-generated out-of-line operand accessor definitions (shown expanded).
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)SelectInst::op_iterator SelectInst::op_begin() { return OperandTraits <SelectInst>::op_begin(this); } SelectInst::const_op_iterator SelectInst::op_begin() const { return OperandTraits<SelectInst >::op_begin(const_cast<SelectInst*>(this)); } SelectInst ::op_iterator SelectInst::op_end() { return OperandTraits< SelectInst>::op_end(this); } SelectInst::const_op_iterator SelectInst::op_end() const { return OperandTraits<SelectInst >::op_end(const_cast<SelectInst*>(this)); } Value *SelectInst ::getOperand(unsigned i_nocapture) const { (static_cast <bool > (i_nocapture < OperandTraits<SelectInst>::operands (this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 1832, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<SelectInst >::op_begin(const_cast<SelectInst*>(this))[i_nocapture ].get()); } void SelectInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<SelectInst>::operands(this) && "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<SelectInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 1832, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<SelectInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned SelectInst::getNumOperands() const { return OperandTraits<SelectInst>::operands(this); } template <int Idx_nocapture> Use &SelectInst::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &SelectInst::Op() const { return this->OpFrom<Idx_nocapture>(this); }
1833 | ||||
1834 | //===----------------------------------------------------------------------===// | |||
1835 | // VAArgInst Class | |||
1836 | //===----------------------------------------------------------------------===// | |||
1837 | ||||
1838 | /// This class represents the va_arg llvm instruction, which returns | |||
1839 | /// an argument of the specified type given a va_list and increments that list | |||
1840 | /// | |||
1841 | class VAArgInst : public UnaryInstruction { | |||
1842 | protected: | |||
1843 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
1844 | friend class Instruction; | |||
1845 | ||||
1846 | VAArgInst *cloneImpl() const; | |||
1847 | ||||
1848 | public: | |||
1849 | VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "", | |||
1850 | Instruction *InsertBefore = nullptr) | |||
1851 | : UnaryInstruction(Ty, VAArg, List, InsertBefore) { | |||
1852 | setName(NameStr); | |||
1853 | } | |||
1854 | ||||
1855 | VAArgInst(Value *List, Type *Ty, const Twine &NameStr, | |||
1856 | BasicBlock *InsertAtEnd) | |||
1857 | : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) { | |||
1858 | setName(NameStr); | |||
1859 | } | |||
1860 | ||||
1861 | Value *getPointerOperand() { return getOperand(0); } | |||
1862 | const Value *getPointerOperand() const { return getOperand(0); } | |||
1863 | static unsigned getPointerOperandIndex() { return 0U; } | |||
1864 | ||||
1865 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
1866 | static bool classof(const Instruction *I) { | |||
1867 | return I->getOpcode() == VAArg; | |||
1868 | } | |||
1869 | static bool classof(const Value *V) { | |||
1870 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
1871 | } | |||
1872 | }; | |||
1873 | ||||
1874 | //===----------------------------------------------------------------------===// | |||
1875 | // ExtractElementInst Class | |||
1876 | //===----------------------------------------------------------------------===// | |||
1877 | ||||
1878 | /// This instruction extracts a single (scalar) | |||
1879 | /// element from a VectorType value | |||
1880 | /// | |||
1881 | class ExtractElementInst : public Instruction { | |||
1882 | ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "", | |||
1883 | Instruction *InsertBefore = nullptr); | |||
1884 | ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr, | |||
1885 | BasicBlock *InsertAtEnd); | |||
1886 | ||||
1887 | protected: | |||
1888 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
1889 | friend class Instruction; | |||
1890 | ||||
1891 | ExtractElementInst *cloneImpl() const; | |||
1892 | ||||
1893 | public: | |||
1894 | static ExtractElementInst *Create(Value *Vec, Value *Idx, | |||
1895 | const Twine &NameStr = "", | |||
1896 | Instruction *InsertBefore = nullptr) { | |||
1897 | return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore); | |||
1898 | } | |||
1899 | ||||
1900 | static ExtractElementInst *Create(Value *Vec, Value *Idx, | |||
1901 | const Twine &NameStr, | |||
1902 | BasicBlock *InsertAtEnd) { | |||
1903 | return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd); | |||
1904 | } | |||
1905 | ||||
1906 | /// Return true if an extractelement instruction can be | |||
1907 | /// formed with the specified operands. | |||
1908 | static bool isValidOperands(const Value *Vec, const Value *Idx); | |||
1909 | ||||
1910 | Value *getVectorOperand() { return Op<0>(); } | |||
1911 | Value *getIndexOperand() { return Op<1>(); } | |||
1912 | const Value *getVectorOperand() const { return Op<0>(); } | |||
1913 | const Value *getIndexOperand() const { return Op<1>(); } | |||
1914 | ||||
1915 | VectorType *getVectorOperandType() const { | |||
1916 | return cast<VectorType>(getVectorOperand()->getType()); | |||
1917 | } | |||
1918 | ||||
1919 | /// Transparently provide more efficient getOperand methods. | |||
1920 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
1921 | ||||
1922 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
1923 | static bool classof(const Instruction *I) { | |||
1924 | return I->getOpcode() == Instruction::ExtractElement; | |||
1925 | } | |||
1926 | static bool classof(const Value *V) { | |||
1927 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
1928 | } | |||
1929 | }; | |||
1930 | ||||
// ExtractElementInst always has exactly two operands (vector, index).
template <>
struct OperandTraits<ExtractElementInst> :
  public FixedNumOperandTraits<ExtractElementInst, 2> {
};

// Macro-generated out-of-line operand accessor definitions (shown expanded).
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)ExtractElementInst::op_iterator ExtractElementInst::op_begin( ) { return OperandTraits<ExtractElementInst>::op_begin( this); } ExtractElementInst::const_op_iterator ExtractElementInst ::op_begin() const { return OperandTraits<ExtractElementInst >::op_begin(const_cast<ExtractElementInst*>(this)); } ExtractElementInst::op_iterator ExtractElementInst::op_end() { return OperandTraits<ExtractElementInst>::op_end(this ); } ExtractElementInst::const_op_iterator ExtractElementInst ::op_end() const { return OperandTraits<ExtractElementInst >::op_end(const_cast<ExtractElementInst*>(this)); } Value *ExtractElementInst::getOperand(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture < OperandTraits< ExtractElementInst>::operands(this) && "getOperand() out of range!" ) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 1936, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<ExtractElementInst >::op_begin(const_cast<ExtractElementInst*>(this))[i_nocapture ].get()); } void ExtractElementInst::setOperand(unsigned i_nocapture , Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && "setOperand() out of range!") ? 
void (0) : __assert_fail ("i_nocapture < OperandTraits<ExtractElementInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 1936, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<ExtractElementInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned ExtractElementInst::getNumOperands () const { return OperandTraits<ExtractElementInst>::operands (this); } template <int Idx_nocapture> Use &ExtractElementInst ::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &ExtractElementInst ::Op() const { return this->OpFrom<Idx_nocapture>(this ); }
1937 | ||||
1938 | //===----------------------------------------------------------------------===// | |||
1939 | // InsertElementInst Class | |||
1940 | //===----------------------------------------------------------------------===// | |||
1941 | ||||
1942 | /// This instruction inserts a single (scalar) | |||
1943 | /// element into a VectorType value | |||
1944 | /// | |||
1945 | class InsertElementInst : public Instruction { | |||
1946 | InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, | |||
1947 | const Twine &NameStr = "", | |||
1948 | Instruction *InsertBefore = nullptr); | |||
1949 | InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr, | |||
1950 | BasicBlock *InsertAtEnd); | |||
1951 | ||||
1952 | protected: | |||
1953 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
1954 | friend class Instruction; | |||
1955 | ||||
1956 | InsertElementInst *cloneImpl() const; | |||
1957 | ||||
1958 | public: | |||
1959 | static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, | |||
1960 | const Twine &NameStr = "", | |||
1961 | Instruction *InsertBefore = nullptr) { | |||
1962 | return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore); | |||
1963 | } | |||
1964 | ||||
1965 | static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx, | |||
1966 | const Twine &NameStr, | |||
1967 | BasicBlock *InsertAtEnd) { | |||
1968 | return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd); | |||
1969 | } | |||
1970 | ||||
1971 | /// Return true if an insertelement instruction can be | |||
1972 | /// formed with the specified operands. | |||
1973 | static bool isValidOperands(const Value *Vec, const Value *NewElt, | |||
1974 | const Value *Idx); | |||
1975 | ||||
1976 | /// Overload to return most specific vector type. | |||
1977 | /// | |||
1978 | VectorType *getType() const { | |||
1979 | return cast<VectorType>(Instruction::getType()); | |||
1980 | } | |||
1981 | ||||
1982 | /// Transparently provide more efficient getOperand methods. | |||
1983 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
1984 | ||||
1985 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
1986 | static bool classof(const Instruction *I) { | |||
1987 | return I->getOpcode() == Instruction::InsertElement; | |||
1988 | } | |||
1989 | static bool classof(const Value *V) { | |||
1990 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
1991 | } | |||
1992 | }; | |||
1993 | ||||
// InsertElementInst always has exactly three operands (vector, element, index).
template <>
struct OperandTraits<InsertElementInst> :
  public FixedNumOperandTraits<InsertElementInst, 3> {
};

// Macro-generated out-of-line operand accessor definitions (shown expanded).
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)InsertElementInst::op_iterator InsertElementInst::op_begin() { return OperandTraits<InsertElementInst>::op_begin(this ); } InsertElementInst::const_op_iterator InsertElementInst:: op_begin() const { return OperandTraits<InsertElementInst> ::op_begin(const_cast<InsertElementInst*>(this)); } InsertElementInst ::op_iterator InsertElementInst::op_end() { return OperandTraits <InsertElementInst>::op_end(this); } InsertElementInst:: const_op_iterator InsertElementInst::op_end() const { return OperandTraits <InsertElementInst>::op_end(const_cast<InsertElementInst *>(this)); } Value *InsertElementInst::getOperand(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture < OperandTraits<InsertElementInst>::operands(this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 1999, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<InsertElementInst >::op_begin(const_cast<InsertElementInst*>(this))[i_nocapture ].get()); } void InsertElementInst::setOperand(unsigned i_nocapture , Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<InsertElementInst>::operands(this) && "setOperand() out of range!") ? 
void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertElementInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 1999, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<InsertElementInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned InsertElementInst::getNumOperands () const { return OperandTraits<InsertElementInst>::operands (this); } template <int Idx_nocapture> Use &InsertElementInst ::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &InsertElementInst ::Op() const { return this->OpFrom<Idx_nocapture>(this ); }
2000 | ||||
2001 | //===----------------------------------------------------------------------===// | |||
2002 | // ShuffleVectorInst Class | |||
2003 | //===----------------------------------------------------------------------===// | |||
2004 | ||||
/// Sentinel shuffle-mask element: a lane whose mask value is -1 yields a
/// poison result element (see the ShuffleVectorInst class comment below).
constexpr int PoisonMaskElem = -1;
2006 | ||||
2007 | /// This instruction constructs a fixed permutation of two | |||
2008 | /// input vectors. | |||
2009 | /// | |||
2010 | /// For each element of the result vector, the shuffle mask selects an element | |||
2011 | /// from one of the input vectors to copy to the result. Non-negative elements | |||
2012 | /// in the mask represent an index into the concatenated pair of input vectors. | |||
2013 | /// PoisonMaskElem (-1) specifies that the result element is poison. | |||
2014 | /// | |||
2015 | /// For scalable vectors, all the elements of the mask must be 0 or -1. This | |||
2016 | /// requirement may be relaxed in the future. | |||
2017 | class ShuffleVectorInst : public Instruction { | |||
2018 | SmallVector<int, 4> ShuffleMask; | |||
2019 | Constant *ShuffleMaskForBitcode; | |||
2020 | ||||
2021 | protected: | |||
2022 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
2023 | friend class Instruction; | |||
2024 | ||||
2025 | ShuffleVectorInst *cloneImpl() const; | |||
2026 | ||||
2027 | public: | |||
2028 | ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "", | |||
2029 | Instruction *InsertBefore = nullptr); | |||
2030 | ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr, | |||
2031 | BasicBlock *InsertAtEnd); | |||
2032 | ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "", | |||
2033 | Instruction *InsertBefore = nullptr); | |||
2034 | ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr, | |||
2035 | BasicBlock *InsertAtEnd); | |||
2036 | ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, | |||
2037 | const Twine &NameStr = "", | |||
2038 | Instruction *InsertBefor = nullptr); | |||
2039 | ShuffleVectorInst(Value *V1, Value *V2, Value *Mask, | |||
2040 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
2041 | ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, | |||
2042 | const Twine &NameStr = "", | |||
2043 | Instruction *InsertBefor = nullptr); | |||
2044 | ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask, | |||
2045 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
2046 | ||||
2047 | void *operator new(size_t S) { return User::operator new(S, 2); } | |||
2048 | void operator delete(void *Ptr) { return User::operator delete(Ptr); } | |||
2049 | ||||
2050 | /// Swap the operands and adjust the mask to preserve the semantics | |||
2051 | /// of the instruction. | |||
2052 | void commute(); | |||
2053 | ||||
2054 | /// Return true if a shufflevector instruction can be | |||
2055 | /// formed with the specified operands. | |||
2056 | static bool isValidOperands(const Value *V1, const Value *V2, | |||
2057 | const Value *Mask); | |||
2058 | static bool isValidOperands(const Value *V1, const Value *V2, | |||
2059 | ArrayRef<int> Mask); | |||
2060 | ||||
2061 | /// Overload to return most specific vector type. | |||
2062 | /// | |||
2063 | VectorType *getType() const { | |||
2064 | return cast<VectorType>(Instruction::getType()); | |||
2065 | } | |||
2066 | ||||
2067 | /// Transparently provide more efficient getOperand methods. | |||
2068 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
2069 | ||||
2070 | /// Return the shuffle mask value of this instruction for the given element | |||
2071 | /// index. Return PoisonMaskElem if the element is undef. | |||
2072 | int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; } | |||
2073 | ||||
2074 | /// Convert the input shuffle mask operand to a vector of integers. Undefined | |||
2075 | /// elements of the mask are returned as PoisonMaskElem. | |||
2076 | static void getShuffleMask(const Constant *Mask, | |||
2077 | SmallVectorImpl<int> &Result); | |||
2078 | ||||
2079 | /// Return the mask for this instruction as a vector of integers. Undefined | |||
2080 | /// elements of the mask are returned as PoisonMaskElem. | |||
2081 | void getShuffleMask(SmallVectorImpl<int> &Result) const { | |||
2082 | Result.assign(ShuffleMask.begin(), ShuffleMask.end()); | |||
2083 | } | |||
2084 | ||||
2085 | /// Return the mask for this instruction, for use in bitcode. | |||
2086 | /// | |||
2087 | /// TODO: This is temporary until we decide a new bitcode encoding for | |||
2088 | /// shufflevector. | |||
2089 | Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; } | |||
2090 | ||||
2091 | static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask, | |||
2092 | Type *ResultTy); | |||
2093 | ||||
2094 | void setShuffleMask(ArrayRef<int> Mask); | |||
2095 | ||||
2096 | ArrayRef<int> getShuffleMask() const { return ShuffleMask; } | |||
2097 | ||||
2098 | /// Return true if this shuffle returns a vector with a different number of | |||
2099 | /// elements than its source vectors. | |||
2100 | /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3> | |||
2101 | /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5> | |||
2102 | bool changesLength() const { | |||
2103 | unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) | |||
2104 | ->getElementCount() | |||
2105 | .getKnownMinValue(); | |||
2106 | unsigned NumMaskElts = ShuffleMask.size(); | |||
2107 | return NumSourceElts != NumMaskElts; | |||
2108 | } | |||
2109 | ||||
2110 | /// Return true if this shuffle returns a vector with a greater number of | |||
2111 | /// elements than its source vectors. | |||
2112 | /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3> | |||
2113 | bool increasesLength() const { | |||
2114 | unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType()) | |||
2115 | ->getElementCount() | |||
2116 | .getKnownMinValue(); | |||
2117 | unsigned NumMaskElts = ShuffleMask.size(); | |||
2118 | return NumSourceElts < NumMaskElts; | |||
2119 | } | |||
2120 | ||||
2121 | /// Return true if this shuffle mask chooses elements from exactly one source | |||
2122 | /// vector. | |||
2123 | /// Example: <7,5,undef,7> | |||
2124 | /// This assumes that vector operands are the same length as the mask. | |||
2125 | static bool isSingleSourceMask(ArrayRef<int> Mask); | |||
2126 | static bool isSingleSourceMask(const Constant *Mask) { | |||
2127 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2127, __extension__ __PRETTY_FUNCTION__ )); | |||
2128 | SmallVector<int, 16> MaskAsInts; | |||
2129 | getShuffleMask(Mask, MaskAsInts); | |||
2130 | return isSingleSourceMask(MaskAsInts); | |||
2131 | } | |||
2132 | ||||
2133 | /// Return true if this shuffle chooses elements from exactly one source | |||
2134 | /// vector without changing the length of that vector. | |||
2135 | /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3> | |||
2136 | /// TODO: Optionally allow length-changing shuffles. | |||
2137 | bool isSingleSource() const { | |||
2138 | return !changesLength() && isSingleSourceMask(ShuffleMask); | |||
2139 | } | |||
2140 | ||||
2141 | /// Return true if this shuffle mask chooses elements from exactly one source | |||
2142 | /// vector without lane crossings. A shuffle using this mask is not | |||
2143 | /// necessarily a no-op because it may change the number of elements from its | |||
2144 | /// input vectors or it may provide demanded bits knowledge via undef lanes. | |||
2145 | /// Example: <undef,undef,2,3> | |||
2146 | static bool isIdentityMask(ArrayRef<int> Mask); | |||
2147 | static bool isIdentityMask(const Constant *Mask) { | |||
2148 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2148, __extension__ __PRETTY_FUNCTION__ )); | |||
2149 | ||||
2150 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2151 | // case. | |||
2152 | if (isa<ScalableVectorType>(Mask->getType())) | |||
2153 | return false; | |||
2154 | ||||
2155 | SmallVector<int, 16> MaskAsInts; | |||
2156 | getShuffleMask(Mask, MaskAsInts); | |||
2157 | return isIdentityMask(MaskAsInts); | |||
2158 | } | |||
2159 | ||||
2160 | /// Return true if this shuffle chooses elements from exactly one source | |||
2161 | /// vector without lane crossings and does not change the number of elements | |||
2162 | /// from its input vectors. | |||
2163 | /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef> | |||
2164 | bool isIdentity() const { | |||
2165 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2166 | // case. | |||
2167 | if (isa<ScalableVectorType>(getType())) | |||
2168 | return false; | |||
2169 | ||||
2170 | return !changesLength() && isIdentityMask(ShuffleMask); | |||
2171 | } | |||
2172 | ||||
2173 | /// Return true if this shuffle lengthens exactly one source vector with | |||
2174 | /// undefs in the high elements. | |||
2175 | bool isIdentityWithPadding() const; | |||
2176 | ||||
2177 | /// Return true if this shuffle extracts the first N elements of exactly one | |||
2178 | /// source vector. | |||
2179 | bool isIdentityWithExtract() const; | |||
2180 | ||||
2181 | /// Return true if this shuffle concatenates its 2 source vectors. This | |||
2182 | /// returns false if either input is undefined. In that case, the shuffle is | |||
2183 | /// is better classified as an identity with padding operation. | |||
2184 | bool isConcat() const; | |||
2185 | ||||
2186 | /// Return true if this shuffle mask chooses elements from its source vectors | |||
2187 | /// without lane crossings. A shuffle using this mask would be | |||
2188 | /// equivalent to a vector select with a constant condition operand. | |||
2189 | /// Example: <4,1,6,undef> | |||
2190 | /// This returns false if the mask does not choose from both input vectors. | |||
2191 | /// In that case, the shuffle is better classified as an identity shuffle. | |||
2192 | /// This assumes that vector operands are the same length as the mask | |||
2193 | /// (a length-changing shuffle can never be equivalent to a vector select). | |||
2194 | static bool isSelectMask(ArrayRef<int> Mask); | |||
2195 | static bool isSelectMask(const Constant *Mask) { | |||
2196 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2196, __extension__ __PRETTY_FUNCTION__ )); | |||
2197 | SmallVector<int, 16> MaskAsInts; | |||
2198 | getShuffleMask(Mask, MaskAsInts); | |||
2199 | return isSelectMask(MaskAsInts); | |||
2200 | } | |||
2201 | ||||
2202 | /// Return true if this shuffle chooses elements from its source vectors | |||
2203 | /// without lane crossings and all operands have the same number of elements. | |||
2204 | /// In other words, this shuffle is equivalent to a vector select with a | |||
2205 | /// constant condition operand. | |||
2206 | /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3> | |||
2207 | /// This returns false if the mask does not choose from both input vectors. | |||
2208 | /// In that case, the shuffle is better classified as an identity shuffle. | |||
2209 | /// TODO: Optionally allow length-changing shuffles. | |||
2210 | bool isSelect() const { | |||
2211 | return !changesLength() && isSelectMask(ShuffleMask); | |||
2212 | } | |||
2213 | ||||
2214 | /// Return true if this shuffle mask swaps the order of elements from exactly | |||
2215 | /// one source vector. | |||
2216 | /// Example: <7,6,undef,4> | |||
2217 | /// This assumes that vector operands are the same length as the mask. | |||
2218 | static bool isReverseMask(ArrayRef<int> Mask); | |||
2219 | static bool isReverseMask(const Constant *Mask) { | |||
2220 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2220, __extension__ __PRETTY_FUNCTION__ )); | |||
2221 | SmallVector<int, 16> MaskAsInts; | |||
2222 | getShuffleMask(Mask, MaskAsInts); | |||
2223 | return isReverseMask(MaskAsInts); | |||
2224 | } | |||
2225 | ||||
2226 | /// Return true if this shuffle swaps the order of elements from exactly | |||
2227 | /// one source vector. | |||
2228 | /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef> | |||
2229 | /// TODO: Optionally allow length-changing shuffles. | |||
2230 | bool isReverse() const { | |||
2231 | return !changesLength() && isReverseMask(ShuffleMask); | |||
2232 | } | |||
2233 | ||||
2234 | /// Return true if this shuffle mask chooses all elements with the same value | |||
2235 | /// as the first element of exactly one source vector. | |||
2236 | /// Example: <4,undef,undef,4> | |||
2237 | /// This assumes that vector operands are the same length as the mask. | |||
2238 | static bool isZeroEltSplatMask(ArrayRef<int> Mask); | |||
2239 | static bool isZeroEltSplatMask(const Constant *Mask) { | |||
2240 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2240, __extension__ __PRETTY_FUNCTION__ )); | |||
2241 | SmallVector<int, 16> MaskAsInts; | |||
2242 | getShuffleMask(Mask, MaskAsInts); | |||
2243 | return isZeroEltSplatMask(MaskAsInts); | |||
2244 | } | |||
2245 | ||||
2246 | /// Return true if all elements of this shuffle are the same value as the | |||
2247 | /// first element of exactly one source vector without changing the length | |||
2248 | /// of that vector. | |||
2249 | /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0> | |||
2250 | /// TODO: Optionally allow length-changing shuffles. | |||
2251 | /// TODO: Optionally allow splats from other elements. | |||
2252 | bool isZeroEltSplat() const { | |||
2253 | return !changesLength() && isZeroEltSplatMask(ShuffleMask); | |||
2254 | } | |||
2255 | ||||
2256 | /// Return true if this shuffle mask is a transpose mask. | |||
2257 | /// Transpose vector masks transpose a 2xn matrix. They read corresponding | |||
2258 | /// even- or odd-numbered vector elements from two n-dimensional source | |||
2259 | /// vectors and write each result into consecutive elements of an | |||
2260 | /// n-dimensional destination vector. Two shuffles are necessary to complete | |||
2261 | /// the transpose, one for the even elements and another for the odd elements. | |||
2262 | /// This description closely follows how the TRN1 and TRN2 AArch64 | |||
2263 | /// instructions operate. | |||
2264 | /// | |||
2265 | /// For example, a simple 2x2 matrix can be transposed with: | |||
2266 | /// | |||
2267 | /// ; Original matrix | |||
2268 | /// m0 = < a, b > | |||
2269 | /// m1 = < c, d > | |||
2270 | /// | |||
2271 | /// ; Transposed matrix | |||
2272 | /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 > | |||
2273 | /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 > | |||
2274 | /// | |||
2275 | /// For matrices having greater than n columns, the resulting nx2 transposed | |||
2276 | /// matrix is stored in two result vectors such that one vector contains | |||
2277 | /// interleaved elements from all the even-numbered rows and the other vector | |||
2278 | /// contains interleaved elements from all the odd-numbered rows. For example, | |||
2279 | /// a 2x4 matrix can be transposed with: | |||
2280 | /// | |||
2281 | /// ; Original matrix | |||
2282 | /// m0 = < a, b, c, d > | |||
2283 | /// m1 = < e, f, g, h > | |||
2284 | /// | |||
2285 | /// ; Transposed matrix | |||
2286 | /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 > | |||
2287 | /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 > | |||
2288 | static bool isTransposeMask(ArrayRef<int> Mask); | |||
2289 | static bool isTransposeMask(const Constant *Mask) { | |||
2290 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2290, __extension__ __PRETTY_FUNCTION__ )); | |||
2291 | SmallVector<int, 16> MaskAsInts; | |||
2292 | getShuffleMask(Mask, MaskAsInts); | |||
2293 | return isTransposeMask(MaskAsInts); | |||
2294 | } | |||
2295 | ||||
2296 | /// Return true if this shuffle transposes the elements of its inputs without | |||
2297 | /// changing the length of the vectors. This operation may also be known as a | |||
2298 | /// merge or interleave. See the description for isTransposeMask() for the | |||
2299 | /// exact specification. | |||
2300 | /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6> | |||
2301 | bool isTranspose() const { | |||
2302 | return !changesLength() && isTransposeMask(ShuffleMask); | |||
2303 | } | |||
2304 | ||||
2305 | /// Return true if this shuffle mask is a splice mask, concatenating the two | |||
2306 | /// inputs together and then extracts an original width vector starting from | |||
2307 | /// the splice index. | |||
2308 | /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4> | |||
2309 | static bool isSpliceMask(ArrayRef<int> Mask, int &Index); | |||
2310 | static bool isSpliceMask(const Constant *Mask, int &Index) { | |||
2311 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2311, __extension__ __PRETTY_FUNCTION__ )); | |||
2312 | SmallVector<int, 16> MaskAsInts; | |||
2313 | getShuffleMask(Mask, MaskAsInts); | |||
2314 | return isSpliceMask(MaskAsInts, Index); | |||
2315 | } | |||
2316 | ||||
2317 | /// Return true if this shuffle splices two inputs without changing the length | |||
2318 | /// of the vectors. This operation concatenates the two inputs together and | |||
2319 | /// then extracts an original width vector starting from the splice index. | |||
2320 | /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4> | |||
2321 | bool isSplice(int &Index) const { | |||
2322 | return !changesLength() && isSpliceMask(ShuffleMask, Index); | |||
2323 | } | |||
2324 | ||||
2325 | /// Return true if this shuffle mask is an extract subvector mask. | |||
2326 | /// A valid extract subvector mask returns a smaller vector from a single | |||
2327 | /// source operand. The base extraction index is returned as well. | |||
2328 | static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, | |||
2329 | int &Index); | |||
2330 | static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts, | |||
2331 | int &Index) { | |||
2332 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2332, __extension__ __PRETTY_FUNCTION__ )); | |||
2333 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2334 | // case. | |||
2335 | if (isa<ScalableVectorType>(Mask->getType())) | |||
2336 | return false; | |||
2337 | SmallVector<int, 16> MaskAsInts; | |||
2338 | getShuffleMask(Mask, MaskAsInts); | |||
2339 | return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index); | |||
2340 | } | |||
2341 | ||||
2342 | /// Return true if this shuffle mask is an extract subvector mask. | |||
2343 | bool isExtractSubvectorMask(int &Index) const { | |||
2344 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2345 | // case. | |||
2346 | if (isa<ScalableVectorType>(getType())) | |||
2347 | return false; | |||
2348 | ||||
2349 | int NumSrcElts = | |||
2350 | cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); | |||
2351 | return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index); | |||
2352 | } | |||
2353 | ||||
2354 | /// Return true if this shuffle mask is an insert subvector mask. | |||
2355 | /// A valid insert subvector mask inserts the lowest elements of a second | |||
2356 | /// source operand into an in-place first source operand operand. | |||
2357 | /// Both the sub vector width and the insertion index is returned. | |||
2358 | static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts, | |||
2359 | int &NumSubElts, int &Index); | |||
2360 | static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts, | |||
2361 | int &NumSubElts, int &Index) { | |||
2362 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2362, __extension__ __PRETTY_FUNCTION__ )); | |||
2363 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2364 | // case. | |||
2365 | if (isa<ScalableVectorType>(Mask->getType())) | |||
2366 | return false; | |||
2367 | SmallVector<int, 16> MaskAsInts; | |||
2368 | getShuffleMask(Mask, MaskAsInts); | |||
2369 | return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index); | |||
2370 | } | |||
2371 | ||||
2372 | /// Return true if this shuffle mask is an insert subvector mask. | |||
2373 | bool isInsertSubvectorMask(int &NumSubElts, int &Index) const { | |||
2374 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2375 | // case. | |||
2376 | if (isa<ScalableVectorType>(getType())) | |||
2377 | return false; | |||
2378 | ||||
2379 | int NumSrcElts = | |||
2380 | cast<FixedVectorType>(Op<0>()->getType())->getNumElements(); | |||
2381 | return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index); | |||
2382 | } | |||
2383 | ||||
2384 | /// Return true if this shuffle mask replicates each of the \p VF elements | |||
2385 | /// in a vector \p ReplicationFactor times. | |||
2386 | /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is: | |||
2387 | /// <0,0,0,1,1,1,2,2,2,3,3,3> | |||
2388 | static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor, | |||
2389 | int &VF); | |||
2390 | static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor, | |||
2391 | int &VF) { | |||
2392 | assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.")(static_cast <bool> (Mask->getType()->isVectorTy( ) && "Shuffle needs vector constant.") ? void (0) : __assert_fail ("Mask->getType()->isVectorTy() && \"Shuffle needs vector constant.\"" , "llvm/include/llvm/IR/Instructions.h", 2392, __extension__ __PRETTY_FUNCTION__ )); | |||
2393 | // Not possible to express a shuffle mask for a scalable vector for this | |||
2394 | // case. | |||
2395 | if (isa<ScalableVectorType>(Mask->getType())) | |||
2396 | return false; | |||
2397 | SmallVector<int, 16> MaskAsInts; | |||
2398 | getShuffleMask(Mask, MaskAsInts); | |||
2399 | return isReplicationMask(MaskAsInts, ReplicationFactor, VF); | |||
2400 | } | |||
2401 | ||||
2402 | /// Return true if this shuffle mask is a replication mask. | |||
2403 | bool isReplicationMask(int &ReplicationFactor, int &VF) const; | |||
2404 | ||||
2405 | /// Return true if this shuffle mask represents "clustered" mask of size VF, | |||
2406 | /// i.e. each index between [0..VF) is used exactly once in each submask of | |||
2407 | /// size VF. | |||
2408 | /// For example, the mask for \p VF=4 is: | |||
2409 | /// 0, 1, 2, 3, 3, 2, 0, 1 - "clustered", because each submask of size 4 | |||
2410 | /// (0,1,2,3 and 3,2,0,1) uses indices [0..VF) exactly one time. | |||
2411 | /// 0, 1, 2, 3, 3, 3, 1, 0 - not "clustered", because | |||
2412 | /// element 3 is used twice in the second submask | |||
2413 | /// (3,3,1,0) and index 2 is not used at all. | |||
2414 | static bool isOneUseSingleSourceMask(ArrayRef<int> Mask, int VF); | |||
2415 | ||||
2416 | /// Return true if this shuffle mask is a one-use-single-source("clustered") | |||
2417 | /// mask. | |||
2418 | bool isOneUseSingleSourceMask(int VF) const; | |||
2419 | ||||
2420 | /// Change values in a shuffle permute mask assuming the two vector operands | |||
2421 | /// of length InVecNumElts have swapped position. | |||
2422 | static void commuteShuffleMask(MutableArrayRef<int> Mask, | |||
2423 | unsigned InVecNumElts) { | |||
2424 | for (int &Idx : Mask) { | |||
2425 | if (Idx == -1) | |||
2426 | continue; | |||
2427 | Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts; | |||
2428 | assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&(static_cast <bool> (Idx >= 0 && Idx < (int )InVecNumElts * 2 && "shufflevector mask index out of range" ) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\"" , "llvm/include/llvm/IR/Instructions.h", 2429, __extension__ __PRETTY_FUNCTION__ )) | |||
2429 | "shufflevector mask index out of range")(static_cast <bool> (Idx >= 0 && Idx < (int )InVecNumElts * 2 && "shufflevector mask index out of range" ) ? void (0) : __assert_fail ("Idx >= 0 && Idx < (int)InVecNumElts * 2 && \"shufflevector mask index out of range\"" , "llvm/include/llvm/IR/Instructions.h", 2429, __extension__ __PRETTY_FUNCTION__ )); | |||
2430 | } | |||
2431 | } | |||
2432 | ||||
2433 | /// Return if this shuffle interleaves its two input vectors together. | |||
2434 | bool isInterleave(unsigned Factor); | |||
2435 | ||||
2436 | /// Return true if the mask interleaves one or more input vectors together. | |||
2437 | /// | |||
2438 | /// I.e. <0, LaneLen, ... , LaneLen*(Factor - 1), 1, LaneLen + 1, ...> | |||
2439 | /// E.g. For a Factor of 2 (LaneLen=4): | |||
2440 | /// <0, 4, 1, 5, 2, 6, 3, 7> | |||
2441 | /// E.g. For a Factor of 3 (LaneLen=4): | |||
2442 | /// <4, 0, 9, 5, 1, 10, 6, 2, 11, 7, 3, 12> | |||
2443 | /// E.g. For a Factor of 4 (LaneLen=2): | |||
2444 | /// <0, 2, 6, 4, 1, 3, 7, 5> | |||
2445 | /// | |||
2446 | /// NumInputElts is the total number of elements in the input vectors. | |||
2447 | /// | |||
2448 | /// StartIndexes are the first indexes of each vector being interleaved, | |||
2449 | /// substituting any indexes that were undef | |||
2450 | /// E.g. <4, -1, 2, 5, 1, 3> (Factor=3): StartIndexes=<4, 0, 2> | |||
2451 | /// | |||
2452 | /// Note that this does not check if the input vectors are consecutive: | |||
2453 | /// It will return true for masks such as | |||
2454 | /// <0, 4, 6, 1, 5, 7> (Factor=3, LaneLen=2) | |||
2455 | static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor, | |||
2456 | unsigned NumInputElts, | |||
2457 | SmallVectorImpl<unsigned> &StartIndexes); | |||
2458 | static bool isInterleaveMask(ArrayRef<int> Mask, unsigned Factor, | |||
2459 | unsigned NumInputElts) { | |||
2460 | SmallVector<unsigned, 8> StartIndexes; | |||
2461 | return isInterleaveMask(Mask, Factor, NumInputElts, StartIndexes); | |||
2462 | } | |||
2463 | ||||
2464 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
2465 | static bool classof(const Instruction *I) { | |||
2466 | return I->getOpcode() == Instruction::ShuffleVector; | |||
2467 | } | |||
2468 | static bool classof(const Value *V) { | |||
2469 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
2470 | } | |||
2471 | }; | |||
2472 | ||||
2473 | template <> | |||
2474 | struct OperandTraits<ShuffleVectorInst> | |||
2475 | : public FixedNumOperandTraits<ShuffleVectorInst, 2> {}; | |||
2476 | ||||
// Instantiates the out-of-line bodies of the transparent operand accessors
// declared inside ShuffleVectorInst (getOperand/setOperand, op_begin/op_end,
// getNumOperands, and the Op<Idx>() helpers), all routed through
// OperandTraits<ShuffleVectorInst>.
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
2478 | ||||
2479 | //===----------------------------------------------------------------------===// | |||
2480 | // ExtractValueInst Class | |||
2481 | //===----------------------------------------------------------------------===// | |||
2482 | ||||
2483 | /// This instruction extracts a struct member or array | |||
2484 | /// element value from an aggregate value. | |||
2485 | /// | |||
2486 | class ExtractValueInst : public UnaryInstruction { | |||
2487 | SmallVector<unsigned, 4> Indices; | |||
2488 | ||||
2489 | ExtractValueInst(const ExtractValueInst &EVI); | |||
2490 | ||||
2491 | /// Constructors - Create a extractvalue instruction with a base aggregate | |||
2492 | /// value and a list of indices. The first ctor can optionally insert before | |||
2493 | /// an existing instruction, the second appends the new instruction to the | |||
2494 | /// specified BasicBlock. | |||
2495 | inline ExtractValueInst(Value *Agg, | |||
2496 | ArrayRef<unsigned> Idxs, | |||
2497 | const Twine &NameStr, | |||
2498 | Instruction *InsertBefore); | |||
2499 | inline ExtractValueInst(Value *Agg, | |||
2500 | ArrayRef<unsigned> Idxs, | |||
2501 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
2502 | ||||
2503 | void init(ArrayRef<unsigned> Idxs, const Twine &NameStr); | |||
2504 | ||||
2505 | protected: | |||
2506 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
2507 | friend class Instruction; | |||
2508 | ||||
2509 | ExtractValueInst *cloneImpl() const; | |||
2510 | ||||
2511 | public: | |||
2512 | static ExtractValueInst *Create(Value *Agg, | |||
2513 | ArrayRef<unsigned> Idxs, | |||
2514 | const Twine &NameStr = "", | |||
2515 | Instruction *InsertBefore = nullptr) { | |||
2516 | return new | |||
2517 | ExtractValueInst(Agg, Idxs, NameStr, InsertBefore); | |||
2518 | } | |||
2519 | ||||
2520 | static ExtractValueInst *Create(Value *Agg, | |||
2521 | ArrayRef<unsigned> Idxs, | |||
2522 | const Twine &NameStr, | |||
2523 | BasicBlock *InsertAtEnd) { | |||
2524 | return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd); | |||
2525 | } | |||
2526 | ||||
2527 | /// Returns the type of the element that would be extracted | |||
2528 | /// with an extractvalue instruction with the specified parameters. | |||
2529 | /// | |||
2530 | /// Null is returned if the indices are invalid for the specified type. | |||
2531 | static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs); | |||
2532 | ||||
2533 | using idx_iterator = const unsigned*; | |||
2534 | ||||
2535 | inline idx_iterator idx_begin() const { return Indices.begin(); } | |||
2536 | inline idx_iterator idx_end() const { return Indices.end(); } | |||
2537 | inline iterator_range<idx_iterator> indices() const { | |||
2538 | return make_range(idx_begin(), idx_end()); | |||
2539 | } | |||
2540 | ||||
2541 | Value *getAggregateOperand() { | |||
2542 | return getOperand(0); | |||
2543 | } | |||
2544 | const Value *getAggregateOperand() const { | |||
2545 | return getOperand(0); | |||
2546 | } | |||
2547 | static unsigned getAggregateOperandIndex() { | |||
2548 | return 0U; // get index for modifying correct operand | |||
2549 | } | |||
2550 | ||||
2551 | ArrayRef<unsigned> getIndices() const { | |||
2552 | return Indices; | |||
2553 | } | |||
2554 | ||||
2555 | unsigned getNumIndices() const { | |||
2556 | return (unsigned)Indices.size(); | |||
2557 | } | |||
2558 | ||||
2559 | bool hasIndices() const { | |||
2560 | return true; | |||
2561 | } | |||
2562 | ||||
2563 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
2564 | static bool classof(const Instruction *I) { | |||
2565 | return I->getOpcode() == Instruction::ExtractValue; | |||
2566 | } | |||
2567 | static bool classof(const Value *V) { | |||
2568 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
2569 | } | |||
2570 | }; | |||
2571 | ||||
2572 | ExtractValueInst::ExtractValueInst(Value *Agg, | |||
2573 | ArrayRef<unsigned> Idxs, | |||
2574 | const Twine &NameStr, | |||
2575 | Instruction *InsertBefore) | |||
2576 | : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), | |||
2577 | ExtractValue, Agg, InsertBefore) { | |||
2578 | init(Idxs, NameStr); | |||
2579 | } | |||
2580 | ||||
2581 | ExtractValueInst::ExtractValueInst(Value *Agg, | |||
2582 | ArrayRef<unsigned> Idxs, | |||
2583 | const Twine &NameStr, | |||
2584 | BasicBlock *InsertAtEnd) | |||
2585 | : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)), | |||
2586 | ExtractValue, Agg, InsertAtEnd) { | |||
2587 | init(Idxs, NameStr); | |||
2588 | } | |||
2589 | ||||
2590 | //===----------------------------------------------------------------------===// | |||
2591 | // InsertValueInst Class | |||
2592 | //===----------------------------------------------------------------------===// | |||
2593 | ||||
2594 | /// This instruction inserts a struct field of array element | |||
2595 | /// value into an aggregate value. | |||
2596 | /// | |||
2597 | class InsertValueInst : public Instruction { | |||
2598 | SmallVector<unsigned, 4> Indices; | |||
2599 | ||||
2600 | InsertValueInst(const InsertValueInst &IVI); | |||
2601 | ||||
2602 | /// Constructors - Create a insertvalue instruction with a base aggregate | |||
2603 | /// value, a value to insert, and a list of indices. The first ctor can | |||
2604 | /// optionally insert before an existing instruction, the second appends | |||
2605 | /// the new instruction to the specified BasicBlock. | |||
2606 | inline InsertValueInst(Value *Agg, Value *Val, | |||
2607 | ArrayRef<unsigned> Idxs, | |||
2608 | const Twine &NameStr, | |||
2609 | Instruction *InsertBefore); | |||
2610 | inline InsertValueInst(Value *Agg, Value *Val, | |||
2611 | ArrayRef<unsigned> Idxs, | |||
2612 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
2613 | ||||
2614 | /// Constructors - These two constructors are convenience methods because one | |||
2615 | /// and two index insertvalue instructions are so common. | |||
2616 | InsertValueInst(Value *Agg, Value *Val, unsigned Idx, | |||
2617 | const Twine &NameStr = "", | |||
2618 | Instruction *InsertBefore = nullptr); | |||
2619 | InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr, | |||
2620 | BasicBlock *InsertAtEnd); | |||
2621 | ||||
2622 | void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, | |||
2623 | const Twine &NameStr); | |||
2624 | ||||
2625 | protected: | |||
2626 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
2627 | friend class Instruction; | |||
2628 | ||||
2629 | InsertValueInst *cloneImpl() const; | |||
2630 | ||||
2631 | public: | |||
2632 | // allocate space for exactly two operands | |||
2633 | void *operator new(size_t S) { return User::operator new(S, 2); } | |||
2634 | void operator delete(void *Ptr) { User::operator delete(Ptr); } | |||
2635 | ||||
2636 | static InsertValueInst *Create(Value *Agg, Value *Val, | |||
2637 | ArrayRef<unsigned> Idxs, | |||
2638 | const Twine &NameStr = "", | |||
2639 | Instruction *InsertBefore = nullptr) { | |||
2640 | return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertBefore); | |||
2641 | } | |||
2642 | ||||
2643 | static InsertValueInst *Create(Value *Agg, Value *Val, | |||
2644 | ArrayRef<unsigned> Idxs, | |||
2645 | const Twine &NameStr, | |||
2646 | BasicBlock *InsertAtEnd) { | |||
2647 | return new InsertValueInst(Agg, Val, Idxs, NameStr, InsertAtEnd); | |||
2648 | } | |||
2649 | ||||
2650 | /// Transparently provide more efficient getOperand methods. | |||
2651 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
2652 | ||||
2653 | using idx_iterator = const unsigned*; | |||
2654 | ||||
2655 | inline idx_iterator idx_begin() const { return Indices.begin(); } | |||
2656 | inline idx_iterator idx_end() const { return Indices.end(); } | |||
2657 | inline iterator_range<idx_iterator> indices() const { | |||
2658 | return make_range(idx_begin(), idx_end()); | |||
2659 | } | |||
2660 | ||||
2661 | Value *getAggregateOperand() { | |||
2662 | return getOperand(0); | |||
2663 | } | |||
2664 | const Value *getAggregateOperand() const { | |||
2665 | return getOperand(0); | |||
2666 | } | |||
2667 | static unsigned getAggregateOperandIndex() { | |||
2668 | return 0U; // get index for modifying correct operand | |||
2669 | } | |||
2670 | ||||
2671 | Value *getInsertedValueOperand() { | |||
2672 | return getOperand(1); | |||
2673 | } | |||
2674 | const Value *getInsertedValueOperand() const { | |||
2675 | return getOperand(1); | |||
2676 | } | |||
2677 | static unsigned getInsertedValueOperandIndex() { | |||
2678 | return 1U; // get index for modifying correct operand | |||
2679 | } | |||
2680 | ||||
2681 | ArrayRef<unsigned> getIndices() const { | |||
2682 | return Indices; | |||
2683 | } | |||
2684 | ||||
2685 | unsigned getNumIndices() const { | |||
2686 | return (unsigned)Indices.size(); | |||
2687 | } | |||
2688 | ||||
2689 | bool hasIndices() const { | |||
2690 | return true; | |||
2691 | } | |||
2692 | ||||
2693 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
2694 | static bool classof(const Instruction *I) { | |||
2695 | return I->getOpcode() == Instruction::InsertValue; | |||
2696 | } | |||
2697 | static bool classof(const Value *V) { | |||
2698 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
2699 | } | |||
2700 | }; | |||
2701 | ||||
2702 | template <> | |||
2703 | struct OperandTraits<InsertValueInst> : | |||
2704 | public FixedNumOperandTraits<InsertValueInst, 2> { | |||
2705 | }; | |||
2706 | ||||
2707 | InsertValueInst::InsertValueInst(Value *Agg, | |||
2708 | Value *Val, | |||
2709 | ArrayRef<unsigned> Idxs, | |||
2710 | const Twine &NameStr, | |||
2711 | Instruction *InsertBefore) | |||
2712 | : Instruction(Agg->getType(), InsertValue, | |||
2713 | OperandTraits<InsertValueInst>::op_begin(this), | |||
2714 | 2, InsertBefore) { | |||
2715 | init(Agg, Val, Idxs, NameStr); | |||
2716 | } | |||
2717 | ||||
2718 | InsertValueInst::InsertValueInst(Value *Agg, | |||
2719 | Value *Val, | |||
2720 | ArrayRef<unsigned> Idxs, | |||
2721 | const Twine &NameStr, | |||
2722 | BasicBlock *InsertAtEnd) | |||
2723 | : Instruction(Agg->getType(), InsertValue, | |||
2724 | OperandTraits<InsertValueInst>::op_begin(this), | |||
2725 | 2, InsertAtEnd) { | |||
2726 | init(Agg, Val, Idxs, NameStr); | |||
2727 | } | |||
2728 | ||||
2729 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertValueInst, Value)InsertValueInst::op_iterator InsertValueInst::op_begin() { return OperandTraits<InsertValueInst>::op_begin(this); } InsertValueInst ::const_op_iterator InsertValueInst::op_begin() const { return OperandTraits<InsertValueInst>::op_begin(const_cast< InsertValueInst*>(this)); } InsertValueInst::op_iterator InsertValueInst ::op_end() { return OperandTraits<InsertValueInst>::op_end (this); } InsertValueInst::const_op_iterator InsertValueInst:: op_end() const { return OperandTraits<InsertValueInst>:: op_end(const_cast<InsertValueInst*>(this)); } Value *InsertValueInst ::getOperand(unsigned i_nocapture) const { (static_cast <bool > (i_nocapture < OperandTraits<InsertValueInst>:: operands(this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 2729, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<InsertValueInst >::op_begin(const_cast<InsertValueInst*>(this))[i_nocapture ].get()); } void InsertValueInst::setOperand(unsigned i_nocapture , Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<InsertValueInst>::operands(this) && "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<InsertValueInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 2729, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<InsertValueInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned InsertValueInst::getNumOperands () const { return OperandTraits<InsertValueInst>::operands (this); } template <int Idx_nocapture> Use &InsertValueInst ::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &InsertValueInst ::Op() const { return this->OpFrom<Idx_nocapture>(this ); } | |||
2730 | ||||
2731 | //===----------------------------------------------------------------------===// | |||
2732 | // PHINode Class | |||
2733 | //===----------------------------------------------------------------------===// | |||
2734 | ||||
2735 | // PHINode - The PHINode class is used to represent the magical mystical PHI | |||
2736 | // node, that can not exist in nature, but can be synthesized in a computer | |||
2737 | // scientist's overactive imagination. | |||
2738 | // | |||
2739 | class PHINode : public Instruction { | |||
2740 | /// The number of operands actually allocated. NumOperands is | |||
2741 | /// the number actually in use. | |||
2742 | unsigned ReservedSpace; | |||
2743 | ||||
2744 | PHINode(const PHINode &PN); | |||
2745 | ||||
2746 | explicit PHINode(Type *Ty, unsigned NumReservedValues, | |||
2747 | const Twine &NameStr = "", | |||
2748 | Instruction *InsertBefore = nullptr) | |||
2749 | : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertBefore), | |||
2750 | ReservedSpace(NumReservedValues) { | |||
2751 | assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!" ) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\"" , "llvm/include/llvm/IR/Instructions.h", 2751, __extension__ __PRETTY_FUNCTION__ )); | |||
2752 | setName(NameStr); | |||
2753 | allocHungoffUses(ReservedSpace); | |||
2754 | } | |||
2755 | ||||
2756 | PHINode(Type *Ty, unsigned NumReservedValues, const Twine &NameStr, | |||
2757 | BasicBlock *InsertAtEnd) | |||
2758 | : Instruction(Ty, Instruction::PHI, nullptr, 0, InsertAtEnd), | |||
2759 | ReservedSpace(NumReservedValues) { | |||
2760 | assert(!Ty->isTokenTy() && "PHI nodes cannot have token type!")(static_cast <bool> (!Ty->isTokenTy() && "PHI nodes cannot have token type!" ) ? void (0) : __assert_fail ("!Ty->isTokenTy() && \"PHI nodes cannot have token type!\"" , "llvm/include/llvm/IR/Instructions.h", 2760, __extension__ __PRETTY_FUNCTION__ )); | |||
2761 | setName(NameStr); | |||
2762 | allocHungoffUses(ReservedSpace); | |||
2763 | } | |||
2764 | ||||
2765 | protected: | |||
2766 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
2767 | friend class Instruction; | |||
2768 | ||||
2769 | PHINode *cloneImpl() const; | |||
2770 | ||||
2771 | // allocHungoffUses - this is more complicated than the generic | |||
2772 | // User::allocHungoffUses, because we have to allocate Uses for the incoming | |||
2773 | // values and pointers to the incoming blocks, all in one allocation. | |||
2774 | void allocHungoffUses(unsigned N) { | |||
2775 | User::allocHungoffUses(N, /* IsPhi */ true); | |||
2776 | } | |||
2777 | ||||
2778 | public: | |||
2779 | /// Constructors - NumReservedValues is a hint for the number of incoming | |||
2780 | /// edges that this phi node will have (use 0 if you really have no idea). | |||
2781 | static PHINode *Create(Type *Ty, unsigned NumReservedValues, | |||
2782 | const Twine &NameStr = "", | |||
2783 | Instruction *InsertBefore = nullptr) { | |||
2784 | return new PHINode(Ty, NumReservedValues, NameStr, InsertBefore); | |||
2785 | } | |||
2786 | ||||
2787 | static PHINode *Create(Type *Ty, unsigned NumReservedValues, | |||
2788 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
2789 | return new PHINode(Ty, NumReservedValues, NameStr, InsertAtEnd); | |||
2790 | } | |||
2791 | ||||
2792 | /// Provide fast operand accessors | |||
2793 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
2794 | ||||
2795 | // Block iterator interface. This provides access to the list of incoming | |||
2796 | // basic blocks, which parallels the list of incoming values. | |||
2797 | // Please note that we are not providing non-const iterators for blocks to | |||
2798 | // force all updates go through an interface function. | |||
2799 | ||||
2800 | using block_iterator = BasicBlock **; | |||
2801 | using const_block_iterator = BasicBlock * const *; | |||
2802 | ||||
2803 | const_block_iterator block_begin() const { | |||
2804 | return reinterpret_cast<const_block_iterator>(op_begin() + ReservedSpace); | |||
2805 | } | |||
2806 | ||||
2807 | const_block_iterator block_end() const { | |||
2808 | return block_begin() + getNumOperands(); | |||
2809 | } | |||
2810 | ||||
2811 | iterator_range<const_block_iterator> blocks() const { | |||
2812 | return make_range(block_begin(), block_end()); | |||
2813 | } | |||
2814 | ||||
2815 | op_range incoming_values() { return operands(); } | |||
2816 | ||||
2817 | const_op_range incoming_values() const { return operands(); } | |||
2818 | ||||
2819 | /// Return the number of incoming edges | |||
2820 | /// | |||
2821 | unsigned getNumIncomingValues() const { return getNumOperands(); } | |||
2822 | ||||
2823 | /// Return incoming value number x | |||
2824 | /// | |||
2825 | Value *getIncomingValue(unsigned i) const { | |||
2826 | return getOperand(i); | |||
2827 | } | |||
2828 | void setIncomingValue(unsigned i, Value *V) { | |||
2829 | assert(V && "PHI node got a null value!")(static_cast <bool> (V && "PHI node got a null value!" ) ? void (0) : __assert_fail ("V && \"PHI node got a null value!\"" , "llvm/include/llvm/IR/Instructions.h", 2829, __extension__ __PRETTY_FUNCTION__ )); | |||
2830 | assert(getType() == V->getType() &&(static_cast <bool> (getType() == V->getType() && "All operands to PHI node must be the same type as the PHI node!" ) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\"" , "llvm/include/llvm/IR/Instructions.h", 2831, __extension__ __PRETTY_FUNCTION__ )) | |||
2831 | "All operands to PHI node must be the same type as the PHI node!")(static_cast <bool> (getType() == V->getType() && "All operands to PHI node must be the same type as the PHI node!" ) ? void (0) : __assert_fail ("getType() == V->getType() && \"All operands to PHI node must be the same type as the PHI node!\"" , "llvm/include/llvm/IR/Instructions.h", 2831, __extension__ __PRETTY_FUNCTION__ )); | |||
2832 | setOperand(i, V); | |||
2833 | } | |||
2834 | ||||
2835 | static unsigned getOperandNumForIncomingValue(unsigned i) { | |||
2836 | return i; | |||
2837 | } | |||
2838 | ||||
2839 | static unsigned getIncomingValueNumForOperand(unsigned i) { | |||
2840 | return i; | |||
2841 | } | |||
2842 | ||||
2843 | /// Return incoming basic block number @p i. | |||
2844 | /// | |||
2845 | BasicBlock *getIncomingBlock(unsigned i) const { | |||
2846 | return block_begin()[i]; | |||
2847 | } | |||
2848 | ||||
2849 | /// Return incoming basic block corresponding | |||
2850 | /// to an operand of the PHI. | |||
2851 | /// | |||
2852 | BasicBlock *getIncomingBlock(const Use &U) const { | |||
2853 | assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?")(static_cast <bool> (this == U.getUser() && "Iterator doesn't point to PHI's Uses?" ) ? void (0) : __assert_fail ("this == U.getUser() && \"Iterator doesn't point to PHI's Uses?\"" , "llvm/include/llvm/IR/Instructions.h", 2853, __extension__ __PRETTY_FUNCTION__ )); | |||
2854 | return getIncomingBlock(unsigned(&U - op_begin())); | |||
2855 | } | |||
2856 | ||||
2857 | /// Return incoming basic block corresponding | |||
2858 | /// to value use iterator. | |||
2859 | /// | |||
2860 | BasicBlock *getIncomingBlock(Value::const_user_iterator I) const { | |||
2861 | return getIncomingBlock(I.getUse()); | |||
2862 | } | |||
2863 | ||||
2864 | void setIncomingBlock(unsigned i, BasicBlock *BB) { | |||
2865 | const_cast<block_iterator>(block_begin())[i] = BB; | |||
2866 | } | |||
2867 | ||||
2868 | /// Copies the basic blocks from \p BBRange to the incoming basic block list | |||
2869 | /// of this PHINode, starting at \p ToIdx. | |||
2870 | void copyIncomingBlocks(iterator_range<const_block_iterator> BBRange, | |||
2871 | uint32_t ToIdx = 0) { | |||
2872 | copy(BBRange, const_cast<block_iterator>(block_begin()) + ToIdx); | |||
2873 | } | |||
2874 | ||||
2875 | /// Replace every incoming basic block \p Old to basic block \p New. | |||
2876 | void replaceIncomingBlockWith(const BasicBlock *Old, BasicBlock *New) { | |||
2877 | assert(New && Old && "PHI node got a null basic block!")(static_cast <bool> (New && Old && "PHI node got a null basic block!" ) ? void (0) : __assert_fail ("New && Old && \"PHI node got a null basic block!\"" , "llvm/include/llvm/IR/Instructions.h", 2877, __extension__ __PRETTY_FUNCTION__ )); | |||
2878 | for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) | |||
2879 | if (getIncomingBlock(Op) == Old) | |||
2880 | setIncomingBlock(Op, New); | |||
2881 | } | |||
2882 | ||||
2883 | /// Add an incoming value to the end of the PHI list | |||
2884 | /// | |||
2885 | void addIncoming(Value *V, BasicBlock *BB) { | |||
2886 | if (getNumOperands() == ReservedSpace) | |||
2887 | growOperands(); // Get more space! | |||
2888 | // Initialize some new operands. | |||
2889 | setNumHungOffUseOperands(getNumOperands() + 1); | |||
2890 | setIncomingValue(getNumOperands() - 1, V); | |||
2891 | setIncomingBlock(getNumOperands() - 1, BB); | |||
2892 | } | |||
2893 | ||||
2894 | /// Remove an incoming value. This is useful if a | |||
2895 | /// predecessor basic block is deleted. The value removed is returned. | |||
2896 | /// | |||
2897 | /// If the last incoming value for a PHI node is removed (and DeletePHIIfEmpty | |||
2898 | /// is true), the PHI node is destroyed and any uses of it are replaced with | |||
2899 | /// dummy values. The only time there should be zero incoming values to a PHI | |||
2900 | /// node is when the block is dead, so this strategy is sound. | |||
2901 | /// | |||
2902 | Value *removeIncomingValue(unsigned Idx, bool DeletePHIIfEmpty = true); | |||
2903 | ||||
2904 | Value *removeIncomingValue(const BasicBlock *BB, bool DeletePHIIfEmpty=true) { | |||
2905 | int Idx = getBasicBlockIndex(BB); | |||
2906 | assert(Idx >= 0 && "Invalid basic block argument to remove!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument to remove!" ) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument to remove!\"" , "llvm/include/llvm/IR/Instructions.h", 2906, __extension__ __PRETTY_FUNCTION__ )); | |||
2907 | return removeIncomingValue(Idx, DeletePHIIfEmpty); | |||
2908 | } | |||
2909 | ||||
2910 | /// Return the first index of the specified basic | |||
2911 | /// block in the value list for this PHI. Returns -1 if no instance. | |||
2912 | /// | |||
2913 | int getBasicBlockIndex(const BasicBlock *BB) const { | |||
2914 | for (unsigned i = 0, e = getNumOperands(); i != e; ++i) | |||
2915 | if (block_begin()[i] == BB) | |||
2916 | return i; | |||
2917 | return -1; | |||
2918 | } | |||
2919 | ||||
2920 | Value *getIncomingValueForBlock(const BasicBlock *BB) const { | |||
2921 | int Idx = getBasicBlockIndex(BB); | |||
2922 | assert(Idx >= 0 && "Invalid basic block argument!")(static_cast <bool> (Idx >= 0 && "Invalid basic block argument!" ) ? void (0) : __assert_fail ("Idx >= 0 && \"Invalid basic block argument!\"" , "llvm/include/llvm/IR/Instructions.h", 2922, __extension__ __PRETTY_FUNCTION__ )); | |||
2923 | return getIncomingValue(Idx); | |||
2924 | } | |||
2925 | ||||
2926 | /// Set every incoming value(s) for block \p BB to \p V. | |||
2927 | void setIncomingValueForBlock(const BasicBlock *BB, Value *V) { | |||
2928 | assert(BB && "PHI node got a null basic block!")(static_cast <bool> (BB && "PHI node got a null basic block!" ) ? void (0) : __assert_fail ("BB && \"PHI node got a null basic block!\"" , "llvm/include/llvm/IR/Instructions.h", 2928, __extension__ __PRETTY_FUNCTION__ )); | |||
2929 | bool Found = false; | |||
2930 | for (unsigned Op = 0, NumOps = getNumOperands(); Op != NumOps; ++Op) | |||
2931 | if (getIncomingBlock(Op) == BB) { | |||
2932 | Found = true; | |||
2933 | setIncomingValue(Op, V); | |||
2934 | } | |||
2935 | (void)Found; | |||
2936 | assert(Found && "Invalid basic block argument to set!")(static_cast <bool> (Found && "Invalid basic block argument to set!" ) ? void (0) : __assert_fail ("Found && \"Invalid basic block argument to set!\"" , "llvm/include/llvm/IR/Instructions.h", 2936, __extension__ __PRETTY_FUNCTION__ )); | |||
2937 | } | |||
2938 | ||||
2939 | /// If the specified PHI node always merges together the | |||
2940 | /// same value, return the value, otherwise return null. | |||
2941 | Value *hasConstantValue() const; | |||
2942 | ||||
2943 | /// Whether the specified PHI node always merges | |||
2944 | /// together the same value, assuming undefs are equal to a unique | |||
2945 | /// non-undef value. | |||
2946 | bool hasConstantOrUndefValue() const; | |||
2947 | ||||
2948 | /// If the PHI node is complete which means all of its parent's predecessors | |||
2949 | /// have incoming value in this PHI, return true, otherwise return false. | |||
2950 | bool isComplete() const { | |||
2951 | return llvm::all_of(predecessors(getParent()), | |||
2952 | [this](const BasicBlock *Pred) { | |||
2953 | return getBasicBlockIndex(Pred) >= 0; | |||
2954 | }); | |||
2955 | } | |||
2956 | ||||
2957 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
2958 | static bool classof(const Instruction *I) { | |||
2959 | return I->getOpcode() == Instruction::PHI; | |||
2960 | } | |||
2961 | static bool classof(const Value *V) { | |||
2962 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
2963 | } | |||
2964 | ||||
2965 | private: | |||
2966 | void growOperands(); | |||
2967 | }; | |||
2968 | ||||
2969 | template <> | |||
2970 | struct OperandTraits<PHINode> : public HungoffOperandTraits<2> { | |||
2971 | }; | |||
2972 | ||||
2973 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(PHINode, Value)PHINode::op_iterator PHINode::op_begin() { return OperandTraits <PHINode>::op_begin(this); } PHINode::const_op_iterator PHINode::op_begin() const { return OperandTraits<PHINode> ::op_begin(const_cast<PHINode*>(this)); } PHINode::op_iterator PHINode::op_end() { return OperandTraits<PHINode>::op_end (this); } PHINode::const_op_iterator PHINode::op_end() const { return OperandTraits<PHINode>::op_end(const_cast<PHINode *>(this)); } Value *PHINode::getOperand(unsigned i_nocapture ) const { (static_cast <bool> (i_nocapture < OperandTraits <PHINode>::operands(this) && "getOperand() out of range!" ) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 2973, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<PHINode >::op_begin(const_cast<PHINode*>(this))[i_nocapture] .get()); } void PHINode::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<PHINode>::operands(this) && "setOperand() out of range!" ) ? void (0) : __assert_fail ("i_nocapture < OperandTraits<PHINode>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 2973, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<PHINode>::op_begin(this)[i_nocapture] = Val_nocapture; } unsigned PHINode::getNumOperands() const { return OperandTraits<PHINode>::operands(this); } template <int Idx_nocapture> Use &PHINode::Op() { return this ->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture > const Use &PHINode::Op() const { return this->OpFrom <Idx_nocapture>(this); } | |||
2974 | ||||
2975 | //===----------------------------------------------------------------------===// | |||
2976 | // LandingPadInst Class | |||
2977 | //===----------------------------------------------------------------------===// | |||
2978 | ||||
2979 | //===--------------------------------------------------------------------------- | |||
2980 | /// The landingpad instruction holds all of the information | |||
2981 | /// necessary to generate correct exception handling. The landingpad instruction | |||
2982 | /// cannot be moved from the top of a landing pad block, which itself is | |||
2983 | /// accessible only from the 'unwind' edge of an invoke. This uses the | |||
2984 | /// SubclassData field in Value to store whether or not the landingpad is a | |||
2985 | /// cleanup. | |||
2986 | /// | |||
2987 | class LandingPadInst : public Instruction { | |||
2988 | using CleanupField = BoolBitfieldElementT<0>; | |||
2989 | ||||
2990 | /// The number of operands actually allocated. NumOperands is | |||
2991 | /// the number actually in use. | |||
2992 | unsigned ReservedSpace; | |||
2993 | ||||
2994 | LandingPadInst(const LandingPadInst &LP); | |||
2995 | ||||
2996 | public: | |||
2997 | enum ClauseType { Catch, Filter }; | |||
2998 | ||||
2999 | private: | |||
3000 | explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, | |||
3001 | const Twine &NameStr, Instruction *InsertBefore); | |||
3002 | explicit LandingPadInst(Type *RetTy, unsigned NumReservedValues, | |||
3003 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
3004 | ||||
3005 | // Allocate space for exactly zero operands. | |||
3006 | void *operator new(size_t S) { return User::operator new(S); } | |||
3007 | ||||
3008 | void growOperands(unsigned Size); | |||
3009 | void init(unsigned NumReservedValues, const Twine &NameStr); | |||
3010 | ||||
3011 | protected: | |||
3012 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
3013 | friend class Instruction; | |||
3014 | ||||
3015 | LandingPadInst *cloneImpl() const; | |||
3016 | ||||
3017 | public: | |||
3018 | void operator delete(void *Ptr) { User::operator delete(Ptr); } | |||
3019 | ||||
3020 | /// Constructors - NumReservedClauses is a hint for the number of incoming | |||
3021 | /// clauses that this landingpad will have (use 0 if you really have no idea). | |||
3022 | static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, | |||
3023 | const Twine &NameStr = "", | |||
3024 | Instruction *InsertBefore = nullptr); | |||
3025 | static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses, | |||
3026 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
3027 | ||||
3028 | /// Provide fast operand accessors | |||
3029 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
3030 | ||||
  /// Return 'true' if this landingpad instruction is a
  /// cleanup. I.e., it should be run when unwinding even if its landing pad
  /// doesn't catch the exception.
  // The flag is stored in the instruction's subclass-data bitfield.
  bool isCleanup() const { return getSubclassData<CleanupField>(); }
3035 | ||||
  /// Indicate that this landingpad instruction is a cleanup.
  /// The flag is recorded in the instruction's subclass-data bitfield.
  void setCleanup(bool V) { setSubclassData<CleanupField>(V); }
3038 | ||||
3039 | /// Add a catch or filter clause to the landing pad. | |||
3040 | void addClause(Constant *ClauseVal); | |||
3041 | ||||
  /// Get the value of the clause at index Idx. Use isCatch/isFilter to
  /// determine what type of clause this is.
  Constant *getClause(unsigned Idx) const {
    // Clauses are stored directly as operands and are always Constants
    // (see addClause(Constant *)).
    return cast<Constant>(getOperandList()[Idx]);
  }
3047 | ||||
3048 | /// Return 'true' if the clause and index Idx is a catch clause. | |||
3049 | bool isCatch(unsigned Idx) const { | |||
3050 | return !isa<ArrayType>(getOperandList()[Idx]->getType()); | |||
3051 | } | |||
3052 | ||||
3053 | /// Return 'true' if the clause and index Idx is a filter clause. | |||
3054 | bool isFilter(unsigned Idx) const { | |||
3055 | return isa<ArrayType>(getOperandList()[Idx]->getType()); | |||
3056 | } | |||
3057 | ||||
  /// Get the number of clauses for this landing pad.
  /// Every operand of a landingpad is a clause, so this is the operand count.
  unsigned getNumClauses() const { return getNumOperands(); }
3060 | ||||
  /// Grow the size of the operand list to accommodate the new
  /// number of clauses.
  // Only reserves operand slots via growOperands(); does not add clauses.
  void reserveClauses(unsigned Size) { growOperands(Size); }
3064 | ||||
  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == Instruction::LandingPad;
  }
  static bool classof(const Value *V) {
    // A Value is a LandingPadInst iff it is an Instruction with the
    // LandingPad opcode.
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
3072 | }; | |||
3073 | ||||
// LandingPadInst stores its clause operands "hung off" in separately
// allocated, growable storage; the template argument is presumably the
// minimum reserved slot count — TODO confirm against HungoffOperandTraits.
template <>
struct OperandTraits<LandingPadInst> : public HungoffOperandTraits<1> {
};
3077 | ||||
3078 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(LandingPadInst, Value)LandingPadInst::op_iterator LandingPadInst::op_begin() { return OperandTraits<LandingPadInst>::op_begin(this); } LandingPadInst ::const_op_iterator LandingPadInst::op_begin() const { return OperandTraits<LandingPadInst>::op_begin(const_cast< LandingPadInst*>(this)); } LandingPadInst::op_iterator LandingPadInst ::op_end() { return OperandTraits<LandingPadInst>::op_end (this); } LandingPadInst::const_op_iterator LandingPadInst::op_end () const { return OperandTraits<LandingPadInst>::op_end (const_cast<LandingPadInst*>(this)); } Value *LandingPadInst ::getOperand(unsigned i_nocapture) const { (static_cast <bool > (i_nocapture < OperandTraits<LandingPadInst>::operands (this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 3078, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<LandingPadInst >::op_begin(const_cast<LandingPadInst*>(this))[i_nocapture ].get()); } void LandingPadInst::setOperand(unsigned i_nocapture , Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<LandingPadInst>::operands(this) && "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<LandingPadInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 3078, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<LandingPadInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned LandingPadInst::getNumOperands( ) const { return OperandTraits<LandingPadInst>::operands (this); } template <int Idx_nocapture> Use &LandingPadInst ::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &LandingPadInst ::Op() const { return this->OpFrom<Idx_nocapture>(this ); } | |||
3079 | ||||
3080 | //===----------------------------------------------------------------------===// | |||
3081 | // ReturnInst Class | |||
3082 | //===----------------------------------------------------------------------===// | |||
3083 | ||||
3084 | //===--------------------------------------------------------------------------- | |||
3085 | /// Return a value (possibly void), from a function. Execution | |||
3086 | /// does not continue in this function any longer. | |||
3087 | /// | |||
class ReturnInst : public Instruction {
  // Copy construction is private; clones are made through cloneImpl().
  ReturnInst(const ReturnInst &RI);

private:
  // ReturnInst constructors:
  // ReturnInst() - 'ret void' instruction
  // ReturnInst( null) - 'ret void' instruction
  // ReturnInst(Value* X) - 'ret X' instruction
  // ReturnInst( null, Inst *I) - 'ret void' instruction, insert before I
  // ReturnInst(Value* X, Inst *I) - 'ret X' instruction, insert before I
  // ReturnInst( null, BB *B) - 'ret void' instruction, insert @ end of B
  // ReturnInst(Value* X, BB *B) - 'ret X' instruction, insert @ end of B
  //
  // NOTE: If the Value* passed is of type void then the constructor behaves as
  // if it was passed NULL.
  explicit ReturnInst(LLVMContext &C, Value *retVal = nullptr,
                      Instruction *InsertBefore = nullptr);
  ReturnInst(LLVMContext &C, Value *retVal, BasicBlock *InsertAtEnd);
  explicit ReturnInst(LLVMContext &C, BasicBlock *InsertAtEnd);

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  ReturnInst *cloneImpl() const;

public:
  // The operand count passed to operator new is !!retVal: 1 when a value is
  // returned, 0 for 'ret void'.
  static ReturnInst* Create(LLVMContext &C, Value *retVal = nullptr,
                            Instruction *InsertBefore = nullptr) {
    return new(!!retVal) ReturnInst(C, retVal, InsertBefore);
  }

  static ReturnInst* Create(LLVMContext &C, Value *retVal,
                            BasicBlock *InsertAtEnd) {
    return new(!!retVal) ReturnInst(C, retVal, InsertAtEnd);
  }

  // 'ret void' form: zero operands.
  static ReturnInst* Create(LLVMContext &C, BasicBlock *InsertAtEnd) {
    return new(0) ReturnInst(C, InsertAtEnd);
  }

  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const;

  /// Convenience accessor. Returns null if there is no return value.
  Value *getReturnValue() const {
    return getNumOperands() != 0 ? getOperand(0) : nullptr;
  }

  // A return terminates the function, so it has no successor blocks.
  unsigned getNumSuccessors() const { return 0; }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Ret);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

private:
  // Private and unreachable: generic successor-handling code must consult
  // getNumSuccessors() (which is 0) before calling these.
  BasicBlock *getSuccessor(unsigned idx) const {
    llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!" , "llvm/include/llvm/IR/Instructions.h", 3149);
  }

  void setSuccessor(unsigned idx, BasicBlock *B) {
    llvm_unreachable("ReturnInst has no successors!")::llvm::llvm_unreachable_internal("ReturnInst has no successors!" , "llvm/include/llvm/IR/Instructions.h", 3153);
  }
};
3156 | ||||
// ReturnInst has a variable number of operands: 0 for 'ret void', 1 for a
// returned value (see Create / getReturnValue above).
template <>
struct OperandTraits<ReturnInst> : public VariadicOperandTraits<ReturnInst> {
};
3160 | ||||
3161 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ReturnInst, Value)ReturnInst::op_iterator ReturnInst::op_begin() { return OperandTraits <ReturnInst>::op_begin(this); } ReturnInst::const_op_iterator ReturnInst::op_begin() const { return OperandTraits<ReturnInst >::op_begin(const_cast<ReturnInst*>(this)); } ReturnInst ::op_iterator ReturnInst::op_end() { return OperandTraits< ReturnInst>::op_end(this); } ReturnInst::const_op_iterator ReturnInst::op_end() const { return OperandTraits<ReturnInst >::op_end(const_cast<ReturnInst*>(this)); } Value *ReturnInst ::getOperand(unsigned i_nocapture) const { (static_cast <bool > (i_nocapture < OperandTraits<ReturnInst>::operands (this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 3161, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<ReturnInst >::op_begin(const_cast<ReturnInst*>(this))[i_nocapture ].get()); } void ReturnInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<ReturnInst>::operands(this) && "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ReturnInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 3161, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<ReturnInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned ReturnInst::getNumOperands() const { return OperandTraits<ReturnInst>::operands(this); } template <int Idx_nocapture> Use &ReturnInst::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &ReturnInst::Op() const { return this->OpFrom<Idx_nocapture>(this); } | |||
3162 | ||||
3163 | //===----------------------------------------------------------------------===// | |||
3164 | // BranchInst Class | |||
3165 | //===----------------------------------------------------------------------===// | |||
3166 | ||||
3167 | //===--------------------------------------------------------------------------- | |||
3168 | /// Conditional or Unconditional Branch instruction. | |||
3169 | /// | |||
class BranchInst : public Instruction {
  /// Ops list - Branches are strange. The operands are ordered:
  /// [Cond, FalseDest,] TrueDest. This makes some accessors faster because
  /// they don't have to check for cond/uncond branchness. These are mostly
  /// accessed relative from op_end().
  BranchInst(const BranchInst &BI);
  // BranchInst constructors (where {B, T, F} are blocks, and C is a condition):
  // BranchInst(BB *B) - 'br B'
  // BranchInst(BB* T, BB *F, Value *C) - 'br C, T, F'
  // BranchInst(BB* B, Inst *I) - 'br B' insert before I
  // BranchInst(BB* T, BB *F, Value *C, Inst *I) - 'br C, T, F', insert before I
  // BranchInst(BB* B, BB *I) - 'br B' insert at end
  // BranchInst(BB* T, BB *F, Value *C, BB *I) - 'br C, T, F', insert at end
  explicit BranchInst(BasicBlock *IfTrue, Instruction *InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             Instruction *InsertBefore = nullptr);
  BranchInst(BasicBlock *IfTrue, BasicBlock *InsertAtEnd);
  BranchInst(BasicBlock *IfTrue, BasicBlock *IfFalse, Value *Cond,
             BasicBlock *InsertAtEnd);

  // Internal consistency checks; declared here, defined out of line.
  void AssertOK();

protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  BranchInst *cloneImpl() const;

public:
  /// Iterator type that casts an operand to a basic block.
  ///
  /// This only makes sense because the successors are stored as adjacent
  /// operands for branch instructions.
  struct succ_op_iterator
      : iterator_adaptor_base<succ_op_iterator, value_op_iterator,
                              std::random_access_iterator_tag, BasicBlock *,
                              ptrdiff_t, BasicBlock *, BasicBlock *> {
    explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {}

    BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    BasicBlock *operator->() const { return operator*(); }
  };

  /// The const version of `succ_op_iterator`.
  struct const_succ_op_iterator
      : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator,
                              std::random_access_iterator_tag,
                              const BasicBlock *, ptrdiff_t, const BasicBlock *,
                              const BasicBlock *> {
    explicit const_succ_op_iterator(const_value_op_iterator I)
        : iterator_adaptor_base(I) {}

    const BasicBlock *operator*() const { return cast<BasicBlock>(*I); }
    const BasicBlock *operator->() const { return operator*(); }
  };

  // Unconditional form: a single operand (the target block), hence new(1).
  static BranchInst *Create(BasicBlock *IfTrue,
                            Instruction *InsertBefore = nullptr) {
    return new(1) BranchInst(IfTrue, InsertBefore);
  }

  // Conditional form: three operands [Cond, FalseDest, TrueDest], hence new(3).
  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond, Instruction *InsertBefore = nullptr) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertBefore);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *InsertAtEnd) {
    return new(1) BranchInst(IfTrue, InsertAtEnd);
  }

  static BranchInst *Create(BasicBlock *IfTrue, BasicBlock *IfFalse,
                            Value *Cond, BasicBlock *InsertAtEnd) {
    return new(3) BranchInst(IfTrue, IfFalse, Cond, InsertAtEnd);
  }

  /// Transparently provide more efficient getOperand methods.
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const;

  // The operand count encodes the branch kind: 1 = unconditional, 3 =
  // conditional.
  bool isUnconditional() const { return getNumOperands() == 1; }
  bool isConditional() const { return getNumOperands() == 3; }

  // Operands are addressed from op_end(): Op<-3> is the condition, which only
  // exists on conditional branches.
  Value *getCondition() const {
    assert(isConditional() && "Cannot get condition of an uncond branch!")(static_cast <bool> (isConditional() && "Cannot get condition of an uncond branch!" ) ? void (0) : __assert_fail ("isConditional() && \"Cannot get condition of an uncond branch!\"" , "llvm/include/llvm/IR/Instructions.h", 3252, __extension__ __PRETTY_FUNCTION__ ));
    return Op<-3>();
  }

  void setCondition(Value *V) {
    assert(isConditional() && "Cannot set condition of unconditional branch!")(static_cast <bool> (isConditional() && "Cannot set condition of unconditional branch!" ) ? void (0) : __assert_fail ("isConditional() && \"Cannot set condition of unconditional branch!\"" , "llvm/include/llvm/IR/Instructions.h", 3257, __extension__ __PRETTY_FUNCTION__ ));
    Op<-3>() = V;
  }

  // 1 for 'br B', 2 for 'br C, T, F'.
  unsigned getNumSuccessors() const { return 1+isConditional(); }

  // Successor 0 is Op<-1> (TrueDest); successor 1, when present, is Op<-2>
  // (FalseDest) — see the Ops-list comment at the top of the class.
  BasicBlock *getSuccessor(unsigned i) const {
    assert(i < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (i < getNumSuccessors() && "Successor # out of range for Branch!") ? void (0) : __assert_fail ("i < getNumSuccessors() && \"Successor # out of range for Branch!\"" , "llvm/include/llvm/IR/Instructions.h", 3264, __extension__ __PRETTY_FUNCTION__ ));
    return cast_or_null<BasicBlock>((&Op<-1>() - i)->get());
  }

  void setSuccessor(unsigned idx, BasicBlock *NewSucc) {
    assert(idx < getNumSuccessors() && "Successor # out of range for Branch!")(static_cast <bool> (idx < getNumSuccessors() && "Successor # out of range for Branch!") ? void (0) : __assert_fail ("idx < getNumSuccessors() && \"Successor # out of range for Branch!\"" , "llvm/include/llvm/IR/Instructions.h", 3269, __extension__ __PRETTY_FUNCTION__ ));
    *(&Op<-1>() - idx) = NewSucc;
  }

  /// Swap the successors of this branch instruction.
  ///
  /// Swaps the successors of the branch instruction. This also swaps any
  /// branch weight metadata associated with the instruction so that it
  /// continues to map correctly to each operand.
  void swapSuccessors();

  // Iterates the successor operands in operand order, skipping the condition
  // operand when this branch is conditional.
  iterator_range<succ_op_iterator> successors() {
    return make_range(
        succ_op_iterator(std::next(value_op_begin(), isConditional() ? 1 : 0)),
        succ_op_iterator(value_op_end()));
  }

  iterator_range<const_succ_op_iterator> successors() const {
    return make_range(const_succ_op_iterator(
                          std::next(value_op_begin(), isConditional() ? 1 : 0)),
                      const_succ_op_iterator(value_op_end()));
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return (I->getOpcode() == Instruction::Br);
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
3300 | ||||
// BranchInst carries 1 operand (unconditional) or 3 (conditional); the second
// template argument is presumably the minimum operand count — TODO confirm
// against VariadicOperandTraits.
template <>
struct OperandTraits<BranchInst> : public VariadicOperandTraits<BranchInst, 1> {
};
3304 | ||||
3305 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(BranchInst, Value)BranchInst::op_iterator BranchInst::op_begin() { return OperandTraits <BranchInst>::op_begin(this); } BranchInst::const_op_iterator BranchInst::op_begin() const { return OperandTraits<BranchInst >::op_begin(const_cast<BranchInst*>(this)); } BranchInst ::op_iterator BranchInst::op_end() { return OperandTraits< BranchInst>::op_end(this); } BranchInst::const_op_iterator BranchInst::op_end() const { return OperandTraits<BranchInst >::op_end(const_cast<BranchInst*>(this)); } Value *BranchInst ::getOperand(unsigned i_nocapture) const { (static_cast <bool > (i_nocapture < OperandTraits<BranchInst>::operands (this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 3305, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<BranchInst >::op_begin(const_cast<BranchInst*>(this))[i_nocapture ].get()); } void BranchInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<BranchInst>::operands(this) && "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<BranchInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 3305, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<BranchInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned BranchInst::getNumOperands() const { return OperandTraits<BranchInst>::operands(this); } template <int Idx_nocapture> Use &BranchInst::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &BranchInst::Op() const { return this->OpFrom<Idx_nocapture>(this); } | |||
3306 | ||||
3307 | //===----------------------------------------------------------------------===// | |||
3308 | // SwitchInst Class | |||
3309 | //===----------------------------------------------------------------------===// | |||
3310 | ||||
3311 | //===--------------------------------------------------------------------------- | |||
3312 | /// Multiway switch | |||
3313 | /// | |||
3314 | class SwitchInst : public Instruction { | |||
3315 | unsigned ReservedSpace; | |||
3316 | ||||
3317 | // Operand[0] = Value to switch on | |||
3318 | // Operand[1] = Default basic block destination | |||
3319 | // Operand[2n ] = Value to match | |||
3320 | // Operand[2n+1] = BasicBlock to go to on match | |||
3321 | SwitchInst(const SwitchInst &SI); | |||
3322 | ||||
3323 | /// Create a new switch instruction, specifying a value to switch on and a | |||
3324 | /// default destination. The number of additional cases can be specified here | |||
3325 | /// to make memory allocation more efficient. This constructor can also | |||
3326 | /// auto-insert before another instruction. | |||
3327 | SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, | |||
3328 | Instruction *InsertBefore); | |||
3329 | ||||
3330 | /// Create a new switch instruction, specifying a value to switch on and a | |||
3331 | /// default destination. The number of additional cases can be specified here | |||
3332 | /// to make memory allocation more efficient. This constructor also | |||
3333 | /// auto-inserts at the end of the specified BasicBlock. | |||
3334 | SwitchInst(Value *Value, BasicBlock *Default, unsigned NumCases, | |||
3335 | BasicBlock *InsertAtEnd); | |||
3336 | ||||
  // Allocate space for exactly zero operands; case operands are added later
  // through init()/growOperands().
  void *operator new(size_t S) { return User::operator new(S); }
3339 | ||||
3340 | void init(Value *Value, BasicBlock *Default, unsigned NumReserved); | |||
3341 | void growOperands(); | |||
3342 | ||||
3343 | protected: | |||
3344 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
3345 | friend class Instruction; | |||
3346 | ||||
3347 | SwitchInst *cloneImpl() const; | |||
3348 | ||||
3349 | public: | |||
  // Matches the class-specific zero-operand operator new above.
  void operator delete(void *Ptr) { User::operator delete(Ptr); }
3351 | ||||
3352 | // -2 | |||
3353 | static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1); | |||
3354 | ||||
3355 | template <typename CaseHandleT> class CaseIteratorImpl; | |||
3356 | ||||
3357 | /// A handle to a particular switch case. It exposes a convenient interface | |||
3358 | /// to both the case value and the successor block. | |||
3359 | /// | |||
3360 | /// We define this as a template and instantiate it to form both a const and | |||
3361 | /// non-const handle. | |||
  template <typename SwitchInstT, typename ConstantIntT, typename BasicBlockT>
  class CaseHandleImpl {
    // Directly befriend both const and non-const iterators.
    friend class SwitchInst::CaseIteratorImpl<
        CaseHandleImpl<SwitchInstT, ConstantIntT, BasicBlockT>>;

  protected:
    // Expose the switch type we're parameterized with to the iterator.
    using SwitchInstType = SwitchInstT;

    // The switch this handle points into, and the case number within it.
    // Index may also be DefaultPseudoIndex, denoting the default destination.
    SwitchInstT *SI;
    ptrdiff_t Index;

    CaseHandleImpl() = default;
    CaseHandleImpl(SwitchInstT *SI, ptrdiff_t Index) : SI(SI), Index(Index) {}

  public:
    /// Resolves case value for current case.
    // Operand 2n+2 holds the value of case n (see the operand table at the
    // top of SwitchInst).
    ConstantIntT *getCaseValue() const {
      assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases () && "Index out the number of cases.") ? void (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\"" , "llvm/include/llvm/IR/Instructions.h", 3382, __extension__ __PRETTY_FUNCTION__ ))
             "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases () && "Index out the number of cases.") ? void (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\"" , "llvm/include/llvm/IR/Instructions.h", 3382, __extension__ __PRETTY_FUNCTION__ ));
      return reinterpret_cast<ConstantIntT *>(SI->getOperand(2 + Index * 2));
    }

    /// Resolves successor for current case.
    BasicBlockT *getCaseSuccessor() const {
      assert(((unsigned)Index < SI->getNumCases() ||(static_cast <bool> (((unsigned)Index < SI->getNumCases () || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases." ) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\"" , "llvm/include/llvm/IR/Instructions.h", 3390, __extension__ __PRETTY_FUNCTION__ ))
              (unsigned)Index == DefaultPseudoIndex) &&(static_cast <bool> (((unsigned)Index < SI->getNumCases () || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases." ) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\"" , "llvm/include/llvm/IR/Instructions.h", 3390, __extension__ __PRETTY_FUNCTION__ ))
             "Index out the number of cases.")(static_cast <bool> (((unsigned)Index < SI->getNumCases () || (unsigned)Index == DefaultPseudoIndex) && "Index out the number of cases." ) ? void (0) : __assert_fail ("((unsigned)Index < SI->getNumCases() || (unsigned)Index == DefaultPseudoIndex) && \"Index out the number of cases.\"" , "llvm/include/llvm/IR/Instructions.h", 3390, __extension__ __PRETTY_FUNCTION__ ));
      return SI->getSuccessor(getSuccessorIndex());
    }

    /// Returns number of current case.
    unsigned getCaseIndex() const { return Index; }

    /// Returns successor index for current case successor.
    // Successor 0 is the default destination; case n maps to successor n+1.
    unsigned getSuccessorIndex() const {
      assert(((unsigned)Index == DefaultPseudoIndex ||(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && "Index out the number of cases." ) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\"" , "llvm/include/llvm/IR/Instructions.h", 3401, __extension__ __PRETTY_FUNCTION__ ))
              (unsigned)Index < SI->getNumCases()) &&(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && "Index out the number of cases." ) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\"" , "llvm/include/llvm/IR/Instructions.h", 3401, __extension__ __PRETTY_FUNCTION__ ))
             "Index out the number of cases.")(static_cast <bool> (((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && "Index out the number of cases." ) ? void (0) : __assert_fail ("((unsigned)Index == DefaultPseudoIndex || (unsigned)Index < SI->getNumCases()) && \"Index out the number of cases.\"" , "llvm/include/llvm/IR/Instructions.h", 3401, __extension__ __PRETTY_FUNCTION__ ));
      return (unsigned)Index != DefaultPseudoIndex ? Index + 1 : 0;
    }

    // Handles into different switches are not comparable.
    bool operator==(const CaseHandleImpl &RHS) const {
      assert(SI == RHS.SI && "Incompatible operators.")(static_cast <bool> (SI == RHS.SI && "Incompatible operators." ) ? void (0) : __assert_fail ("SI == RHS.SI && \"Incompatible operators.\"" , "llvm/include/llvm/IR/Instructions.h", 3406, __extension__ __PRETTY_FUNCTION__ ));
      return Index == RHS.Index;
    }
  };
3410 | ||||
  // Read-only handle: the switch, case value, and successor are all
  // const-qualified.
  using ConstCaseHandle =
      CaseHandleImpl<const SwitchInst, const ConstantInt, const BasicBlock>;
3413 | ||||
  // Mutable variant of CaseHandleImpl: additionally allows rewriting the case
  // value and successor in place.
  class CaseHandle
      : public CaseHandleImpl<SwitchInst, ConstantInt, BasicBlock> {
    friend class SwitchInst::CaseIteratorImpl<CaseHandle>;

  public:
    CaseHandle(SwitchInst *SI, ptrdiff_t Index) : CaseHandleImpl(SI, Index) {}

    /// Sets the new value for current case.
    void setValue(ConstantInt *V) const {
      assert((unsigned)Index < SI->getNumCases() &&(static_cast <bool> ((unsigned)Index < SI->getNumCases () && "Index out the number of cases.") ? void (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\"" , "llvm/include/llvm/IR/Instructions.h", 3424, __extension__ __PRETTY_FUNCTION__ ))
             "Index out the number of cases.")(static_cast <bool> ((unsigned)Index < SI->getNumCases () && "Index out the number of cases.") ? void (0) : __assert_fail ("(unsigned)Index < SI->getNumCases() && \"Index out the number of cases.\"" , "llvm/include/llvm/IR/Instructions.h", 3424, __extension__ __PRETTY_FUNCTION__ ));
      // Operand 2n+2 holds the value of case n.
      SI->setOperand(2 + Index*2, reinterpret_cast<Value*>(V));
    }

    /// Sets the new successor for current case.
    void setSuccessor(BasicBlock *S) const {
      SI->setSuccessor(getSuccessorIndex(), S);
    }
  };
3433 | ||||
3434 | template <typename CaseHandleT> | |||
3435 | class CaseIteratorImpl | |||
3436 | : public iterator_facade_base<CaseIteratorImpl<CaseHandleT>, | |||
3437 | std::random_access_iterator_tag, | |||
3438 | const CaseHandleT> { | |||
3439 | using SwitchInstT = typename CaseHandleT::SwitchInstType; | |||
3440 | ||||
3441 | CaseHandleT Case; | |||
3442 | ||||
3443 | public: | |||
    /// Default constructed iterator is in an invalid state until assigned to
    /// a case for a particular switch.
    CaseIteratorImpl() = default;

    /// Initializes case iterator for given SwitchInst and for given
    /// case number.
    /// CaseNum may also be DefaultPseudoIndex to address the default
    /// destination (see fromSuccessorIndex).
    CaseIteratorImpl(SwitchInstT *SI, unsigned CaseNum) : Case(SI, CaseNum) {}
3451 | ||||
    /// Initializes case iterator for given SwitchInst and for given
    /// successor index.
    // Successor 0 is the default destination (maps to DefaultPseudoIndex);
    // successor k (k > 0) corresponds to case k-1.
    static CaseIteratorImpl fromSuccessorIndex(SwitchInstT *SI,
                                               unsigned SuccessorIndex) {
      assert(SuccessorIndex < SI->getNumSuccessors() &&(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors () && "Successor index # out of range!") ? void (0) : __assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 3457, __extension__ __PRETTY_FUNCTION__ ))
             "Successor index # out of range!")(static_cast <bool> (SuccessorIndex < SI->getNumSuccessors () && "Successor index # out of range!") ? void (0) : __assert_fail ("SuccessorIndex < SI->getNumSuccessors() && \"Successor index # out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 3457, __extension__ __PRETTY_FUNCTION__ ));
      return SuccessorIndex != 0 ? CaseIteratorImpl(SI, SuccessorIndex - 1)
                                 : CaseIteratorImpl(SI, DefaultPseudoIndex);
    }
3461 | ||||
    /// Support converting to the const variant. This will be a no-op for const
    /// variant.
    // The const iterator is rebuilt from the handle's (switch, index) pair.
    operator CaseIteratorImpl<ConstCaseHandle>() const {
      return CaseIteratorImpl<ConstCaseHandle>(Case.SI, Case.Index);
    }
3467 | ||||
    // Random-access advance by N cases (N may be negative); landing exactly on
    // getNumCases() yields the end() iterator.
    CaseIteratorImpl &operator+=(ptrdiff_t N) {
      // Check index correctness after addition.
      // Note: Index == getNumCases() means end().
      assert(Case.Index + N >= 0 &&(static_cast <bool> (Case.Index + N >= 0 && ( unsigned)(Case.Index + N) <= Case.SI->getNumCases() && "Case.Index out the number of cases.") ? void (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "llvm/include/llvm/IR/Instructions.h", 3473, __extension__ __PRETTY_FUNCTION__ ))
             (unsigned)(Case.Index + N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index + N >= 0 && ( unsigned)(Case.Index + N) <= Case.SI->getNumCases() && "Case.Index out the number of cases.") ? void (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "llvm/include/llvm/IR/Instructions.h", 3473, __extension__ __PRETTY_FUNCTION__ ))
             "Case.Index out the number of cases.")(static_cast <bool> (Case.Index + N >= 0 && ( unsigned)(Case.Index + N) <= Case.SI->getNumCases() && "Case.Index out the number of cases.") ? void (0) : __assert_fail ("Case.Index + N >= 0 && (unsigned)(Case.Index + N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "llvm/include/llvm/IR/Instructions.h", 3473, __extension__ __PRETTY_FUNCTION__ ));
      Case.Index += N;
      return *this;
    }
3477 | CaseIteratorImpl &operator-=(ptrdiff_t N) { | |||
3478 | // Check index correctness after subtraction. | |||
3479 | // Note: Case.Index == getNumCases() means end(). | |||
3480 | assert(Case.Index - N >= 0 &&(static_cast <bool> (Case.Index - N >= 0 && ( unsigned)(Case.Index - N) <= Case.SI->getNumCases() && "Case.Index out the number of cases.") ? void (0) : __assert_fail ("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "llvm/include/llvm/IR/Instructions.h", 3482, __extension__ __PRETTY_FUNCTION__ )) | |||
3481 | (unsigned)(Case.Index - N) <= Case.SI->getNumCases() &&(static_cast <bool> (Case.Index - N >= 0 && ( unsigned)(Case.Index - N) <= Case.SI->getNumCases() && "Case.Index out the number of cases.") ? void (0) : __assert_fail ("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "llvm/include/llvm/IR/Instructions.h", 3482, __extension__ __PRETTY_FUNCTION__ )) | |||
3482 | "Case.Index out the number of cases.")(static_cast <bool> (Case.Index - N >= 0 && ( unsigned)(Case.Index - N) <= Case.SI->getNumCases() && "Case.Index out the number of cases.") ? void (0) : __assert_fail ("Case.Index - N >= 0 && (unsigned)(Case.Index - N) <= Case.SI->getNumCases() && \"Case.Index out the number of cases.\"" , "llvm/include/llvm/IR/Instructions.h", 3482, __extension__ __PRETTY_FUNCTION__ )); | |||
3483 | Case.Index -= N; | |||
3484 | return *this; | |||
3485 | } | |||
3486 | ptrdiff_t operator-(const CaseIteratorImpl &RHS) const { | |||
3487 | assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators." ) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\"" , "llvm/include/llvm/IR/Instructions.h", 3487, __extension__ __PRETTY_FUNCTION__ )); | |||
3488 | return Case.Index - RHS.Case.Index; | |||
3489 | } | |||
3490 | bool operator==(const CaseIteratorImpl &RHS) const { | |||
3491 | return Case == RHS.Case; | |||
3492 | } | |||
3493 | bool operator<(const CaseIteratorImpl &RHS) const { | |||
3494 | assert(Case.SI == RHS.Case.SI && "Incompatible operators.")(static_cast <bool> (Case.SI == RHS.Case.SI && "Incompatible operators." ) ? void (0) : __assert_fail ("Case.SI == RHS.Case.SI && \"Incompatible operators.\"" , "llvm/include/llvm/IR/Instructions.h", 3494, __extension__ __PRETTY_FUNCTION__ )); | |||
3495 | return Case.Index < RHS.Case.Index; | |||
3496 | } | |||
3497 | const CaseHandleT &operator*() const { return Case; } | |||
3498 | }; | |||
3499 | ||||
3500 | using CaseIt = CaseIteratorImpl<CaseHandle>; | |||
3501 | using ConstCaseIt = CaseIteratorImpl<ConstCaseHandle>; | |||
3502 | ||||
3503 | static SwitchInst *Create(Value *Value, BasicBlock *Default, | |||
3504 | unsigned NumCases, | |||
3505 | Instruction *InsertBefore = nullptr) { | |||
3506 | return new SwitchInst(Value, Default, NumCases, InsertBefore); | |||
3507 | } | |||
3508 | ||||
3509 | static SwitchInst *Create(Value *Value, BasicBlock *Default, | |||
3510 | unsigned NumCases, BasicBlock *InsertAtEnd) { | |||
3511 | return new SwitchInst(Value, Default, NumCases, InsertAtEnd); | |||
3512 | } | |||
3513 | ||||
3514 | /// Provide fast operand accessors | |||
3515 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
3516 | ||||
3517 | // Accessor Methods for Switch stmt | |||
3518 | Value *getCondition() const { return getOperand(0); } | |||
3519 | void setCondition(Value *V) { setOperand(0, V); } | |||
3520 | ||||
3521 | BasicBlock *getDefaultDest() const { | |||
3522 | return cast<BasicBlock>(getOperand(1)); | |||
3523 | } | |||
3524 | ||||
3525 | void setDefaultDest(BasicBlock *DefaultCase) { | |||
3526 | setOperand(1, reinterpret_cast<Value*>(DefaultCase)); | |||
3527 | } | |||
3528 | ||||
3529 | /// Return the number of 'cases' in this switch instruction, excluding the | |||
3530 | /// default case. | |||
3531 | unsigned getNumCases() const { | |||
3532 | return getNumOperands()/2 - 1; | |||
3533 | } | |||
3534 | ||||
3535 | /// Returns a read/write iterator that points to the first case in the | |||
3536 | /// SwitchInst. | |||
3537 | CaseIt case_begin() { | |||
3538 | return CaseIt(this, 0); | |||
3539 | } | |||
3540 | ||||
3541 | /// Returns a read-only iterator that points to the first case in the | |||
3542 | /// SwitchInst. | |||
3543 | ConstCaseIt case_begin() const { | |||
3544 | return ConstCaseIt(this, 0); | |||
3545 | } | |||
3546 | ||||
3547 | /// Returns a read/write iterator that points one past the last in the | |||
3548 | /// SwitchInst. | |||
3549 | CaseIt case_end() { | |||
3550 | return CaseIt(this, getNumCases()); | |||
3551 | } | |||
3552 | ||||
3553 | /// Returns a read-only iterator that points one past the last in the | |||
3554 | /// SwitchInst. | |||
3555 | ConstCaseIt case_end() const { | |||
3556 | return ConstCaseIt(this, getNumCases()); | |||
3557 | } | |||
3558 | ||||
3559 | /// Iteration adapter for range-for loops. | |||
3560 | iterator_range<CaseIt> cases() { | |||
3561 | return make_range(case_begin(), case_end()); | |||
3562 | } | |||
3563 | ||||
3564 | /// Constant iteration adapter for range-for loops. | |||
3565 | iterator_range<ConstCaseIt> cases() const { | |||
3566 | return make_range(case_begin(), case_end()); | |||
3567 | } | |||
3568 | ||||
3569 | /// Returns an iterator that points to the default case. | |||
3570 | /// Note: this iterator allows to resolve successor only. Attempt | |||
3571 | /// to resolve case value causes an assertion. | |||
3572 | /// Also note, that increment and decrement also causes an assertion and | |||
3573 | /// makes iterator invalid. | |||
3574 | CaseIt case_default() { | |||
3575 | return CaseIt(this, DefaultPseudoIndex); | |||
3576 | } | |||
3577 | ConstCaseIt case_default() const { | |||
3578 | return ConstCaseIt(this, DefaultPseudoIndex); | |||
3579 | } | |||
3580 | ||||
3581 | /// Search all of the case values for the specified constant. If it is | |||
3582 | /// explicitly handled, return the case iterator of it, otherwise return | |||
3583 | /// default case iterator to indicate that it is handled by the default | |||
3584 | /// handler. | |||
3585 | CaseIt findCaseValue(const ConstantInt *C) { | |||
3586 | return CaseIt( | |||
3587 | this, | |||
3588 | const_cast<const SwitchInst *>(this)->findCaseValue(C)->getCaseIndex()); | |||
3589 | } | |||
3590 | ConstCaseIt findCaseValue(const ConstantInt *C) const { | |||
3591 | ConstCaseIt I = llvm::find_if(cases(), [C](const ConstCaseHandle &Case) { | |||
3592 | return Case.getCaseValue() == C; | |||
3593 | }); | |||
3594 | if (I != case_end()) | |||
3595 | return I; | |||
3596 | ||||
3597 | return case_default(); | |||
3598 | } | |||
3599 | ||||
3600 | /// Finds the unique case value for a given successor. Returns null if the | |||
3601 | /// successor is not found, not unique, or is the default case. | |||
3602 | ConstantInt *findCaseDest(BasicBlock *BB) { | |||
3603 | if (BB == getDefaultDest()) | |||
3604 | return nullptr; | |||
3605 | ||||
3606 | ConstantInt *CI = nullptr; | |||
3607 | for (auto Case : cases()) { | |||
3608 | if (Case.getCaseSuccessor() != BB) | |||
3609 | continue; | |||
3610 | ||||
3611 | if (CI) | |||
3612 | return nullptr; // Multiple cases lead to BB. | |||
3613 | ||||
3614 | CI = Case.getCaseValue(); | |||
3615 | } | |||
3616 | ||||
3617 | return CI; | |||
3618 | } | |||
3619 | ||||
3620 | /// Add an entry to the switch instruction. | |||
3621 | /// Note: | |||
3622 | /// This action invalidates case_end(). Old case_end() iterator will | |||
3623 | /// point to the added case. | |||
3624 | void addCase(ConstantInt *OnVal, BasicBlock *Dest); | |||
3625 | ||||
3626 | /// This method removes the specified case and its successor from the switch | |||
3627 | /// instruction. Note that this operation may reorder the remaining cases at | |||
3628 | /// index idx and above. | |||
3629 | /// Note: | |||
3630 | /// This action invalidates iterators for all cases following the one removed, | |||
3631 | /// including the case_end() iterator. It returns an iterator for the next | |||
3632 | /// case. | |||
3633 | CaseIt removeCase(CaseIt I); | |||
3634 | ||||
3635 | unsigned getNumSuccessors() const { return getNumOperands()/2; } | |||
3636 | BasicBlock *getSuccessor(unsigned idx) const { | |||
3637 | assert(idx < getNumSuccessors() &&"Successor idx out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() && "Successor idx out of range for switch!") ? void (0) : __assert_fail ("idx < getNumSuccessors() &&\"Successor idx out of range for switch!\"" , "llvm/include/llvm/IR/Instructions.h", 3637, __extension__ __PRETTY_FUNCTION__ )); | |||
3638 | return cast<BasicBlock>(getOperand(idx*2+1)); | |||
3639 | } | |||
3640 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { | |||
3641 | assert(idx < getNumSuccessors() && "Successor # out of range for switch!")(static_cast <bool> (idx < getNumSuccessors() && "Successor # out of range for switch!") ? void (0) : __assert_fail ("idx < getNumSuccessors() && \"Successor # out of range for switch!\"" , "llvm/include/llvm/IR/Instructions.h", 3641, __extension__ __PRETTY_FUNCTION__ )); | |||
3642 | setOperand(idx * 2 + 1, NewSucc); | |||
3643 | } | |||
3644 | ||||
3645 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
3646 | static bool classof(const Instruction *I) { | |||
3647 | return I->getOpcode() == Instruction::Switch; | |||
3648 | } | |||
3649 | static bool classof(const Value *V) { | |||
3650 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
3651 | } | |||
3652 | }; | |||
3653 | ||||
3654 | /// A wrapper class to simplify modification of SwitchInst cases along with | |||
3655 | /// their prof branch_weights metadata. | |||
3656 | class SwitchInstProfUpdateWrapper { | |||
3657 | SwitchInst &SI; | |||
3658 | std::optional<SmallVector<uint32_t, 8>> Weights; | |||
3659 | bool Changed = false; | |||
3660 | ||||
3661 | protected: | |||
3662 | MDNode *buildProfBranchWeightsMD(); | |||
3663 | ||||
3664 | void init(); | |||
3665 | ||||
3666 | public: | |||
3667 | using CaseWeightOpt = std::optional<uint32_t>; | |||
3668 | SwitchInst *operator->() { return &SI; } | |||
3669 | SwitchInst &operator*() { return SI; } | |||
3670 | operator SwitchInst *() { return &SI; } | |||
3671 | ||||
3672 | SwitchInstProfUpdateWrapper(SwitchInst &SI) : SI(SI) { init(); } | |||
3673 | ||||
3674 | ~SwitchInstProfUpdateWrapper() { | |||
3675 | if (Changed) | |||
3676 | SI.setMetadata(LLVMContext::MD_prof, buildProfBranchWeightsMD()); | |||
3677 | } | |||
3678 | ||||
3679 | /// Delegate the call to the underlying SwitchInst::removeCase() and remove | |||
3680 | /// correspondent branch weight. | |||
3681 | SwitchInst::CaseIt removeCase(SwitchInst::CaseIt I); | |||
3682 | ||||
3683 | /// Delegate the call to the underlying SwitchInst::addCase() and set the | |||
3684 | /// specified branch weight for the added case. | |||
3685 | void addCase(ConstantInt *OnVal, BasicBlock *Dest, CaseWeightOpt W); | |||
3686 | ||||
3687 | /// Delegate the call to the underlying SwitchInst::eraseFromParent() and mark | |||
3688 | /// this object to not touch the underlying SwitchInst in destructor. | |||
3689 | SymbolTableList<Instruction>::iterator eraseFromParent(); | |||
3690 | ||||
3691 | void setSuccessorWeight(unsigned idx, CaseWeightOpt W); | |||
3692 | CaseWeightOpt getSuccessorWeight(unsigned idx); | |||
3693 | ||||
3694 | static CaseWeightOpt getSuccessorWeight(const SwitchInst &SI, unsigned idx); | |||
3695 | }; | |||
3696 | ||||
3697 | template <> | |||
3698 | struct OperandTraits<SwitchInst> : public HungoffOperandTraits<2> { | |||
3699 | }; | |||
3700 | ||||
3701 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SwitchInst, Value)SwitchInst::op_iterator SwitchInst::op_begin() { return OperandTraits <SwitchInst>::op_begin(this); } SwitchInst::const_op_iterator SwitchInst::op_begin() const { return OperandTraits<SwitchInst >::op_begin(const_cast<SwitchInst*>(this)); } SwitchInst ::op_iterator SwitchInst::op_end() { return OperandTraits< SwitchInst>::op_end(this); } SwitchInst::const_op_iterator SwitchInst::op_end() const { return OperandTraits<SwitchInst >::op_end(const_cast<SwitchInst*>(this)); } Value *SwitchInst ::getOperand(unsigned i_nocapture) const { (static_cast <bool > (i_nocapture < OperandTraits<SwitchInst>::operands (this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 3701, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<SwitchInst >::op_begin(const_cast<SwitchInst*>(this))[i_nocapture ].get()); } void SwitchInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<SwitchInst>::operands(this) && "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<SwitchInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 3701, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<SwitchInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned SwitchInst::getNumOperands() const { return OperandTraits<SwitchInst>::operands(this); } template <int Idx_nocapture> Use &SwitchInst::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &SwitchInst::Op() const { return this->OpFrom<Idx_nocapture>(this); } | |||
3702 | ||||
3703 | //===----------------------------------------------------------------------===// | |||
3704 | // IndirectBrInst Class | |||
3705 | //===----------------------------------------------------------------------===// | |||
3706 | ||||
3707 | //===--------------------------------------------------------------------------- | |||
3708 | /// Indirect Branch Instruction. | |||
3709 | /// | |||
3710 | class IndirectBrInst : public Instruction { | |||
3711 | unsigned ReservedSpace; | |||
3712 | ||||
3713 | // Operand[0] = Address to jump to | |||
3714 | // Operand[n+1] = n-th destination | |||
3715 | IndirectBrInst(const IndirectBrInst &IBI); | |||
3716 | ||||
3717 | /// Create a new indirectbr instruction, specifying an | |||
3718 | /// Address to jump to. The number of expected destinations can be specified | |||
3719 | /// here to make memory allocation more efficient. This constructor can also | |||
3720 | /// autoinsert before another instruction. | |||
3721 | IndirectBrInst(Value *Address, unsigned NumDests, Instruction *InsertBefore); | |||
3722 | ||||
3723 | /// Create a new indirectbr instruction, specifying an | |||
3724 | /// Address to jump to. The number of expected destinations can be specified | |||
3725 | /// here to make memory allocation more efficient. This constructor also | |||
3726 | /// autoinserts at the end of the specified BasicBlock. | |||
3727 | IndirectBrInst(Value *Address, unsigned NumDests, BasicBlock *InsertAtEnd); | |||
3728 | ||||
3729 | // allocate space for exactly zero operands | |||
3730 | void *operator new(size_t S) { return User::operator new(S); } | |||
3731 | ||||
3732 | void init(Value *Address, unsigned NumDests); | |||
3733 | void growOperands(); | |||
3734 | ||||
3735 | protected: | |||
3736 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
3737 | friend class Instruction; | |||
3738 | ||||
3739 | IndirectBrInst *cloneImpl() const; | |||
3740 | ||||
3741 | public: | |||
3742 | void operator delete(void *Ptr) { User::operator delete(Ptr); } | |||
3743 | ||||
3744 | /// Iterator type that casts an operand to a basic block. | |||
3745 | /// | |||
3746 | /// This only makes sense because the successors are stored as adjacent | |||
3747 | /// operands for indirectbr instructions. | |||
3748 | struct succ_op_iterator | |||
3749 | : iterator_adaptor_base<succ_op_iterator, value_op_iterator, | |||
3750 | std::random_access_iterator_tag, BasicBlock *, | |||
3751 | ptrdiff_t, BasicBlock *, BasicBlock *> { | |||
3752 | explicit succ_op_iterator(value_op_iterator I) : iterator_adaptor_base(I) {} | |||
3753 | ||||
3754 | BasicBlock *operator*() const { return cast<BasicBlock>(*I); } | |||
3755 | BasicBlock *operator->() const { return operator*(); } | |||
3756 | }; | |||
3757 | ||||
3758 | /// The const version of `succ_op_iterator`. | |||
3759 | struct const_succ_op_iterator | |||
3760 | : iterator_adaptor_base<const_succ_op_iterator, const_value_op_iterator, | |||
3761 | std::random_access_iterator_tag, | |||
3762 | const BasicBlock *, ptrdiff_t, const BasicBlock *, | |||
3763 | const BasicBlock *> { | |||
3764 | explicit const_succ_op_iterator(const_value_op_iterator I) | |||
3765 | : iterator_adaptor_base(I) {} | |||
3766 | ||||
3767 | const BasicBlock *operator*() const { return cast<BasicBlock>(*I); } | |||
3768 | const BasicBlock *operator->() const { return operator*(); } | |||
3769 | }; | |||
3770 | ||||
3771 | static IndirectBrInst *Create(Value *Address, unsigned NumDests, | |||
3772 | Instruction *InsertBefore = nullptr) { | |||
3773 | return new IndirectBrInst(Address, NumDests, InsertBefore); | |||
3774 | } | |||
3775 | ||||
3776 | static IndirectBrInst *Create(Value *Address, unsigned NumDests, | |||
3777 | BasicBlock *InsertAtEnd) { | |||
3778 | return new IndirectBrInst(Address, NumDests, InsertAtEnd); | |||
3779 | } | |||
3780 | ||||
3781 | /// Provide fast operand accessors. | |||
3782 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
3783 | ||||
3784 | // Accessor Methods for IndirectBrInst instruction. | |||
3785 | Value *getAddress() { return getOperand(0); } | |||
3786 | const Value *getAddress() const { return getOperand(0); } | |||
3787 | void setAddress(Value *V) { setOperand(0, V); } | |||
3788 | ||||
3789 | /// return the number of possible destinations in this | |||
3790 | /// indirectbr instruction. | |||
3791 | unsigned getNumDestinations() const { return getNumOperands()-1; } | |||
3792 | ||||
3793 | /// Return the specified destination. | |||
3794 | BasicBlock *getDestination(unsigned i) { return getSuccessor(i); } | |||
3795 | const BasicBlock *getDestination(unsigned i) const { return getSuccessor(i); } | |||
3796 | ||||
3797 | /// Add a destination. | |||
3798 | /// | |||
3799 | void addDestination(BasicBlock *Dest); | |||
3800 | ||||
3801 | /// This method removes the specified successor from the | |||
3802 | /// indirectbr instruction. | |||
3803 | void removeDestination(unsigned i); | |||
3804 | ||||
3805 | unsigned getNumSuccessors() const { return getNumOperands()-1; } | |||
3806 | BasicBlock *getSuccessor(unsigned i) const { | |||
3807 | return cast<BasicBlock>(getOperand(i+1)); | |||
3808 | } | |||
3809 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { | |||
3810 | setOperand(i + 1, NewSucc); | |||
3811 | } | |||
3812 | ||||
3813 | iterator_range<succ_op_iterator> successors() { | |||
3814 | return make_range(succ_op_iterator(std::next(value_op_begin())), | |||
3815 | succ_op_iterator(value_op_end())); | |||
3816 | } | |||
3817 | ||||
3818 | iterator_range<const_succ_op_iterator> successors() const { | |||
3819 | return make_range(const_succ_op_iterator(std::next(value_op_begin())), | |||
3820 | const_succ_op_iterator(value_op_end())); | |||
3821 | } | |||
3822 | ||||
3823 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
3824 | static bool classof(const Instruction *I) { | |||
3825 | return I->getOpcode() == Instruction::IndirectBr; | |||
3826 | } | |||
3827 | static bool classof(const Value *V) { | |||
3828 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
3829 | } | |||
3830 | }; | |||
3831 | ||||
3832 | template <> | |||
3833 | struct OperandTraits<IndirectBrInst> : public HungoffOperandTraits<1> { | |||
3834 | }; | |||
3835 | ||||
3836 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(IndirectBrInst, Value)IndirectBrInst::op_iterator IndirectBrInst::op_begin() { return OperandTraits<IndirectBrInst>::op_begin(this); } IndirectBrInst ::const_op_iterator IndirectBrInst::op_begin() const { return OperandTraits<IndirectBrInst>::op_begin(const_cast< IndirectBrInst*>(this)); } IndirectBrInst::op_iterator IndirectBrInst ::op_end() { return OperandTraits<IndirectBrInst>::op_end (this); } IndirectBrInst::const_op_iterator IndirectBrInst::op_end () const { return OperandTraits<IndirectBrInst>::op_end (const_cast<IndirectBrInst*>(this)); } Value *IndirectBrInst ::getOperand(unsigned i_nocapture) const { (static_cast <bool > (i_nocapture < OperandTraits<IndirectBrInst>::operands (this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 3836, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<IndirectBrInst >::op_begin(const_cast<IndirectBrInst*>(this))[i_nocapture ].get()); } void IndirectBrInst::setOperand(unsigned i_nocapture , Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<IndirectBrInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 3836, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<IndirectBrInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned IndirectBrInst::getNumOperands( ) const { return OperandTraits<IndirectBrInst>::operands (this); } template <int Idx_nocapture> Use &IndirectBrInst ::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &IndirectBrInst ::Op() const { return this->OpFrom<Idx_nocapture>(this ); } | |||
3837 | ||||
3838 | //===----------------------------------------------------------------------===// | |||
3839 | // InvokeInst Class | |||
3840 | //===----------------------------------------------------------------------===// | |||
3841 | ||||
3842 | /// Invoke instruction. The SubclassData field is used to hold the | |||
3843 | /// calling convention of the call. | |||
3844 | /// | |||
3845 | class InvokeInst : public CallBase { | |||
3846 | /// The number of operands for this call beyond the called function, | |||
3847 | /// arguments, and operand bundles. | |||
3848 | static constexpr int NumExtraOperands = 2; | |||
3849 | ||||
3850 | /// The index from the end of the operand array to the normal destination. | |||
3851 | static constexpr int NormalDestOpEndIdx = -3; | |||
3852 | ||||
3853 | /// The index from the end of the operand array to the unwind destination. | |||
3854 | static constexpr int UnwindDestOpEndIdx = -2; | |||
3855 | ||||
3856 | InvokeInst(const InvokeInst &BI); | |||
3857 | ||||
3858 | /// Construct an InvokeInst given a range of arguments. | |||
3859 | /// | |||
3860 | /// Construct an InvokeInst from a range of arguments | |||
3861 | inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, | |||
3862 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
3863 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
3864 | const Twine &NameStr, Instruction *InsertBefore); | |||
3865 | ||||
3866 | inline InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, | |||
3867 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
3868 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
3869 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
3870 | ||||
3871 | void init(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, | |||
3872 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
3873 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); | |||
3874 | ||||
3875 | /// Compute the number of operands to allocate. | |||
3876 | static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) { | |||
3877 | // We need one operand for the called function, plus our extra operands and | |||
3878 | // the input operand counts provided. | |||
3879 | return 1 + NumExtraOperands + NumArgs + NumBundleInputs; | |||
3880 | } | |||
3881 | ||||
3882 | protected: | |||
3883 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
3884 | friend class Instruction; | |||
3885 | ||||
3886 | InvokeInst *cloneImpl() const; | |||
3887 | ||||
3888 | public: | |||
3889 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, | |||
3890 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
3891 | const Twine &NameStr, | |||
3892 | Instruction *InsertBefore = nullptr) { | |||
3893 | int NumOperands = ComputeNumOperands(Args.size()); | |||
3894 | return new (NumOperands) | |||
3895 | InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt, | |||
3896 | NumOperands, NameStr, InsertBefore); | |||
3897 | } | |||
3898 | ||||
3899 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, | |||
3900 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
3901 | ArrayRef<OperandBundleDef> Bundles = std::nullopt, | |||
3902 | const Twine &NameStr = "", | |||
3903 | Instruction *InsertBefore = nullptr) { | |||
3904 | int NumOperands = | |||
3905 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); | |||
3906 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); | |||
3907 | ||||
3908 | return new (NumOperands, DescriptorBytes) | |||
3909 | InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, | |||
3910 | NameStr, InsertBefore); | |||
3911 | } | |||
3912 | ||||
3913 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, | |||
3914 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
3915 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
3916 | int NumOperands = ComputeNumOperands(Args.size()); | |||
3917 | return new (NumOperands) | |||
3918 | InvokeInst(Ty, Func, IfNormal, IfException, Args, std::nullopt, | |||
3919 | NumOperands, NameStr, InsertAtEnd); | |||
3920 | } | |||
3921 | ||||
3922 | static InvokeInst *Create(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, | |||
3923 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
3924 | ArrayRef<OperandBundleDef> Bundles, | |||
3925 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
3926 | int NumOperands = | |||
3927 | ComputeNumOperands(Args.size(), CountBundleInputs(Bundles)); | |||
3928 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); | |||
3929 | ||||
3930 | return new (NumOperands, DescriptorBytes) | |||
3931 | InvokeInst(Ty, Func, IfNormal, IfException, Args, Bundles, NumOperands, | |||
3932 | NameStr, InsertAtEnd); | |||
3933 | } | |||
3934 | ||||
3935 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, | |||
3936 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
3937 | const Twine &NameStr, | |||
3938 | Instruction *InsertBefore = nullptr) { | |||
3939 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, | |||
3940 | IfException, Args, std::nullopt, NameStr, InsertBefore); | |||
3941 | } | |||
3942 | ||||
3943 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, | |||
3944 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
3945 | ArrayRef<OperandBundleDef> Bundles = std::nullopt, | |||
3946 | const Twine &NameStr = "", | |||
3947 | Instruction *InsertBefore = nullptr) { | |||
3948 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, | |||
3949 | IfException, Args, Bundles, NameStr, InsertBefore); | |||
3950 | } | |||
3951 | ||||
3952 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, | |||
3953 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
3954 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
3955 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, | |||
3956 | IfException, Args, NameStr, InsertAtEnd); | |||
3957 | } | |||
3958 | ||||
3959 | static InvokeInst *Create(FunctionCallee Func, BasicBlock *IfNormal, | |||
3960 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
3961 | ArrayRef<OperandBundleDef> Bundles, | |||
3962 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
3963 | return Create(Func.getFunctionType(), Func.getCallee(), IfNormal, | |||
3964 | IfException, Args, Bundles, NameStr, InsertAtEnd); | |||
3965 | } | |||
3966 | ||||
3967 | /// Create a clone of \p II with a different set of operand bundles and | |||
3968 | /// insert it before \p InsertPt. | |||
3969 | /// | |||
3970 | /// The returned invoke instruction is identical to \p II in every way except | |||
3971 | /// that the operand bundles for the new instruction are set to the operand | |||
3972 | /// bundles in \p Bundles. | |||
3973 | static InvokeInst *Create(InvokeInst *II, ArrayRef<OperandBundleDef> Bundles, | |||
3974 | Instruction *InsertPt = nullptr); | |||
3975 | ||||
3976 | // get*Dest - Return the destination basic blocks... | |||
3977 | BasicBlock *getNormalDest() const { | |||
3978 | return cast<BasicBlock>(Op<NormalDestOpEndIdx>()); | |||
3979 | } | |||
3980 | BasicBlock *getUnwindDest() const { | |||
3981 | return cast<BasicBlock>(Op<UnwindDestOpEndIdx>()); | |||
3982 | } | |||
3983 | void setNormalDest(BasicBlock *B) { | |||
3984 | Op<NormalDestOpEndIdx>() = reinterpret_cast<Value *>(B); | |||
3985 | } | |||
3986 | void setUnwindDest(BasicBlock *B) { | |||
3987 | Op<UnwindDestOpEndIdx>() = reinterpret_cast<Value *>(B); | |||
3988 | } | |||
3989 | ||||
3990 | /// Get the landingpad instruction from the landing pad | |||
3991 | /// block (the unwind destination). | |||
3992 | LandingPadInst *getLandingPadInst() const; | |||
3993 | ||||
3994 | BasicBlock *getSuccessor(unsigned i) const { | |||
3995 | assert(i < 2 && "Successor # out of range for invoke!")(static_cast <bool> (i < 2 && "Successor # out of range for invoke!" ) ? void (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\"" , "llvm/include/llvm/IR/Instructions.h", 3995, __extension__ __PRETTY_FUNCTION__ )); | |||
3996 | return i == 0 ? getNormalDest() : getUnwindDest(); | |||
3997 | } | |||
3998 | ||||
3999 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { | |||
4000 | assert(i < 2 && "Successor # out of range for invoke!")(static_cast <bool> (i < 2 && "Successor # out of range for invoke!" ) ? void (0) : __assert_fail ("i < 2 && \"Successor # out of range for invoke!\"" , "llvm/include/llvm/IR/Instructions.h", 4000, __extension__ __PRETTY_FUNCTION__ )); | |||
4001 | if (i == 0) | |||
4002 | setNormalDest(NewSucc); | |||
4003 | else | |||
4004 | setUnwindDest(NewSucc); | |||
4005 | } | |||
4006 | ||||
4007 | unsigned getNumSuccessors() const { return 2; } | |||
4008 | ||||
4009 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4010 | static bool classof(const Instruction *I) { | |||
4011 | return (I->getOpcode() == Instruction::Invoke); | |||
4012 | } | |||
4013 | static bool classof(const Value *V) { | |||
4014 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4015 | } | |||
4016 | ||||
4017 | private: | |||
4018 | // Shadow Instruction::setInstructionSubclassData with a private forwarding | |||
4019 | // method so that subclasses cannot accidentally use it. | |||
4020 | template <typename Bitfield> | |||
4021 | void setSubclassData(typename Bitfield::Type Value) { | |||
4022 | Instruction::setSubclassData<Bitfield>(Value); | |||
4023 | } | |||
4024 | }; | |||
4025 | ||||
4026 | InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, | |||
4027 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
4028 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
4029 | const Twine &NameStr, Instruction *InsertBefore) | |||
4030 | : CallBase(Ty->getReturnType(), Instruction::Invoke, | |||
4031 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, | |||
4032 | InsertBefore) { | |||
4033 | init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); | |||
4034 | } | |||
4035 | ||||
4036 | InvokeInst::InvokeInst(FunctionType *Ty, Value *Func, BasicBlock *IfNormal, | |||
4037 | BasicBlock *IfException, ArrayRef<Value *> Args, | |||
4038 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
4039 | const Twine &NameStr, BasicBlock *InsertAtEnd) | |||
4040 | : CallBase(Ty->getReturnType(), Instruction::Invoke, | |||
4041 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, | |||
4042 | InsertAtEnd) { | |||
4043 | init(Ty, Func, IfNormal, IfException, Args, Bundles, NameStr); | |||
4044 | } | |||
4045 | ||||
4046 | //===----------------------------------------------------------------------===// | |||
4047 | // CallBrInst Class | |||
4048 | //===----------------------------------------------------------------------===// | |||
4049 | ||||
4050 | /// CallBr instruction, tracking function calls that may not return control but | |||
4051 | /// instead transfer it to a third location. The SubclassData field is used to | |||
4052 | /// hold the calling convention of the call. | |||
4053 | /// | |||
4054 | class CallBrInst : public CallBase { | |||
4055 | ||||
4056 | unsigned NumIndirectDests; | |||
4057 | ||||
4058 | CallBrInst(const CallBrInst &BI); | |||
4059 | ||||
4060 | /// Construct a CallBrInst given a range of arguments. | |||
4061 | /// | |||
4062 | /// Construct a CallBrInst from a range of arguments | |||
4063 | inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, | |||
4064 | ArrayRef<BasicBlock *> IndirectDests, | |||
4065 | ArrayRef<Value *> Args, | |||
4066 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
4067 | const Twine &NameStr, Instruction *InsertBefore); | |||
4068 | ||||
4069 | inline CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, | |||
4070 | ArrayRef<BasicBlock *> IndirectDests, | |||
4071 | ArrayRef<Value *> Args, | |||
4072 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
4073 | const Twine &NameStr, BasicBlock *InsertAtEnd); | |||
4074 | ||||
4075 | void init(FunctionType *FTy, Value *Func, BasicBlock *DefaultDest, | |||
4076 | ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, | |||
4077 | ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr); | |||
4078 | ||||
4079 | /// Compute the number of operands to allocate. | |||
4080 | static int ComputeNumOperands(int NumArgs, int NumIndirectDests, | |||
4081 | int NumBundleInputs = 0) { | |||
4082 | // We need one operand for the called function, plus our extra operands and | |||
4083 | // the input operand counts provided. | |||
4084 | return 2 + NumIndirectDests + NumArgs + NumBundleInputs; | |||
4085 | } | |||
4086 | ||||
4087 | protected: | |||
4088 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4089 | friend class Instruction; | |||
4090 | ||||
4091 | CallBrInst *cloneImpl() const; | |||
4092 | ||||
4093 | public: | |||
4094 | static CallBrInst *Create(FunctionType *Ty, Value *Func, | |||
4095 | BasicBlock *DefaultDest, | |||
4096 | ArrayRef<BasicBlock *> IndirectDests, | |||
4097 | ArrayRef<Value *> Args, const Twine &NameStr, | |||
4098 | Instruction *InsertBefore = nullptr) { | |||
4099 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); | |||
4100 | return new (NumOperands) | |||
4101 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt, | |||
4102 | NumOperands, NameStr, InsertBefore); | |||
4103 | } | |||
4104 | ||||
4105 | static CallBrInst * | |||
4106 | Create(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, | |||
4107 | ArrayRef<BasicBlock *> IndirectDests, ArrayRef<Value *> Args, | |||
4108 | ArrayRef<OperandBundleDef> Bundles = std::nullopt, | |||
4109 | const Twine &NameStr = "", Instruction *InsertBefore = nullptr) { | |||
4110 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), | |||
4111 | CountBundleInputs(Bundles)); | |||
4112 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); | |||
4113 | ||||
4114 | return new (NumOperands, DescriptorBytes) | |||
4115 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, | |||
4116 | NumOperands, NameStr, InsertBefore); | |||
4117 | } | |||
4118 | ||||
4119 | static CallBrInst *Create(FunctionType *Ty, Value *Func, | |||
4120 | BasicBlock *DefaultDest, | |||
4121 | ArrayRef<BasicBlock *> IndirectDests, | |||
4122 | ArrayRef<Value *> Args, const Twine &NameStr, | |||
4123 | BasicBlock *InsertAtEnd) { | |||
4124 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size()); | |||
4125 | return new (NumOperands) | |||
4126 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, std::nullopt, | |||
4127 | NumOperands, NameStr, InsertAtEnd); | |||
4128 | } | |||
4129 | ||||
4130 | static CallBrInst *Create(FunctionType *Ty, Value *Func, | |||
4131 | BasicBlock *DefaultDest, | |||
4132 | ArrayRef<BasicBlock *> IndirectDests, | |||
4133 | ArrayRef<Value *> Args, | |||
4134 | ArrayRef<OperandBundleDef> Bundles, | |||
4135 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
4136 | int NumOperands = ComputeNumOperands(Args.size(), IndirectDests.size(), | |||
4137 | CountBundleInputs(Bundles)); | |||
4138 | unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo); | |||
4139 | ||||
4140 | return new (NumOperands, DescriptorBytes) | |||
4141 | CallBrInst(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, | |||
4142 | NumOperands, NameStr, InsertAtEnd); | |||
4143 | } | |||
4144 | ||||
4145 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, | |||
4146 | ArrayRef<BasicBlock *> IndirectDests, | |||
4147 | ArrayRef<Value *> Args, const Twine &NameStr, | |||
4148 | Instruction *InsertBefore = nullptr) { | |||
4149 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, | |||
4150 | IndirectDests, Args, NameStr, InsertBefore); | |||
4151 | } | |||
4152 | ||||
4153 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, | |||
4154 | ArrayRef<BasicBlock *> IndirectDests, | |||
4155 | ArrayRef<Value *> Args, | |||
4156 | ArrayRef<OperandBundleDef> Bundles = std::nullopt, | |||
4157 | const Twine &NameStr = "", | |||
4158 | Instruction *InsertBefore = nullptr) { | |||
4159 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, | |||
4160 | IndirectDests, Args, Bundles, NameStr, InsertBefore); | |||
4161 | } | |||
4162 | ||||
4163 | static CallBrInst *Create(FunctionCallee Func, BasicBlock *DefaultDest, | |||
4164 | ArrayRef<BasicBlock *> IndirectDests, | |||
4165 | ArrayRef<Value *> Args, const Twine &NameStr, | |||
4166 | BasicBlock *InsertAtEnd) { | |||
4167 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, | |||
4168 | IndirectDests, Args, NameStr, InsertAtEnd); | |||
4169 | } | |||
4170 | ||||
4171 | static CallBrInst *Create(FunctionCallee Func, | |||
4172 | BasicBlock *DefaultDest, | |||
4173 | ArrayRef<BasicBlock *> IndirectDests, | |||
4174 | ArrayRef<Value *> Args, | |||
4175 | ArrayRef<OperandBundleDef> Bundles, | |||
4176 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
4177 | return Create(Func.getFunctionType(), Func.getCallee(), DefaultDest, | |||
4178 | IndirectDests, Args, Bundles, NameStr, InsertAtEnd); | |||
4179 | } | |||
4180 | ||||
4181 | /// Create a clone of \p CBI with a different set of operand bundles and | |||
4182 | /// insert it before \p InsertPt. | |||
4183 | /// | |||
4184 | /// The returned callbr instruction is identical to \p CBI in every way | |||
4185 | /// except that the operand bundles for the new instruction are set to the | |||
4186 | /// operand bundles in \p Bundles. | |||
4187 | static CallBrInst *Create(CallBrInst *CBI, | |||
4188 | ArrayRef<OperandBundleDef> Bundles, | |||
4189 | Instruction *InsertPt = nullptr); | |||
4190 | ||||
4191 | /// Return the number of callbr indirect dest labels. | |||
4192 | /// | |||
4193 | unsigned getNumIndirectDests() const { return NumIndirectDests; } | |||
4194 | ||||
4195 | /// getIndirectDestLabel - Return the i-th indirect dest label. | |||
4196 | /// | |||
4197 | Value *getIndirectDestLabel(unsigned i) const { | |||
4198 | assert(i < getNumIndirectDests() && "Out of bounds!")(static_cast <bool> (i < getNumIndirectDests() && "Out of bounds!") ? void (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\"" , "llvm/include/llvm/IR/Instructions.h", 4198, __extension__ __PRETTY_FUNCTION__ )); | |||
4199 | return getOperand(i + arg_size() + getNumTotalBundleOperands() + 1); | |||
4200 | } | |||
4201 | ||||
4202 | Value *getIndirectDestLabelUse(unsigned i) const { | |||
4203 | assert(i < getNumIndirectDests() && "Out of bounds!")(static_cast <bool> (i < getNumIndirectDests() && "Out of bounds!") ? void (0) : __assert_fail ("i < getNumIndirectDests() && \"Out of bounds!\"" , "llvm/include/llvm/IR/Instructions.h", 4203, __extension__ __PRETTY_FUNCTION__ )); | |||
4204 | return getOperandUse(i + arg_size() + getNumTotalBundleOperands() + 1); | |||
4205 | } | |||
4206 | ||||
4207 | // Return the destination basic blocks... | |||
4208 | BasicBlock *getDefaultDest() const { | |||
4209 | return cast<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() - 1)); | |||
4210 | } | |||
4211 | BasicBlock *getIndirectDest(unsigned i) const { | |||
4212 | return cast_or_null<BasicBlock>(*(&Op<-1>() - getNumIndirectDests() + i)); | |||
4213 | } | |||
4214 | SmallVector<BasicBlock *, 16> getIndirectDests() const { | |||
4215 | SmallVector<BasicBlock *, 16> IndirectDests; | |||
4216 | for (unsigned i = 0, e = getNumIndirectDests(); i < e; ++i) | |||
4217 | IndirectDests.push_back(getIndirectDest(i)); | |||
4218 | return IndirectDests; | |||
4219 | } | |||
4220 | void setDefaultDest(BasicBlock *B) { | |||
4221 | *(&Op<-1>() - getNumIndirectDests() - 1) = reinterpret_cast<Value *>(B); | |||
4222 | } | |||
4223 | void setIndirectDest(unsigned i, BasicBlock *B) { | |||
4224 | *(&Op<-1>() - getNumIndirectDests() + i) = reinterpret_cast<Value *>(B); | |||
4225 | } | |||
4226 | ||||
4227 | BasicBlock *getSuccessor(unsigned i) const { | |||
4228 | assert(i < getNumSuccessors() + 1 &&(static_cast <bool> (i < getNumSuccessors() + 1 && "Successor # out of range for callbr!") ? void (0) : __assert_fail ("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\"" , "llvm/include/llvm/IR/Instructions.h", 4229, __extension__ __PRETTY_FUNCTION__ )) | |||
4229 | "Successor # out of range for callbr!")(static_cast <bool> (i < getNumSuccessors() + 1 && "Successor # out of range for callbr!") ? void (0) : __assert_fail ("i < getNumSuccessors() + 1 && \"Successor # out of range for callbr!\"" , "llvm/include/llvm/IR/Instructions.h", 4229, __extension__ __PRETTY_FUNCTION__ )); | |||
4230 | return i == 0 ? getDefaultDest() : getIndirectDest(i - 1); | |||
4231 | } | |||
4232 | ||||
4233 | void setSuccessor(unsigned i, BasicBlock *NewSucc) { | |||
4234 | assert(i < getNumIndirectDests() + 1 &&(static_cast <bool> (i < getNumIndirectDests() + 1 && "Successor # out of range for callbr!") ? void (0) : __assert_fail ("i < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\"" , "llvm/include/llvm/IR/Instructions.h", 4235, __extension__ __PRETTY_FUNCTION__ )) | |||
4235 | "Successor # out of range for callbr!")(static_cast <bool> (i < getNumIndirectDests() + 1 && "Successor # out of range for callbr!") ? void (0) : __assert_fail ("i < getNumIndirectDests() + 1 && \"Successor # out of range for callbr!\"" , "llvm/include/llvm/IR/Instructions.h", 4235, __extension__ __PRETTY_FUNCTION__ )); | |||
4236 | return i == 0 ? setDefaultDest(NewSucc) : setIndirectDest(i - 1, NewSucc); | |||
4237 | } | |||
4238 | ||||
4239 | unsigned getNumSuccessors() const { return getNumIndirectDests() + 1; } | |||
4240 | ||||
4241 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4242 | static bool classof(const Instruction *I) { | |||
4243 | return (I->getOpcode() == Instruction::CallBr); | |||
4244 | } | |||
4245 | static bool classof(const Value *V) { | |||
4246 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4247 | } | |||
4248 | ||||
4249 | private: | |||
4250 | // Shadow Instruction::setInstructionSubclassData with a private forwarding | |||
4251 | // method so that subclasses cannot accidentally use it. | |||
4252 | template <typename Bitfield> | |||
4253 | void setSubclassData(typename Bitfield::Type Value) { | |||
4254 | Instruction::setSubclassData<Bitfield>(Value); | |||
4255 | } | |||
4256 | }; | |||
4257 | ||||
4258 | CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, | |||
4259 | ArrayRef<BasicBlock *> IndirectDests, | |||
4260 | ArrayRef<Value *> Args, | |||
4261 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
4262 | const Twine &NameStr, Instruction *InsertBefore) | |||
4263 | : CallBase(Ty->getReturnType(), Instruction::CallBr, | |||
4264 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, | |||
4265 | InsertBefore) { | |||
4266 | init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); | |||
4267 | } | |||
4268 | ||||
4269 | CallBrInst::CallBrInst(FunctionType *Ty, Value *Func, BasicBlock *DefaultDest, | |||
4270 | ArrayRef<BasicBlock *> IndirectDests, | |||
4271 | ArrayRef<Value *> Args, | |||
4272 | ArrayRef<OperandBundleDef> Bundles, int NumOperands, | |||
4273 | const Twine &NameStr, BasicBlock *InsertAtEnd) | |||
4274 | : CallBase(Ty->getReturnType(), Instruction::CallBr, | |||
4275 | OperandTraits<CallBase>::op_end(this) - NumOperands, NumOperands, | |||
4276 | InsertAtEnd) { | |||
4277 | init(Ty, Func, DefaultDest, IndirectDests, Args, Bundles, NameStr); | |||
4278 | } | |||
4279 | ||||
4280 | //===----------------------------------------------------------------------===// | |||
4281 | // ResumeInst Class | |||
4282 | //===----------------------------------------------------------------------===// | |||
4283 | ||||
4284 | //===--------------------------------------------------------------------------- | |||
4285 | /// Resume the propagation of an exception. | |||
4286 | /// | |||
4287 | class ResumeInst : public Instruction { | |||
4288 | ResumeInst(const ResumeInst &RI); | |||
4289 | ||||
4290 | explicit ResumeInst(Value *Exn, Instruction *InsertBefore=nullptr); | |||
4291 | ResumeInst(Value *Exn, BasicBlock *InsertAtEnd); | |||
4292 | ||||
4293 | protected: | |||
4294 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4295 | friend class Instruction; | |||
4296 | ||||
4297 | ResumeInst *cloneImpl() const; | |||
4298 | ||||
4299 | public: | |||
4300 | static ResumeInst *Create(Value *Exn, Instruction *InsertBefore = nullptr) { | |||
4301 | return new(1) ResumeInst(Exn, InsertBefore); | |||
4302 | } | |||
4303 | ||||
4304 | static ResumeInst *Create(Value *Exn, BasicBlock *InsertAtEnd) { | |||
4305 | return new(1) ResumeInst(Exn, InsertAtEnd); | |||
4306 | } | |||
4307 | ||||
4308 | /// Provide fast operand accessors | |||
4309 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
4310 | ||||
4311 | /// Convenience accessor. | |||
4312 | Value *getValue() const { return Op<0>(); } | |||
4313 | ||||
4314 | unsigned getNumSuccessors() const { return 0; } | |||
4315 | ||||
4316 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4317 | static bool classof(const Instruction *I) { | |||
4318 | return I->getOpcode() == Instruction::Resume; | |||
4319 | } | |||
4320 | static bool classof(const Value *V) { | |||
4321 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4322 | } | |||
4323 | ||||
4324 | private: | |||
4325 | BasicBlock *getSuccessor(unsigned idx) const { | |||
4326 | llvm_unreachable("ResumeInst has no successors!")::llvm::llvm_unreachable_internal("ResumeInst has no successors!" , "llvm/include/llvm/IR/Instructions.h", 4326); | |||
4327 | } | |||
4328 | ||||
4329 | void setSuccessor(unsigned idx, BasicBlock *NewSucc) { | |||
4330 | llvm_unreachable("ResumeInst has no successors!")::llvm::llvm_unreachable_internal("ResumeInst has no successors!" , "llvm/include/llvm/IR/Instructions.h", 4330); | |||
4331 | } | |||
4332 | }; | |||
4333 | ||||
4334 | template <> | |||
4335 | struct OperandTraits<ResumeInst> : | |||
4336 | public FixedNumOperandTraits<ResumeInst, 1> { | |||
4337 | }; | |||
4338 | ||||
4339 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ResumeInst, Value)ResumeInst::op_iterator ResumeInst::op_begin() { return OperandTraits <ResumeInst>::op_begin(this); } ResumeInst::const_op_iterator ResumeInst::op_begin() const { return OperandTraits<ResumeInst >::op_begin(const_cast<ResumeInst*>(this)); } ResumeInst ::op_iterator ResumeInst::op_end() { return OperandTraits< ResumeInst>::op_end(this); } ResumeInst::const_op_iterator ResumeInst::op_end() const { return OperandTraits<ResumeInst >::op_end(const_cast<ResumeInst*>(this)); } Value *ResumeInst ::getOperand(unsigned i_nocapture) const { (static_cast <bool > (i_nocapture < OperandTraits<ResumeInst>::operands (this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 4339, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<ResumeInst >::op_begin(const_cast<ResumeInst*>(this))[i_nocapture ].get()); } void ResumeInst::setOperand(unsigned i_nocapture, Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<ResumeInst>::operands(this) && "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<ResumeInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 4339, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<ResumeInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned ResumeInst::getNumOperands() const { return OperandTraits<ResumeInst>::operands(this); } template <int Idx_nocapture> Use &ResumeInst::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &ResumeInst::Op() const { return this->OpFrom<Idx_nocapture>(this); } | |||
4340 | ||||
4341 | //===----------------------------------------------------------------------===// | |||
4342 | // CatchSwitchInst Class | |||
4343 | //===----------------------------------------------------------------------===// | |||
4344 | class CatchSwitchInst : public Instruction { | |||
4345 | using UnwindDestField = BoolBitfieldElementT<0>; | |||
4346 | ||||
4347 | /// The number of operands actually allocated. NumOperands is | |||
4348 | /// the number actually in use. | |||
4349 | unsigned ReservedSpace; | |||
4350 | ||||
4351 | // Operand[0] = Outer scope | |||
4352 | // Operand[1] = Unwind block destination | |||
4353 | // Operand[n] = BasicBlock to go to on match | |||
4354 | CatchSwitchInst(const CatchSwitchInst &CSI); | |||
4355 | ||||
4356 | /// Create a new switch instruction, specifying a | |||
4357 | /// default destination. The number of additional handlers can be specified | |||
4358 | /// here to make memory allocation more efficient. | |||
4359 | /// This constructor can also autoinsert before another instruction. | |||
4360 | CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, | |||
4361 | unsigned NumHandlers, const Twine &NameStr, | |||
4362 | Instruction *InsertBefore); | |||
4363 | ||||
4364 | /// Create a new switch instruction, specifying a | |||
4365 | /// default destination. The number of additional handlers can be specified | |||
4366 | /// here to make memory allocation more efficient. | |||
4367 | /// This constructor also autoinserts at the end of the specified BasicBlock. | |||
4368 | CatchSwitchInst(Value *ParentPad, BasicBlock *UnwindDest, | |||
4369 | unsigned NumHandlers, const Twine &NameStr, | |||
4370 | BasicBlock *InsertAtEnd); | |||
4371 | ||||
4372 | // allocate space for exactly zero operands | |||
4373 | void *operator new(size_t S) { return User::operator new(S); } | |||
4374 | ||||
4375 | void init(Value *ParentPad, BasicBlock *UnwindDest, unsigned NumReserved); | |||
4376 | void growOperands(unsigned Size); | |||
4377 | ||||
4378 | protected: | |||
4379 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4380 | friend class Instruction; | |||
4381 | ||||
4382 | CatchSwitchInst *cloneImpl() const; | |||
4383 | ||||
4384 | public: | |||
4385 | void operator delete(void *Ptr) { return User::operator delete(Ptr); } | |||
4386 | ||||
4387 | static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, | |||
4388 | unsigned NumHandlers, | |||
4389 | const Twine &NameStr = "", | |||
4390 | Instruction *InsertBefore = nullptr) { | |||
4391 | return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, | |||
4392 | InsertBefore); | |||
4393 | } | |||
4394 | ||||
4395 | static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest, | |||
4396 | unsigned NumHandlers, const Twine &NameStr, | |||
4397 | BasicBlock *InsertAtEnd) { | |||
4398 | return new CatchSwitchInst(ParentPad, UnwindDest, NumHandlers, NameStr, | |||
4399 | InsertAtEnd); | |||
4400 | } | |||
4401 | ||||
4402 | /// Provide fast operand accessors | |||
4403 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
4404 | ||||
4405 | // Accessor Methods for CatchSwitch stmt | |||
4406 | Value *getParentPad() const { return getOperand(0); } | |||
4407 | void setParentPad(Value *ParentPad) { setOperand(0, ParentPad); } | |||
4408 | ||||
4409 | // Accessor Methods for CatchSwitch stmt | |||
4410 | bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } | |||
4411 | bool unwindsToCaller() const { return !hasUnwindDest(); } | |||
4412 | BasicBlock *getUnwindDest() const { | |||
4413 | if (hasUnwindDest()) | |||
4414 | return cast<BasicBlock>(getOperand(1)); | |||
4415 | return nullptr; | |||
4416 | } | |||
4417 | void setUnwindDest(BasicBlock *UnwindDest) { | |||
4418 | assert(UnwindDest)(static_cast <bool> (UnwindDest) ? void (0) : __assert_fail ("UnwindDest", "llvm/include/llvm/IR/Instructions.h", 4418, __extension__ __PRETTY_FUNCTION__)); | |||
4419 | assert(hasUnwindDest())(static_cast <bool> (hasUnwindDest()) ? void (0) : __assert_fail ("hasUnwindDest()", "llvm/include/llvm/IR/Instructions.h", 4419 , __extension__ __PRETTY_FUNCTION__)); | |||
4420 | setOperand(1, UnwindDest); | |||
4421 | } | |||
4422 | ||||
4423 | /// return the number of 'handlers' in this catchswitch | |||
4424 | /// instruction, except the default handler | |||
4425 | unsigned getNumHandlers() const { | |||
4426 | if (hasUnwindDest()) | |||
4427 | return getNumOperands() - 2; | |||
4428 | return getNumOperands() - 1; | |||
4429 | } | |||
4430 | ||||
4431 | private: | |||
4432 | static BasicBlock *handler_helper(Value *V) { return cast<BasicBlock>(V); } | |||
4433 | static const BasicBlock *handler_helper(const Value *V) { | |||
4434 | return cast<BasicBlock>(V); | |||
4435 | } | |||
4436 | ||||
4437 | public: | |||
4438 | using DerefFnTy = BasicBlock *(*)(Value *); | |||
4439 | using handler_iterator = mapped_iterator<op_iterator, DerefFnTy>; | |||
4440 | using handler_range = iterator_range<handler_iterator>; | |||
4441 | using ConstDerefFnTy = const BasicBlock *(*)(const Value *); | |||
4442 | using const_handler_iterator = | |||
4443 | mapped_iterator<const_op_iterator, ConstDerefFnTy>; | |||
4444 | using const_handler_range = iterator_range<const_handler_iterator>; | |||
4445 | ||||
4446 | /// Returns an iterator that points to the first handler in CatchSwitchInst. | |||
4447 | handler_iterator handler_begin() { | |||
4448 | op_iterator It = op_begin() + 1; | |||
4449 | if (hasUnwindDest()) | |||
4450 | ++It; | |||
4451 | return handler_iterator(It, DerefFnTy(handler_helper)); | |||
4452 | } | |||
4453 | ||||
4454 | /// Returns an iterator that points to the first handler in the | |||
4455 | /// CatchSwitchInst. | |||
4456 | const_handler_iterator handler_begin() const { | |||
4457 | const_op_iterator It = op_begin() + 1; | |||
4458 | if (hasUnwindDest()) | |||
4459 | ++It; | |||
4460 | return const_handler_iterator(It, ConstDerefFnTy(handler_helper)); | |||
4461 | } | |||
4462 | ||||
4463 | /// Returns a read-only iterator that points one past the last | |||
4464 | /// handler in the CatchSwitchInst. | |||
4465 | handler_iterator handler_end() { | |||
4466 | return handler_iterator(op_end(), DerefFnTy(handler_helper)); | |||
4467 | } | |||
4468 | ||||
4469 | /// Returns an iterator that points one past the last handler in the | |||
4470 | /// CatchSwitchInst. | |||
4471 | const_handler_iterator handler_end() const { | |||
4472 | return const_handler_iterator(op_end(), ConstDerefFnTy(handler_helper)); | |||
4473 | } | |||
4474 | ||||
4475 | /// iteration adapter for range-for loops. | |||
4476 | handler_range handlers() { | |||
4477 | return make_range(handler_begin(), handler_end()); | |||
4478 | } | |||
4479 | ||||
4480 | /// iteration adapter for range-for loops. | |||
4481 | const_handler_range handlers() const { | |||
4482 | return make_range(handler_begin(), handler_end()); | |||
4483 | } | |||
4484 | ||||
4485 | /// Add an entry to the switch instruction... | |||
4486 | /// Note: | |||
4487 | /// This action invalidates handler_end(). Old handler_end() iterator will | |||
4488 | /// point to the added handler. | |||
4489 | void addHandler(BasicBlock *Dest); | |||
4490 | ||||
4491 | void removeHandler(handler_iterator HI); | |||
4492 | ||||
4493 | unsigned getNumSuccessors() const { return getNumOperands() - 1; } | |||
4494 | BasicBlock *getSuccessor(unsigned Idx) const { | |||
4495 | assert(Idx < getNumSuccessors() &&(static_cast <bool> (Idx < getNumSuccessors() && "Successor # out of range for catchswitch!") ? void (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\"" , "llvm/include/llvm/IR/Instructions.h", 4496, __extension__ __PRETTY_FUNCTION__ )) | |||
4496 | "Successor # out of range for catchswitch!")(static_cast <bool> (Idx < getNumSuccessors() && "Successor # out of range for catchswitch!") ? void (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\"" , "llvm/include/llvm/IR/Instructions.h", 4496, __extension__ __PRETTY_FUNCTION__ )); | |||
4497 | return cast<BasicBlock>(getOperand(Idx + 1)); | |||
4498 | } | |||
4499 | void setSuccessor(unsigned Idx, BasicBlock *NewSucc) { | |||
4500 | assert(Idx < getNumSuccessors() &&(static_cast <bool> (Idx < getNumSuccessors() && "Successor # out of range for catchswitch!") ? void (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\"" , "llvm/include/llvm/IR/Instructions.h", 4501, __extension__ __PRETTY_FUNCTION__ )) | |||
4501 | "Successor # out of range for catchswitch!")(static_cast <bool> (Idx < getNumSuccessors() && "Successor # out of range for catchswitch!") ? void (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchswitch!\"" , "llvm/include/llvm/IR/Instructions.h", 4501, __extension__ __PRETTY_FUNCTION__ )); | |||
4502 | setOperand(Idx + 1, NewSucc); | |||
4503 | } | |||
4504 | ||||
4505 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4506 | static bool classof(const Instruction *I) { | |||
4507 | return I->getOpcode() == Instruction::CatchSwitch; | |||
4508 | } | |||
4509 | static bool classof(const Value *V) { | |||
4510 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4511 | } | |||
4512 | }; | |||
4513 | ||||
4514 | template <> | |||
4515 | struct OperandTraits<CatchSwitchInst> : public HungoffOperandTraits<2> {}; | |||
4516 | ||||
4517 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchSwitchInst, Value)CatchSwitchInst::op_iterator CatchSwitchInst::op_begin() { return OperandTraits<CatchSwitchInst>::op_begin(this); } CatchSwitchInst ::const_op_iterator CatchSwitchInst::op_begin() const { return OperandTraits<CatchSwitchInst>::op_begin(const_cast< CatchSwitchInst*>(this)); } CatchSwitchInst::op_iterator CatchSwitchInst ::op_end() { return OperandTraits<CatchSwitchInst>::op_end (this); } CatchSwitchInst::const_op_iterator CatchSwitchInst:: op_end() const { return OperandTraits<CatchSwitchInst>:: op_end(const_cast<CatchSwitchInst*>(this)); } Value *CatchSwitchInst ::getOperand(unsigned i_nocapture) const { (static_cast <bool > (i_nocapture < OperandTraits<CatchSwitchInst>:: operands(this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 4517, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<CatchSwitchInst >::op_begin(const_cast<CatchSwitchInst*>(this))[i_nocapture ].get()); } void CatchSwitchInst::setOperand(unsigned i_nocapture , Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CatchSwitchInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 4517, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<CatchSwitchInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned CatchSwitchInst::getNumOperands () const { return OperandTraits<CatchSwitchInst>::operands (this); } template <int Idx_nocapture> Use &CatchSwitchInst ::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &CatchSwitchInst ::Op() const { return this->OpFrom<Idx_nocapture>(this ); } | |||
4518 | ||||
4519 | //===----------------------------------------------------------------------===// | |||
4520 | // CleanupPadInst Class | |||
4521 | //===----------------------------------------------------------------------===// | |||
4522 | class CleanupPadInst : public FuncletPadInst { | |||
4523 | private: | |||
4524 | explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, | |||
4525 | unsigned Values, const Twine &NameStr, | |||
4526 | Instruction *InsertBefore) | |||
4527 | : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, | |||
4528 | NameStr, InsertBefore) {} | |||
4529 | explicit CleanupPadInst(Value *ParentPad, ArrayRef<Value *> Args, | |||
4530 | unsigned Values, const Twine &NameStr, | |||
4531 | BasicBlock *InsertAtEnd) | |||
4532 | : FuncletPadInst(Instruction::CleanupPad, ParentPad, Args, Values, | |||
4533 | NameStr, InsertAtEnd) {} | |||
4534 | ||||
4535 | public: | |||
4536 | static CleanupPadInst *Create(Value *ParentPad, | |||
4537 | ArrayRef<Value *> Args = std::nullopt, | |||
4538 | const Twine &NameStr = "", | |||
4539 | Instruction *InsertBefore = nullptr) { | |||
4540 | unsigned Values = 1 + Args.size(); | |||
4541 | return new (Values) | |||
4542 | CleanupPadInst(ParentPad, Args, Values, NameStr, InsertBefore); | |||
4543 | } | |||
4544 | ||||
4545 | static CleanupPadInst *Create(Value *ParentPad, ArrayRef<Value *> Args, | |||
4546 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
4547 | unsigned Values = 1 + Args.size(); | |||
4548 | return new (Values) | |||
4549 | CleanupPadInst(ParentPad, Args, Values, NameStr, InsertAtEnd); | |||
4550 | } | |||
4551 | ||||
4552 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4553 | static bool classof(const Instruction *I) { | |||
4554 | return I->getOpcode() == Instruction::CleanupPad; | |||
4555 | } | |||
4556 | static bool classof(const Value *V) { | |||
4557 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4558 | } | |||
4559 | }; | |||
4560 | ||||
4561 | //===----------------------------------------------------------------------===// | |||
4562 | // CatchPadInst Class | |||
4563 | //===----------------------------------------------------------------------===// | |||
4564 | class CatchPadInst : public FuncletPadInst { | |||
4565 | private: | |||
4566 | explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, | |||
4567 | unsigned Values, const Twine &NameStr, | |||
4568 | Instruction *InsertBefore) | |||
4569 | : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, | |||
4570 | NameStr, InsertBefore) {} | |||
4571 | explicit CatchPadInst(Value *CatchSwitch, ArrayRef<Value *> Args, | |||
4572 | unsigned Values, const Twine &NameStr, | |||
4573 | BasicBlock *InsertAtEnd) | |||
4574 | : FuncletPadInst(Instruction::CatchPad, CatchSwitch, Args, Values, | |||
4575 | NameStr, InsertAtEnd) {} | |||
4576 | ||||
4577 | public: | |||
4578 | static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, | |||
4579 | const Twine &NameStr = "", | |||
4580 | Instruction *InsertBefore = nullptr) { | |||
4581 | unsigned Values = 1 + Args.size(); | |||
4582 | return new (Values) | |||
4583 | CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertBefore); | |||
4584 | } | |||
4585 | ||||
4586 | static CatchPadInst *Create(Value *CatchSwitch, ArrayRef<Value *> Args, | |||
4587 | const Twine &NameStr, BasicBlock *InsertAtEnd) { | |||
4588 | unsigned Values = 1 + Args.size(); | |||
4589 | return new (Values) | |||
4590 | CatchPadInst(CatchSwitch, Args, Values, NameStr, InsertAtEnd); | |||
4591 | } | |||
4592 | ||||
4593 | /// Convenience accessors | |||
4594 | CatchSwitchInst *getCatchSwitch() const { | |||
4595 | return cast<CatchSwitchInst>(Op<-1>()); | |||
4596 | } | |||
4597 | void setCatchSwitch(Value *CatchSwitch) { | |||
4598 | assert(CatchSwitch)(static_cast <bool> (CatchSwitch) ? void (0) : __assert_fail ("CatchSwitch", "llvm/include/llvm/IR/Instructions.h", 4598, __extension__ __PRETTY_FUNCTION__)); | |||
4599 | Op<-1>() = CatchSwitch; | |||
4600 | } | |||
4601 | ||||
4602 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4603 | static bool classof(const Instruction *I) { | |||
4604 | return I->getOpcode() == Instruction::CatchPad; | |||
4605 | } | |||
4606 | static bool classof(const Value *V) { | |||
4607 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4608 | } | |||
4609 | }; | |||
4610 | ||||
4611 | //===----------------------------------------------------------------------===// | |||
4612 | // CatchReturnInst Class | |||
4613 | //===----------------------------------------------------------------------===// | |||
4614 | ||||
4615 | class CatchReturnInst : public Instruction { | |||
4616 | CatchReturnInst(const CatchReturnInst &RI); | |||
4617 | CatchReturnInst(Value *CatchPad, BasicBlock *BB, Instruction *InsertBefore); | |||
4618 | CatchReturnInst(Value *CatchPad, BasicBlock *BB, BasicBlock *InsertAtEnd); | |||
4619 | ||||
4620 | void init(Value *CatchPad, BasicBlock *BB); | |||
4621 | ||||
4622 | protected: | |||
4623 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4624 | friend class Instruction; | |||
4625 | ||||
4626 | CatchReturnInst *cloneImpl() const; | |||
4627 | ||||
4628 | public: | |||
4629 | static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, | |||
4630 | Instruction *InsertBefore = nullptr) { | |||
4631 | assert(CatchPad)(static_cast <bool> (CatchPad) ? void (0) : __assert_fail ("CatchPad", "llvm/include/llvm/IR/Instructions.h", 4631, __extension__ __PRETTY_FUNCTION__)); | |||
4632 | assert(BB)(static_cast <bool> (BB) ? void (0) : __assert_fail ("BB" , "llvm/include/llvm/IR/Instructions.h", 4632, __extension__ __PRETTY_FUNCTION__ )); | |||
4633 | return new (2) CatchReturnInst(CatchPad, BB, InsertBefore); | |||
4634 | } | |||
4635 | ||||
4636 | static CatchReturnInst *Create(Value *CatchPad, BasicBlock *BB, | |||
4637 | BasicBlock *InsertAtEnd) { | |||
4638 | assert(CatchPad)(static_cast <bool> (CatchPad) ? void (0) : __assert_fail ("CatchPad", "llvm/include/llvm/IR/Instructions.h", 4638, __extension__ __PRETTY_FUNCTION__)); | |||
4639 | assert(BB)(static_cast <bool> (BB) ? void (0) : __assert_fail ("BB" , "llvm/include/llvm/IR/Instructions.h", 4639, __extension__ __PRETTY_FUNCTION__ )); | |||
4640 | return new (2) CatchReturnInst(CatchPad, BB, InsertAtEnd); | |||
4641 | } | |||
4642 | ||||
4643 | /// Provide fast operand accessors | |||
4644 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
4645 | ||||
4646 | /// Convenience accessors. | |||
4647 | CatchPadInst *getCatchPad() const { return cast<CatchPadInst>(Op<0>()); } | |||
4648 | void setCatchPad(CatchPadInst *CatchPad) { | |||
4649 | assert(CatchPad)(static_cast <bool> (CatchPad) ? void (0) : __assert_fail ("CatchPad", "llvm/include/llvm/IR/Instructions.h", 4649, __extension__ __PRETTY_FUNCTION__)); | |||
4650 | Op<0>() = CatchPad; | |||
4651 | } | |||
4652 | ||||
4653 | BasicBlock *getSuccessor() const { return cast<BasicBlock>(Op<1>()); } | |||
4654 | void setSuccessor(BasicBlock *NewSucc) { | |||
4655 | assert(NewSucc)(static_cast <bool> (NewSucc) ? void (0) : __assert_fail ("NewSucc", "llvm/include/llvm/IR/Instructions.h", 4655, __extension__ __PRETTY_FUNCTION__)); | |||
4656 | Op<1>() = NewSucc; | |||
4657 | } | |||
4658 | unsigned getNumSuccessors() const { return 1; } | |||
4659 | ||||
4660 | /// Get the parentPad of this catchret's catchpad's catchswitch. | |||
4661 | /// The successor block is implicitly a member of this funclet. | |||
4662 | Value *getCatchSwitchParentPad() const { | |||
4663 | return getCatchPad()->getCatchSwitch()->getParentPad(); | |||
4664 | } | |||
4665 | ||||
4666 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4667 | static bool classof(const Instruction *I) { | |||
4668 | return (I->getOpcode() == Instruction::CatchRet); | |||
4669 | } | |||
4670 | static bool classof(const Value *V) { | |||
4671 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4672 | } | |||
4673 | ||||
4674 | private: | |||
4675 | BasicBlock *getSuccessor(unsigned Idx) const { | |||
4676 | assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")(static_cast <bool> (Idx < getNumSuccessors() && "Successor # out of range for catchret!") ? void (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchret!\"" , "llvm/include/llvm/IR/Instructions.h", 4676, __extension__ __PRETTY_FUNCTION__ )); | |||
4677 | return getSuccessor(); | |||
4678 | } | |||
4679 | ||||
4680 | void setSuccessor(unsigned Idx, BasicBlock *B) { | |||
4681 | assert(Idx < getNumSuccessors() && "Successor # out of range for catchret!")(static_cast <bool> (Idx < getNumSuccessors() && "Successor # out of range for catchret!") ? void (0) : __assert_fail ("Idx < getNumSuccessors() && \"Successor # out of range for catchret!\"" , "llvm/include/llvm/IR/Instructions.h", 4681, __extension__ __PRETTY_FUNCTION__ )); | |||
4682 | setSuccessor(B); | |||
4683 | } | |||
4684 | }; | |||
4685 | ||||
// A catchret always has exactly two operands (catchpad + successor block),
// so its operand storage uses the fixed-arity trait.
template <>
struct OperandTraits<CatchReturnInst>
    : public FixedNumOperandTraits<CatchReturnInst, 2> {};
4689 | ||||
4690 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CatchReturnInst, Value)CatchReturnInst::op_iterator CatchReturnInst::op_begin() { return OperandTraits<CatchReturnInst>::op_begin(this); } CatchReturnInst ::const_op_iterator CatchReturnInst::op_begin() const { return OperandTraits<CatchReturnInst>::op_begin(const_cast< CatchReturnInst*>(this)); } CatchReturnInst::op_iterator CatchReturnInst ::op_end() { return OperandTraits<CatchReturnInst>::op_end (this); } CatchReturnInst::const_op_iterator CatchReturnInst:: op_end() const { return OperandTraits<CatchReturnInst>:: op_end(const_cast<CatchReturnInst*>(this)); } Value *CatchReturnInst ::getOperand(unsigned i_nocapture) const { (static_cast <bool > (i_nocapture < OperandTraits<CatchReturnInst>:: operands(this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 4690, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<CatchReturnInst >::op_begin(const_cast<CatchReturnInst*>(this))[i_nocapture ].get()); } void CatchReturnInst::setOperand(unsigned i_nocapture , Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && "setOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CatchReturnInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 4690, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<CatchReturnInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned CatchReturnInst::getNumOperands () const { return OperandTraits<CatchReturnInst>::operands (this); } template <int Idx_nocapture> Use &CatchReturnInst ::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &CatchReturnInst ::Op() const { return this->OpFrom<Idx_nocapture>(this ); } | |||
4691 | ||||
4692 | //===----------------------------------------------------------------------===// | |||
4693 | // CleanupReturnInst Class | |||
4694 | //===----------------------------------------------------------------------===// | |||
4695 | ||||
4696 | class CleanupReturnInst : public Instruction { | |||
4697 | using UnwindDestField = BoolBitfieldElementT<0>; | |||
4698 | ||||
4699 | private: | |||
4700 | CleanupReturnInst(const CleanupReturnInst &RI); | |||
4701 | CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, | |||
4702 | Instruction *InsertBefore = nullptr); | |||
4703 | CleanupReturnInst(Value *CleanupPad, BasicBlock *UnwindBB, unsigned Values, | |||
4704 | BasicBlock *InsertAtEnd); | |||
4705 | ||||
4706 | void init(Value *CleanupPad, BasicBlock *UnwindBB); | |||
4707 | ||||
4708 | protected: | |||
4709 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4710 | friend class Instruction; | |||
4711 | ||||
4712 | CleanupReturnInst *cloneImpl() const; | |||
4713 | ||||
4714 | public: | |||
4715 | static CleanupReturnInst *Create(Value *CleanupPad, | |||
4716 | BasicBlock *UnwindBB = nullptr, | |||
4717 | Instruction *InsertBefore = nullptr) { | |||
4718 | assert(CleanupPad)(static_cast <bool> (CleanupPad) ? void (0) : __assert_fail ("CleanupPad", "llvm/include/llvm/IR/Instructions.h", 4718, __extension__ __PRETTY_FUNCTION__)); | |||
4719 | unsigned Values = 1; | |||
4720 | if (UnwindBB) | |||
4721 | ++Values; | |||
4722 | return new (Values) | |||
4723 | CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertBefore); | |||
4724 | } | |||
4725 | ||||
4726 | static CleanupReturnInst *Create(Value *CleanupPad, BasicBlock *UnwindBB, | |||
4727 | BasicBlock *InsertAtEnd) { | |||
4728 | assert(CleanupPad)(static_cast <bool> (CleanupPad) ? void (0) : __assert_fail ("CleanupPad", "llvm/include/llvm/IR/Instructions.h", 4728, __extension__ __PRETTY_FUNCTION__)); | |||
4729 | unsigned Values = 1; | |||
4730 | if (UnwindBB) | |||
4731 | ++Values; | |||
4732 | return new (Values) | |||
4733 | CleanupReturnInst(CleanupPad, UnwindBB, Values, InsertAtEnd); | |||
4734 | } | |||
4735 | ||||
4736 | /// Provide fast operand accessors | |||
4737 | DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value)public: inline Value *getOperand(unsigned) const; inline void setOperand(unsigned, Value*); inline op_iterator op_begin(); inline const_op_iterator op_begin() const; inline op_iterator op_end(); inline const_op_iterator op_end() const; protected : template <int> inline Use &Op(); template <int > inline const Use &Op() const; public: inline unsigned getNumOperands() const; | |||
4738 | ||||
4739 | bool hasUnwindDest() const { return getSubclassData<UnwindDestField>(); } | |||
4740 | bool unwindsToCaller() const { return !hasUnwindDest(); } | |||
4741 | ||||
4742 | /// Convenience accessor. | |||
4743 | CleanupPadInst *getCleanupPad() const { | |||
4744 | return cast<CleanupPadInst>(Op<0>()); | |||
4745 | } | |||
4746 | void setCleanupPad(CleanupPadInst *CleanupPad) { | |||
4747 | assert(CleanupPad)(static_cast <bool> (CleanupPad) ? void (0) : __assert_fail ("CleanupPad", "llvm/include/llvm/IR/Instructions.h", 4747, __extension__ __PRETTY_FUNCTION__)); | |||
4748 | Op<0>() = CleanupPad; | |||
4749 | } | |||
4750 | ||||
4751 | unsigned getNumSuccessors() const { return hasUnwindDest() ? 1 : 0; } | |||
4752 | ||||
4753 | BasicBlock *getUnwindDest() const { | |||
4754 | return hasUnwindDest() ? cast<BasicBlock>(Op<1>()) : nullptr; | |||
4755 | } | |||
4756 | void setUnwindDest(BasicBlock *NewDest) { | |||
4757 | assert(NewDest)(static_cast <bool> (NewDest) ? void (0) : __assert_fail ("NewDest", "llvm/include/llvm/IR/Instructions.h", 4757, __extension__ __PRETTY_FUNCTION__)); | |||
4758 | assert(hasUnwindDest())(static_cast <bool> (hasUnwindDest()) ? void (0) : __assert_fail ("hasUnwindDest()", "llvm/include/llvm/IR/Instructions.h", 4758 , __extension__ __PRETTY_FUNCTION__)); | |||
4759 | Op<1>() = NewDest; | |||
4760 | } | |||
4761 | ||||
4762 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4763 | static bool classof(const Instruction *I) { | |||
4764 | return (I->getOpcode() == Instruction::CleanupRet); | |||
4765 | } | |||
4766 | static bool classof(const Value *V) { | |||
4767 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4768 | } | |||
4769 | ||||
4770 | private: | |||
4771 | BasicBlock *getSuccessor(unsigned Idx) const { | |||
4772 | assert(Idx == 0)(static_cast <bool> (Idx == 0) ? void (0) : __assert_fail ("Idx == 0", "llvm/include/llvm/IR/Instructions.h", 4772, __extension__ __PRETTY_FUNCTION__)); | |||
4773 | return getUnwindDest(); | |||
4774 | } | |||
4775 | ||||
4776 | void setSuccessor(unsigned Idx, BasicBlock *B) { | |||
4777 | assert(Idx == 0)(static_cast <bool> (Idx == 0) ? void (0) : __assert_fail ("Idx == 0", "llvm/include/llvm/IR/Instructions.h", 4777, __extension__ __PRETTY_FUNCTION__)); | |||
4778 | setUnwindDest(B); | |||
4779 | } | |||
4780 | ||||
4781 | // Shadow Instruction::setInstructionSubclassData with a private forwarding | |||
4782 | // method so that subclasses cannot accidentally use it. | |||
4783 | template <typename Bitfield> | |||
4784 | void setSubclassData(typename Bitfield::Type Value) { | |||
4785 | Instruction::setSubclassData<Bitfield>(Value); | |||
4786 | } | |||
4787 | }; | |||
4788 | ||||
// A cleanupret carries the cleanuppad operand plus an optional unwind
// destination, so its operand count is variadic with a minimum arity of 1.
template <>
struct OperandTraits<CleanupReturnInst>
    : public VariadicOperandTraits<CleanupReturnInst, /*MINARITY=*/1> {};
4792 | ||||
4793 | DEFINE_TRANSPARENT_OPERAND_ACCESSORS(CleanupReturnInst, Value)CleanupReturnInst::op_iterator CleanupReturnInst::op_begin() { return OperandTraits<CleanupReturnInst>::op_begin(this ); } CleanupReturnInst::const_op_iterator CleanupReturnInst:: op_begin() const { return OperandTraits<CleanupReturnInst> ::op_begin(const_cast<CleanupReturnInst*>(this)); } CleanupReturnInst ::op_iterator CleanupReturnInst::op_end() { return OperandTraits <CleanupReturnInst>::op_end(this); } CleanupReturnInst:: const_op_iterator CleanupReturnInst::op_end() const { return OperandTraits <CleanupReturnInst>::op_end(const_cast<CleanupReturnInst *>(this)); } Value *CleanupReturnInst::getOperand(unsigned i_nocapture) const { (static_cast <bool> (i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && "getOperand() out of range!") ? void (0) : __assert_fail ("i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && \"getOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 4793, __extension__ __PRETTY_FUNCTION__ )); return cast_or_null<Value>( OperandTraits<CleanupReturnInst >::op_begin(const_cast<CleanupReturnInst*>(this))[i_nocapture ].get()); } void CleanupReturnInst::setOperand(unsigned i_nocapture , Value *Val_nocapture) { (static_cast <bool> (i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && "setOperand() out of range!") ? 
void (0) : __assert_fail ("i_nocapture < OperandTraits<CleanupReturnInst>::operands(this) && \"setOperand() out of range!\"" , "llvm/include/llvm/IR/Instructions.h", 4793, __extension__ __PRETTY_FUNCTION__ )); OperandTraits<CleanupReturnInst>::op_begin(this)[i_nocapture ] = Val_nocapture; } unsigned CleanupReturnInst::getNumOperands () const { return OperandTraits<CleanupReturnInst>::operands (this); } template <int Idx_nocapture> Use &CleanupReturnInst ::Op() { return this->OpFrom<Idx_nocapture>(this); } template <int Idx_nocapture> const Use &CleanupReturnInst ::Op() const { return this->OpFrom<Idx_nocapture>(this ); } | |||
4794 | ||||
4795 | //===----------------------------------------------------------------------===// | |||
4796 | // UnreachableInst Class | |||
4797 | //===----------------------------------------------------------------------===// | |||
4798 | ||||
4799 | //===--------------------------------------------------------------------------- | |||
4800 | /// This function has undefined behavior. In particular, the | |||
4801 | /// presence of this instruction indicates some higher level knowledge that the | |||
4802 | /// end of the block cannot be reached. | |||
4803 | /// | |||
4804 | class UnreachableInst : public Instruction { | |||
4805 | protected: | |||
4806 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4807 | friend class Instruction; | |||
4808 | ||||
4809 | UnreachableInst *cloneImpl() const; | |||
4810 | ||||
4811 | public: | |||
4812 | explicit UnreachableInst(LLVMContext &C, Instruction *InsertBefore = nullptr); | |||
4813 | explicit UnreachableInst(LLVMContext &C, BasicBlock *InsertAtEnd); | |||
4814 | ||||
4815 | // allocate space for exactly zero operands | |||
4816 | void *operator new(size_t S) { return User::operator new(S, 0); } | |||
4817 | void operator delete(void *Ptr) { User::operator delete(Ptr); } | |||
4818 | ||||
4819 | unsigned getNumSuccessors() const { return 0; } | |||
4820 | ||||
4821 | // Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4822 | static bool classof(const Instruction *I) { | |||
4823 | return I->getOpcode() == Instruction::Unreachable; | |||
4824 | } | |||
4825 | static bool classof(const Value *V) { | |||
4826 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4827 | } | |||
4828 | ||||
4829 | private: | |||
4830 | BasicBlock *getSuccessor(unsigned idx) const { | |||
4831 | llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!" , "llvm/include/llvm/IR/Instructions.h", 4831); | |||
4832 | } | |||
4833 | ||||
4834 | void setSuccessor(unsigned idx, BasicBlock *B) { | |||
4835 | llvm_unreachable("UnreachableInst has no successors!")::llvm::llvm_unreachable_internal("UnreachableInst has no successors!" , "llvm/include/llvm/IR/Instructions.h", 4835); | |||
4836 | } | |||
4837 | }; | |||
4838 | ||||
4839 | //===----------------------------------------------------------------------===// | |||
4840 | // TruncInst Class | |||
4841 | //===----------------------------------------------------------------------===// | |||
4842 | ||||
4843 | /// This class represents a truncation of integer types. | |||
4844 | class TruncInst : public CastInst { | |||
4845 | protected: | |||
4846 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4847 | friend class Instruction; | |||
4848 | ||||
4849 | /// Clone an identical TruncInst | |||
4850 | TruncInst *cloneImpl() const; | |||
4851 | ||||
4852 | public: | |||
4853 | /// Constructor with insert-before-instruction semantics | |||
4854 | TruncInst( | |||
4855 | Value *S, ///< The value to be truncated | |||
4856 | Type *Ty, ///< The (smaller) type to truncate to | |||
4857 | const Twine &NameStr = "", ///< A name for the new instruction | |||
4858 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
4859 | ); | |||
4860 | ||||
4861 | /// Constructor with insert-at-end-of-block semantics | |||
4862 | TruncInst( | |||
4863 | Value *S, ///< The value to be truncated | |||
4864 | Type *Ty, ///< The (smaller) type to truncate to | |||
4865 | const Twine &NameStr, ///< A name for the new instruction | |||
4866 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
4867 | ); | |||
4868 | ||||
4869 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4870 | static bool classof(const Instruction *I) { | |||
4871 | return I->getOpcode() == Trunc; | |||
4872 | } | |||
4873 | static bool classof(const Value *V) { | |||
4874 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4875 | } | |||
4876 | }; | |||
4877 | ||||
4878 | //===----------------------------------------------------------------------===// | |||
4879 | // ZExtInst Class | |||
4880 | //===----------------------------------------------------------------------===// | |||
4881 | ||||
4882 | /// This class represents zero extension of integer types. | |||
4883 | class ZExtInst : public CastInst { | |||
4884 | protected: | |||
4885 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4886 | friend class Instruction; | |||
4887 | ||||
4888 | /// Clone an identical ZExtInst | |||
4889 | ZExtInst *cloneImpl() const; | |||
4890 | ||||
4891 | public: | |||
4892 | /// Constructor with insert-before-instruction semantics | |||
4893 | ZExtInst( | |||
4894 | Value *S, ///< The value to be zero extended | |||
4895 | Type *Ty, ///< The type to zero extend to | |||
4896 | const Twine &NameStr = "", ///< A name for the new instruction | |||
4897 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
4898 | ); | |||
4899 | ||||
4900 | /// Constructor with insert-at-end semantics. | |||
4901 | ZExtInst( | |||
4902 | Value *S, ///< The value to be zero extended | |||
4903 | Type *Ty, ///< The type to zero extend to | |||
4904 | const Twine &NameStr, ///< A name for the new instruction | |||
4905 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
4906 | ); | |||
4907 | ||||
4908 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4909 | static bool classof(const Instruction *I) { | |||
4910 | return I->getOpcode() == ZExt; | |||
4911 | } | |||
4912 | static bool classof(const Value *V) { | |||
4913 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4914 | } | |||
4915 | }; | |||
4916 | ||||
4917 | //===----------------------------------------------------------------------===// | |||
4918 | // SExtInst Class | |||
4919 | //===----------------------------------------------------------------------===// | |||
4920 | ||||
4921 | /// This class represents a sign extension of integer types. | |||
4922 | class SExtInst : public CastInst { | |||
4923 | protected: | |||
4924 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4925 | friend class Instruction; | |||
4926 | ||||
4927 | /// Clone an identical SExtInst | |||
4928 | SExtInst *cloneImpl() const; | |||
4929 | ||||
4930 | public: | |||
4931 | /// Constructor with insert-before-instruction semantics | |||
4932 | SExtInst( | |||
4933 | Value *S, ///< The value to be sign extended | |||
4934 | Type *Ty, ///< The type to sign extend to | |||
4935 | const Twine &NameStr = "", ///< A name for the new instruction | |||
4936 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
4937 | ); | |||
4938 | ||||
4939 | /// Constructor with insert-at-end-of-block semantics | |||
4940 | SExtInst( | |||
4941 | Value *S, ///< The value to be sign extended | |||
4942 | Type *Ty, ///< The type to sign extend to | |||
4943 | const Twine &NameStr, ///< A name for the new instruction | |||
4944 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
4945 | ); | |||
4946 | ||||
4947 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4948 | static bool classof(const Instruction *I) { | |||
4949 | return I->getOpcode() == SExt; | |||
4950 | } | |||
4951 | static bool classof(const Value *V) { | |||
4952 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4953 | } | |||
4954 | }; | |||
4955 | ||||
4956 | //===----------------------------------------------------------------------===// | |||
4957 | // FPTruncInst Class | |||
4958 | //===----------------------------------------------------------------------===// | |||
4959 | ||||
4960 | /// This class represents a truncation of floating point types. | |||
4961 | class FPTruncInst : public CastInst { | |||
4962 | protected: | |||
4963 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
4964 | friend class Instruction; | |||
4965 | ||||
4966 | /// Clone an identical FPTruncInst | |||
4967 | FPTruncInst *cloneImpl() const; | |||
4968 | ||||
4969 | public: | |||
4970 | /// Constructor with insert-before-instruction semantics | |||
4971 | FPTruncInst( | |||
4972 | Value *S, ///< The value to be truncated | |||
4973 | Type *Ty, ///< The type to truncate to | |||
4974 | const Twine &NameStr = "", ///< A name for the new instruction | |||
4975 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
4976 | ); | |||
4977 | ||||
4978 | /// Constructor with insert-before-instruction semantics | |||
4979 | FPTruncInst( | |||
4980 | Value *S, ///< The value to be truncated | |||
4981 | Type *Ty, ///< The type to truncate to | |||
4982 | const Twine &NameStr, ///< A name for the new instruction | |||
4983 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
4984 | ); | |||
4985 | ||||
4986 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
4987 | static bool classof(const Instruction *I) { | |||
4988 | return I->getOpcode() == FPTrunc; | |||
4989 | } | |||
4990 | static bool classof(const Value *V) { | |||
4991 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
4992 | } | |||
4993 | }; | |||
4994 | ||||
4995 | //===----------------------------------------------------------------------===// | |||
4996 | // FPExtInst Class | |||
4997 | //===----------------------------------------------------------------------===// | |||
4998 | ||||
4999 | /// This class represents an extension of floating point types. | |||
5000 | class FPExtInst : public CastInst { | |||
5001 | protected: | |||
5002 | // Note: Instruction needs to be a friend here to call cloneImpl. | |||
5003 | friend class Instruction; | |||
5004 | ||||
5005 | /// Clone an identical FPExtInst | |||
5006 | FPExtInst *cloneImpl() const; | |||
5007 | ||||
5008 | public: | |||
5009 | /// Constructor with insert-before-instruction semantics | |||
5010 | FPExtInst( | |||
5011 | Value *S, ///< The value to be extended | |||
5012 | Type *Ty, ///< The type to extend to | |||
5013 | const Twine &NameStr = "", ///< A name for the new instruction | |||
5014 | Instruction *InsertBefore = nullptr ///< Where to insert the new instruction | |||
5015 | ); | |||
5016 | ||||
5017 | /// Constructor with insert-at-end-of-block semantics | |||
5018 | FPExtInst( | |||
5019 | Value *S, ///< The value to be extended | |||
5020 | Type *Ty, ///< The type to extend to | |||
5021 | const Twine &NameStr, ///< A name for the new instruction | |||
5022 | BasicBlock *InsertAtEnd ///< The block to insert the instruction into | |||
5023 | ); | |||
5024 | ||||
5025 | /// Methods for support type inquiry through isa, cast, and dyn_cast: | |||
5026 | static bool classof(const Instruction *I) { | |||
5027 | return I->getOpcode() == FPExt; | |||
5028 | } | |||
5029 | static bool classof(const Value *V) { | |||
5030 | return isa<Instruction>(V) && classof(cast<Instruction>(V)); | |||
5031 | } | |||
5032 | }; | |||
5033 | ||||
5034 | //===----------------------------------------------------------------------===// | |||
5035 | // UIToFPInst Class | |||
5036 | //===----------------------------------------------------------------------===// | |||
5037 | ||||
/// This class represents a cast from an unsigned integer to a floating point
/// value (the 'uitofp' instruction).
class UIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical UIToFPInst.
  UIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  UIToFPInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr = "",    ///< A name for the new instruction
    Instruction *InsertBefore = nullptr  ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  UIToFPInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == UIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5072 | ||||
5073 | //===----------------------------------------------------------------------===// | |||
5074 | // SIToFPInst Class | |||
5075 | //===----------------------------------------------------------------------===// | |||
5076 | ||||
/// This class represents a cast from a signed integer to a floating point
/// value (the 'sitofp' instruction).
class SIToFPInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical SIToFPInst.
  SIToFPInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  SIToFPInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr = "",    ///< A name for the new instruction
    Instruction *InsertBefore = nullptr  ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  SIToFPInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == SIToFP;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5111 | ||||
5112 | //===----------------------------------------------------------------------===// | |||
5113 | // FPToUIInst Class | |||
5114 | //===----------------------------------------------------------------------===// | |||
5115 | ||||
/// This class represents a cast from a floating point value to an unsigned
/// integer (the 'fptoui' instruction).
class FPToUIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToUIInst.
  FPToUIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToUIInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr = "",    ///< A name for the new instruction
    Instruction *InsertBefore = nullptr  ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPToUIInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToUI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5150 | ||||
5151 | //===----------------------------------------------------------------------===// | |||
5152 | // FPToSIInst Class | |||
5153 | //===----------------------------------------------------------------------===// | |||
5154 | ||||
/// This class represents a cast from a floating point value to a signed
/// integer (the 'fptosi' instruction).
class FPToSIInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FPToSIInst.
  FPToSIInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  FPToSIInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr = "",    ///< A name for the new instruction
    Instruction *InsertBefore = nullptr  ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  FPToSIInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == FPToSI;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5189 | ||||
5190 | //===----------------------------------------------------------------------===// | |||
5191 | // IntToPtrInst Class | |||
5192 | //===----------------------------------------------------------------------===// | |||
5193 | ||||
/// This class represents a cast from an integer to a pointer (the 'inttoptr'
/// instruction).
class IntToPtrInst : public CastInst {
public:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Constructor with insert-before-instruction semantics
  IntToPtrInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr = "",    ///< A name for the new instruction
    Instruction *InsertBefore = nullptr  ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  IntToPtrInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Clone an identical IntToPtrInst.
  IntToPtrInst *cloneImpl() const;

  /// Returns the address space of this instruction's pointer type (i.e. the
  /// address space of the result of the cast).
  unsigned getAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == IntToPtr;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5232 | ||||
5233 | //===----------------------------------------------------------------------===// | |||
5234 | // PtrToIntInst Class | |||
5235 | //===----------------------------------------------------------------------===// | |||
5236 | ||||
/// This class represents a cast from a pointer to an integer (the 'ptrtoint'
/// instruction).
class PtrToIntInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical PtrToIntInst.
  PtrToIntInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  PtrToIntInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr = "",    ///< A name for the new instruction
    Instruction *InsertBefore = nullptr  ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  PtrToIntInst(
    Value *S,                     ///< The value to be converted
    Type *Ty,                     ///< The type to convert to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  /// Gets the pointer operand (operand 0, the value being cast).
  Value *getPointerOperand() { return getOperand(0); }
  /// Gets the pointer operand.
  const Value *getPointerOperand() const { return getOperand(0); }
  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() { return 0U; }

  /// Returns the address space of the pointer operand.
  unsigned getPointerAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == PtrToInt;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5283 | ||||
5284 | //===----------------------------------------------------------------------===// | |||
5285 | // BitCastInst Class | |||
5286 | //===----------------------------------------------------------------------===// | |||
5287 | ||||
/// This class represents a no-op cast from one type to another (the 'bitcast'
/// instruction).
class BitCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical BitCastInst.
  BitCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  BitCastInst(
    Value *S,                     ///< The value to be casted
    Type *Ty,                     ///< The type to cast to
    const Twine &NameStr = "",    ///< A name for the new instruction
    Instruction *InsertBefore = nullptr  ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  BitCastInst(
    Value *S,                     ///< The value to be casted
    Type *Ty,                     ///< The type to cast to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == BitCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5322 | ||||
5323 | //===----------------------------------------------------------------------===// | |||
5324 | // AddrSpaceCastInst Class | |||
5325 | //===----------------------------------------------------------------------===// | |||
5326 | ||||
/// This class represents a conversion between pointers from one address space
/// to another (the 'addrspacecast' instruction).
class AddrSpaceCastInst : public CastInst {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical AddrSpaceCastInst.
  AddrSpaceCastInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics
  AddrSpaceCastInst(
    Value *S,                     ///< The value to be casted
    Type *Ty,                     ///< The type to cast to
    const Twine &NameStr = "",    ///< A name for the new instruction
    Instruction *InsertBefore = nullptr  ///< Where to insert the new instruction
  );

  /// Constructor with insert-at-end-of-block semantics
  AddrSpaceCastInst(
    Value *S,                     ///< The value to be casted
    Type *Ty,                     ///< The type to cast to
    const Twine &NameStr,         ///< A name for the new instruction
    BasicBlock *InsertAtEnd       ///< The block to insert the instruction into
  );

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Instruction *I) {
    return I->getOpcode() == AddrSpaceCast;
  }
  static bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }

  /// Gets the pointer operand (operand 0, the value being cast).
  Value *getPointerOperand() {
    return getOperand(0);
  }

  /// Gets the pointer operand.
  const Value *getPointerOperand() const {
    return getOperand(0);
  }

  /// Gets the operand index of the pointer operand.
  static unsigned getPointerOperandIndex() {
    return 0U;
  }

  /// Returns the address space of the pointer operand (the source of the
  /// cast).
  unsigned getSrcAddressSpace() const {
    return getPointerOperand()->getType()->getPointerAddressSpace();
  }

  /// Returns the address space of the result.
  unsigned getDestAddressSpace() const {
    return getType()->getPointerAddressSpace();
  }
};
5387 | ||||
5388 | //===----------------------------------------------------------------------===// | |||
5389 | // Helper functions | |||
5390 | //===----------------------------------------------------------------------===// | |||
5391 | ||||
5392 | /// A helper function that returns the pointer operand of a load or store | |||
5393 | /// instruction. Returns nullptr if not load or store. | |||
5394 | inline const Value *getLoadStorePointerOperand(const Value *V) { | |||
5395 | if (auto *Load = dyn_cast<LoadInst>(V)) | |||
5396 | return Load->getPointerOperand(); | |||
5397 | if (auto *Store = dyn_cast<StoreInst>(V)) | |||
5398 | return Store->getPointerOperand(); | |||
5399 | return nullptr; | |||
5400 | } | |||
5401 | inline Value *getLoadStorePointerOperand(Value *V) { | |||
5402 | return const_cast<Value *>( | |||
5403 | getLoadStorePointerOperand(static_cast<const Value *>(V))); | |||
5404 | } | |||
5405 | ||||
5406 | /// A helper function that returns the pointer operand of a load, store | |||
5407 | /// or GEP instruction. Returns nullptr if not load, store, or GEP. | |||
5408 | inline const Value *getPointerOperand(const Value *V) { | |||
5409 | if (auto *Ptr = getLoadStorePointerOperand(V)) | |||
5410 | return Ptr; | |||
5411 | if (auto *Gep = dyn_cast<GetElementPtrInst>(V)) | |||
5412 | return Gep->getPointerOperand(); | |||
5413 | return nullptr; | |||
5414 | } | |||
5415 | inline Value *getPointerOperand(Value *V) { | |||
5416 | return const_cast<Value *>(getPointerOperand(static_cast<const Value *>(V))); | |||
5417 | } | |||
5418 | ||||
5419 | /// A helper function that returns the alignment of load or store instruction. | |||
5420 | inline Align getLoadStoreAlignment(Value *I) { | |||
5421 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(static_cast <bool> ((isa<LoadInst>(I) || isa< StoreInst>(I)) && "Expected Load or Store instruction" ) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "llvm/include/llvm/IR/Instructions.h", 5422, __extension__ __PRETTY_FUNCTION__ )) | |||
5422 | "Expected Load or Store instruction")(static_cast <bool> ((isa<LoadInst>(I) || isa< StoreInst>(I)) && "Expected Load or Store instruction" ) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "llvm/include/llvm/IR/Instructions.h", 5422, __extension__ __PRETTY_FUNCTION__ )); | |||
5423 | if (auto *LI = dyn_cast<LoadInst>(I)) | |||
5424 | return LI->getAlign(); | |||
5425 | return cast<StoreInst>(I)->getAlign(); | |||
5426 | } | |||
5427 | ||||
5428 | /// A helper function that returns the address space of the pointer operand of | |||
5429 | /// load or store instruction. | |||
5430 | inline unsigned getLoadStoreAddressSpace(Value *I) { | |||
5431 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(static_cast <bool> ((isa<LoadInst>(I) || isa< StoreInst>(I)) && "Expected Load or Store instruction" ) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "llvm/include/llvm/IR/Instructions.h", 5432, __extension__ __PRETTY_FUNCTION__ )) | |||
5432 | "Expected Load or Store instruction")(static_cast <bool> ((isa<LoadInst>(I) || isa< StoreInst>(I)) && "Expected Load or Store instruction" ) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "llvm/include/llvm/IR/Instructions.h", 5432, __extension__ __PRETTY_FUNCTION__ )); | |||
5433 | if (auto *LI = dyn_cast<LoadInst>(I)) | |||
5434 | return LI->getPointerAddressSpace(); | |||
5435 | return cast<StoreInst>(I)->getPointerAddressSpace(); | |||
5436 | } | |||
5437 | ||||
5438 | /// A helper function that returns the type of a load or store instruction. | |||
5439 | inline Type *getLoadStoreType(Value *I) { | |||
5440 | assert((isa<LoadInst>(I) || isa<StoreInst>(I)) &&(static_cast <bool> ((isa<LoadInst>(I) || isa< StoreInst>(I)) && "Expected Load or Store instruction" ) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "llvm/include/llvm/IR/Instructions.h", 5441, __extension__ __PRETTY_FUNCTION__ )) | |||
5441 | "Expected Load or Store instruction")(static_cast <bool> ((isa<LoadInst>(I) || isa< StoreInst>(I)) && "Expected Load or Store instruction" ) ? void (0) : __assert_fail ("(isa<LoadInst>(I) || isa<StoreInst>(I)) && \"Expected Load or Store instruction\"" , "llvm/include/llvm/IR/Instructions.h", 5441, __extension__ __PRETTY_FUNCTION__ )); | |||
5442 | if (auto *LI = dyn_cast<LoadInst>(I)) | |||
5443 | return LI->getType(); | |||
5444 | return cast<StoreInst>(I)->getValueOperand()->getType(); | |||
5445 | } | |||
5446 | ||||
5447 | /// A helper function that returns an atomic operation's sync scope; returns | |||
5448 | /// std::nullopt if it is not an atomic operation. | |||
5449 | inline std::optional<SyncScope::ID> getAtomicSyncScopeID(const Instruction *I) { | |||
5450 | if (!I->isAtomic()) | |||
5451 | return std::nullopt; | |||
5452 | if (auto *AI = dyn_cast<LoadInst>(I)) | |||
5453 | return AI->getSyncScopeID(); | |||
5454 | if (auto *AI = dyn_cast<StoreInst>(I)) | |||
5455 | return AI->getSyncScopeID(); | |||
5456 | if (auto *AI = dyn_cast<FenceInst>(I)) | |||
5457 | return AI->getSyncScopeID(); | |||
5458 | if (auto *AI = dyn_cast<AtomicCmpXchgInst>(I)) | |||
5459 | return AI->getSyncScopeID(); | |||
5460 | if (auto *AI = dyn_cast<AtomicRMWInst>(I)) | |||
5461 | return AI->getSyncScopeID(); | |||
5462 | llvm_unreachable("unhandled atomic operation")::llvm::llvm_unreachable_internal("unhandled atomic operation" , "llvm/include/llvm/IR/Instructions.h", 5462); | |||
5463 | } | |||
5464 | ||||
5465 | //===----------------------------------------------------------------------===// | |||
5466 | // FreezeInst Class | |||
5467 | //===----------------------------------------------------------------------===// | |||
5468 | ||||
/// This class represents a freeze function that returns a random concrete
/// value if an operand is either a poison value or an undef value
/// (the 'freeze' instruction).
class FreezeInst : public UnaryInstruction {
protected:
  // Note: Instruction needs to be a friend here to call cloneImpl.
  friend class Instruction;

  /// Clone an identical FreezeInst.
  FreezeInst *cloneImpl() const;

public:
  /// Constructor with insert-before-instruction semantics.
  explicit FreezeInst(Value *S,
                      const Twine &NameStr = "",
                      Instruction *InsertBefore = nullptr);
  /// Constructor with insert-at-end-of-block semantics.
  FreezeInst(Value *S, const Twine &NameStr, BasicBlock *InsertAtEnd);

  // Methods for support type inquiry through isa, cast, and dyn_cast:
  static inline bool classof(const Instruction *I) {
    return I->getOpcode() == Freeze;
  }
  static inline bool classof(const Value *V) {
    return isa<Instruction>(V) && classof(cast<Instruction>(V));
  }
};
5493 | ||||
5494 | } // end namespace llvm | |||
5495 | ||||
5496 | #endif // LLVM_IR_INSTRUCTIONS_H |