Bug Summary

File: build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/include/llvm/IR/Instructions.h
Warning: line 1242, column 33
Called C++ object pointer is null
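
The path behind this warning: in splitLoopBound (LoopBoundSplit.cpp), the local
ExitingCondLCSSAPhi is initialized to nullptr at line 364 and is only assigned
inside the loop over the header phis when a phi matches the exiting condition's
AddRec; if no phi matches, the null value reaches Builder.CreateICmp as LHS at
line 396, and CreateICmp dereferences it via LHS->getType() in Instructions.h.
A minimal C++ sketch of the pattern (hypothetical names, not the LLVM code
itself):

    #include <vector>

    struct Phi { int Kind; };

    // Found stays null when no element satisfies the predicate, yet it is
    // dereferenced unconditionally afterwards -- the same shape the analyzer
    // reports for ExitingCondLCSSAPhi.
    int kindOfMatch(const std::vector<Phi> &Phis) {
      const Phi *Found = nullptr;   // like ExitingCondLCSSAPhi = nullptr
      for (const Phi &P : Phis)
        if (P.Kind > 0)             // like the NonPHIAddRecValue check
          Found = &P;
      return Found->Kind;           // null dereference when nothing matched
    }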

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name LoopBoundSplit.cpp -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm -resource-dir /usr/lib/llvm-16/lib/clang/16.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Transforms/Scalar -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Transforms/Scalar -I include -I /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/include -D _FORTIFY_SOURCE=2 -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-16/lib/clang/16.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm=build-llvm -fmacro-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm=build-llvm -fcoverage-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -O3 -Wno-unused-command-line-argument -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -Wno-misleading-indentation -std=c++17 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/build-llvm=build-llvm -fdebug-prefix-map=/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/= -ferror-limit 19 -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2022-09-04-125545-48738-1 -x c++ /build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Transforms/Scalar/LoopBoundSplit.cpp

/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/lib/Transforms/Scalar/LoopBoundSplit.cpp

1//===------- LoopBoundSplit.cpp - Split Loop Bound --------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "llvm/Transforms/Scalar/LoopBoundSplit.h"
10#include "llvm/ADT/Sequence.h"
11#include "llvm/Analysis/LoopAnalysisManager.h"
12#include "llvm/Analysis/LoopInfo.h"
13#include "llvm/Analysis/ScalarEvolution.h"
14#include "llvm/Analysis/ScalarEvolutionExpressions.h"
15#include "llvm/IR/PatternMatch.h"
16#include "llvm/Transforms/Scalar/LoopPassManager.h"
17#include "llvm/Transforms/Utils/BasicBlockUtils.h"
18#include "llvm/Transforms/Utils/Cloning.h"
19#include "llvm/Transforms/Utils/LoopSimplify.h"
20#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
21
22#define DEBUG_TYPE "loop-bound-split"
23
24namespace llvm {
25
26using namespace PatternMatch;
27
28namespace {
29struct ConditionInfo {
30 /// Branch instruction with this condition
31 BranchInst *BI = nullptr;
32 /// ICmp instruction with this condition
33 ICmpInst *ICmp = nullptr;
34 /// Predicate info
35 ICmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
36 /// AddRec llvm value
37 Value *AddRecValue = nullptr;
38 /// Non PHI AddRec llvm value
39 Value *NonPHIAddRecValue;
40 /// Bound llvm value
41 Value *BoundValue = nullptr;
42 /// AddRec SCEV
43 const SCEVAddRecExpr *AddRecSCEV = nullptr;
44 /// Bound SCEV
45 const SCEV *BoundSCEV = nullptr;
46
47 ConditionInfo() = default;
48};
49} // namespace
50
51static void analyzeICmp(ScalarEvolution &SE, ICmpInst *ICmp,
52 ConditionInfo &Cond, const Loop &L) {
53 Cond.ICmp = ICmp;
54 if (match(ICmp, m_ICmp(Cond.Pred, m_Value(Cond.AddRecValue),
55 m_Value(Cond.BoundValue)))) {
56 const SCEV *AddRecSCEV = SE.getSCEV(Cond.AddRecValue);
57 const SCEV *BoundSCEV = SE.getSCEV(Cond.BoundValue);
58 const SCEVAddRecExpr *LHSAddRecSCEV = dyn_cast<SCEVAddRecExpr>(AddRecSCEV);
59 const SCEVAddRecExpr *RHSAddRecSCEV = dyn_cast<SCEVAddRecExpr>(BoundSCEV);
60 // Locate AddRec in LHSSCEV and Bound in RHSSCEV.
61 if (!LHSAddRecSCEV && RHSAddRecSCEV) {
62 std::swap(Cond.AddRecValue, Cond.BoundValue);
63 std::swap(AddRecSCEV, BoundSCEV);
64 Cond.Pred = ICmpInst::getSwappedPredicate(Cond.Pred);
65 }
66
67 Cond.AddRecSCEV = dyn_cast<SCEVAddRecExpr>(AddRecSCEV);
68 Cond.BoundSCEV = BoundSCEV;
69 Cond.NonPHIAddRecValue = Cond.AddRecValue;
70
71 // If the Cond.AddRecValue is PHI node, update Cond.NonPHIAddRecValue with
72 // value from backedge.
73 if (Cond.AddRecSCEV && isa<PHINode>(Cond.AddRecValue)) {
74 PHINode *PN = cast<PHINode>(Cond.AddRecValue);
75 Cond.NonPHIAddRecValue = PN->getIncomingValueForBlock(L.getLoopLatch());
76 }
77 }
78}
79
80static bool calculateUpperBound(const Loop &L, ScalarEvolution &SE,
81 ConditionInfo &Cond, bool IsExitCond) {
82 if (IsExitCond) {
83 const SCEV *ExitCount = SE.getExitCount(&L, Cond.ICmp->getParent());
84 if (isa<SCEVCouldNotCompute>(ExitCount))
85 return false;
86
87 Cond.BoundSCEV = ExitCount;
88 return true;
89 }
90
91 // For non-exit condition, if pred is LT, keep existing bound.
92 if (Cond.Pred == ICmpInst::ICMP_SLT || Cond.Pred == ICmpInst::ICMP_ULT)
93 return true;
94
95 // For non-exit condition, if pred is LE, try to convert it to LT.
96 //              Range                  Range
97 //    AddRec <= Bound  -->  AddRec < Bound + 1
98 if (Cond.Pred != ICmpInst::ICMP_ULE && Cond.Pred != ICmpInst::ICMP_SLE)
99 return false;
100
101 if (IntegerType *BoundSCEVIntType =
102 dyn_cast<IntegerType>(Cond.BoundSCEV->getType())) {
103 unsigned BitWidth = BoundSCEVIntType->getBitWidth();
104 APInt Max = ICmpInst::isSigned(Cond.Pred)
105 ? APInt::getSignedMaxValue(BitWidth)
106 : APInt::getMaxValue(BitWidth);
107 const SCEV *MaxSCEV = SE.getConstant(Max);
108 // Check Bound < INT_MAX
109 ICmpInst::Predicate Pred =
110 ICmpInst::isSigned(Cond.Pred) ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
111 if (SE.isKnownPredicate(Pred, Cond.BoundSCEV, MaxSCEV)) {
112 const SCEV *BoundPlusOneSCEV =
113 SE.getAddExpr(Cond.BoundSCEV, SE.getOne(BoundSCEVIntType));
114 Cond.BoundSCEV = BoundPlusOneSCEV;
115 Cond.Pred = Pred;
116 return true;
117 }
118 }
119
120 // ToDo: Support ICMP_NE/EQ.
121
122 return false;
123}
124
125static bool hasProcessableCondition(const Loop &L, ScalarEvolution &SE,
126 ICmpInst *ICmp, ConditionInfo &Cond,
127 bool IsExitCond) {
128 analyzeICmp(SE, ICmp, Cond, L);
129
130 // The BoundSCEV should be evaluated at loop entry.
131 if (!SE.isAvailableAtLoopEntry(Cond.BoundSCEV, &L))
132 return false;
133
134 // Allowed AddRec as induction variable.
135 if (!Cond.AddRecSCEV)
136 return false;
137
138 if (!Cond.AddRecSCEV->isAffine())
139 return false;
140
141 const SCEV *StepRecSCEV = Cond.AddRecSCEV->getStepRecurrence(SE);
142 // Allowed constant step.
143 if (!isa<SCEVConstant>(StepRecSCEV))
144 return false;
145
146 ConstantInt *StepCI = cast<SCEVConstant>(StepRecSCEV)->getValue();
147 // Allowed positive step for now.
148 // TODO: Support negative step.
149 if (StepCI->isNegative() || StepCI->isZero())
150 return false;
151
152 // Calculate upper bound.
153 if (!calculateUpperBound(L, SE, Cond, IsExitCond))
154 return false;
155
156 return true;
157}
158
159static bool isProcessableCondBI(const ScalarEvolution &SE,
160 const BranchInst *BI) {
161 BasicBlock *TrueSucc = nullptr;
162 BasicBlock *FalseSucc = nullptr;
163 ICmpInst::Predicate Pred;
164 Value *LHS, *RHS;
165 if (!match(BI, m_Br(m_ICmp(Pred, m_Value(LHS), m_Value(RHS)),
166 m_BasicBlock(TrueSucc), m_BasicBlock(FalseSucc))))
167 return false;
168
169 if (!SE.isSCEVable(LHS->getType()))
170 return false;
171 assert(SE.isSCEVable(RHS->getType()) && "Expected RHS's type is SCEVable");
172
173 if (TrueSucc == FalseSucc)
174 return false;
175
176 return true;
177}
178
179static bool canSplitLoopBound(const Loop &L, const DominatorTree &DT,
180 ScalarEvolution &SE, ConditionInfo &Cond) {
181 // Skip function with optsize.
182 if (L.getHeader()->getParent()->hasOptSize())
183 return false;
184
185 // Split only innermost loop.
186 if (!L.isInnermost())
187 return false;
188
189 // Check loop is in simplified form.
190 if (!L.isLoopSimplifyForm())
191 return false;
192
193 // Check loop is in LCSSA form.
194 if (!L.isLCSSAForm(DT))
195 return false;
196
197 // Skip loop that cannot be cloned.
198 if (!L.isSafeToClone())
199 return false;
200
201 BasicBlock *ExitingBB = L.getExitingBlock();
202 // Assumed only one exiting block.
203 if (!ExitingBB)
204 return false;
205
206 BranchInst *ExitingBI = dyn_cast<BranchInst>(ExitingBB->getTerminator());
207 if (!ExitingBI)
208 return false;
209
210 // Allowed only conditional branch with ICmp.
211 if (!isProcessableCondBI(SE, ExitingBI))
212 return false;
213
214 // Check the condition is processable.
215 ICmpInst *ICmp = cast<ICmpInst>(ExitingBI->getCondition());
216 if (!hasProcessableCondition(L, SE, ICmp, Cond, /*IsExitCond*/ true))
217 return false;
218
219 Cond.BI = ExitingBI;
220 return true;
221}
222
223static bool isProfitableToTransform(const Loop &L, const BranchInst *BI) {
224 // If the conditional branch splits a loop into two halves, we could
225 // generally say it is profitable.
226 //
227 // ToDo: Add more profitable cases here.
228
229 // Check this branch causes diamond CFG.
230 BasicBlock *Succ0 = BI->getSuccessor(0);
231 BasicBlock *Succ1 = BI->getSuccessor(1);
232
233 BasicBlock *Succ0Succ = Succ0->getSingleSuccessor();
234 BasicBlock *Succ1Succ = Succ1->getSingleSuccessor();
235 if (!Succ0Succ || !Succ1Succ || Succ0Succ != Succ1Succ)
236 return false;
237
238 // ToDo: Calculate each successor's instruction cost.
239
240 return true;
241}
242
243static BranchInst *findSplitCandidate(const Loop &L, ScalarEvolution &SE,
244 ConditionInfo &ExitingCond,
245 ConditionInfo &SplitCandidateCond) {
246 for (auto *BB : L.blocks()) {
247 // Skip condition of backedge.
248 if (L.getLoopLatch() == BB)
249 continue;
250
251 auto *BI = dyn_cast<BranchInst>(BB->getTerminator());
252 if (!BI)
253 continue;
254
255 // Check conditional branch with ICmp.
256 if (!isProcessableCondBI(SE, BI))
257 continue;
258
259 // Skip loop invariant condition.
260 if (L.isLoopInvariant(BI->getCondition()))
261 continue;
262
263 // Check the condition is processable.
264 ICmpInst *ICmp = cast<ICmpInst>(BI->getCondition());
265 if (!hasProcessableCondition(L, SE, ICmp, SplitCandidateCond,
266 /*IsExitCond*/ false))
267 continue;
268
269 if (ExitingCond.BoundSCEV->getType() !=
270 SplitCandidateCond.BoundSCEV->getType())
271 continue;
272
273 // After transformation, we assume the split condition of the pre-loop is
274 // always true. In order to guarantee it, we need to check the start value
275 // of the split cond AddRec satisfies the split condition.
276 if (!SE.isLoopEntryGuardedByCond(&L, SplitCandidateCond.Pred,
277 SplitCandidateCond.AddRecSCEV->getStart(),
278 SplitCandidateCond.BoundSCEV))
279 continue;
280
281 SplitCandidateCond.BI = BI;
282 return BI;
283 }
284
285 return nullptr;
286}
287
288static bool splitLoopBound(Loop &L, DominatorTree &DT, LoopInfo &LI,
289 ScalarEvolution &SE, LPMUpdater &U) {
290 ConditionInfo SplitCandidateCond;
291 ConditionInfo ExitingCond;
292
293 // Check we can split this loop's bound.
294 if (!canSplitLoopBound(L, DT, SE, ExitingCond))
4
Taking false branch
295 return false;
296
297 if (!findSplitCandidate(L, SE, ExitingCond, SplitCandidateCond))
5
Taking false branch
298 return false;
299
300 if (!isProfitableToTransform(L, SplitCandidateCond.BI))
6
Taking false branch
301 return false;
302
303 // Now, we have a split candidate. Let's build a form as below.
304 // +--------------------+
305 // | preheader |
306 // | set up newbound |
307 // +--------------------+
308 // | /----------------\
309 // +--------v----v------+ |
310 // | header |---\ |
311 // | with true condition| | |
312 // +--------------------+ | |
313 // | | |
314 // +--------v-----------+ | |
315 // | if.then.BB | | |
316 // +--------------------+ | |
317 // | | |
318 // +--------v-----------<---/ |
319 // | latch >----------/
320 // | with newbound |
321 // +--------------------+
322 // |
323 // +--------v-----------+
324 // | preheader2 |--------------\
325 // | if (AddRec i != | |
326 // | org bound) | |
327 // +--------------------+ |
328 // | /----------------\ |
329 // +--------v----v------+ | |
330 // | header2 |---\ | |
331 // | conditional branch | | | |
332 // |with false condition| | | |
333 // +--------------------+ | | |
334 // | | | |
335 // +--------v-----------+ | | |
336 // | if.then.BB2 | | | |
337 // +--------------------+ | | |
338 // | | | |
339 // +--------v-----------<---/ | |
340 // | latch2 >----------/ |
341 // | with org bound | |
342 // +--------v-----------+ |
343 // | |
344 // | +---------------+ |
345 // +--> exit <-------/
346 // +---------------+
347
348 // Let's create post loop.
349 SmallVector<BasicBlock *, 8> PostLoopBlocks;
350 Loop *PostLoop;
351 ValueToValueMapTy VMap;
352 BasicBlock *PreHeader = L.getLoopPreheader();
353 BasicBlock *SplitLoopPH = SplitEdge(PreHeader, L.getHeader(), &DT, &LI);
354 PostLoop = cloneLoopWithPreheader(L.getExitBlock(), SplitLoopPH, &L, VMap,
355 ".split", &LI, &DT, PostLoopBlocks);
356 remapInstructionsInBlocks(PostLoopBlocks, VMap);
357
358 BasicBlock *PostLoopPreHeader = PostLoop->getLoopPreheader();
359 IRBuilder<> Builder(&PostLoopPreHeader->front());
360
361 // Update phi nodes in header of post-loop.
362 bool isExitingLatch =
363 (L.getExitingBlock() == L.getLoopLatch()) ? true : false;
7
Assuming the condition is true
8
'?' condition is true
364 Value *ExitingCondLCSSAPhi = nullptr;
9
'ExitingCondLCSSAPhi' initialized to a null pointer value
365 for (PHINode &PN : L.getHeader()->phis()) {
366 // Create LCSSA phi node in preheader of post-loop.
367 PHINode *LCSSAPhi =
368 Builder.CreatePHI(PN.getType(), 1, PN.getName() + ".lcssa");
369 LCSSAPhi->setDebugLoc(PN.getDebugLoc());
370 // If the exiting block is loop latch, the phi does not have the update at
371 // last iteration. In this case, update lcssa phi with value from backedge.
372 LCSSAPhi->addIncoming(
373 isExitingLatch ? PN.getIncomingValueForBlock(L.getLoopLatch()) : &PN,
374 L.getExitingBlock());
375
376 // Update the start value of phi node in post-loop with the LCSSA phi node.
377 PHINode *PostLoopPN = cast<PHINode>(VMap[&PN]);
378 PostLoopPN->setIncomingValueForBlock(PostLoopPreHeader, LCSSAPhi);
379
380 // Find PHI with exiting condition from pre-loop. The PHI should be
381 // SCEVAddRecExpr and have same incoming value from backedge with
382 // ExitingCond.
383 if (!SE.isSCEVable(PN.getType()))
384 continue;
385
386 const SCEVAddRecExpr *PhiSCEV = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(&PN));
387 if (PhiSCEV && ExitingCond.NonPHIAddRecValue ==
388 PN.getIncomingValueForBlock(L.getLoopLatch()))
389 ExitingCondLCSSAPhi = LCSSAPhi;
390 }
391
392 // Add conditional branch to check we can skip post-loop in its preheader.
393 Instruction *OrigBI = PostLoopPreHeader->getTerminator();
394 ICmpInst::Predicate Pred = ICmpInst::ICMP_NE;
395 Value *Cond =
396 Builder.CreateICmp(Pred, ExitingCondLCSSAPhi, ExitingCond.BoundValue);
10
Passing null pointer value via 2nd parameter 'LHS'
11
Calling 'IRBuilderBase::CreateICmp'
397 Builder.CreateCondBr(Cond, PostLoop->getHeader(), PostLoop->getExitBlock());
398 OrigBI->eraseFromParent();
399
400 // Create new loop bound and add it into preheader of pre-loop.
401 const SCEV *NewBoundSCEV = ExitingCond.BoundSCEV;
402 const SCEV *SplitBoundSCEV = SplitCandidateCond.BoundSCEV;
403 NewBoundSCEV = ICmpInst::isSigned(ExitingCond.Pred)
404 ? SE.getSMinExpr(NewBoundSCEV, SplitBoundSCEV)
405 : SE.getUMinExpr(NewBoundSCEV, SplitBoundSCEV);
406
407 SCEVExpander Expander(
408 SE, L.getHeader()->getParent()->getParent()->getDataLayout(), "split");
409 Instruction *InsertPt = SplitLoopPH->getTerminator();
410 Value *NewBoundValue =
411 Expander.expandCodeFor(NewBoundSCEV, NewBoundSCEV->getType(), InsertPt);
412 NewBoundValue->setName("new.bound");
413
414 // Replace exiting bound value of pre-loop NewBound.
415 ExitingCond.ICmp->setOperand(1, NewBoundValue);
416
417 // Replace SplitCandidateCond.BI's condition of pre-loop by True.
418 LLVMContext &Context = PreHeader->getContext();
419 SplitCandidateCond.BI->setCondition(ConstantInt::getTrue(Context));
420
421 // Replace cloned SplitCandidateCond.BI's condition in post-loop by False.
422 BranchInst *ClonedSplitCandidateBI =
423 cast<BranchInst>(VMap[SplitCandidateCond.BI]);
424 ClonedSplitCandidateBI->setCondition(ConstantInt::getFalse(Context));
425
426 // Replace exit branch target of pre-loop by post-loop's preheader.
427 if (L.getExitBlock() == ExitingCond.BI->getSuccessor(0))
428 ExitingCond.BI->setSuccessor(0, PostLoopPreHeader);
429 else
430 ExitingCond.BI->setSuccessor(1, PostLoopPreHeader);
431
432 // Update phi node in exit block of post-loop.
433 Builder.SetInsertPoint(&PostLoopPreHeader->front());
434 for (PHINode &PN : PostLoop->getExitBlock()->phis()) {
435 for (auto i : seq<int>(0, PN.getNumOperands())) {
436 // Check incoming block is pre-loop's exiting block.
437 if (PN.getIncomingBlock(i) == L.getExitingBlock()) {
438 Value *IncomingValue = PN.getIncomingValue(i);
439
440 // Create LCSSA phi node for incoming value.
441 PHINode *LCSSAPhi =
442 Builder.CreatePHI(PN.getType(), 1, PN.getName() + ".lcssa");
443 LCSSAPhi->setDebugLoc(PN.getDebugLoc());
444 LCSSAPhi->addIncoming(IncomingValue, PN.getIncomingBlock(i));
445
446 // Replace pre-loop's exiting block by post-loop's preheader.
447 PN.setIncomingBlock(i, PostLoopPreHeader);
448 // Replace incoming value by LCSSAPhi.
449 PN.setIncomingValue(i, LCSSAPhi);
450 // Add a new incoming value with post-loop's exiting block.
451 PN.addIncoming(VMap[IncomingValue], PostLoop->getExitingBlock());
452 }
453 }
454 }
455
456 // Update dominator tree.
457 DT.changeImmediateDominator(PostLoopPreHeader, L.getExitingBlock());
458 DT.changeImmediateDominator(PostLoop->getExitBlock(), PostLoopPreHeader);
459
460 // Invalidate cached SE information.
461 SE.forgetLoop(&L);
462
463 // Canonicalize loops.
464 simplifyLoop(&L, &DT, &LI, &SE, nullptr, nullptr, true);
465 simplifyLoop(PostLoop, &DT, &LI, &SE, nullptr, nullptr, true);
466
467 // Add new post-loop to loop pass manager.
468 U.addSiblingLoops(PostLoop);
469
470 return true;
471}
472
473PreservedAnalyses LoopBoundSplitPass::run(Loop &L, LoopAnalysisManager &AM,
474 LoopStandardAnalysisResults &AR,
475 LPMUpdater &U) {
476 Function &F = *L.getHeader()->getParent();
477 (void)F;
478
479 LLVM_DEBUG(dbgs() << "Spliting bound of loop in " << F.getName() << ": " << L
1
Assuming 'DebugFlag' is false
2
Loop condition is false. Exiting loop
480 << "\n");
481
482 if (!splitLoopBound(L, AR.DT, AR.LI, AR.SE, U))
3
Calling 'splitLoopBound'
483 return PreservedAnalyses::all();
484
485 assert(AR.DT.verify(DominatorTree::VerificationLevel::Fast));
486 AR.LI.verify(AR.DT);
487
488 return getLoopPassPreservedAnalyses();
489}
490
491} // end namespace llvm
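
Why the null value can survive to line 396: the loop at lines 365-390 assigns
ExitingCondLCSSAPhi only for a header phi whose type is SCEVable, whose SCEV is
an AddRec, and whose incoming value from the latch equals
ExitingCond.NonPHIAddRecValue. Nothing earlier in the pass guarantees such a
phi exists, so on a path where no iteration matches, CreateICmp receives a null
LHS. A minimal sketch of a defensive guard (an illustration of one option, not
the actual upstream fix):

    // Make the invariant explicit so a missing match fails the assertion in
    // debug builds instead of dereferencing null inside CreateICmp.
    // (Assumption: the pass intends a matching header phi to always exist.)
    assert(ExitingCondLCSSAPhi &&
           "Expected a header phi matching the exiting condition's AddRec");
    Value *Cond =
        Builder.CreateICmp(Pred, ExitingCondLCSSAPhi, ExitingCond.BoundValue);

A complete fix would either establish that the matching phi always exists
before the loop is cloned, or bail out of the transform before any IR is
modified, since returning false at line 396 would leave the function
half-transformed.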

/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/include/llvm/IR/IRBuilder.h

1//===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the IRBuilder class, which is used as a convenient way
10// to create LLVM instructions with a consistent and simplified interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_IRBUILDER_H
15#define LLVM_IR_IRBUILDER_H
16
17#include "llvm-c/Types.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/StringRef.h"
22#include "llvm/ADT/Twine.h"
23#include "llvm/IR/BasicBlock.h"
24#include "llvm/IR/Constant.h"
25#include "llvm/IR/ConstantFolder.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/DebugLoc.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/FPEnv.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/GlobalVariable.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/Instructions.h"
36#include "llvm/IR/Intrinsics.h"
37#include "llvm/IR/LLVMContext.h"
38#include "llvm/IR/Module.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/IR/Type.h"
41#include "llvm/IR/Value.h"
42#include "llvm/IR/ValueHandle.h"
43#include "llvm/Support/AtomicOrdering.h"
44#include "llvm/Support/CBindingWrapping.h"
45#include "llvm/Support/Casting.h"
46#include <cassert>
47#include <cstdint>
48#include <functional>
49#include <utility>
50
51namespace llvm {
52
53class APInt;
54class Use;
55
56/// This provides the default implementation of the IRBuilder
57/// 'InsertHelper' method that is called whenever an instruction is created by
58/// IRBuilder and needs to be inserted.
59///
60/// By default, this inserts the instruction at the insertion point.
61class IRBuilderDefaultInserter {
62public:
63 virtual ~IRBuilderDefaultInserter();
64
65 virtual void InsertHelper(Instruction *I, const Twine &Name,
66 BasicBlock *BB,
67 BasicBlock::iterator InsertPt) const {
68 if (BB) BB->getInstList().insert(InsertPt, I);
69 I->setName(Name);
70 }
71};
72
73/// Provides an 'InsertHelper' that calls a user-provided callback after
74/// performing the default insertion.
75class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
76 std::function<void(Instruction *)> Callback;
77
78public:
79 ~IRBuilderCallbackInserter() override;
80
81 IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
82 : Callback(std::move(Callback)) {}
83
84 void InsertHelper(Instruction *I, const Twine &Name,
85 BasicBlock *BB,
86 BasicBlock::iterator InsertPt) const override {
87 IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
88 Callback(I);
89 }
90};
91
92/// Common base class shared among various IRBuilders.
93class IRBuilderBase {
94 /// Pairs of (metadata kind, MDNode *) that should be added to all newly
95 /// created instructions, like !dbg metadata.
96 SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy;
97
98 /// Add or update an entry (Kind, MD) to MetadataToCopy, if \p MD is not
99 /// null. If \p MD is null, remove the entry with \p Kind.
100 void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) {
101 if (!MD) {
102 erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) {
103 return KV.first == Kind;
104 });
105 return;
106 }
107
108 for (auto &KV : MetadataToCopy)
109 if (KV.first == Kind) {
110 KV.second = MD;
111 return;
112 }
113
114 MetadataToCopy.emplace_back(Kind, MD);
115 }
116
117protected:
118 BasicBlock *BB;
119 BasicBlock::iterator InsertPt;
120 LLVMContext &Context;
121 const IRBuilderFolder &Folder;
122 const IRBuilderDefaultInserter &Inserter;
123
124 MDNode *DefaultFPMathTag;
125 FastMathFlags FMF;
126
127 bool IsFPConstrained = false;
128 fp::ExceptionBehavior DefaultConstrainedExcept = fp::ebStrict;
129 RoundingMode DefaultConstrainedRounding = RoundingMode::Dynamic;
130
131 ArrayRef<OperandBundleDef> DefaultOperandBundles;
132
133public:
134 IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder,
135 const IRBuilderDefaultInserter &Inserter, MDNode *FPMathTag,
136 ArrayRef<OperandBundleDef> OpBundles)
137 : Context(context), Folder(Folder), Inserter(Inserter),
138 DefaultFPMathTag(FPMathTag), DefaultOperandBundles(OpBundles) {
139 ClearInsertionPoint();
140 }
141
142 /// Insert and return the specified instruction.
143 template<typename InstTy>
144 InstTy *Insert(InstTy *I, const Twine &Name = "") const {
145 Inserter.InsertHelper(I, Name, BB, InsertPt);
146 AddMetadataToInst(I);
147 return I;
148 }
149
150 /// No-op overload to handle constants.
151 Constant *Insert(Constant *C, const Twine& = "") const {
152 return C;
153 }
154
155 Value *Insert(Value *V, const Twine &Name = "") const {
156 if (Instruction *I = dyn_cast<Instruction>(V))
157 return Insert(I, Name);
158 assert(isa<Constant>(V));
159 return V;
160 }
161
162 //===--------------------------------------------------------------------===//
163 // Builder configuration methods
164 //===--------------------------------------------------------------------===//
165
166 /// Clear the insertion point: created instructions will not be
167 /// inserted into a block.
168 void ClearInsertionPoint() {
169 BB = nullptr;
170 InsertPt = BasicBlock::iterator();
171 }
172
173 BasicBlock *GetInsertBlock() const { return BB; }
174 BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
175 LLVMContext &getContext() const { return Context; }
176
177 /// This specifies that created instructions should be appended to the
178 /// end of the specified block.
179 void SetInsertPoint(BasicBlock *TheBB) {
180 BB = TheBB;
181 InsertPt = BB->end();
182 }
183
184 /// This specifies that created instructions should be inserted before
185 /// the specified instruction.
186 void SetInsertPoint(Instruction *I) {
187 BB = I->getParent();
188 InsertPt = I->getIterator();
189 assert(InsertPt != BB->end() && "Can't read debug loc from end()");
190 SetCurrentDebugLocation(I->getDebugLoc());
191 }
192
193 /// This specifies that created instructions should be inserted at the
194 /// specified point.
195 void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
196 BB = TheBB;
197 InsertPt = IP;
198 if (IP != TheBB->end())
199 SetCurrentDebugLocation(IP->getDebugLoc());
200 }
201
202 /// Set location information used by debugging information.
203 void SetCurrentDebugLocation(DebugLoc L) {
204 AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode());
205 }
206
207 /// Collect metadata with IDs \p MetadataKinds from \p Src which should be
208 /// added to all created instructions. Entries present in MedataDataToCopy but
209 /// not on \p Src will be dropped from MetadataToCopy.
210 void CollectMetadataToCopy(Instruction *Src,
211 ArrayRef<unsigned> MetadataKinds) {
212 for (unsigned K : MetadataKinds)
213 AddOrRemoveMetadataToCopy(K, Src->getMetadata(K));
214 }
215
216 /// Get location information used by debugging information.
217 DebugLoc getCurrentDebugLocation() const;
218
219 /// If this builder has a current debug location, set it on the
220 /// specified instruction.
221 void SetInstDebugLocation(Instruction *I) const;
222
223 /// Add all entries in MetadataToCopy to \p I.
224 void AddMetadataToInst(Instruction *I) const {
225 for (const auto &KV : MetadataToCopy)
226 I->setMetadata(KV.first, KV.second);
227 }
228
229 /// Get the return type of the current function that we're emitting
230 /// into.
231 Type *getCurrentFunctionReturnType() const;
232
233 /// InsertPoint - A saved insertion point.
234 class InsertPoint {
235 BasicBlock *Block = nullptr;
236 BasicBlock::iterator Point;
237
238 public:
239 /// Creates a new insertion point which doesn't point to anything.
240 InsertPoint() = default;
241
242 /// Creates a new insertion point at the given location.
243 InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
244 : Block(InsertBlock), Point(InsertPoint) {}
245
246 /// Returns true if this insert point is set.
247 bool isSet() const { return (Block != nullptr); }
248
249 BasicBlock *getBlock() const { return Block; }
250 BasicBlock::iterator getPoint() const { return Point; }
251 };
252
253 /// Returns the current insert point.
254 InsertPoint saveIP() const {
255 return InsertPoint(GetInsertBlock(), GetInsertPoint());
256 }
257
258 /// Returns the current insert point, clearing it in the process.
259 InsertPoint saveAndClearIP() {
260 InsertPoint IP(GetInsertBlock(), GetInsertPoint());
261 ClearInsertionPoint();
262 return IP;
263 }
264
265 /// Sets the current insert point to a previously-saved location.
266 void restoreIP(InsertPoint IP) {
267 if (IP.isSet())
268 SetInsertPoint(IP.getBlock(), IP.getPoint());
269 else
270 ClearInsertionPoint();
271 }
272
273 /// Get the floating point math metadata being used.
274 MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }
275
276 /// Get the flags to be applied to created floating point ops
277 FastMathFlags getFastMathFlags() const { return FMF; }
278
279 FastMathFlags &getFastMathFlags() { return FMF; }
280
281 /// Clear the fast-math flags.
282 void clearFastMathFlags() { FMF.clear(); }
283
284 /// Set the floating point math metadata to be used.
285 void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }
286
287 /// Set the fast-math flags to be used with generated fp-math operators
288 void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }
289
290 /// Enable/Disable use of constrained floating point math. When
291 /// enabled the CreateF<op>() calls instead create constrained
292 /// floating point intrinsic calls. Fast math flags are unaffected
293 /// by this setting.
294 void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }
295
296 /// Query for the use of constrained floating point math
297 bool getIsFPConstrained() { return IsFPConstrained; }
298
299 /// Set the exception handling to be used with constrained floating point
300 void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
301#ifndef NDEBUG
302 Optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(NewExcept);
303 assert(ExceptStr && "Garbage strict exception behavior!");
304#endif
305 DefaultConstrainedExcept = NewExcept;
306 }
307
308 /// Set the rounding mode handling to be used with constrained floating point
309 void setDefaultConstrainedRounding(RoundingMode NewRounding) {
310#ifndef NDEBUG
311 Optional<StringRef> RoundingStr = convertRoundingModeToStr(NewRounding);
312 assert(RoundingStr && "Garbage strict rounding mode!");
313#endif
314 DefaultConstrainedRounding = NewRounding;
315 }
316
317 /// Get the exception handling used with constrained floating point
318 fp::ExceptionBehavior getDefaultConstrainedExcept() {
319 return DefaultConstrainedExcept;
320 }
321
322 /// Get the rounding mode handling used with constrained floating point
323 RoundingMode getDefaultConstrainedRounding() {
324 return DefaultConstrainedRounding;
325 }
326
327 void setConstrainedFPFunctionAttr() {
328 assert(BB && "Must have a basic block to set any function attributes!");
329
330 Function *F = BB->getParent();
331 if (!F->hasFnAttribute(Attribute::StrictFP)) {
332 F->addFnAttr(Attribute::StrictFP);
333 }
334 }
335
336 void setConstrainedFPCallAttr(CallBase *I) {
337 I->addFnAttr(Attribute::StrictFP);
338 }
339
340 void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
341 DefaultOperandBundles = OpBundles;
342 }
343
344 //===--------------------------------------------------------------------===//
345 // RAII helpers.
346 //===--------------------------------------------------------------------===//
347
348 // RAII object that stores the current insertion point and restores it
349 // when the object is destroyed. This includes the debug location.
350 class InsertPointGuard {
351 IRBuilderBase &Builder;
352 AssertingVH<BasicBlock> Block;
353 BasicBlock::iterator Point;
354 DebugLoc DbgLoc;
355
356 public:
357 InsertPointGuard(IRBuilderBase &B)
358 : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
359 DbgLoc(B.getCurrentDebugLocation()) {}
360
361 InsertPointGuard(const InsertPointGuard &) = delete;
362 InsertPointGuard &operator=(const InsertPointGuard &) = delete;
363
364 ~InsertPointGuard() {
365 Builder.restoreIP(InsertPoint(Block, Point));
366 Builder.SetCurrentDebugLocation(DbgLoc);
367 }
368 };
369
370 // RAII object that stores the current fast math settings and restores
371 // them when the object is destroyed.
372 class FastMathFlagGuard {
373 IRBuilderBase &Builder;
374 FastMathFlags FMF;
375 MDNode *FPMathTag;
376 bool IsFPConstrained;
377 fp::ExceptionBehavior DefaultConstrainedExcept;
378 RoundingMode DefaultConstrainedRounding;
379
380 public:
381 FastMathFlagGuard(IRBuilderBase &B)
382 : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag),
383 IsFPConstrained(B.IsFPConstrained),
384 DefaultConstrainedExcept(B.DefaultConstrainedExcept),
385 DefaultConstrainedRounding(B.DefaultConstrainedRounding) {}
386
387 FastMathFlagGuard(const FastMathFlagGuard &) = delete;
388 FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;
389
390 ~FastMathFlagGuard() {
391 Builder.FMF = FMF;
392 Builder.DefaultFPMathTag = FPMathTag;
393 Builder.IsFPConstrained = IsFPConstrained;
394 Builder.DefaultConstrainedExcept = DefaultConstrainedExcept;
395 Builder.DefaultConstrainedRounding = DefaultConstrainedRounding;
396 }
397 };
398
399 // RAII object that stores the current default operand bundles and restores
400 // them when the object is destroyed.
401 class OperandBundlesGuard {
402 IRBuilderBase &Builder;
403 ArrayRef<OperandBundleDef> DefaultOperandBundles;
404
405 public:
406 OperandBundlesGuard(IRBuilderBase &B)
407 : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {}
408
409 OperandBundlesGuard(const OperandBundlesGuard &) = delete;
410 OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete;
411
412 ~OperandBundlesGuard() {
413 Builder.DefaultOperandBundles = DefaultOperandBundles;
414 }
415 };
416
417
418 //===--------------------------------------------------------------------===//
419 // Miscellaneous creation methods.
420 //===--------------------------------------------------------------------===//
421
422 /// Make a new global variable with initializer type i8*
423 ///
424 /// Make a new global variable with an initializer that has array of i8 type
425 /// filled in with the null terminated string value specified. The new global
426 /// variable will be marked mergable with any others of the same contents. If
427 /// Name is specified, it is the name of the global variable created.
428 ///
429 /// If no module is given via \p M, it is taken from the insertion point basic
430 /// block.
431 GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
432 unsigned AddressSpace = 0,
433 Module *M = nullptr);
434
435 /// Get a constant value representing either true or false.
436 ConstantInt *getInt1(bool V) {
437 return ConstantInt::get(getInt1Ty(), V);
438 }
439
440 /// Get the constant value for i1 true.
441 ConstantInt *getTrue() {
442 return ConstantInt::getTrue(Context);
443 }
444
445 /// Get the constant value for i1 false.
446 ConstantInt *getFalse() {
447 return ConstantInt::getFalse(Context);
448 }
449
450 /// Get a constant 8-bit value.
451 ConstantInt *getInt8(uint8_t C) {
452 return ConstantInt::get(getInt8Ty(), C);
453 }
454
455 /// Get a constant 16-bit value.
456 ConstantInt *getInt16(uint16_t C) {
457 return ConstantInt::get(getInt16Ty(), C);
458 }
459
460 /// Get a constant 32-bit value.
461 ConstantInt *getInt32(uint32_t C) {
462 return ConstantInt::get(getInt32Ty(), C);
463 }
464
465 /// Get a constant 64-bit value.
466 ConstantInt *getInt64(uint64_t C) {
467 return ConstantInt::get(getInt64Ty(), C);
468 }
469
470 /// Get a constant N-bit value, zero extended or truncated from
471 /// a 64-bit value.
472 ConstantInt *getIntN(unsigned N, uint64_t C) {
473 return ConstantInt::get(getIntNTy(N), C);
474 }
475
476 /// Get a constant integer value.
477 ConstantInt *getInt(const APInt &AI) {
478 return ConstantInt::get(Context, AI);
479 }
480
481 //===--------------------------------------------------------------------===//
482 // Type creation methods
483 //===--------------------------------------------------------------------===//
484
485 /// Fetch the type representing a single bit
486 IntegerType *getInt1Ty() {
487 return Type::getInt1Ty(Context);
488 }
489
490 /// Fetch the type representing an 8-bit integer.
491 IntegerType *getInt8Ty() {
492 return Type::getInt8Ty(Context);
493 }
494
495 /// Fetch the type representing a 16-bit integer.
496 IntegerType *getInt16Ty() {
497 return Type::getInt16Ty(Context);
498 }
499
500 /// Fetch the type representing a 32-bit integer.
501 IntegerType *getInt32Ty() {
502 return Type::getInt32Ty(Context);
503 }
504
505 /// Fetch the type representing a 64-bit integer.
506 IntegerType *getInt64Ty() {
507 return Type::getInt64Ty(Context);
508 }
509
510 /// Fetch the type representing a 128-bit integer.
511 IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }
512
513 /// Fetch the type representing an N-bit integer.
514 IntegerType *getIntNTy(unsigned N) {
515 return Type::getIntNTy(Context, N);
516 }
517
518 /// Fetch the type representing a 16-bit floating point value.
519 Type *getHalfTy() {
520 return Type::getHalfTy(Context);
521 }
522
523 /// Fetch the type representing a 16-bit brain floating point value.
524 Type *getBFloatTy() {
525 return Type::getBFloatTy(Context);
526 }
527
528 /// Fetch the type representing a 32-bit floating point value.
529 Type *getFloatTy() {
530 return Type::getFloatTy(Context);
531 }
532
533 /// Fetch the type representing a 64-bit floating point value.
534 Type *getDoubleTy() {
535 return Type::getDoubleTy(Context);
536 }
537
538 /// Fetch the type representing void.
539 Type *getVoidTy() {
540 return Type::getVoidTy(Context);
541 }
542
543 /// Fetch the type representing a pointer.
544 PointerType *getPtrTy(unsigned AddrSpace = 0) {
545 return PointerType::get(Context, AddrSpace);
546 }
547
548 /// Fetch the type representing a pointer to an 8-bit integer value.
549 PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
550 return Type::getInt8PtrTy(Context, AddrSpace);
551 }
552
553 /// Fetch the type representing a pointer to an integer value.
554 IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
555 return DL.getIntPtrType(Context, AddrSpace);
556 }
557
558 //===--------------------------------------------------------------------===//
559 // Intrinsic creation methods
560 //===--------------------------------------------------------------------===//
561
562 /// Create and insert a memset to the specified pointer and the
563 /// specified value.
564 ///
565 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
566 /// specified, it will be added to the instruction. Likewise with alias.scope
567 /// and noalias tags.
568 CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size,
569 MaybeAlign Align, bool isVolatile = false,
570 MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
571 MDNode *NoAliasTag = nullptr) {
572 return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
573 TBAATag, ScopeTag, NoAliasTag);
574 }
575
576 CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align,
577 bool isVolatile = false, MDNode *TBAATag = nullptr,
578 MDNode *ScopeTag = nullptr,
579 MDNode *NoAliasTag = nullptr);
580
581 CallInst *CreateMemSetInline(Value *Dst, MaybeAlign DstAlign, Value *Val,
582 Value *Size, bool IsVolatile = false,
583 MDNode *TBAATag = nullptr,
584 MDNode *ScopeTag = nullptr,
585 MDNode *NoAliasTag = nullptr);
586
587 /// Create and insert an element unordered-atomic memset of the region of
588 /// memory starting at the given pointer to the given value.
589 ///
590 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
591 /// specified, it will be added to the instruction. Likewise with alias.scope
592 /// and noalias tags.
593 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
594 uint64_t Size, Align Alignment,
595 uint32_t ElementSize,
596 MDNode *TBAATag = nullptr,
597 MDNode *ScopeTag = nullptr,
598 MDNode *NoAliasTag = nullptr) {
599 return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size),
600 Align(Alignment), ElementSize,
601 TBAATag, ScopeTag, NoAliasTag);
602 }
603
604 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
605 Value *Size, Align Alignment,
606 uint32_t ElementSize,
607 MDNode *TBAATag = nullptr,
608 MDNode *ScopeTag = nullptr,
609 MDNode *NoAliasTag = nullptr);
610
611 /// Create and insert a memcpy between the specified pointers.
612 ///
613 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
614 /// specified, it will be added to the instruction. Likewise with alias.scope
615 /// and noalias tags.
616 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
617 MaybeAlign SrcAlign, uint64_t Size,
618 bool isVolatile = false, MDNode *TBAATag = nullptr,
619 MDNode *TBAAStructTag = nullptr,
620 MDNode *ScopeTag = nullptr,
621 MDNode *NoAliasTag = nullptr) {
622 return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
623 isVolatile, TBAATag, TBAAStructTag, ScopeTag,
624 NoAliasTag);
625 }
626
627 CallInst *CreateMemTransferInst(
628 Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
629 MaybeAlign SrcAlign, Value *Size, bool isVolatile = false,
630 MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
631 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
632
633 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
634 MaybeAlign SrcAlign, Value *Size,
635 bool isVolatile = false, MDNode *TBAATag = nullptr,
636 MDNode *TBAAStructTag = nullptr,
637 MDNode *ScopeTag = nullptr,
638 MDNode *NoAliasTag = nullptr) {
639 return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src,
640 SrcAlign, Size, isVolatile, TBAATag,
641 TBAAStructTag, ScopeTag, NoAliasTag);
642 }
643
644 CallInst *
645 CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src,
646 MaybeAlign SrcAlign, Value *Size, bool IsVolatile = false,
647 MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
648 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
649
650 /// Create and insert an element unordered-atomic memcpy between the
651 /// specified pointers.
652 ///
653 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively.
654 ///
655 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
656 /// specified, it will be added to the instruction. Likewise with alias.scope
657 /// and noalias tags.
658 CallInst *CreateElementUnorderedAtomicMemCpy(
659 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
660 uint32_t ElementSize, MDNode *TBAATag = nullptr,
661 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
662 MDNode *NoAliasTag = nullptr);
663
664 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
665 MaybeAlign SrcAlign, uint64_t Size,
666 bool isVolatile = false, MDNode *TBAATag = nullptr,
667 MDNode *ScopeTag = nullptr,
668 MDNode *NoAliasTag = nullptr) {
669 return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
670 isVolatile, TBAATag, ScopeTag, NoAliasTag);
671 }
672
673 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
674 MaybeAlign SrcAlign, Value *Size,
675 bool isVolatile = false, MDNode *TBAATag = nullptr,
676 MDNode *ScopeTag = nullptr,
677 MDNode *NoAliasTag = nullptr);
678
679 /// \brief Create and insert an element unordered-atomic memmove between the
680 /// specified pointers.
681 ///
682 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers,
683 /// respectively.
684 ///
685 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
686 /// specified, it will be added to the instruction. Likewise with alias.scope
687 /// and noalias tags.
688 CallInst *CreateElementUnorderedAtomicMemMove(
689 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
690 uint32_t ElementSize, MDNode *TBAATag = nullptr,
691 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
692 MDNode *NoAliasTag = nullptr);
693
694private:
695 CallInst *getReductionIntrinsic(Intrinsic::ID ID, Value *Src);
696
697public:
698 /// Create a sequential vector fadd reduction intrinsic of the source vector.
699 /// The first parameter is a scalar accumulator value. An unordered reduction
700 /// can be created by adding the reassoc fast-math flag to the resulting
701 /// sequential reduction.
702 CallInst *CreateFAddReduce(Value *Acc, Value *Src);
703
704 /// Create a sequential vector fmul reduction intrinsic of the source vector.
705 /// The first parameter is a scalar accumulator value. An unordered reduction
706 /// can be created by adding the reassoc fast-math flag to the resulting
707 /// sequential reduction.
708 CallInst *CreateFMulReduce(Value *Acc, Value *Src);
709
710 /// Create a vector int add reduction intrinsic of the source vector.
711 CallInst *CreateAddReduce(Value *Src);
712
713 /// Create a vector int mul reduction intrinsic of the source vector.
714 CallInst *CreateMulReduce(Value *Src);
715
716 /// Create a vector int AND reduction intrinsic of the source vector.
717 CallInst *CreateAndReduce(Value *Src);
718
719 /// Create a vector int OR reduction intrinsic of the source vector.
720 CallInst *CreateOrReduce(Value *Src);
721
722 /// Create a vector int XOR reduction intrinsic of the source vector.
723 CallInst *CreateXorReduce(Value *Src);
724
725 /// Create a vector integer max reduction intrinsic of the source
726 /// vector.
727 CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false);
728
729 /// Create a vector integer min reduction intrinsic of the source
730 /// vector.
731 CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false);
732
733 /// Create a vector float max reduction intrinsic of the source
734 /// vector.
735 CallInst *CreateFPMaxReduce(Value *Src);
736
737 /// Create a vector float min reduction intrinsic of the source
738 /// vector.
739 CallInst *CreateFPMinReduce(Value *Src);
740
741 /// Create a lifetime.start intrinsic.
742 ///
743 /// If the pointer isn't i8* it will be converted.
744 CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);
745
746 /// Create a lifetime.end intrinsic.
747 ///
748 /// If the pointer isn't i8* it will be converted.
749 CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
750
751 /// Create a call to invariant.start intrinsic.
752 ///
753 /// If the pointer isn't i8* it will be converted.
754 CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
755
756 /// Create a call to llvm.threadlocal.address intrinsic.
757 CallInst *CreateThreadLocalAddress(Value *Ptr);
758
759 /// Create a call to Masked Load intrinsic
760 CallInst *CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask,
761 Value *PassThru = nullptr, const Twine &Name = "");
762
763 /// Create a call to Masked Store intrinsic
764 CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
765 Value *Mask);
766
767 /// Create a call to Masked Gather intrinsic
768 CallInst *CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment,
769 Value *Mask = nullptr, Value *PassThru = nullptr,
770 const Twine &Name = "");
771
772 /// Create a call to Masked Scatter intrinsic
773 CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
774 Value *Mask = nullptr);
775
776 /// Create an assume intrinsic call that allows the optimizer to
777 /// assume that the provided condition will be true.
778 ///
779 /// The optional argument \p OpBundles specifies operand bundles that are
780 /// added to the call instruction.
781 CallInst *CreateAssumption(Value *Cond,
782 ArrayRef<OperandBundleDef> OpBundles = llvm::None);
783
784 /// Create a llvm.experimental.noalias.scope.decl intrinsic call.
785 Instruction *CreateNoAliasScopeDeclaration(Value *Scope);
786 Instruction *CreateNoAliasScopeDeclaration(MDNode *ScopeTag) {
787 return CreateNoAliasScopeDeclaration(
788 MetadataAsValue::get(Context, ScopeTag));
789 }
790
791 /// Create a call to the experimental.gc.statepoint intrinsic to
792 /// start a new statepoint sequence.
793 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
794 FunctionCallee ActualCallee,
795 ArrayRef<Value *> CallArgs,
796 Optional<ArrayRef<Value *>> DeoptArgs,
797 ArrayRef<Value *> GCArgs,
798 const Twine &Name = "");
799
800 /// Create a call to the experimental.gc.statepoint intrinsic to
801 /// start a new statepoint sequence.
802 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
803 FunctionCallee ActualCallee, uint32_t Flags,
804 ArrayRef<Value *> CallArgs,
805 Optional<ArrayRef<Use>> TransitionArgs,
806 Optional<ArrayRef<Use>> DeoptArgs,
807 ArrayRef<Value *> GCArgs,
808 const Twine &Name = "");
809
810 /// Convenience function for the common case when CallArgs are filled
811 /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be
812 /// .get()'ed to get the Value pointer.
813 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
814 FunctionCallee ActualCallee,
815 ArrayRef<Use> CallArgs,
816 Optional<ArrayRef<Value *>> DeoptArgs,
817 ArrayRef<Value *> GCArgs,
818 const Twine &Name = "");
819
820 /// Create an invoke to the experimental.gc.statepoint intrinsic to
821 /// start a new statepoint sequence.
822 InvokeInst *
823 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
824 FunctionCallee ActualInvokee, BasicBlock *NormalDest,
825 BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
826 Optional<ArrayRef<Value *>> DeoptArgs,
827 ArrayRef<Value *> GCArgs, const Twine &Name = "");
828
829 /// Create an invoke to the experimental.gc.statepoint intrinsic to
830 /// start a new statepoint sequence.
831 InvokeInst *CreateGCStatepointInvoke(
832 uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
833 BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
834 ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
835 Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
836 const Twine &Name = "");
837
838 // Convenience function for the common case when CallArgs are filled in using
839 // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
840 // get the Value *.
841 InvokeInst *
842 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
843 FunctionCallee ActualInvokee, BasicBlock *NormalDest,
844 BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
845 Optional<ArrayRef<Value *>> DeoptArgs,
846 ArrayRef<Value *> GCArgs, const Twine &Name = "");
847
848 /// Create a call to the experimental.gc.result intrinsic to extract
849 /// the result from a call wrapped in a statepoint.
850 CallInst *CreateGCResult(Instruction *Statepoint,
851 Type *ResultType,
852 const Twine &Name = "");
853
854 /// Create a call to the experimental.gc.relocate intrinsics to
855 /// project the relocated value of one pointer from the statepoint.
856 CallInst *CreateGCRelocate(Instruction *Statepoint,
857 int BaseOffset,
858 int DerivedOffset,
859 Type *ResultType,
860 const Twine &Name = "");
861
862 /// Create a call to the experimental.gc.pointer.base intrinsic to get the
863 /// base pointer for the specified derived pointer.
864 CallInst *CreateGCGetPointerBase(Value *DerivedPtr, const Twine &Name = "");
865
866 /// Create a call to the experimental.gc.get.pointer.offset intrinsic to get
867 /// the offset of the specified derived pointer from its base.
868 CallInst *CreateGCGetPointerOffset(Value *DerivedPtr, const Twine &Name = "");
869
870 /// Create a call to llvm.vscale, multiplied by \p Scaling. The type of VScale
871 /// will be the same type as that of \p Scaling.
872 Value *CreateVScale(Constant *Scaling, const Twine &Name = "");
873
874 /// Creates a vector of type \p DstType with the linear sequence <0, 1, ...>
875 Value *CreateStepVector(Type *DstType, const Twine &Name = "");
876
877 /// Create a call to intrinsic \p ID with 1 operand which is mangled on its
878 /// type.
879 CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
880 Instruction *FMFSource = nullptr,
881 const Twine &Name = "");
882
883 /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
884 /// first type.
885 CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
886 Instruction *FMFSource = nullptr,
887 const Twine &Name = "");
888
889 /// Create a call to intrinsic \p ID with \p Args, mangled using \p Types. If
890 /// \p FMFSource is provided, copy fast-math-flags from that instruction to
891 /// the intrinsic.
892 CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
893 ArrayRef<Value *> Args,
894 Instruction *FMFSource = nullptr,
895 const Twine &Name = "");
896
897 /// Create a call to intrinsic \p ID with \p RetTy and \p Args. If
898 /// \p FMFSource is provided, copy fast-math-flags from that instruction to
899 /// the intrinsic.
900 CallInst *CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
901 ArrayRef<Value *> Args,
902 Instruction *FMFSource = nullptr,
903 const Twine &Name = "");
904
905 /// Create call to the minnum intrinsic.
906 CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
907 return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name);
908 }
909
910 /// Create call to the maxnum intrinsic.
911 CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
912 return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name);
913 }
914
915 /// Create call to the minimum intrinsic.
916 CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
917 return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
918 }
919
920 /// Create call to the maximum intrinsic.
921 CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
922 return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
923 }
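
A note on the four helpers above: minnum/maxnum follow IEEE-754 minNum/maxNum (a quiet NaN in one operand is ignored in favor of the other operand), while minimum/maximum propagate NaN and order -0.0 before +0.0. A minimal usage sketch, assuming an IRBuilder<> named B and two floating-point Values X and Y (illustrative names, not from this header):

  Value *NaNIgnoring    = B.CreateMinNum(X, Y);   // llvm.minnum: a quiet NaN operand yields the other operand
  Value *NaNPropagating = B.CreateMinimum(X, Y);  // llvm.minimum: any NaN operand yields NaN
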
924
925 /// Create a call to the arithmetic_fence intrinsic.
926 CallInst *CreateArithmeticFence(Value *Val, Type *DstType,
927 const Twine &Name = "") {
928 return CreateIntrinsic(Intrinsic::arithmetic_fence, DstType, Val, nullptr,
929 Name);
930 }
931
932 /// Create a call to the vector.extract intrinsic.
933 CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx,
934 const Twine &Name = "") {
935 return CreateIntrinsic(Intrinsic::vector_extract,
936 {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr,
937 Name);
938 }
939
940 /// Create a call to the vector.insert intrinsic.
941 CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec,
942 Value *Idx, const Twine &Name = "") {
943 return CreateIntrinsic(Intrinsic::vector_insert,
944 {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx},
945 nullptr, Name);
946 }
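
A minimal sketch of the two vector helpers above, assuming Builder is an IRBuilder<>, SrcVec is a <vscale x 4 x float> Value, and FloatTy is the context's float type (all illustrative):

  // Extract the fixed-width <4 x float> prefix starting at element 0; the
  // index must be a multiple of the result vector's length.
  Value *Lo = Builder.CreateExtractVector(FixedVectorType::get(FloatTy, 4),
                                          SrcVec, Builder.getInt64(0));
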
947
948private:
949 /// Create a call to a masked intrinsic with given Id.
950 CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
951 ArrayRef<Type *> OverloadedTypes,
952 const Twine &Name = "");
953
954 Value *getCastedInt8PtrValue(Value *Ptr);
955
956 //===--------------------------------------------------------------------===//
957 // Instruction creation methods: Terminators
958 //===--------------------------------------------------------------------===//
959
960private:
961 /// Helper to add branch weight and unpredictable metadata onto an
962 /// instruction.
963 /// \returns The annotated instruction.
964 template <typename InstTy>
965 InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
966 if (Weights)
967 I->setMetadata(LLVMContext::MD_prof, Weights);
968 if (Unpredictable)
969 I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
970 return I;
971 }
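
The Weights node consumed by addBranchMetadata is an ordinary !prof MDNode; it is typically produced with MDBuilder rather than built by hand. A sketch, assuming an LLVMContext named Ctx:

  #include "llvm/IR/MDBuilder.h"
  // 95:5 taken/not-taken weights for the subsequent conditional branch.
  MDNode *Weights = MDBuilder(Ctx).createBranchWeights(/*TrueWeight=*/95,
                                                       /*FalseWeight=*/5);
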
972
973public:
974 /// Create a 'ret void' instruction.
975 ReturnInst *CreateRetVoid() {
976 return Insert(ReturnInst::Create(Context));
977 }
978
979 /// Create a 'ret <val>' instruction.
980 ReturnInst *CreateRet(Value *V) {
981 return Insert(ReturnInst::Create(Context, V));
982 }
983
984 /// Create a sequence of N insertvalue instructions,
985 /// with one Value from the retVals array each, that build an aggregate
986 /// return value one value at a time, and a ret instruction to return
987 /// the resulting aggregate value.
988 ///
989 /// This is a convenience function for code that uses aggregate return values
990 /// as a vehicle for having multiple return values.
991 ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
992 Value *V = PoisonValue::get(getCurrentFunctionReturnType());
993 for (unsigned i = 0; i != N; ++i)
994 V = CreateInsertValue(V, retVals[i], i, "mrv");
995 return Insert(ReturnInst::Create(Context, V));
996 }
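
A sketch of CreateAggregateRet, assuming the current function's return type is {i32, i32} and A and B are i32 Values (illustrative names):

  Value *RetVals[] = {A, B};
  // Emits two insertvalue instructions and then 'ret {i32, i32}'.
  Builder.CreateAggregateRet(RetVals, 2);
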
997
998 /// Create an unconditional 'br label X' instruction.
999 BranchInst *CreateBr(BasicBlock *Dest) {
1000 return Insert(BranchInst::Create(Dest));
1001 }
1002
1003 /// Create a conditional 'br Cond, TrueDest, FalseDest'
1004 /// instruction.
1005 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
1006 MDNode *BranchWeights = nullptr,
1007 MDNode *Unpredictable = nullptr) {
1008 return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
1009 BranchWeights, Unpredictable));
1010 }
1011
1012 /// Create a conditional 'br Cond, TrueDest, FalseDest'
1013 /// instruction. Copy branch metadata if available.
1014 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
1015 Instruction *MDSrc) {
1016 BranchInst *Br = BranchInst::Create(True, False, Cond);
1017 if (MDSrc) {
1018 unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
1019 LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
1020 Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4));
1021 }
1022 return Insert(Br);
1023 }
1024
1025 /// Create a switch instruction with the specified value, default dest,
1026 /// and with a hint for the number of cases that will be added (for efficient
1027 /// allocation).
1028 SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
1029 MDNode *BranchWeights = nullptr,
1030 MDNode *Unpredictable = nullptr) {
1031 return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
1032 BranchWeights, Unpredictable));
1033 }
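
The returned SwitchInst starts with only the default destination; cases are added on the instruction afterwards. A sketch, assuming V is an i32 Value and Default/OnZero/OnOne are existing BasicBlocks:

  SwitchInst *SI = Builder.CreateSwitch(V, Default, /*NumCases=*/2);
  SI->addCase(Builder.getInt32(0), OnZero);
  SI->addCase(Builder.getInt32(1), OnOne);
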
1034
1035 /// Create an indirect branch instruction with the specified address
1036 /// operand, with an optional hint for the number of destinations that will be
1037 /// added (for efficient allocation).
1038 IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
1039 return Insert(IndirectBrInst::Create(Addr, NumDests));
1040 }
1041
1042 /// Create an invoke instruction.
1043 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1044 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1045 ArrayRef<Value *> Args,
1046 ArrayRef<OperandBundleDef> OpBundles,
1047 const Twine &Name = "") {
1048 InvokeInst *II =
1049 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles);
1050 if (IsFPConstrained)
1051 setConstrainedFPCallAttr(II);
1052 return Insert(II, Name);
1053 }
1054 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1055 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1056 ArrayRef<Value *> Args = None,
1057 const Twine &Name = "") {
1058 InvokeInst *II =
1059 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args);
1060 if (IsFPConstrained)
1061 setConstrainedFPCallAttr(II);
1062 return Insert(II, Name);
1063 }
1064
1065 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1066 BasicBlock *UnwindDest, ArrayRef<Value *> Args,
1067 ArrayRef<OperandBundleDef> OpBundles,
1068 const Twine &Name = "") {
1069 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1070 NormalDest, UnwindDest, Args, OpBundles, Name);
1071 }
1072
1073 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1074 BasicBlock *UnwindDest,
1075 ArrayRef<Value *> Args = None,
1076 const Twine &Name = "") {
1077 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1078 NormalDest, UnwindDest, Args, Name);
1079 }
1080
1081 /// Create a callbr instruction.
1082 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1083 BasicBlock *DefaultDest,
1084 ArrayRef<BasicBlock *> IndirectDests,
1085 ArrayRef<Value *> Args = None,
1086 const Twine &Name = "") {
1087 return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests,
1088 Args), Name);
1089 }
1090 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1091 BasicBlock *DefaultDest,
1092 ArrayRef<BasicBlock *> IndirectDests,
1093 ArrayRef<Value *> Args,
1094 ArrayRef<OperandBundleDef> OpBundles,
1095 const Twine &Name = "") {
1096 return Insert(
1097 CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
1098 OpBundles), Name);
1099 }
1100
1101 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1102 ArrayRef<BasicBlock *> IndirectDests,
1103 ArrayRef<Value *> Args = None,
1104 const Twine &Name = "") {
1105 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1106 DefaultDest, IndirectDests, Args, Name);
1107 }
1108 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1109 ArrayRef<BasicBlock *> IndirectDests,
1110 ArrayRef<Value *> Args,
1111 ArrayRef<OperandBundleDef> OpBundles,
1112 const Twine &Name = "") {
1113 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1114 DefaultDest, IndirectDests, Args, OpBundles, Name);
1115 }
1116
1117 ResumeInst *CreateResume(Value *Exn) {
1118 return Insert(ResumeInst::Create(Exn));
1119 }
1120
1121 CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
1122 BasicBlock *UnwindBB = nullptr) {
1123 return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
1124 }
1125
1126 CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
1127 unsigned NumHandlers,
1128 const Twine &Name = "") {
1129 return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
1130 Name);
1131 }
1132
1133 CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
1134 const Twine &Name = "") {
1135 return Insert(CatchPadInst::Create(ParentPad, Args), Name);
1136 }
1137
1138 CleanupPadInst *CreateCleanupPad(Value *ParentPad,
1139 ArrayRef<Value *> Args = None,
1140 const Twine &Name = "") {
1141 return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
1142 }
1143
1144 CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
1145 return Insert(CatchReturnInst::Create(CatchPad, BB));
1146 }
1147
1148 UnreachableInst *CreateUnreachable() {
1149 return Insert(new UnreachableInst(Context));
1150 }
1151
1152 //===--------------------------------------------------------------------===//
1153 // Instruction creation methods: Binary Operators
1154 //===--------------------------------------------------------------------===//
1155private:
1156 BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
1157 Value *LHS, Value *RHS,
1158 const Twine &Name,
1159 bool HasNUW, bool HasNSW) {
1160 BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
1161 if (HasNUW) BO->setHasNoUnsignedWrap();
1162 if (HasNSW) BO->setHasNoSignedWrap();
1163 return BO;
1164 }
1165
1166 Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
1167 FastMathFlags FMF) const {
1168 if (!FPMD)
1169 FPMD = DefaultFPMathTag;
1170 if (FPMD)
1171 I->setMetadata(LLVMContext::MD_fpmath, FPMD);
1172 I->setFastMathFlags(FMF);
1173 return I;
1174 }
1175
1176 Value *getConstrainedFPRounding(Optional<RoundingMode> Rounding) {
1177 RoundingMode UseRounding = DefaultConstrainedRounding;
1178
1179 if (Rounding)
1180 UseRounding = Rounding.value();
1181
1182 Optional<StringRef> RoundingStr = convertRoundingModeToStr(UseRounding);
1183 assert(RoundingStr && "Garbage strict rounding mode!");
1184 auto *RoundingMDS = MDString::get(Context, RoundingStr.value());
1185
1186 return MetadataAsValue::get(Context, RoundingMDS);
1187 }
1188
1189 Value *getConstrainedFPExcept(Optional<fp::ExceptionBehavior> Except) {
1190 fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept;
1191
1192 if (Except)
1193 UseExcept = Except.value();
1194
1195 Optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(UseExcept);
1196 assert(ExceptStr && "Garbage strict exception behavior!");
1197 auto *ExceptMDS = MDString::get(Context, ExceptStr.value());
1198
1199 return MetadataAsValue::get(Context, ExceptMDS);
1200 }
1201
1202 Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) {
1203 assert(CmpInst::isFPPredicate(Predicate) &&
1204        Predicate != CmpInst::FCMP_FALSE &&
1205        Predicate != CmpInst::FCMP_TRUE &&
1206        "Invalid constrained FP comparison predicate!");
1207
1208 StringRef PredicateStr = CmpInst::getPredicateName(Predicate);
1209 auto *PredicateMDS = MDString::get(Context, PredicateStr);
1210
1211 return MetadataAsValue::get(Context, PredicateMDS);
1212 }
1213
1214public:
1215 Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
1216 bool HasNUW = false, bool HasNSW = false) {
1217 if (Value *V =
1218 Folder.FoldNoWrapBinOp(Instruction::Add, LHS, RHS, HasNUW, HasNSW))
1219 return V;
1220 return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name, HasNUW,
1221 HasNSW);
1222 }
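
Note that the folder runs before anything is inserted, so CreateAdd (like most Create* methods in this class) can hand back a Constant rather than a newly inserted Instruction; callers must not cast the result to Instruction without checking. A sketch with the default ConstantFolder:

  Value *Five = Builder.CreateAdd(Builder.getInt32(2), Builder.getInt32(3));
  // No 'add' was inserted; isa<ConstantInt>(Five) holds and Five is i32 5.
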
1223
1224 Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1225 return CreateAdd(LHS, RHS, Name, false, true);
1226 }
1227
1228 Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1229 return CreateAdd(LHS, RHS, Name, true, false);
1230 }
1231
1232 Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
1233 bool HasNUW = false, bool HasNSW = false) {
1234 if (Value *V =
1235 Folder.FoldNoWrapBinOp(Instruction::Sub, LHS, RHS, HasNUW, HasNSW))
1236 return V;
1237 return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name, HasNUW,
1238 HasNSW);
1239 }
1240
1241 Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1242 return CreateSub(LHS, RHS, Name, false, true);
1243 }
1244
1245 Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1246 return CreateSub(LHS, RHS, Name, true, false);
1247 }
1248
1249 Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
1250 bool HasNUW = false, bool HasNSW = false) {
1251 if (Value *V =
1252 Folder.FoldNoWrapBinOp(Instruction::Mul, LHS, RHS, HasNUW, HasNSW))
1253 return V;
1254 return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name, HasNUW,
1255 HasNSW);
1256 }
1257
1258 Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1259 return CreateMul(LHS, RHS, Name, false, true);
1260 }
1261
1262 Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1263 return CreateMul(LHS, RHS, Name, true, false);
1264 }
1265
1266 Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1267 bool isExact = false) {
1268 if (Value *V = Folder.FoldExactBinOp(Instruction::UDiv, LHS, RHS, isExact))
1269 return V;
1270 if (!isExact)
1271 return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
1272 return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
1273 }
1274
1275 Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1276 return CreateUDiv(LHS, RHS, Name, true);
1277 }
1278
1279 Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1280 bool isExact = false) {
1281 if (Value *V = Folder.FoldExactBinOp(Instruction::SDiv, LHS, RHS, isExact))
1282 return V;
1283 if (!isExact)
1284 return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
1285 return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
1286 }
1287
1288 Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1289 return CreateSDiv(LHS, RHS, Name, true);
1290 }
1291
1292 Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
1293 if (Value *V = Folder.FoldBinOp(Instruction::URem, LHS, RHS))
1294 return V;
1295 return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
1296 }
1297
1298 Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
1299 if (Value *V = Folder.FoldBinOp(Instruction::SRem, LHS, RHS))
1300 return V;
1301 return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
1302 }
1303
1304 Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
1305 bool HasNUW = false, bool HasNSW = false) {
1306 if (Value *V =
1307 Folder.FoldNoWrapBinOp(Instruction::Shl, LHS, RHS, HasNUW, HasNSW))
1308 return V;
1309 return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
1310 HasNUW, HasNSW);
1311 }
1312
1313 Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
1314 bool HasNUW = false, bool HasNSW = false) {
1315 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1316 HasNUW, HasNSW);
1317 }
1318
1319 Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
1320 bool HasNUW = false, bool HasNSW = false) {
1321 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1322 HasNUW, HasNSW);
1323 }
1324
1325 Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
1326 bool isExact = false) {
1327 if (Value *V = Folder.FoldExactBinOp(Instruction::LShr, LHS, RHS, isExact))
1328 return V;
1329 if (!isExact)
1330 return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
1331 return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
1332 }
1333
1334 Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1335 bool isExact = false) {
1336 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1337 }
1338
1339 Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1340 bool isExact = false) {
1341 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1342 }
1343
1344 Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
1345 bool isExact = false) {
1346 if (Value *V = Folder.FoldExactBinOp(Instruction::AShr, LHS, RHS, isExact))
1347 return V;
1348 if (!isExact)
1349 return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
1350 return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
1351 }
1352
1353 Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1354 bool isExact = false) {
1355 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1356 }
1357
1358 Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1359 bool isExact = false) {
1360 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1361 }
1362
1363 Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
1364 if (auto *V = Folder.FoldBinOp(Instruction::And, LHS, RHS))
1365 return V;
1366 return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
1367 }
1368
1369 Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1370 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1371 }
1372
1373 Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1374 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1375 }
1376
1377 Value *CreateAnd(ArrayRef<Value*> Ops) {
1378 assert(!Ops.empty());
1379 Value *Accum = Ops[0];
1380 for (unsigned i = 1; i < Ops.size(); i++)
1381 Accum = CreateAnd(Accum, Ops[i]);
1382 return Accum;
1383 }
1384
1385 Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
1386 if (auto *V = Folder.FoldBinOp(Instruction::Or, LHS, RHS))
1387 return V;
1388 return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
1389 }
1390
1391 Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1392 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1393 }
1394
1395 Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1396 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1397 }
1398
1399 Value *CreateOr(ArrayRef<Value*> Ops) {
1400 assert(!Ops.empty());
1401 Value *Accum = Ops[0];
1402 for (unsigned i = 1; i < Ops.size(); i++)
1403 Accum = CreateOr(Accum, Ops[i]);
1404 return Accum;
1405 }
1406
1407 Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
1408 if (Value *V = Folder.FoldBinOp(Instruction::Xor, LHS, RHS))
1409 return V;
1410 return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
1411 }
1412
1413 Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1414 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1415 }
1416
1417 Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1418 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1419 }
1420
1421 Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
1422 MDNode *FPMD = nullptr) {
1423 if (IsFPConstrained)
1424 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1425 L, R, nullptr, Name, FPMD);
1426
1427 if (Value *V = Folder.FoldBinOpFMF(Instruction::FAdd, L, R, FMF))
1428 return V;
1429 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
1430 return Insert(I, Name);
1431 }
1432
1433 /// Copy fast-math-flags from an instruction rather than using the builder's
1434 /// default FMF.
1435 Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
1436 const Twine &Name = "") {
1437 if (IsFPConstrained)
1438 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1439 L, R, FMFSource, Name);
1440
1441 FastMathFlags FMF = FMFSource->getFastMathFlags();
1442 if (Value *V = Folder.FoldBinOpFMF(Instruction::FAdd, L, R, FMF))
1443 return V;
1444 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr, FMF);
1445 return Insert(I, Name);
1446 }
1447
1448 Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
1449 MDNode *FPMD = nullptr) {
1450 if (IsFPConstrained)
1451 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1452 L, R, nullptr, Name, FPMD);
1453
1454 if (Value *V = Folder.FoldBinOpFMF(Instruction::FSub, L, R, FMF))
1455 return V;
1456 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
1457 return Insert(I, Name);
1458 }
1459
1460 /// Copy fast-math-flags from an instruction rather than using the builder's
1461 /// default FMF.
1462 Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
1463 const Twine &Name = "") {
1464 if (IsFPConstrained)
1465 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1466 L, R, FMFSource, Name);
1467
1468 FastMathFlags FMF = FMFSource->getFastMathFlags();
1469 if (Value *V = Folder.FoldBinOpFMF(Instruction::FSub, L, R, FMF))
1470 return V;
1471 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr, FMF);
1472 return Insert(I, Name);
1473 }
1474
1475 Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
1476 MDNode *FPMD = nullptr) {
1477 if (IsFPConstrained)
1478 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1479 L, R, nullptr, Name, FPMD);
1480
1481 if (Value *V = Folder.FoldBinOpFMF(Instruction::FMul, L, R, FMF))
1482 return V;
1483 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
1484 return Insert(I, Name);
1485 }
1486
1487 /// Copy fast-math-flags from an instruction rather than using the builder's
1488 /// default FMF.
1489 Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
1490 const Twine &Name = "") {
1491 if (IsFPConstrained)
1492 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1493 L, R, FMFSource, Name);
1494
1495 FastMathFlags FMF = FMFSource->getFastMathFlags();
1496 if (Value *V = Folder.FoldBinOpFMF(Instruction::FMul, L, R, FMF))
1497 return V;
1498 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr, FMF);
1499 return Insert(I, Name);
1500 }
1501
1502 Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
1503 MDNode *FPMD = nullptr) {
1504 if (IsFPConstrained)
1505 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1506 L, R, nullptr, Name, FPMD);
1507
1508 if (Value *V = Folder.FoldBinOpFMF(Instruction::FDiv, L, R, FMF))
1509 return V;
1510 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
1511 return Insert(I, Name);
1512 }
1513
1514 /// Copy fast-math-flags from an instruction rather than using the builder's
1515 /// default FMF.
1516 Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
1517 const Twine &Name = "") {
1518 if (IsFPConstrained)
1519 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1520 L, R, FMFSource, Name);
1521
     FastMathFlags FMF = FMFSource->getFastMathFlags();
1522 if (Value *V = Folder.FoldBinOpFMF(Instruction::FDiv, L, R, FMF))
1523 return V;
1524 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr, FMF);
1525 return Insert(I, Name);
1526 }
1527
1528 Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
1529 MDNode *FPMD = nullptr) {
1530 if (IsFPConstrained)
1531 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1532 L, R, nullptr, Name, FPMD);
1533
1534 if (Value *V = Folder.FoldBinOpFMF(Instruction::FRem, L, R, FMF)) return V;
1535 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
1536 return Insert(I, Name);
1537 }
1538
1539 /// Copy fast-math-flags from an instruction rather than using the builder's
1540 /// default FMF.
1541 Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
1542 const Twine &Name = "") {
1543 if (IsFPConstrained)
1544 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1545 L, R, FMFSource, Name);
1546
1547 FastMathFlags FMF = FMFSource->getFastMathFlags();
1548 if (Value *V = Folder.FoldBinOpFMF(Instruction::FRem, L, R, FMF)) return V;
1549 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr, FMF);
1550 return Insert(I, Name);
1551 }
1552
1553 Value *CreateBinOp(Instruction::BinaryOps Opc,
1554 Value *LHS, Value *RHS, const Twine &Name = "",
1555 MDNode *FPMathTag = nullptr) {
1556 if (Value *V = Folder.FoldBinOp(Opc, LHS, RHS)) return V;
1557 Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
1558 if (isa<FPMathOperator>(BinOp))
1559 setFPAttrs(BinOp, FPMathTag, FMF);
1560 return Insert(BinOp, Name);
1561 }
1562
1563 Value *CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name = "") {
1564 assert(Cond2->getType()->isIntOrIntVectorTy(1));
1565 return CreateSelect(Cond1, Cond2,
1566 ConstantInt::getNullValue(Cond2->getType()), Name);
1567 }
1568
1569 Value *CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name = "") {
1570 assert(Cond2->getType()->isIntOrIntVectorTy(1));
1571 return CreateSelect(Cond1, ConstantInt::getAllOnesValue(Cond2->getType()),
1572 Cond2, Name);
1573 }
1574
1575 // NOTE: this is sequential, non-commutative, ordered reduction!
1576 Value *CreateLogicalOr(ArrayRef<Value *> Ops) {
1577 assert(!Ops.empty());
1578 Value *Accum = Ops[0];
1579 for (unsigned i = 1; i < Ops.size(); i++)
1580 Accum = CreateLogicalOr(Accum, Ops[i]);
1581 return Accum;
1582 }
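
CreateLogicalAnd/CreateLogicalOr deliberately emit selects rather than bitwise and/or so that the second operand keeps short-circuit (poison-blocking) semantics. A sketch, assuming C1 and C2 are i1 Values:

  // 'select i1 C1, i1 C2, i1 false': if C1 is false, a poison C2 does not
  // poison the result, unlike 'and i1 C1, C2'.
  Value *Both = Builder.CreateLogicalAnd(C1, C2);
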
1583
1584 CallInst *CreateConstrainedFPBinOp(
1585 Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
1586 const Twine &Name = "", MDNode *FPMathTag = nullptr,
1587 Optional<RoundingMode> Rounding = None,
1588 Optional<fp::ExceptionBehavior> Except = None);
1589
1590 Value *CreateNeg(Value *V, const Twine &Name = "", bool HasNUW = false,
1591 bool HasNSW = false) {
1592 return CreateSub(Constant::getNullValue(V->getType()), V, Name, HasNUW,
1593 HasNSW);
1594 }
1595
1596 Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
1597 return CreateNeg(V, Name, false, true);
1598 }
1599
1600 Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
1601 return CreateNeg(V, Name, true, false);
1602 }
1603
1604 Value *CreateFNeg(Value *V, const Twine &Name = "",
1605 MDNode *FPMathTag = nullptr) {
1606 if (Value *Res = Folder.FoldUnOpFMF(Instruction::FNeg, V, FMF))
1607 return Res;
1608 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
1609 Name);
1610 }
1611
1612 /// Copy fast-math-flags from an instruction rather than using the builder's
1613 /// default FMF.
1614 Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
1615 const Twine &Name = "") {
1616 FastMathFlags FMF = FMFSource->getFastMathFlags();
1617 if (Value *Res = Folder.FoldUnOpFMF(Instruction::FNeg, V, FMF))
1618 return Res;
1619 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr, FMF),
1620 Name);
1621 }
1622
1623 Value *CreateNot(Value *V, const Twine &Name = "") {
1624 return CreateXor(V, Constant::getAllOnesValue(V->getType()), Name);
1625 }
1626
1627 Value *CreateUnOp(Instruction::UnaryOps Opc,
1628 Value *V, const Twine &Name = "",
1629 MDNode *FPMathTag = nullptr) {
1630 if (Value *Res = Folder.FoldUnOpFMF(Opc, V, FMF))
1631 return Res;
1632 Instruction *UnOp = UnaryOperator::Create(Opc, V);
1633 if (isa<FPMathOperator>(UnOp))
1634 setFPAttrs(UnOp, FPMathTag, FMF);
1635 return Insert(UnOp, Name);
1636 }
1637
1638 /// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
1639 /// The correct number of operands must be passed accordingly.
1640 Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
1641 const Twine &Name = "", MDNode *FPMathTag = nullptr);
1642
1643 //===--------------------------------------------------------------------===//
1644 // Instruction creation methods: Memory Instructions
1645 //===--------------------------------------------------------------------===//
1646
1647 AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
1648 Value *ArraySize = nullptr, const Twine &Name = "") {
1649 const DataLayout &DL = BB->getModule()->getDataLayout();
1650 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1651 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1652 }
1653
1654 AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
1655 const Twine &Name = "") {
1656 const DataLayout &DL = BB->getModule()->getDataLayout();
1657 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1658 unsigned AddrSpace = DL.getAllocaAddrSpace();
1659 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1660 }
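
A sketch of the second overload, which picks both the alloca address space and the preferred alignment from the module's DataLayout (Builder assumed to be positioned in a function's entry block):

  AllocaInst *Slot = Builder.CreateAlloca(Builder.getInt32Ty(), nullptr, "slot");
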
1661
1662 /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
1663 /// converting the string to 'bool' for the isVolatile parameter.
1664 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
1665 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
1666 }
1667
1668 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
1669 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
1670 }
1671
1672 LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
1673 const Twine &Name = "") {
1674 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name);
1675 }
1676
1677 StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
1678 return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile);
1679 }
1680
1681 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1682 const char *Name) {
1683 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1684 }
1685
1686 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1687 const Twine &Name = "") {
1688 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1689 }
1690
1691 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1692 bool isVolatile, const Twine &Name = "") {
1693 if (!Align) {
1694 const DataLayout &DL = BB->getModule()->getDataLayout();
1695 Align = DL.getABITypeAlign(Ty);
1696 }
1697 return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name);
1698 }
1699
1700 StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align,
1701 bool isVolatile = false) {
1702 if (!Align) {
1703 const DataLayout &DL = BB->getModule()->getDataLayout();
1704 Align = DL.getABITypeAlign(Val->getType());
1705 }
1706 return Insert(new StoreInst(Val, Ptr, isVolatile, *Align));
1707 }
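
As the two bodies above show, a missing alignment falls back to the type's ABI alignment from the DataLayout. A sketch with explicit alignment, assuming Ptr and Ptr2 are pointer Values (illustrative names):

  LoadInst *L = Builder.CreateAlignedLoad(Builder.getInt32Ty(), Ptr, Align(8));
  Builder.CreateAlignedStore(L, Ptr2, Align(8));
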
1708 FenceInst *CreateFence(AtomicOrdering Ordering,
1709 SyncScope::ID SSID = SyncScope::System,
1710 const Twine &Name = "") {
1711 return Insert(new FenceInst(Context, Ordering, SSID), Name);
1712 }
1713
1714 AtomicCmpXchgInst *
1715 CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align,
1716 AtomicOrdering SuccessOrdering,
1717 AtomicOrdering FailureOrdering,
1718 SyncScope::ID SSID = SyncScope::System) {
1719 if (!Align) {
1720 const DataLayout &DL = BB->getModule()->getDataLayout();
1721 Align = llvm::Align(DL.getTypeStoreSize(New->getType()));
1722 }
1723
1724 return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, *Align, SuccessOrdering,
1725 FailureOrdering, SSID));
1726 }
1727
1728 AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr,
1729 Value *Val, MaybeAlign Align,
1730 AtomicOrdering Ordering,
1731 SyncScope::ID SSID = SyncScope::System) {
1732 if (!Align) {
1733 const DataLayout &DL = BB->getModule()->getDataLayout();
1734 Align = llvm::Align(DL.getTypeStoreSize(Val->getType()));
1735 }
1736
1737 return Insert(new AtomicRMWInst(Op, Ptr, Val, *Align, Ordering, SSID));
1738 }
1739
1740 Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1741 const Twine &Name = "", bool IsInBounds = false) {
1742 if (auto *V = Folder.FoldGEP(Ty, Ptr, IdxList, IsInBounds))
1743 return V;
1744 return Insert(IsInBounds
1745 ? GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList)
1746 : GetElementPtrInst::Create(Ty, Ptr, IdxList),
1747 Name);
1748 }
1749
1750 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1751 const Twine &Name = "") {
1752 return CreateGEP(Ty, Ptr, IdxList, Name, /* IsInBounds */ true);
1753 }
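
A sketch of CreateInBoundsGEP, assuming ArrTy is an [N x i32] array type, Base points at such an array, and I is an i64 index Value (illustrative names):

  Value *Idxs[] = {Builder.getInt64(0), I};
  Value *Elt = Builder.CreateInBoundsGEP(ArrTy, Base, Idxs, "elt");
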
1754
1755 Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1756 const Twine &Name = "") {
1757 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1758
1759 if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/false))
1760 return V;
1761
1762 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1763 }
1764
1765 Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1766 const Twine &Name = "") {
1767 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1768
1769 if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/true))
1770 return V;
1771
1772 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1773 }
1774
1775 Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
1776 const Twine &Name = "") {
1777 Value *Idxs[] = {
1778 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1779 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1780 };
1781
1782 if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/false))
1783 return V;
1784
1785 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1786 }
1787
1788 Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
1789 unsigned Idx1, const Twine &Name = "") {
1790 Value *Idxs[] = {
1791 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1792 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1793 };
1794
1795 if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/true))
1796 return V;
1797
1798 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1799 }
1800
1801 Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1802 const Twine &Name = "") {
1803 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1804
1805 if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/false))
1806 return V;
1807
1808 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1809 }
1810
1811 Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1812 const Twine &Name = "") {
1813 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1814
1815 if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/true))
1816 return V;
1817
1818 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1819 }
1820
1821 Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1822 const Twine &Name = "") {
1823 Value *Idxs[] = {
1824 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1825 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1826 };
1827
1828 if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/false))
1829 return V;
1830
1831 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1832 }
1833
1834 Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1835 uint64_t Idx1, const Twine &Name = "") {
1836 Value *Idxs[] = {
1837 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1838 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1839 };
1840
1841 if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/true))
1842 return V;
1843
1844 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1845 }
1846
1847 Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
1848 const Twine &Name = "") {
1849 return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
1850 }
1851
1852 /// Same as CreateGlobalString, but returns a pointer with "i8*" type
1853 /// instead of a pointer to array of i8.
1854 ///
1855 /// If no module is given via \p M, it is taken from the insertion point basic
1856 /// block.
1857 Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
1858 unsigned AddressSpace = 0,
1859 Module *M = nullptr) {
1860 GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M);
1861 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
1862 Constant *Indices[] = {Zero, Zero};
1863 return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
1864 Indices);
1865 }
1866
1867 //===--------------------------------------------------------------------===//
1868 // Instruction creation methods: Cast/Conversion Operators
1869 //===--------------------------------------------------------------------===//
1870
1871 Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
1872 return CreateCast(Instruction::Trunc, V, DestTy, Name);
1873 }
1874
1875 Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
1876 return CreateCast(Instruction::ZExt, V, DestTy, Name);
1877 }
1878
1879 Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
1880 return CreateCast(Instruction::SExt, V, DestTy, Name);
1881 }
1882
1883 /// Create a ZExt or Trunc from the integer value V to DestTy. Return
1884 /// the value untouched if the type of V is already DestTy.
1885 Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
1886 const Twine &Name = "") {
1887 assert(V->getType()->isIntOrIntVectorTy() &&
1888        DestTy->isIntOrIntVectorTy() &&
1889        "Can only zero extend/truncate integers!");
1890 Type *VTy = V->getType();
1891 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
1892 return CreateZExt(V, DestTy, Name);
1893 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
1894 return CreateTrunc(V, DestTy, Name);
1895 return V;
1896 }
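
A sketch of CreateZExtOrTrunc for normalizing an integer Value V of unknown width to i64; the symmetric CreateSExtOrTrunc below behaves the same with sign extension:

  Value *AsI64 = Builder.CreateZExtOrTrunc(V, Builder.getInt64Ty());
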
1897
1898 /// Create a SExt or Trunc from the integer value V to DestTy. Return
1899 /// the value untouched if the type of V is already DestTy.
1900 Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
1901 const Twine &Name = "") {
1902 assert(V->getType()->isIntOrIntVectorTy() &&
1903        DestTy->isIntOrIntVectorTy() &&
1904        "Can only sign extend/truncate integers!");
1905 Type *VTy = V->getType();
1906 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
1907 return CreateSExt(V, DestTy, Name);
1908 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
1909 return CreateTrunc(V, DestTy, Name);
1910 return V;
1911 }
1912
1913 Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
1914 if (IsFPConstrained)
1915 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
1916 V, DestTy, nullptr, Name);
1917 return CreateCast(Instruction::FPToUI, V, DestTy, Name);
1918 }
1919
1920 Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
1921 if (IsFPConstrained)
1922 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
1923 V, DestTy, nullptr, Name);
1924 return CreateCast(Instruction::FPToSI, V, DestTy, Name);
1925 }
1926
1927 Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = "") {
1928 if (IsFPConstrained)
1929 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp,
1930 V, DestTy, nullptr, Name);
1931 return CreateCast(Instruction::UIToFP, V, DestTy, Name);
1932 }
1933
1934 Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = "") {
1935 if (IsFPConstrained)
1936 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp,
1937 V, DestTy, nullptr, Name);
1938 return CreateCast(Instruction::SIToFP, V, DestTy, Name);
1939 }
1940
1941 Value *CreateFPTrunc(Value *V, Type *DestTy,
1942 const Twine &Name = "") {
1943 if (IsFPConstrained)
1944 return CreateConstrainedFPCast(
1945 Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
1946 Name);
1947 return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
1948 }
1949
1950 Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
1951 if (IsFPConstrained)
1952 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
1953 V, DestTy, nullptr, Name);
1954 return CreateCast(Instruction::FPExt, V, DestTy, Name);
1955 }
1956
1957 Value *CreatePtrToInt(Value *V, Type *DestTy,
1958 const Twine &Name = "") {
1959 return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
1960 }
1961
1962 Value *CreateIntToPtr(Value *V, Type *DestTy,
1963 const Twine &Name = "") {
1964 return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
1965 }
1966
1967 Value *CreateBitCast(Value *V, Type *DestTy,
1968 const Twine &Name = "") {
1969 return CreateCast(Instruction::BitCast, V, DestTy, Name);
1970 }
1971
1972 Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
1973 const Twine &Name = "") {
1974 return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
1975 }
1976
1977 Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
1978 const Twine &Name = "") {
1979 if (V->getType() == DestTy)
1980 return V;
1981 if (auto *VC = dyn_cast<Constant>(V))
1982 return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
1983 return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
1984 }
1985
1986 Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
1987 const Twine &Name = "") {
1988 if (V->getType() == DestTy)
1989 return V;
1990 if (auto *VC = dyn_cast<Constant>(V))
1991 return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
1992 return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
1993 }
1994
1995 Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
1996 const Twine &Name = "") {
1997 if (V->getType() == DestTy)
1998 return V;
1999 if (auto *VC = dyn_cast<Constant>(V))
2000 return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
2001 return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
2002 }
2003
2004 Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
2005 const Twine &Name = "") {
2006 if (V->getType() == DestTy)
2007 return V;
2008 if (auto *VC = dyn_cast<Constant>(V))
2009 return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
2010 return Insert(CastInst::Create(Op, V, DestTy), Name);
2011 }
2012
2013 Value *CreatePointerCast(Value *V, Type *DestTy,
2014 const Twine &Name = "") {
2015 if (V->getType() == DestTy)
2016 return V;
2017 if (auto *VC = dyn_cast<Constant>(V))
2018 return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
2019 return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
2020 }
2021
2022 Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
2023 const Twine &Name = "") {
2024 if (V->getType() == DestTy)
2025 return V;
2026
2027 if (auto *VC = dyn_cast<Constant>(V)) {
2028 return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
2029 Name);
2030 }
2031
2032 return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
2033 Name);
2034 }
2035
2036 Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
2037 const Twine &Name = "") {
2038 if (V->getType() == DestTy)
2039 return V;
2040 if (auto *VC = dyn_cast<Constant>(V))
2041 return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
2042 return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
2043 }
2044
2045 Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
2046 const Twine &Name = "") {
2047 if (V->getType() == DestTy)
2048 return V;
2049 if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
2050 return CreatePtrToInt(V, DestTy, Name);
2051 if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
2052 return CreateIntToPtr(V, DestTy, Name);
2053
2054 return CreateBitCast(V, DestTy, Name);
2055 }
2056
2057 Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
2058 if (V->getType() == DestTy)
2059 return V;
2060 if (auto *VC = dyn_cast<Constant>(V))
2061 return Insert(Folder.CreateFPCast(VC, DestTy), Name);
2062 return Insert(CastInst::CreateFPCast(V, DestTy), Name);
2063 }
2064
2065 CallInst *CreateConstrainedFPCast(
2066 Intrinsic::ID ID, Value *V, Type *DestTy,
2067 Instruction *FMFSource = nullptr, const Twine &Name = "",
2068 MDNode *FPMathTag = nullptr,
2069 Optional<RoundingMode> Rounding = None,
2070 Optional<fp::ExceptionBehavior> Except = None);
2071
2072 // Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
2073 // compile time error, instead of converting the string to bool for the
2074 // isSigned parameter.
2075 Value *CreateIntCast(Value *, Type *, const char *) = delete;
2076
2077 //===--------------------------------------------------------------------===//
2078 // Instruction creation methods: Compare Instructions
2079 //===--------------------------------------------------------------------===//
2080
2081 Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
2082 return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
2083 }
2084
2085 Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
2086 return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
2087 }
2088
2089 Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2090 return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
2091 }
2092
2093 Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2094 return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
2095 }
2096
2097 Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
2098 return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
2099 }
2100
2101 Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
2102 return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
2103 }
2104
2105 Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2106 return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
2107 }
2108
2109 Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2110 return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
2111 }
2112
2113 Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
2114 return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
2115 }
2116
2117 Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
2118 return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
2119 }
2120
2121 Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2122 MDNode *FPMathTag = nullptr) {
2123 return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
2124 }
2125
2126 Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
2127 MDNode *FPMathTag = nullptr) {
2128 return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
2129 }
2130
2131 Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
2132 MDNode *FPMathTag = nullptr) {
2133 return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
2134 }
2135
2136 Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
2137 MDNode *FPMathTag = nullptr) {
2138 return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
2139 }
2140
2141 Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
2142 MDNode *FPMathTag = nullptr) {
2143 return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
2144 }
2145
2146 Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
2147 MDNode *FPMathTag = nullptr) {
2148 return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
2149 }
2150
2151 Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
2152 MDNode *FPMathTag = nullptr) {
2153 return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
2154 }
2155
2156 Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
2157 MDNode *FPMathTag = nullptr) {
2158 return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
2159 }
2160
2161 Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2162 MDNode *FPMathTag = nullptr) {
2163 return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
2164 }
2165
2166 Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
2167 MDNode *FPMathTag = nullptr) {
2168 return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
2169 }
2170
2171 Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
2172 MDNode *FPMathTag = nullptr) {
2173 return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
2174 }
2175
2176 Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
2177 MDNode *FPMathTag = nullptr) {
2178 return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
2179 }
2180
2181 Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
2182 MDNode *FPMathTag = nullptr) {
2183 return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
2184 }
2185
2186 Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
2187 MDNode *FPMathTag = nullptr) {
2188 return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
2189 }
2190
2191 Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2192 const Twine &Name = "") {
2193 if (auto *V = Folder.FoldICmp(P, LHS, RHS))
     Step 12: Assuming 'V' is null
     Step 13: Taking false branch
2194 return V;
2195 return Insert(new ICmpInst(P, LHS, RHS), Name);
     Step 14: Passing null pointer value via 2nd parameter 'LHS'
     Step 15: Calling constructor for 'ICmpInst'
2196 }
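
Steps 12-15 above continue the path this report traces out of LoopBoundSplit.cpp: the analyzer assumes Folder.FoldICmp returns null, so the fold at line 2193 is skipped, and the LHS that was inferred to be null earlier in the path flows into 'new ICmpInst(P, LHS, RHS)', whose constructor then dereferences the null operand inside Instructions.h. An illustrative guard at a call site, not taken from the analyzed sources:

  // Make the nonnull expectation explicit before reaching CreateICmp.
  assert(LHS && RHS && "CreateICmp requires non-null operands");
  Value *Cmp = Builder.CreateICmp(Pred, LHS, RHS);
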
2197
2198 // Create a quiet floating-point comparison (i.e. one that raises an FP
2199 // exception only in the case where an input is a signaling NaN).
2200 // Note that this differs from CreateFCmpS only if IsFPConstrained is true.
2201 Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2202 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2203 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false);
2204 }
2205
2206 Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
2207 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2208 return CmpInst::isFPPredicate(Pred)
2209 ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag)
2210 : CreateICmp(Pred, LHS, RHS, Name);
2211 }
2212
2213 // Create a signaling floating-point comparison (i.e. one that raises an FP
2214 // exception whenever an input is any NaN, signaling or quiet).
2215 // Note that this differs from CreateFCmp only if IsFPConstrained is true.
2216 Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
2217 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2218 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true);
2219 }
2220
2221private:
2222 // Helper routine to create either a signaling or a quiet FP comparison.
2223 Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS,
2224 const Twine &Name, MDNode *FPMathTag,
2225 bool IsSignaling);
2226
2227public:
2228 CallInst *CreateConstrainedFPCmp(
2229 Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
2230 const Twine &Name = "", Optional<fp::ExceptionBehavior> Except = None);
2231
2232 //===--------------------------------------------------------------------===//
2233 // Instruction creation methods: Other Instructions
2234 //===--------------------------------------------------------------------===//
2235
2236 PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
2237 const Twine &Name = "") {
2238 PHINode *Phi = PHINode::Create(Ty, NumReservedValues);
2239 if (isa<FPMathOperator>(Phi))
2240 setFPAttrs(Phi, nullptr /* MDNode* */, FMF);
2241 return Insert(Phi, Name);
2242 }
2243
2244private:
2245 CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
2246 const Twine &Name = "",
2247 Instruction *FMFSource = nullptr,
2248 ArrayRef<OperandBundleDef> OpBundles = {});
2249
2250public:
2251 CallInst *CreateCall(FunctionType *FTy, Value *Callee,
2252 ArrayRef<Value *> Args = None, const Twine &Name = "",
2253 MDNode *FPMathTag = nullptr) {
2254 CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
2255 if (IsFPConstrained)
2256 setConstrainedFPCallAttr(CI);
2257 if (isa<FPMathOperator>(CI))
2258 setFPAttrs(CI, FPMathTag, FMF);
2259 return Insert(CI, Name);
2260 }
2261
2262 CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
2263 ArrayRef<OperandBundleDef> OpBundles,
2264 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2265 CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
2266 if (IsFPConstrained)
2267 setConstrainedFPCallAttr(CI);
2268 if (isa<FPMathOperator>(CI))
2269 setFPAttrs(CI, FPMathTag, FMF);
2270 return Insert(CI, Name);
2271 }
2272
2273 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = None,
2274 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2275 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
2276 FPMathTag);
2277 }
2278
2279 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
2280 ArrayRef<OperandBundleDef> OpBundles,
2281 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2282 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args,
2283 OpBundles, Name, FPMathTag);
2284 }
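
The FunctionCallee overloads bundle the callee with its function type, which is exactly what Module::getOrInsertFunction returns. A minimal sketch, assuming a module 'M' and a hypothetical void(i32) function named "hook":

    // Sketch: declare (or look up) a function, then emit a call to it.
    llvm::FunctionType *FTy = llvm::FunctionType::get(
        B.getVoidTy(), {B.getInt32Ty()}, /*isVarArg=*/false);
    llvm::FunctionCallee Hook = M.getOrInsertFunction("hook", FTy);
    B.CreateCall(Hook, {B.getInt32(42)});
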
2285
2286 CallInst *CreateConstrainedFPCall(
2287 Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
2288 Optional<RoundingMode> Rounding = None,
2289 Optional<fp::ExceptionBehavior> Except = None);
2290
2291 Value *CreateSelect(Value *C, Value *True, Value *False,
2292 const Twine &Name = "", Instruction *MDFrom = nullptr);
2293
2294 VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
2295 return Insert(new VAArgInst(List, Ty), Name);
2296 }
2297
2298 Value *CreateExtractElement(Value *Vec, Value *Idx,
2299 const Twine &Name = "") {
2300 if (Value *V = Folder.FoldExtractElement(Vec, Idx))
2301 return V;
2302 return Insert(ExtractElementInst::Create(Vec, Idx), Name);
2303 }
2304
2305 Value *CreateExtractElement(Value *Vec, uint64_t Idx,
2306 const Twine &Name = "") {
2307 return CreateExtractElement(Vec, getInt64(Idx), Name);
2308 }
2309
2310 Value *CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx,
2311 const Twine &Name = "") {
2312 return CreateInsertElement(PoisonValue::get(VecTy), NewElt, Idx, Name);
2313 }
2314
2315 Value *CreateInsertElement(Type *VecTy, Value *NewElt, uint64_t Idx,
2316 const Twine &Name = "") {
2317 return CreateInsertElement(PoisonValue::get(VecTy), NewElt, Idx, Name);
2318 }
2319
2320 Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
2321 const Twine &Name = "") {
2322 if (Value *V = Folder.FoldInsertElement(Vec, NewElt, Idx))
2323 return V;
2324 return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
2325 }
2326
2327 Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
2328 const Twine &Name = "") {
2329 return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
2330 }
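
Because the Type* overloads start from a poison vector, a fixed-length vector can be assembled by chaining inserts. A minimal sketch, assuming 'X' and 'Y' are i32 values already in scope:

    // Sketch: build a <2 x i32> from two scalars, starting from poison.
    llvm::Type *VecTy = llvm::FixedVectorType::get(B.getInt32Ty(), 2);
    llvm::Value *V = B.CreateInsertElement(VecTy, X, uint64_t(0), "v0");
    V = B.CreateInsertElement(V, Y, uint64_t(1), "v1");
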
2331
2332 Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
2333 const Twine &Name = "") {
2334 SmallVector<int, 16> IntMask;
2335 ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask);
2336 return CreateShuffleVector(V1, V2, IntMask, Name);
2337 }
2338
2339 /// See class ShuffleVectorInst for a description of the mask representation.
2340 Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask,
2341 const Twine &Name = "") {
2342 if (Value *V = Folder.FoldShuffleVector(V1, V2, Mask))
2343 return V;
2344 return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
2345 }
2346
2347 /// Create a unary shuffle. The second vector operand of the IR instruction
2348 /// is poison.
2349 Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask,
2350 const Twine &Name = "") {
2351 return CreateShuffleVector(V, PoisonValue::get(V->getType()), Mask, Name);
2352 }
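
With the unary form, common rearrangements reduce to a constant mask over one source. A minimal sketch, assuming 'V' is a <4 x i32> value:

    // Sketch: reverse a 4-element vector with a unary shuffle mask.
    llvm::Value *Rev = B.CreateShuffleVector(V, {3, 2, 1, 0}, "rev");
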
2353
2354 Value *CreateExtractValue(Value *Agg, ArrayRef<unsigned> Idxs,
2355 const Twine &Name = "") {
2356 if (auto *V = Folder.FoldExtractValue(Agg, Idxs))
2357 return V;
2358 return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
2359 }
2360
2361 Value *CreateInsertValue(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2362 const Twine &Name = "") {
2363 if (auto *V = Folder.FoldInsertValue(Agg, Val, Idxs))
2364 return V;
2365 return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
2366 }
2367
2368 LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
2369 const Twine &Name = "") {
2370 return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
2371 }
2372
2373 Value *CreateFreeze(Value *V, const Twine &Name = "") {
2374 return Insert(new FreezeInst(V), Name);
2375 }
2376
2377 //===--------------------------------------------------------------------===//
2378 // Utility creation methods
2379 //===--------------------------------------------------------------------===//
2380
2381 /// Return a boolean value testing if \p Arg == 0.
2382 Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
2383 return CreateICmpEQ(Arg, ConstantInt::getNullValue(Arg->getType()), Name);
2384 }
2385
2386 /// Return a boolean value testing if \p Arg != 0.
2387 Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
2388 return CreateICmpNE(Arg, ConstantInt::getNullValue(Arg->getType()), Name);
2389 }
2390
2391 /// Return a boolean value testing if \p Arg < 0.
2392 Value *CreateIsNeg(Value *Arg, const Twine &Name = "") {
2393 return CreateICmpSLT(Arg, ConstantInt::getNullValue(Arg->getType()), Name);
2394 }
2395
2396 /// Return a boolean value testing if \p Arg > -1.
2397 Value *CreateIsNotNeg(Value *Arg, const Twine &Name = "") {
2398 return CreateICmpSGT(Arg, ConstantInt::getAllOnesValue(Arg->getType()),
2399 Name);
2400 }
2401
2402 /// Return the i64 difference between two pointer values, dividing out
2403 /// the size of the pointed-to objects.
2404 ///
2405 /// This is intended to implement C-style pointer subtraction. As such, the
2406 /// pointers must be appropriately aligned for their element types and
2407 /// pointing into the same object.
2408 Value *CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
2409 const Twine &Name = "");
2410
2411 /// Create a launder.invariant.group intrinsic call. If Ptr's type is
2412 /// different from pointer to i8, it is cast to pointer to i8 in the same
2413 /// address space before the call and cast back to Ptr's type after the call.
2414 Value *CreateLaunderInvariantGroup(Value *Ptr);
2415
2416 /// \brief Create a strip.invariant.group intrinsic call. If Ptr's type is
2417 /// different from pointer to i8, it is cast to pointer to i8 in the same
2418 /// address space before the call and cast back to Ptr's type after the call.
2419 Value *CreateStripInvariantGroup(Value *Ptr);
2420
2421 /// Return a vector value that contains the vector V reversed
2422 Value *CreateVectorReverse(Value *V, const Twine &Name = "");
2423
2424 /// Return a vector splice intrinsic if using scalable vectors, otherwise
2425 /// return a shufflevector. If the immediate is positive, a vector is
2426 /// extracted from concat(V1, V2), starting at Imm. If the immediate
2427 /// is negative, we extract -Imm elements from V1 and the remaining
2428 /// elements from V2. Imm is a signed integer in the range
2429 /// -VL <= Imm < VL (where VL is the runtime vector length of the
2430 /// source/result vector)
2431 Value *CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
2432 const Twine &Name = "");
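
For example, with VL = 4, Imm = 1 selects elements 1..4 of concat(V1, V2), and Imm = -1 selects the last element of V1 followed by the first three of V2. A minimal sketch, assuming two <4 x i32> operands:

    // Sketch: splice one element into the concatenation of V1 and V2.
    llvm::Value *S = B.CreateVectorSplice(V1, V2, /*Imm=*/1, "splice");
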
2433
2434 /// Return a vector value that contains \arg V broadcasted to \p
2435 /// NumElts elements.
2436 Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");
2437
2438 /// Return a vector value that contains \arg V broadcasted to \p
2439 /// EC elements.
2440 Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = "");
2441
2442 /// Return a value that has been extracted from a larger integer type.
2443 Value *CreateExtractInteger(const DataLayout &DL, Value *From,
2444 IntegerType *ExtractedTy, uint64_t Offset,
2445 const Twine &Name);
2446
2447 Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
2448 unsigned Dimension, unsigned LastIndex,
2449 MDNode *DbgInfo);
2450
2451 Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
2452 MDNode *DbgInfo);
2453
2454 Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
2455 unsigned Index, unsigned FieldIndex,
2456 MDNode *DbgInfo);
2457
2458private:
2459 /// Helper function that creates an assume intrinsic call that
2460 /// represents an alignment assumption on the provided pointer \p PtrValue
2461 /// with offset \p OffsetValue and alignment value \p AlignValue.
2462 CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
2463 Value *PtrValue, Value *AlignValue,
2464 Value *OffsetValue);
2465
2466public:
2467 /// Create an assume intrinsic call that represents an alignment
2468 /// assumption on the provided pointer.
2469 ///
2470 /// An optional offset can be provided, and if it is provided, the offset
2471 /// must be subtracted from the provided pointer to get the pointer with the
2472 /// specified alignment.
2473 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2474 unsigned Alignment,
2475 Value *OffsetValue = nullptr);
2476
2477 /// Create an assume intrinsic call that represents an alignment
2478 /// assumption on the provided pointer.
2479 ///
2480 /// An optional offset can be provided, and if it is provided, the offset
2481 /// must be subtracted from the provided pointer to get the pointer with the
2482 /// specified alignment.
2483 ///
2484 /// This overload handles the condition where the Alignment is dependent
2485 /// on an existing value rather than a static value.
2486 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2487 Value *Alignment,
2488 Value *OffsetValue = nullptr);
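
A minimal sketch of the constant-alignment overload declared above, assuming 'DL' is the module's DataLayout and 'Ptr' a pointer value; it emits an llvm.assume recording that Ptr is 16-byte aligned:

    // Sketch: assert a 16-byte alignment assumption on Ptr.
    B.CreateAlignmentAssumption(DL, Ptr, /*Alignment=*/16);
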
2489};
2490
2491/// This provides a uniform API for creating instructions and inserting
2492/// them into a basic block: either at the end of a BasicBlock, or at a specific
2493/// iterator location in a block.
2494///
2495/// Note that the builder does not expose the full generality of LLVM
2496/// instructions. For access to extra instruction properties, use the mutators
2497/// (e.g. setVolatile) on the instructions after they have been
2498/// created. Convenience state exists to specify fast-math flags and fp-math
2499/// tags.
2500///
2501/// The first template argument specifies a class to use for creating constants.
2502/// This defaults to creating minimally folded constants. The second template
2503/// argument allows clients to specify custom insertion hooks that are called on
2504/// every newly created insertion.
2505template <typename FolderTy = ConstantFolder,
2506 typename InserterTy = IRBuilderDefaultInserter>
2507class IRBuilder : public IRBuilderBase {
2508private:
2509 FolderTy Folder;
2510 InserterTy Inserter;
2511
2512public:
2513 IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(),
2514 MDNode *FPMathTag = nullptr,
2515 ArrayRef<OperandBundleDef> OpBundles = None)
2516 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles),
2517 Folder(Folder), Inserter(Inserter) {}
2518
2519 explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
2520 ArrayRef<OperandBundleDef> OpBundles = None)
2521 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}
2522
2523 explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
2524 MDNode *FPMathTag = nullptr,
2525 ArrayRef<OperandBundleDef> OpBundles = None)
2526 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2527 FPMathTag, OpBundles), Folder(Folder) {
2528 SetInsertPoint(TheBB);
2529 }
2530
2531 explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
2532 ArrayRef<OperandBundleDef> OpBundles = None)
2533 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2534 FPMathTag, OpBundles) {
2535 SetInsertPoint(TheBB);
2536 }
2537
2538 explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
2539 ArrayRef<OperandBundleDef> OpBundles = None)
2540 : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter,
2541 FPMathTag, OpBundles) {
2542 SetInsertPoint(IP);
2543 }
2544
2545 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
2546 MDNode *FPMathTag = nullptr,
2547 ArrayRef<OperandBundleDef> OpBundles = None)
2548 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2549 FPMathTag, OpBundles), Folder(Folder) {
2550 SetInsertPoint(TheBB, IP);
2551 }
2552
2553 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
2554 MDNode *FPMathTag = nullptr,
2555 ArrayRef<OperandBundleDef> OpBundles = None)
2556 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2557 FPMathTag, OpBundles) {
2558 SetInsertPoint(TheBB, IP);
2559 }
2560
2561 /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
2562 /// or FastMathFlagGuard instead.
2563 IRBuilder(const IRBuilder &) = delete;
2564
2565 InserterTy &getInserter() { return Inserter; }
2566};
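
Since copying the builder is deleted, a temporary change of insertion point is expressed with the RAII guard instead. A minimal sketch, assuming 'Entry' is the function's entry block:

    // Sketch: hop to the entry block; the guard restores the old point.
    {
      llvm::IRBuilderBase::InsertPointGuard Guard(B);
      B.SetInsertPoint(Entry, Entry->begin());
      B.CreateAlloca(B.getInt32Ty(), nullptr, "tmp");
    } // previous insertion point restored here
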
2567
2568// Create wrappers for C Binding types (see CBindingWrapping.h).
2569 DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)
2570
2571} // end namespace llvm
2572
2573#endif // LLVM_IR_IRBUILDER_H

/build/llvm-toolchain-snapshot-16~++20220904122748+c444af1c20b3/llvm/include/llvm/IR/Instructions.h

1//===- llvm/Instructions.h - Instruction subclass definitions ---*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file exposes the class definitions of all of the subclasses of the
10// Instruction class. This is meant to be an easy way to get access to all
11// instruction subclasses.
12//
13//===----------------------------------------------------------------------===//
14
15#ifndef LLVM_IR_INSTRUCTIONS_H
16#define LLVM_IR_INSTRUCTIONS_H
17
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/Bitfields.h"
20#include "llvm/ADT/MapVector.h"
21#include "llvm/ADT/None.h"
22#include "llvm/ADT/STLExtras.h"
23#include "llvm/ADT/SmallVector.h"
24#include "llvm/ADT/Twine.h"
25#include "llvm/ADT/iterator.h"
26#include "llvm/ADT/iterator_range.h"
27#include "llvm/IR/CFG.h"
28#include "llvm/IR/Constant.h"
29#include "llvm/IR/DerivedTypes.h"
30#include "llvm/IR/InstrTypes.h"
31#include "llvm/IR/Instruction.h"
32#include "llvm/IR/OperandTraits.h"
33#include "llvm/IR/Use.h"
34#include "llvm/IR/User.h"
35#include "llvm/Support/AtomicOrdering.h"
36#include "llvm/Support/ErrorHandling.h"
37#include <cassert>
38#include <cstddef>
39#include <cstdint>
40#include <iterator>
41
42namespace llvm {
43
44class APFloat;
45class APInt;
46class BasicBlock;
47class BlockAddress;
48class ConstantInt;
49class DataLayout;
50class StringRef;
51class Type;
52class Value;
53
54//===----------------------------------------------------------------------===//
55// AllocaInst Class
56//===----------------------------------------------------------------------===//
57
58/// an instruction to allocate memory on the stack
59class AllocaInst : public UnaryInstruction {
60 Type *AllocatedType;
61
62 using AlignmentField = AlignmentBitfieldElementT<0>;
63 using UsedWithInAllocaField = BoolBitfieldElementT<AlignmentField::NextBit>;
64 using SwiftErrorField = BoolBitfieldElementT<UsedWithInAllocaField::NextBit>;
65 static_assert(Bitfield::areContiguous<AlignmentField, UsedWithInAllocaField,
66 SwiftErrorField>(),
67 "Bitfields must be contiguous");
68
69protected:
70 // Note: Instruction needs to be a friend here to call cloneImpl.
71 friend class Instruction;
72
73 AllocaInst *cloneImpl() const;
74
75public:
76 explicit AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
77 const Twine &Name, Instruction *InsertBefore);
78 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize,
79 const Twine &Name, BasicBlock *InsertAtEnd);
80
81 AllocaInst(Type *Ty, unsigned AddrSpace, const Twine &Name,
82 Instruction *InsertBefore);
83 AllocaInst(Type *Ty, unsigned AddrSpace,
84 const Twine &Name, BasicBlock *InsertAtEnd);
85
86 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
87 const Twine &Name = "", Instruction *InsertBefore = nullptr);
88 AllocaInst(Type *Ty, unsigned AddrSpace, Value *ArraySize, Align Align,
89 const Twine &Name, BasicBlock *InsertAtEnd);
90
91 /// Return true if there is an allocation size parameter to the allocation
92 /// instruction that is not 1.
93 bool isArrayAllocation() const;
94
95 /// Get the number of elements allocated. For a simple allocation of a single
96 /// element, this will return a constant 1 value.
97 const Value *getArraySize() const { return getOperand(0); }
98 Value *getArraySize() { return getOperand(0); }
99
100 /// Overload to return most specific pointer type.
101 PointerType *getType() const {
102 return cast<PointerType>(Instruction::getType());
103 }
104
105 /// Return the address space for the allocation.
106 unsigned getAddressSpace() const {
107 return getType()->getAddressSpace();
108 }
109
110 /// Get allocation size in bits. Returns None if size can't be determined,
111 /// e.g. in case of a VLA.
112 Optional<TypeSize> getAllocationSizeInBits(const DataLayout &DL) const;
113
114 /// Return the type that is being allocated by the instruction.
115 Type *getAllocatedType() const { return AllocatedType; }
116 /// for use only in special circumstances that need to generically
117 /// transform a whole instruction (eg: IR linking and vectorization).
118 void setAllocatedType(Type *Ty) { AllocatedType = Ty; }
119
120 /// Return the alignment of the memory that is being allocated by the
121 /// instruction.
122 Align getAlign() const {
123 return Align(1ULL << getSubclassData<AlignmentField>());
124 }
125
126 void setAlignment(Align Align) {
127 setSubclassData<AlignmentField>(Log2(Align));
128 }
129
130 /// Return true if this alloca is in the entry block of the function and is a
131 /// constant size. If so, the code generator will fold it into the
132 /// prolog/epilog code, so it is basically free.
133 bool isStaticAlloca() const;
134
135 /// Return true if this alloca is used as an inalloca argument to a call. Such
136 /// allocas are never considered static even if they are in the entry block.
137 bool isUsedWithInAlloca() const {
138 return getSubclassData<UsedWithInAllocaField>();
139 }
140
141 /// Specify whether this alloca is used to represent the arguments to a call.
142 void setUsedWithInAlloca(bool V) {
143 setSubclassData<UsedWithInAllocaField>(V);
144 }
145
146 /// Return true if this alloca is used as a swifterror argument to a call.
147 bool isSwiftError() const { return getSubclassData<SwiftErrorField>(); }
148 /// Specify whether this alloca is used to represent a swifterror.
149 void setSwiftError(bool V) { setSubclassData<SwiftErrorField>(V); }
150
151 // Methods for support type inquiry through isa, cast, and dyn_cast:
152 static bool classof(const Instruction *I) {
153 return (I->getOpcode() == Instruction::Alloca);
154 }
155 static bool classof(const Value *V) {
156 return isa<Instruction>(V) && classof(cast<Instruction>(V));
157 }
158
159private:
160 // Shadow Instruction::setInstructionSubclassData with a private forwarding
161 // method so that subclasses cannot accidentally use it.
162 template <typename Bitfield>
163 void setSubclassData(typename Bitfield::Type Value) {
164 Instruction::setSubclassData<Bitfield>(Value);
165 }
166};
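
Note that the alignment lives in the bitfield as a log2 value, so only power-of-two alignments are representable. A minimal sketch of creating and aligning an alloca through the builder, assuming 'B' is positioned in the entry block:

    // Sketch: a 64-byte stack buffer with an explicit 16-byte alignment.
    llvm::AllocaInst *Buf = B.CreateAlloca(
        llvm::ArrayType::get(B.getInt8Ty(), 64), nullptr, "buf");
    Buf->setAlignment(llvm::Align(16)); // stored internally as Log2(16) == 4
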
167
168//===----------------------------------------------------------------------===//
169// LoadInst Class
170//===----------------------------------------------------------------------===//
171
172/// An instruction for reading from memory. This uses the SubclassData field in
173/// Value to store whether or not the load is volatile.
174class LoadInst : public UnaryInstruction {
175 using VolatileField = BoolBitfieldElementT<0>;
176 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
177 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
178 static_assert(
179 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
180 "Bitfields must be contiguous");
181
182 void AssertOK();
183
184protected:
185 // Note: Instruction needs to be a friend here to call cloneImpl.
186 friend class Instruction;
187
188 LoadInst *cloneImpl() const;
189
190public:
191 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr,
192 Instruction *InsertBefore);
193 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, BasicBlock *InsertAtEnd);
194 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
195 Instruction *InsertBefore);
196 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
197 BasicBlock *InsertAtEnd);
198 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
199 Align Align, Instruction *InsertBefore = nullptr);
200 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
201 Align Align, BasicBlock *InsertAtEnd);
202 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
203 Align Align, AtomicOrdering Order,
204 SyncScope::ID SSID = SyncScope::System,
205 Instruction *InsertBefore = nullptr);
206 LoadInst(Type *Ty, Value *Ptr, const Twine &NameStr, bool isVolatile,
207 Align Align, AtomicOrdering Order, SyncScope::ID SSID,
208 BasicBlock *InsertAtEnd);
209
210 /// Return true if this is a load from a volatile memory location.
211 bool isVolatile() const { return getSubclassData<VolatileField>(); }
212
213 /// Specify whether this is a volatile load or not.
214 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
215
216 /// Return the alignment of the access that is being performed.
217 Align getAlign() const {
218 return Align(1ULL << (getSubclassData<AlignmentField>()));
219 }
220
221 void setAlignment(Align Align) {
222 setSubclassData<AlignmentField>(Log2(Align));
223 }
224
225 /// Returns the ordering constraint of this load instruction.
226 AtomicOrdering getOrdering() const {
227 return getSubclassData<OrderingField>();
228 }
229 /// Sets the ordering constraint of this load instruction. May not be Release
230 /// or AcquireRelease.
231 void setOrdering(AtomicOrdering Ordering) {
232 setSubclassData<OrderingField>(Ordering);
233 }
234
235 /// Returns the synchronization scope ID of this load instruction.
236 SyncScope::ID getSyncScopeID() const {
237 return SSID;
238 }
239
240 /// Sets the synchronization scope ID of this load instruction.
241 void setSyncScopeID(SyncScope::ID SSID) {
242 this->SSID = SSID;
243 }
244
245 /// Sets the ordering constraint and the synchronization scope ID of this load
246 /// instruction.
247 void setAtomic(AtomicOrdering Ordering,
248 SyncScope::ID SSID = SyncScope::System) {
249 setOrdering(Ordering);
250 setSyncScopeID(SSID);
251 }
252
253 bool isSimple() const { return !isAtomic() && !isVolatile(); }
254
255 bool isUnordered() const {
256 return (getOrdering() == AtomicOrdering::NotAtomic ||
257 getOrdering() == AtomicOrdering::Unordered) &&
258 !isVolatile();
259 }
260
261 Value *getPointerOperand() { return getOperand(0); }
262 const Value *getPointerOperand() const { return getOperand(0); }
263 static unsigned getPointerOperandIndex() { return 0U; }
264 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
265
266 /// Returns the address space of the pointer operand.
267 unsigned getPointerAddressSpace() const {
268 return getPointerOperandType()->getPointerAddressSpace();
269 }
270
271 // Methods for support type inquiry through isa, cast, and dyn_cast:
272 static bool classof(const Instruction *I) {
273 return I->getOpcode() == Instruction::Load;
274 }
275 static bool classof(const Value *V) {
276 return isa<Instruction>(V) && classof(cast<Instruction>(V));
277 }
278
279private:
280 // Shadow Instruction::setInstructionSubclassData with a private forwarding
281 // method so that subclasses cannot accidentally use it.
282 template <typename Bitfield>
283 void setSubclassData(typename Bitfield::Type Value) {
284 Instruction::setSubclassData<Bitfield>(Value);
285 }
286
287 /// The synchronization scope ID of this load instruction. Not quite enough
288 /// room in SubClassData for everything, so synchronization scope ID gets its
289 /// own field.
290 SyncScope::ID SSID;
291};
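
A minimal sketch of building loads through the builder rather than the raw constructors above, assuming 'Ptr' points to an i32 slot:

    // Sketch: a plain aligned load, then an acquire atomic load.
    llvm::LoadInst *V = B.CreateAlignedLoad(B.getInt32Ty(), Ptr, llvm::Align(4), "v");
    llvm::LoadInst *VA = B.CreateAlignedLoad(B.getInt32Ty(), Ptr, llvm::Align(4), "va");
    VA->setAtomic(llvm::AtomicOrdering::Acquire); // default System scope
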
292
293//===----------------------------------------------------------------------===//
294// StoreInst Class
295//===----------------------------------------------------------------------===//
296
297/// An instruction for storing to memory.
298class StoreInst : public Instruction {
299 using VolatileField = BoolBitfieldElementT<0>;
300 using AlignmentField = AlignmentBitfieldElementT<VolatileField::NextBit>;
301 using OrderingField = AtomicOrderingBitfieldElementT<AlignmentField::NextBit>;
302 static_assert(
303 Bitfield::areContiguous<VolatileField, AlignmentField, OrderingField>(),
304 "Bitfields must be contiguous");
305
306 void AssertOK();
307
308protected:
309 // Note: Instruction needs to be a friend here to call cloneImpl.
310 friend class Instruction;
311
312 StoreInst *cloneImpl() const;
313
314public:
315 StoreInst(Value *Val, Value *Ptr, Instruction *InsertBefore);
316 StoreInst(Value *Val, Value *Ptr, BasicBlock *InsertAtEnd);
317 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Instruction *InsertBefore);
318 StoreInst(Value *Val, Value *Ptr, bool isVolatile, BasicBlock *InsertAtEnd);
319 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
320 Instruction *InsertBefore = nullptr);
321 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
322 BasicBlock *InsertAtEnd);
323 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
324 AtomicOrdering Order, SyncScope::ID SSID = SyncScope::System,
325 Instruction *InsertBefore = nullptr);
326 StoreInst(Value *Val, Value *Ptr, bool isVolatile, Align Align,
327 AtomicOrdering Order, SyncScope::ID SSID, BasicBlock *InsertAtEnd);
328
329 // allocate space for exactly two operands
330 void *operator new(size_t S) { return User::operator new(S, 2); }
331 void operator delete(void *Ptr) { User::operator delete(Ptr); }
332
333 /// Return true if this is a store to a volatile memory location.
334 bool isVolatile() const { return getSubclassData<VolatileField>(); }
335
336 /// Specify whether this is a volatile store or not.
337 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
338
339 /// Transparently provide more efficient getOperand methods.
340 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
341
342 Align getAlign() const {
343 return Align(1ULL << (getSubclassData<AlignmentField>()));
344 }
345
346 void setAlignment(Align Align) {
347 setSubclassData<AlignmentField>(Log2(Align));
348 }
349
350 /// Returns the ordering constraint of this store instruction.
351 AtomicOrdering getOrdering() const {
352 return getSubclassData<OrderingField>();
353 }
354
355 /// Sets the ordering constraint of this store instruction. May not be
356 /// Acquire or AcquireRelease.
357 void setOrdering(AtomicOrdering Ordering) {
358 setSubclassData<OrderingField>(Ordering);
359 }
360
361 /// Returns the synchronization scope ID of this store instruction.
362 SyncScope::ID getSyncScopeID() const {
363 return SSID;
364 }
365
366 /// Sets the synchronization scope ID of this store instruction.
367 void setSyncScopeID(SyncScope::ID SSID) {
368 this->SSID = SSID;
369 }
370
371 /// Sets the ordering constraint and the synchronization scope ID of this
372 /// store instruction.
373 void setAtomic(AtomicOrdering Ordering,
374 SyncScope::ID SSID = SyncScope::System) {
375 setOrdering(Ordering);
376 setSyncScopeID(SSID);
377 }
378
379 bool isSimple() const { return !isAtomic() && !isVolatile(); }
380
381 bool isUnordered() const {
382 return (getOrdering() == AtomicOrdering::NotAtomic ||
383 getOrdering() == AtomicOrdering::Unordered) &&
384 !isVolatile();
385 }
386
387 Value *getValueOperand() { return getOperand(0); }
388 const Value *getValueOperand() const { return getOperand(0); }
389
390 Value *getPointerOperand() { return getOperand(1); }
391 const Value *getPointerOperand() const { return getOperand(1); }
392 static unsigned getPointerOperandIndex() { return 1U; }
393 Type *getPointerOperandType() const { return getPointerOperand()->getType(); }
394
395 /// Returns the address space of the pointer operand.
396 unsigned getPointerAddressSpace() const {
397 return getPointerOperandType()->getPointerAddressSpace();
398 }
399
400 // Methods for support type inquiry through isa, cast, and dyn_cast:
401 static bool classof(const Instruction *I) {
402 return I->getOpcode() == Instruction::Store;
403 }
404 static bool classof(const Value *V) {
405 return isa<Instruction>(V) && classof(cast<Instruction>(V));
406 }
407
408private:
409 // Shadow Instruction::setInstructionSubclassData with a private forwarding
410 // method so that subclasses cannot accidentally use it.
411 template <typename Bitfield>
412 void setSubclassData(typename Bitfield::Type Value) {
413 Instruction::setSubclassData<Bitfield>(Value);
414 }
415
416 /// The synchronization scope ID of this store instruction. Not quite enough
417 /// room in SubClassData for everything, so synchronization scope ID gets its
418 /// own field.
419 SyncScope::ID SSID;
420};
421
422template <>
423struct OperandTraits<StoreInst> : public FixedNumOperandTraits<StoreInst, 2> {
424};
425
426 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(StoreInst, Value)
427
428//===----------------------------------------------------------------------===//
429// FenceInst Class
430//===----------------------------------------------------------------------===//
431
432/// An instruction for ordering other memory operations.
433class FenceInst : public Instruction {
434 using OrderingField = AtomicOrderingBitfieldElementT<0>;
435
436 void Init(AtomicOrdering Ordering, SyncScope::ID SSID);
437
438protected:
439 // Note: Instruction needs to be a friend here to call cloneImpl.
440 friend class Instruction;
441
442 FenceInst *cloneImpl() const;
443
444public:
445 // Ordering may only be Acquire, Release, AcquireRelease, or
446 // SequentiallyConsistent.
447 FenceInst(LLVMContext &C, AtomicOrdering Ordering,
448 SyncScope::ID SSID = SyncScope::System,
449 Instruction *InsertBefore = nullptr);
450 FenceInst(LLVMContext &C, AtomicOrdering Ordering, SyncScope::ID SSID,
451 BasicBlock *InsertAtEnd);
452
453 // allocate space for exactly zero operands
454 void *operator new(size_t S) { return User::operator new(S, 0); }
455 void operator delete(void *Ptr) { User::operator delete(Ptr); }
456
457 /// Returns the ordering constraint of this fence instruction.
458 AtomicOrdering getOrdering() const {
459 return getSubclassData<OrderingField>();
460 }
461
462 /// Sets the ordering constraint of this fence instruction. May only be
463 /// Acquire, Release, AcquireRelease, or SequentiallyConsistent.
464 void setOrdering(AtomicOrdering Ordering) {
465 setSubclassData<OrderingField>(Ordering);
466 }
467
468 /// Returns the synchronization scope ID of this fence instruction.
469 SyncScope::ID getSyncScopeID() const {
470 return SSID;
471 }
472
473 /// Sets the synchronization scope ID of this fence instruction.
474 void setSyncScopeID(SyncScope::ID SSID) {
475 this->SSID = SSID;
476 }
477
478 // Methods for support type inquiry through isa, cast, and dyn_cast:
479 static bool classof(const Instruction *I) {
480 return I->getOpcode() == Instruction::Fence;
481 }
482 static bool classof(const Value *V) {
483 return isa<Instruction>(V) && classof(cast<Instruction>(V));
484 }
485
486private:
487 // Shadow Instruction::setInstructionSubclassData with a private forwarding
488 // method so that subclasses cannot accidentally use it.
489 template <typename Bitfield>
490 void setSubclassData(typename Bitfield::Type Value) {
491 Instruction::setSubclassData<Bitfield>(Value);
492 }
493
494 /// The synchronization scope ID of this fence instruction. Not quite enough
495 /// room in SubClassData for everything, so synchronization scope ID gets its
496 /// own field.
497 SyncScope::ID SSID;
498};
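
A fence carries no operands, only an ordering and an optional scope. A minimal sketch via the builder:

    // Sketch: a seq_cst fence, then a release fence for a single thread.
    B.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent);
    B.CreateFence(llvm::AtomicOrdering::Release, llvm::SyncScope::SingleThread);
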
499
500//===----------------------------------------------------------------------===//
501// AtomicCmpXchgInst Class
502//===----------------------------------------------------------------------===//
503
504/// An instruction that atomically checks whether a
505/// specified value is in a memory location, and, if it is, stores a new value
506/// there. The value returned by this instruction is a pair containing the
507/// original value as first element, and an i1 indicating success (true) or
508/// failure (false) as second element.
509///
510class AtomicCmpXchgInst : public Instruction {
511 void Init(Value *Ptr, Value *Cmp, Value *NewVal, Align Align,
512 AtomicOrdering SuccessOrdering, AtomicOrdering FailureOrdering,
513 SyncScope::ID SSID);
514
515 template <unsigned Offset>
516 using AtomicOrderingBitfieldElement =
517 typename Bitfield::Element<AtomicOrdering, Offset, 3,
518 AtomicOrdering::LAST>;
519
520protected:
521 // Note: Instruction needs to be a friend here to call cloneImpl.
522 friend class Instruction;
523
524 AtomicCmpXchgInst *cloneImpl() const;
525
526public:
527 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
528 AtomicOrdering SuccessOrdering,
529 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
530 Instruction *InsertBefore = nullptr);
531 AtomicCmpXchgInst(Value *Ptr, Value *Cmp, Value *NewVal, Align Alignment,
532 AtomicOrdering SuccessOrdering,
533 AtomicOrdering FailureOrdering, SyncScope::ID SSID,
534 BasicBlock *InsertAtEnd);
535
536 // allocate space for exactly three operands
537 void *operator new(size_t S) { return User::operator new(S, 3); }
538 void operator delete(void *Ptr) { User::operator delete(Ptr); }
539
540 using VolatileField = BoolBitfieldElementT<0>;
541 using WeakField = BoolBitfieldElementT<VolatileField::NextBit>;
542 using SuccessOrderingField =
543 AtomicOrderingBitfieldElementT<WeakField::NextBit>;
544 using FailureOrderingField =
545 AtomicOrderingBitfieldElementT<SuccessOrderingField::NextBit>;
546 using AlignmentField =
547 AlignmentBitfieldElementT<FailureOrderingField::NextBit>;
548 static_assert(
549 Bitfield::areContiguous<VolatileField, WeakField, SuccessOrderingField,
550 FailureOrderingField, AlignmentField>(),
551 "Bitfields must be contiguous");
552
553 /// Return the alignment of the memory that is being allocated by the
554 /// instruction.
555 Align getAlign() const {
556 return Align(1ULL << getSubclassData<AlignmentField>());
557 }
558
559 void setAlignment(Align Align) {
560 setSubclassData<AlignmentField>(Log2(Align));
561 }
562
563 /// Return true if this is a cmpxchg from a volatile memory
564 /// location.
565 ///
566 bool isVolatile() const { return getSubclassData<VolatileField>(); }
567
568 /// Specify whether this is a volatile cmpxchg.
569 ///
570 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
571
572 /// Return true if this cmpxchg may spuriously fail.
573 bool isWeak() const { return getSubclassData<WeakField>(); }
574
575 void setWeak(bool IsWeak) { setSubclassData<WeakField>(IsWeak); }
576
577 /// Transparently provide more efficient getOperand methods.
578 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
579
580 static bool isValidSuccessOrdering(AtomicOrdering Ordering) {
581 return Ordering != AtomicOrdering::NotAtomic &&
582 Ordering != AtomicOrdering::Unordered;
583 }
584
585 static bool isValidFailureOrdering(AtomicOrdering Ordering) {
586 return Ordering != AtomicOrdering::NotAtomic &&
587 Ordering != AtomicOrdering::Unordered &&
588 Ordering != AtomicOrdering::AcquireRelease &&
589 Ordering != AtomicOrdering::Release;
590 }
591
592 /// Returns the success ordering constraint of this cmpxchg instruction.
593 AtomicOrdering getSuccessOrdering() const {
594 return getSubclassData<SuccessOrderingField>();
595 }
596
597 /// Sets the success ordering constraint of this cmpxchg instruction.
598 void setSuccessOrdering(AtomicOrdering Ordering) {
599 assert(isValidSuccessOrdering(Ordering) &&
600        "invalid CmpXchg success ordering");
601 setSubclassData<SuccessOrderingField>(Ordering);
602 }
603
604 /// Returns the failure ordering constraint of this cmpxchg instruction.
605 AtomicOrdering getFailureOrdering() const {
606 return getSubclassData<FailureOrderingField>();
607 }
608
609 /// Sets the failure ordering constraint of this cmpxchg instruction.
610 void setFailureOrdering(AtomicOrdering Ordering) {
611 assert(isValidFailureOrdering(Ordering) &&
612        "invalid CmpXchg failure ordering");
613 setSubclassData<FailureOrderingField>(Ordering);
614 }
615
616 /// Returns a single ordering which is at least as strong as both the
617 /// success and failure orderings for this cmpxchg.
618 AtomicOrdering getMergedOrdering() const {
619 if (getFailureOrdering() == AtomicOrdering::SequentiallyConsistent)
620 return AtomicOrdering::SequentiallyConsistent;
621 if (getFailureOrdering() == AtomicOrdering::Acquire) {
622 if (getSuccessOrdering() == AtomicOrdering::Monotonic)
623 return AtomicOrdering::Acquire;
624 if (getSuccessOrdering() == AtomicOrdering::Release)
625 return AtomicOrdering::AcquireRelease;
626 }
627 return getSuccessOrdering();
628 }
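
For instance, success = Release with failure = Acquire merges to AcquireRelease, while success = Monotonic with failure = Acquire merges to Acquire. A minimal check, assuming 'CXI' is an existing AtomicCmpXchgInst:

    // Sketch: the merged ordering covers both halves of the operation.
    CXI->setSuccessOrdering(llvm::AtomicOrdering::Release);
    CXI->setFailureOrdering(llvm::AtomicOrdering::Acquire);
    assert(CXI->getMergedOrdering() == llvm::AtomicOrdering::AcquireRelease);
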
629
630 /// Returns the synchronization scope ID of this cmpxchg instruction.
631 SyncScope::ID getSyncScopeID() const {
632 return SSID;
633 }
634
635 /// Sets the synchronization scope ID of this cmpxchg instruction.
636 void setSyncScopeID(SyncScope::ID SSID) {
637 this->SSID = SSID;
638 }
639
640 Value *getPointerOperand() { return getOperand(0); }
641 const Value *getPointerOperand() const { return getOperand(0); }
642 static unsigned getPointerOperandIndex() { return 0U; }
643
644 Value *getCompareOperand() { return getOperand(1); }
645 const Value *getCompareOperand() const { return getOperand(1); }
646
647 Value *getNewValOperand() { return getOperand(2); }
648 const Value *getNewValOperand() const { return getOperand(2); }
649
650 /// Returns the address space of the pointer operand.
651 unsigned getPointerAddressSpace() const {
652 return getPointerOperand()->getType()->getPointerAddressSpace();
653 }
654
655 /// Returns the strongest permitted ordering on failure, given the
656 /// desired ordering on success.
657 ///
658 /// If the comparison in a cmpxchg operation fails, there is no atomic store
659 /// so release semantics cannot be provided. So this function drops explicit
660 /// Release requests from the AtomicOrdering. A SequentiallyConsistent
661 /// operation would remain SequentiallyConsistent.
662 static AtomicOrdering
663 getStrongestFailureOrdering(AtomicOrdering SuccessOrdering) {
664 switch (SuccessOrdering) {
665 default:
666     llvm_unreachable("invalid cmpxchg success ordering");
667 case AtomicOrdering::Release:
668 case AtomicOrdering::Monotonic:
669 return AtomicOrdering::Monotonic;
670 case AtomicOrdering::AcquireRelease:
671 case AtomicOrdering::Acquire:
672 return AtomicOrdering::Acquire;
673 case AtomicOrdering::SequentiallyConsistent:
674 return AtomicOrdering::SequentiallyConsistent;
675 }
676 }
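
The mapping simply drops the release half of the success ordering and keeps the acquire half. A minimal check mirroring the switch above:

    // Sketch: release semantics disappear on the failure path.
    using AO = llvm::AtomicOrdering;
    assert(llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(
               AO::AcquireRelease) == AO::Acquire);
    assert(llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(
               AO::Release) == AO::Monotonic);
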
677
678 // Methods for support type inquiry through isa, cast, and dyn_cast:
679 static bool classof(const Instruction *I) {
680 return I->getOpcode() == Instruction::AtomicCmpXchg;
681 }
682 static bool classof(const Value *V) {
683 return isa<Instruction>(V) && classof(cast<Instruction>(V));
684 }
685
686private:
687 // Shadow Instruction::setInstructionSubclassData with a private forwarding
688 // method so that subclasses cannot accidentally use it.
689 template <typename Bitfield>
690 void setSubclassData(typename Bitfield::Type Value) {
691 Instruction::setSubclassData<Bitfield>(Value);
692 }
693
694 /// The synchronization scope ID of this cmpxchg instruction. Not quite
695 /// enough room in SubClassData for everything, so synchronization scope ID
696 /// gets its own field.
697 SyncScope::ID SSID;
698};
699
700template <>
701struct OperandTraits<AtomicCmpXchgInst> :
702 public FixedNumOperandTraits<AtomicCmpXchgInst, 3> {
703};
704
705 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicCmpXchgInst, Value)
706
707//===----------------------------------------------------------------------===//
708// AtomicRMWInst Class
709//===----------------------------------------------------------------------===//
710
711/// an instruction that atomically reads a memory location,
712/// combines it with another value, and then stores the result back. Returns
713/// the old value.
714///
715class AtomicRMWInst : public Instruction {
716protected:
717 // Note: Instruction needs to be a friend here to call cloneImpl.
718 friend class Instruction;
719
720 AtomicRMWInst *cloneImpl() const;
721
722public:
723 /// This enumeration lists the possible modifications atomicrmw can make. In
724 /// the descriptions, 'p' is the pointer to the instruction's memory location,
725 /// 'old' is the initial value of *p, and 'v' is the other value passed to the
726 /// instruction. These instructions always return 'old'.
727 enum BinOp : unsigned {
728 /// *p = v
729 Xchg,
730 /// *p = old + v
731 Add,
732 /// *p = old - v
733 Sub,
734 /// *p = old & v
735 And,
736 /// *p = ~(old & v)
737 Nand,
738 /// *p = old | v
739 Or,
740 /// *p = old ^ v
741 Xor,
742 /// *p = old >signed v ? old : v
743 Max,
744 /// *p = old <signed v ? old : v
745 Min,
746 /// *p = old >unsigned v ? old : v
747 UMax,
748 /// *p = old <unsigned v ? old : v
749 UMin,
750
751 /// *p = old + v
752 FAdd,
753
754 /// *p = old - v
755 FSub,
756
757 /// *p = maxnum(old, v)
758 /// \p maxnum matches the behavior of \p llvm.maxnum.*.
759 FMax,
760
761 /// *p = minnum(old, v)
762 /// \p minnum matches the behavior of \p llvm.minnum.*.
763 FMin,
764
765 FIRST_BINOP = Xchg,
766 LAST_BINOP = FMin,
767 BAD_BINOP
768 };
769
770private:
771 template <unsigned Offset>
772 using AtomicOrderingBitfieldElement =
773 typename Bitfield::Element<AtomicOrdering, Offset, 3,
774 AtomicOrdering::LAST>;
775
776 template <unsigned Offset>
777 using BinOpBitfieldElement =
778 typename Bitfield::Element<BinOp, Offset, 4, BinOp::LAST_BINOP>;
779
780public:
781 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
782 AtomicOrdering Ordering, SyncScope::ID SSID,
783 Instruction *InsertBefore = nullptr);
784 AtomicRMWInst(BinOp Operation, Value *Ptr, Value *Val, Align Alignment,
785 AtomicOrdering Ordering, SyncScope::ID SSID,
786 BasicBlock *InsertAtEnd);
787
788 // allocate space for exactly two operands
789 void *operator new(size_t S) { return User::operator new(S, 2); }
790 void operator delete(void *Ptr) { User::operator delete(Ptr); }
791
792 using VolatileField = BoolBitfieldElementT<0>;
793 using AtomicOrderingField =
794 AtomicOrderingBitfieldElementT<VolatileField::NextBit>;
795 using OperationField = BinOpBitfieldElement<AtomicOrderingField::NextBit>;
796 using AlignmentField = AlignmentBitfieldElementT<OperationField::NextBit>;
797 static_assert(Bitfield::areContiguous<VolatileField, AtomicOrderingField,
798 OperationField, AlignmentField>(),
799 "Bitfields must be contiguous");
800
801 BinOp getOperation() const { return getSubclassData<OperationField>(); }
802
803 static StringRef getOperationName(BinOp Op);
804
805 static bool isFPOperation(BinOp Op) {
806 switch (Op) {
807 case AtomicRMWInst::FAdd:
808 case AtomicRMWInst::FSub:
809 case AtomicRMWInst::FMax:
810 case AtomicRMWInst::FMin:
811 return true;
812 default:
813 return false;
814 }
815 }
816
817 void setOperation(BinOp Operation) {
818 setSubclassData<OperationField>(Operation);
819 }
820
821 /// Return the alignment of the memory that is being allocated by the
822 /// instruction.
823 Align getAlign() const {
824 return Align(1ULL << getSubclassData<AlignmentField>());
825 }
826
827 void setAlignment(Align Align) {
828 setSubclassData<AlignmentField>(Log2(Align));
829 }
830
831 /// Return true if this is a RMW on a volatile memory location.
832 ///
833 bool isVolatile() const { return getSubclassData<VolatileField>(); }
834
835 /// Specify whether this is a volatile RMW or not.
836 ///
837 void setVolatile(bool V) { setSubclassData<VolatileField>(V); }
838
839 /// Transparently provide more efficient getOperand methods.
840 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
841
842 /// Returns the ordering constraint of this rmw instruction.
843 AtomicOrdering getOrdering() const {
844 return getSubclassData<AtomicOrderingField>();
845 }
846
847 /// Sets the ordering constraint of this rmw instruction.
848 void setOrdering(AtomicOrdering Ordering) {
849 assert(Ordering != AtomicOrdering::NotAtomic &&
850        "atomicrmw instructions can only be atomic.");
851 assert(Ordering != AtomicOrdering::Unordered &&
852        "atomicrmw instructions cannot be unordered.");
853 setSubclassData<AtomicOrderingField>(Ordering);
854 }
855
856 /// Returns the synchronization scope ID of this rmw instruction.
857 SyncScope::ID getSyncScopeID() const {
858 return SSID;
859 }
860
861 /// Sets the synchronization scope ID of this rmw instruction.
862 void setSyncScopeID(SyncScope::ID SSID) {
863 this->SSID = SSID;
864 }
865
866 Value *getPointerOperand() { return getOperand(0); }
867 const Value *getPointerOperand() const { return getOperand(0); }
868 static unsigned getPointerOperandIndex() { return 0U; }
869
870 Value *getValOperand() { return getOperand(1); }
871 const Value *getValOperand() const { return getOperand(1); }
872
873 /// Returns the address space of the pointer operand.
874 unsigned getPointerAddressSpace() const {
875 return getPointerOperand()->getType()->getPointerAddressSpace();
876 }
877
878 bool isFloatingPointOperation() const {
879 return isFPOperation(getOperation());
880 }
881
882 // Methods for support type inquiry through isa, cast, and dyn_cast:
883 static bool classof(const Instruction *I) {
884 return I->getOpcode() == Instruction::AtomicRMW;
885 }
886 static bool classof(const Value *V) {
887 return isa<Instruction>(V) && classof(cast<Instruction>(V));
888 }
889
890private:
891 void Init(BinOp Operation, Value *Ptr, Value *Val, Align Align,
892 AtomicOrdering Ordering, SyncScope::ID SSID);
893
894 // Shadow Instruction::setInstructionSubclassData with a private forwarding
895 // method so that subclasses cannot accidentally use it.
896 template <typename Bitfield>
897 void setSubclassData(typename Bitfield::Type Value) {
898 Instruction::setSubclassData<Bitfield>(Value);
899 }
900
901 /// The synchronization scope ID of this rmw instruction. Not quite enough
902 /// room in SubClassData for everything, so synchronization scope ID gets its
903 /// own field.
904 SyncScope::ID SSID;
905};
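
A minimal sketch of emitting an atomicrmw through the builder, assuming 'Ptr' points to an i32 cell; the returned value is the cell's content before the update:

    // Sketch: *Ptr += 1 atomically; 'Old' holds the pre-update value.
    llvm::Value *Old = B.CreateAtomicRMW(
        llvm::AtomicRMWInst::Add, Ptr, B.getInt32(1), llvm::MaybeAlign(),
        llvm::AtomicOrdering::Monotonic);
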
906
907template <>
908struct OperandTraits<AtomicRMWInst>
909 : public FixedNumOperandTraits<AtomicRMWInst,2> {
910};
911
912 DEFINE_TRANSPARENT_OPERAND_ACCESSORS(AtomicRMWInst, Value)
913
914//===----------------------------------------------------------------------===//
915// GetElementPtrInst Class
916//===----------------------------------------------------------------------===//
917
918// checkGEPType - Simple wrapper function to give a better assertion failure
919// message on bad indexes for a gep instruction.
920//
921inline Type *checkGEPType(Type *Ty) {
922   assert(Ty && "Invalid GetElementPtrInst indices for type!");
923 return Ty;
924}
925
926/// An instruction for type-safe pointer arithmetic to
927/// access elements of arrays and structs.
928///
929class GetElementPtrInst : public Instruction {
930 Type *SourceElementType;
931 Type *ResultElementType;
932
933 GetElementPtrInst(const GetElementPtrInst &GEPI);
934
935  /// Constructors - Create a getelementptr instruction with a base pointer and
936  /// a list of indices. The first ctor can optionally insert before an existing
937 /// instruction, the second appends the new instruction to the specified
938 /// BasicBlock.
939 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
940 ArrayRef<Value *> IdxList, unsigned Values,
941 const Twine &NameStr, Instruction *InsertBefore);
942 inline GetElementPtrInst(Type *PointeeType, Value *Ptr,
943 ArrayRef<Value *> IdxList, unsigned Values,
944 const Twine &NameStr, BasicBlock *InsertAtEnd);
945
946 void init(Value *Ptr, ArrayRef<Value *> IdxList, const Twine &NameStr);
947
948protected:
949 // Note: Instruction needs to be a friend here to call cloneImpl.
950 friend class Instruction;
951
952 GetElementPtrInst *cloneImpl() const;
953
954public:
955 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
956 ArrayRef<Value *> IdxList,
957 const Twine &NameStr = "",
958 Instruction *InsertBefore = nullptr) {
959 unsigned Values = 1 + unsigned(IdxList.size());
960    assert(PointeeType && "Must specify element type");
961    assert(cast<PointerType>(Ptr->getType()->getScalarType())
962               ->isOpaqueOrPointeeTypeMatches(PointeeType));
963 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
964 NameStr, InsertBefore);
965 }
966
967 static GetElementPtrInst *Create(Type *PointeeType, Value *Ptr,
968 ArrayRef<Value *> IdxList,
969 const Twine &NameStr,
970 BasicBlock *InsertAtEnd) {
971 unsigned Values = 1 + unsigned(IdxList.size());
972    assert(PointeeType && "Must specify element type");
973    assert(cast<PointerType>(Ptr->getType()->getScalarType())
974               ->isOpaqueOrPointeeTypeMatches(PointeeType));
975 return new (Values) GetElementPtrInst(PointeeType, Ptr, IdxList, Values,
976 NameStr, InsertAtEnd);
977 }
978
979 /// Create an "inbounds" getelementptr. See the documentation for the
980 /// "inbounds" flag in LangRef.html for details.
981 static GetElementPtrInst *
982 CreateInBounds(Type *PointeeType, Value *Ptr, ArrayRef<Value *> IdxList,
983 const Twine &NameStr = "",
984 Instruction *InsertBefore = nullptr) {
985 GetElementPtrInst *GEP =
986 Create(PointeeType, Ptr, IdxList, NameStr, InsertBefore);
987 GEP->setIsInBounds(true);
988 return GEP;
989 }
990
991 static GetElementPtrInst *CreateInBounds(Type *PointeeType, Value *Ptr,
992 ArrayRef<Value *> IdxList,
993 const Twine &NameStr,
994 BasicBlock *InsertAtEnd) {
995 GetElementPtrInst *GEP =
996 Create(PointeeType, Ptr, IdxList, NameStr, InsertAtEnd);
997 GEP->setIsInBounds(true);
998 return GEP;
999 }
1000
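A minimal usage sketch of the factories above (hypothetical helper, not part of this header; assumes the usual llvm/IR headers and `using namespace llvm;`):

  // Build an inbounds GEP computing &Arr[0][Idx] over a [16 x i64] array.
  // Arr, Idx, and InsertPt are assumed to come from the surrounding code.
  Value *emitElemAddr(Value *Arr, Value *Idx, Instruction *InsertPt) {
    LLVMContext &Ctx = InsertPt->getContext();
    Type *I64 = Type::getInt64Ty(Ctx);
    Type *ArrTy = ArrayType::get(I64, 16);
    Value *Zero = ConstantInt::get(I64, 0);
    return GetElementPtrInst::CreateInBounds(ArrTy, Arr, {Zero, Idx},
                                             "elem.addr", InsertPt);
  }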
1001 /// Transparently provide more efficient getOperand methods.
1002  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1003
1004 Type *getSourceElementType() const { return SourceElementType; }
1005
1006 void setSourceElementType(Type *Ty) { SourceElementType = Ty; }
1007 void setResultElementType(Type *Ty) { ResultElementType = Ty; }
1008
1009 Type *getResultElementType() const {
1010    assert(cast<PointerType>(getType()->getScalarType())
1011               ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1012 return ResultElementType;
1013 }
1014
1015 /// Returns the address space of this instruction's pointer type.
1016 unsigned getAddressSpace() const {
1017 // Note that this is always the same as the pointer operand's address space
1018 // and that is cheaper to compute, so cheat here.
1019 return getPointerAddressSpace();
1020 }
1021
1022 /// Returns the result type of a getelementptr with the given source
1023 /// element type and indexes.
1024 ///
1025 /// Null is returned if the indices are invalid for the specified
1026 /// source element type.
1027 static Type *getIndexedType(Type *Ty, ArrayRef<Value *> IdxList);
1028 static Type *getIndexedType(Type *Ty, ArrayRef<Constant *> IdxList);
1029 static Type *getIndexedType(Type *Ty, ArrayRef<uint64_t> IdxList);
1030
1031 /// Return the type of the element at the given index of an indexable
1032 /// type. This is equivalent to "getIndexedType(Agg, {Zero, Idx})".
1033 ///
1034 /// Returns null if the type can't be indexed, or the given index is not
1035 /// legal for the given type.
1036 static Type *getTypeAtIndex(Type *Ty, Value *Idx);
1037 static Type *getTypeAtIndex(Type *Ty, uint64_t Idx);
1038
1039 inline op_iterator idx_begin() { return op_begin()+1; }
1040 inline const_op_iterator idx_begin() const { return op_begin()+1; }
1041 inline op_iterator idx_end() { return op_end(); }
1042 inline const_op_iterator idx_end() const { return op_end(); }
1043
1044 inline iterator_range<op_iterator> indices() {
1045 return make_range(idx_begin(), idx_end());
1046 }
1047
1048 inline iterator_range<const_op_iterator> indices() const {
1049 return make_range(idx_begin(), idx_end());
1050 }
1051
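For illustration, the index accessors above support operand walks like the following (hypothetical helper; note the header already provides hasAllConstantIndices() for exactly this query):

  // indices() skips operand 0, which is the base pointer.
  bool allIndicesConstant(const GetElementPtrInst *GEP) {
    for (const Value *Idx : GEP->indices())
      if (!isa<Constant>(Idx))
        return false;
    return true;
  }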
1052 Value *getPointerOperand() {
1053 return getOperand(0);
1054 }
1055 const Value *getPointerOperand() const {
1056 return getOperand(0);
1057 }
1058 static unsigned getPointerOperandIndex() {
1059 return 0U; // get index for modifying correct operand.
1060 }
1061
1062 /// Method to return the pointer operand as a
1063 /// PointerType.
1064 Type *getPointerOperandType() const {
1065 return getPointerOperand()->getType();
1066 }
1067
1068 /// Returns the address space of the pointer operand.
1069 unsigned getPointerAddressSpace() const {
1070 return getPointerOperandType()->getPointerAddressSpace();
1071 }
1072
1073 /// Returns the pointer type returned by the GEP
1074 /// instruction, which may be a vector of pointers.
1075 static Type *getGEPReturnType(Type *ElTy, Value *Ptr,
1076 ArrayRef<Value *> IdxList) {
1077 PointerType *OrigPtrTy = cast<PointerType>(Ptr->getType()->getScalarType());
1078 unsigned AddrSpace = OrigPtrTy->getAddressSpace();
1079 Type *ResultElemTy = checkGEPType(getIndexedType(ElTy, IdxList));
1080 Type *PtrTy = OrigPtrTy->isOpaque()
1081 ? PointerType::get(OrigPtrTy->getContext(), AddrSpace)
1082 : PointerType::get(ResultElemTy, AddrSpace);
1083 // Vector GEP
1084 if (auto *PtrVTy = dyn_cast<VectorType>(Ptr->getType())) {
1085 ElementCount EltCount = PtrVTy->getElementCount();
1086 return VectorType::get(PtrTy, EltCount);
1087 }
1088 for (Value *Index : IdxList)
1089 if (auto *IndexVTy = dyn_cast<VectorType>(Index->getType())) {
1090 ElementCount EltCount = IndexVTy->getElementCount();
1091 return VectorType::get(PtrTy, EltCount);
1092 }
1093 // Scalar GEP
1094 return PtrTy;
1095 }
1096
1097 unsigned getNumIndices() const { // Note: always non-negative
1098 return getNumOperands() - 1;
1099 }
1100
1101 bool hasIndices() const {
1102 return getNumOperands() > 1;
1103 }
1104
1105 /// Return true if all of the indices of this GEP are
1106 /// zeros. If so, the result pointer and the first operand have the same
1107 /// value, just potentially different types.
1108 bool hasAllZeroIndices() const;
1109
1110 /// Return true if all of the indices of this GEP are
1111 /// constant integers. If so, the result pointer and the first operand have
1112 /// a constant offset between them.
1113 bool hasAllConstantIndices() const;
1114
1115 /// Set or clear the inbounds flag on this GEP instruction.
1116 /// See LangRef.html for the meaning of inbounds on a getelementptr.
1117 void setIsInBounds(bool b = true);
1118
1119 /// Determine whether the GEP has the inbounds flag.
1120 bool isInBounds() const;
1121
1122 /// Accumulate the constant address offset of this GEP if possible.
1123 ///
1124 /// This routine accepts an APInt into which it will accumulate the constant
1125 /// offset of this GEP if the GEP is in fact constant. If the GEP is not
1126 /// all-constant, it returns false and the value of the offset APInt is
1127 /// undefined (it is *not* preserved!). The APInt passed into this routine
1128 /// must be at least as wide as the IntPtr type for the address space of
1129 /// the base GEP pointer.
1130 bool accumulateConstantOffset(const DataLayout &DL, APInt &Offset) const;
1131 bool collectOffset(const DataLayout &DL, unsigned BitWidth,
1132 MapVector<Value *, APInt> &VariableOffsets,
1133 APInt &ConstantOffset) const;
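A usage sketch for the offset helpers above (hypothetical helper; per the contract stated above, the APInt is pre-sized to the index width of the GEP's address space):

  // Returns the constant byte offset of GEP from its base pointer when all
  // indices are constant; std::nullopt otherwise.
  std::optional<APInt> constGEPOffset(const DataLayout &DL,
                                      const GetElementPtrInst *GEP) {
    unsigned BitWidth = DL.getIndexSizeInBits(GEP->getPointerAddressSpace());
    APInt Offset(BitWidth, 0);
    if (GEP->accumulateConstantOffset(DL, Offset))
      return Offset;
    return std::nullopt;
  }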
1134 // Methods for support type inquiry through isa, cast, and dyn_cast:
1135 static bool classof(const Instruction *I) {
1136 return (I->getOpcode() == Instruction::GetElementPtr);
1137 }
1138 static bool classof(const Value *V) {
1139 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1140 }
1141};
1142
1143template <>
1144struct OperandTraits<GetElementPtrInst> :
1145 public VariadicOperandTraits<GetElementPtrInst, 1> {
1146};
1147
1148GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1149 ArrayRef<Value *> IdxList, unsigned Values,
1150 const Twine &NameStr,
1151 Instruction *InsertBefore)
1152 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1153 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1154 Values, InsertBefore),
1155 SourceElementType(PointeeType),
1156 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1157  assert(cast<PointerType>(getType()->getScalarType())
1158             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1159 init(Ptr, IdxList, NameStr);
1160}
1161
1162GetElementPtrInst::GetElementPtrInst(Type *PointeeType, Value *Ptr,
1163 ArrayRef<Value *> IdxList, unsigned Values,
1164 const Twine &NameStr,
1165 BasicBlock *InsertAtEnd)
1166 : Instruction(getGEPReturnType(PointeeType, Ptr, IdxList), GetElementPtr,
1167 OperandTraits<GetElementPtrInst>::op_end(this) - Values,
1168 Values, InsertAtEnd),
1169 SourceElementType(PointeeType),
1170 ResultElementType(getIndexedType(PointeeType, IdxList)) {
1171  assert(cast<PointerType>(getType()->getScalarType())
1172             ->isOpaqueOrPointeeTypeMatches(ResultElementType));
1173 init(Ptr, IdxList, NameStr);
1174}
1175
1176DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value)
1177
1178//===----------------------------------------------------------------------===//
1179// ICmpInst Class
1180//===----------------------------------------------------------------------===//
1181
1182/// This instruction compares its operands according to the predicate given
1183/// to the constructor. It only operates on integers or pointers. The operands
1184/// must be identical types.
1185/// Represent an integer comparison operator.
1186class ICmpInst: public CmpInst {
1187 void AssertOK() {
1188    assert(isIntPredicate() &&
1189           "Invalid ICmp predicate value");
1190    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1191           "Both operands to ICmp instruction are not of the same type!");
1192 // Check that the operands are the right type
1193    assert((getOperand(0)->getType()->isIntOrIntVectorTy() ||
1194            getOperand(0)->getType()->isPtrOrPtrVectorTy()) &&
1195           "Invalid operand types for ICmp instruction");
1196 }
1197
1198protected:
1199 // Note: Instruction needs to be a friend here to call cloneImpl.
1200 friend class Instruction;
1201
1202 /// Clone an identical ICmpInst
1203 ICmpInst *cloneImpl() const;
1204
1205public:
1206 /// Constructor with insert-before-instruction semantics.
1207 ICmpInst(
1208 Instruction *InsertBefore, ///< Where to insert
1209 Predicate pred, ///< The predicate to use for the comparison
1210 Value *LHS, ///< The left-hand-side of the expression
1211 Value *RHS, ///< The right-hand-side of the expression
1212 const Twine &NameStr = "" ///< Name of the instruction
1213 ) : CmpInst(makeCmpResultType(LHS->getType()),
1214 Instruction::ICmp, pred, LHS, RHS, NameStr,
1215 InsertBefore) {
1216#ifndef NDEBUG
1217 AssertOK();
1218#endif
1219 }
1220
1221 /// Constructor with insert-at-end semantics.
1222 ICmpInst(
1223 BasicBlock &InsertAtEnd, ///< Block to insert into.
1224 Predicate pred, ///< The predicate to use for the comparison
1225 Value *LHS, ///< The left-hand-side of the expression
1226 Value *RHS, ///< The right-hand-side of the expression
1227 const Twine &NameStr = "" ///< Name of the instruction
1228 ) : CmpInst(makeCmpResultType(LHS->getType()),
1229 Instruction::ICmp, pred, LHS, RHS, NameStr,
1230 &InsertAtEnd) {
1231#ifndef NDEBUG
1232 AssertOK();
1233#endif
1234 }
1235
1236 /// Constructor with no-insertion semantics
1237 ICmpInst(
1238 Predicate pred, ///< The predicate to use for the comparison
1239 Value *LHS, ///< The left-hand-side of the expression
1240 Value *RHS, ///< The right-hand-side of the expression
1241 const Twine &NameStr = "" ///< Name of the instruction
1242 ) : CmpInst(makeCmpResultType(LHS->getType()),
        [16] Called C++ object pointer is null
1243 Instruction::ICmp, pred, LHS, RHS, NameStr) {
1244#ifndef NDEBUG
1245 AssertOK();
1246#endif
1247 }
1248
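This no-insertion constructor is where the analyzer's path ends: step 16 above reports that `LHS` is null when the initializer on line 1242 evaluates `makeCmpResultType(LHS->getType())`, so the `->` dereference itself is the defect. A hypothetical caller-side guard (not part of this header; the constructor performs no null check of its own):

  // Only construct the compare once both operands are known non-null.
  ICmpInst *makeEqOrNull(Value *LHS, Value *RHS) {
    if (!LHS || !RHS)
      return nullptr;
    return new ICmpInst(ICmpInst::ICMP_EQ, LHS, RHS, "cmp");
  }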
1249 /// For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
1250 /// @returns the predicate that would be the result if the operand were
1251 /// regarded as signed.
1252 /// Return the signed version of the predicate
1253 Predicate getSignedPredicate() const {
1254 return getSignedPredicate(getPredicate());
1255 }
1256
1257 /// This is a static version that you can use without an instruction.
1258 /// Return the signed version of the predicate.
1259 static Predicate getSignedPredicate(Predicate pred);
1260
1261 /// For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
1262 /// @returns the predicate that would be the result if the operand were
1263 /// regarded as unsigned.
1264 /// Return the unsigned version of the predicate
1265 Predicate getUnsignedPredicate() const {
1266 return getUnsignedPredicate(getPredicate());
1267 }
1268
1269 /// This is a static version that you can use without an instruction.
1270 /// Return the unsigned version of the predicate.
1271 static Predicate getUnsignedPredicate(Predicate pred);
1272
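For example, per the mapping documented above (unsigned predicates gain signed analogues and vice versa; equality predicates are unchanged):

  assert(ICmpInst::getSignedPredicate(ICmpInst::ICMP_UGT) == ICmpInst::ICMP_SGT);
  assert(ICmpInst::getUnsignedPredicate(ICmpInst::ICMP_SLE) == ICmpInst::ICMP_ULE);
  assert(ICmpInst::getSignedPredicate(ICmpInst::ICMP_EQ) == ICmpInst::ICMP_EQ);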
1273 /// Return true if this predicate is either EQ or NE. This also
1274 /// tests for commutativity.
1275 static bool isEquality(Predicate P) {
1276 return P == ICMP_EQ || P == ICMP_NE;
1277 }
1278
1279 /// Return true if this predicate is either EQ or NE. This also
1280 /// tests for commutativity.
1281 bool isEquality() const {
1282 return isEquality(getPredicate());
1283 }
1284
1285 /// @returns true if the predicate of this ICmpInst is commutative
1286 /// Determine if this relation is commutative.
1287 bool isCommutative() const { return isEquality(); }
1288
1289 /// Return true if the predicate is relational (not EQ or NE).
1290 ///
1291 bool isRelational() const {
1292 return !isEquality();
1293 }
1294
1295 /// Return true if the predicate is relational (not EQ or NE).
1296 ///
1297 static bool isRelational(Predicate P) {
1298 return !isEquality(P);
1299 }
1300
1301 /// Return true if the predicate is SGT or UGT.
1302 ///
1303 static bool isGT(Predicate P) {
1304 return P == ICMP_SGT || P == ICMP_UGT;
1305 }
1306
1307 /// Return true if the predicate is SLT or ULT.
1308 ///
1309 static bool isLT(Predicate P) {
1310 return P == ICMP_SLT || P == ICMP_ULT;
1311 }
1312
1313 /// Return true if the predicate is SGE or UGE.
1314 ///
1315 static bool isGE(Predicate P) {
1316 return P == ICMP_SGE || P == ICMP_UGE;
1317 }
1318
1319 /// Return true if the predicate is SLE or ULE.
1320 ///
1321 static bool isLE(Predicate P) {
1322 return P == ICMP_SLE || P == ICMP_ULE;
1323 }
1324
1325 /// Returns the sequence of all ICmp predicates.
1326 ///
1327 static auto predicates() { return ICmpPredicates(); }
1328
1329 /// Exchange the two operands to this instruction in such a way that it does
1330 /// not modify the semantics of the instruction. The predicate value may be
1331 /// changed to retain the same result if the predicate is order dependent
1332 /// (e.g. ult).
1333 /// Swap operands and adjust predicate.
1334 void swapOperands() {
1335 setPredicate(getSwappedPredicate());
1336 Op<0>().swap(Op<1>());
1337 }
1338
1339 /// Return result of `LHS Pred RHS` comparison.
1340 static bool compare(const APInt &LHS, const APInt &RHS,
1341 ICmpInst::Predicate Pred);
1342
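A small sketch of the static evaluator above (hypothetical values):

  // Evaluate `1 <s 2` without materializing an instruction.
  APInt One(/*numBits=*/32, 1), Two(/*numBits=*/32, 2);
  bool IsLT = ICmpInst::compare(One, Two, ICmpInst::ICMP_SLT); // true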
1343 // Methods for support type inquiry through isa, cast, and dyn_cast:
1344 static bool classof(const Instruction *I) {
1345 return I->getOpcode() == Instruction::ICmp;
1346 }
1347 static bool classof(const Value *V) {
1348 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1349 }
1350};
1351
1352//===----------------------------------------------------------------------===//
1353// FCmpInst Class
1354//===----------------------------------------------------------------------===//
1355
1356/// This instruction compares its operands according to the predicate given
1357/// to the constructor. It only operates on floating point values or packed
1358/// vectors of floating point values. The operands must be identical types.
1359/// Represents a floating point comparison operator.
1360class FCmpInst: public CmpInst {
1361 void AssertOK() {
1362    assert(isFPPredicate() && "Invalid FCmp predicate value");
1363    assert(getOperand(0)->getType() == getOperand(1)->getType() &&
1364           "Both operands to FCmp instruction are not of the same type!");
1365 // Check that the operands are the right type
1366    assert(getOperand(0)->getType()->isFPOrFPVectorTy() &&
1367           "Invalid operand types for FCmp instruction");
1368 }
1369
1370protected:
1371 // Note: Instruction needs to be a friend here to call cloneImpl.
1372 friend class Instruction;
1373
1374 /// Clone an identical FCmpInst
1375 FCmpInst *cloneImpl() const;
1376
1377public:
1378 /// Constructor with insert-before-instruction semantics.
1379 FCmpInst(
1380 Instruction *InsertBefore, ///< Where to insert
1381 Predicate pred, ///< The predicate to use for the comparison
1382 Value *LHS, ///< The left-hand-side of the expression
1383 Value *RHS, ///< The right-hand-side of the expression
1384 const Twine &NameStr = "" ///< Name of the instruction
1385 ) : CmpInst(makeCmpResultType(LHS->getType()),
1386 Instruction::FCmp, pred, LHS, RHS, NameStr,
1387 InsertBefore) {
1388 AssertOK();
1389 }
1390
1391 /// Constructor with insert-at-end semantics.
1392 FCmpInst(
1393 BasicBlock &InsertAtEnd, ///< Block to insert into.
1394 Predicate pred, ///< The predicate to use for the comparison
1395 Value *LHS, ///< The left-hand-side of the expression
1396 Value *RHS, ///< The right-hand-side of the expression
1397 const Twine &NameStr = "" ///< Name of the instruction
1398 ) : CmpInst(makeCmpResultType(LHS->getType()),
1399 Instruction::FCmp, pred, LHS, RHS, NameStr,
1400 &InsertAtEnd) {
1401 AssertOK();
1402 }
1403
1404 /// Constructor with no-insertion semantics
1405 FCmpInst(
1406 Predicate Pred, ///< The predicate to use for the comparison
1407 Value *LHS, ///< The left-hand-side of the expression
1408 Value *RHS, ///< The right-hand-side of the expression
1409 const Twine &NameStr = "", ///< Name of the instruction
1410 Instruction *FlagsSource = nullptr
1411 ) : CmpInst(makeCmpResultType(LHS->getType()), Instruction::FCmp, Pred, LHS,
1412 RHS, NameStr, nullptr, FlagsSource) {
1413 AssertOK();
1414 }
1415
1416 /// @returns true if the predicate of this instruction is EQ or NE.
1417 /// Determine if this is an equality predicate.
1418 static bool isEquality(Predicate Pred) {
1419 return Pred == FCMP_OEQ || Pred == FCMP_ONE || Pred == FCMP_UEQ ||
1420 Pred == FCMP_UNE;
1421 }
1422
1423 /// @returns true if the predicate of this instruction is EQ or NE.
1424 /// Determine if this is an equality predicate.
1425 bool isEquality() const { return isEquality(getPredicate()); }
1426
1427 /// @returns true if the predicate of this instruction is commutative.
1428 /// Determine if this is a commutative predicate.
1429 bool isCommutative() const {
1430 return isEquality() ||
1431 getPredicate() == FCMP_FALSE ||
1432 getPredicate() == FCMP_TRUE ||
1433 getPredicate() == FCMP_ORD ||
1434 getPredicate() == FCMP_UNO;
1435 }
1436
1437 /// @returns true if the predicate is relational (not EQ or NE).
1438  /// Determine if this is a relational predicate.
1439 bool isRelational() const { return !isEquality(); }
1440
1441 /// Exchange the two operands to this instruction in such a way that it does
1442 /// not modify the semantics of the instruction. The predicate value may be
1443 /// changed to retain the same result if the predicate is order dependent
1444 /// (e.g. ult).
1445 /// Swap operands and adjust predicate.
1446 void swapOperands() {
1447 setPredicate(getSwappedPredicate());
1448 Op<0>().swap(Op<1>());
1449 }
1450
1451 /// Returns the sequence of all FCmp predicates.
1452 ///
1453 static auto predicates() { return FCmpPredicates(); }
1454
1455 /// Return result of `LHS Pred RHS` comparison.
1456 static bool compare(const APFloat &LHS, const APFloat &RHS,
1457 FCmpInst::Predicate Pred);
1458
1459 /// Methods for support type inquiry through isa, cast, and dyn_cast:
1460 static bool classof(const Instruction *I) {
1461 return I->getOpcode() == Instruction::FCmp;
1462 }
1463 static bool classof(const Value *V) {
1464 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1465 }
1466};
1467
1468//===----------------------------------------------------------------------===//
1469/// This class represents a function call, abstracting a target
1470/// machine's calling convention. This class uses low bit of the SubClassData
1471/// field to indicate whether or not this is a tail call. The rest of the bits
1472/// hold the calling convention of the call.
1473///
1474class CallInst : public CallBase {
1475 CallInst(const CallInst &CI);
1476
1477 /// Construct a CallInst given a range of arguments.
1478 /// Construct a CallInst from a range of arguments
1479 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1480 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1481 Instruction *InsertBefore);
1482
1483 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1484 const Twine &NameStr, Instruction *InsertBefore)
1485 : CallInst(Ty, Func, Args, None, NameStr, InsertBefore) {}
1486
1487 /// Construct a CallInst given a range of arguments.
1488 /// Construct a CallInst from a range of arguments
1489 inline CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1490 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1491 BasicBlock *InsertAtEnd);
1492
1493 explicit CallInst(FunctionType *Ty, Value *F, const Twine &NameStr,
1494 Instruction *InsertBefore);
1495
1496 CallInst(FunctionType *ty, Value *F, const Twine &NameStr,
1497 BasicBlock *InsertAtEnd);
1498
1499 void init(FunctionType *FTy, Value *Func, ArrayRef<Value *> Args,
1500 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr);
1501 void init(FunctionType *FTy, Value *Func, const Twine &NameStr);
1502
1503 /// Compute the number of operands to allocate.
1504 static int ComputeNumOperands(int NumArgs, int NumBundleInputs = 0) {
1505 // We need one operand for the called function, plus the input operand
1506 // counts provided.
1507 return 1 + NumArgs + NumBundleInputs;
1508 }
1509
1510protected:
1511 // Note: Instruction needs to be a friend here to call cloneImpl.
1512 friend class Instruction;
1513
1514 CallInst *cloneImpl() const;
1515
1516public:
1517 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr = "",
1518 Instruction *InsertBefore = nullptr) {
1519 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertBefore);
1520 }
1521
1522 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1523 const Twine &NameStr,
1524 Instruction *InsertBefore = nullptr) {
1525 return new (ComputeNumOperands(Args.size()))
1526 CallInst(Ty, Func, Args, None, NameStr, InsertBefore);
1527 }
1528
1529 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1530 ArrayRef<OperandBundleDef> Bundles = None,
1531 const Twine &NameStr = "",
1532 Instruction *InsertBefore = nullptr) {
1533 const int NumOperands =
1534 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1535 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1536
1537 return new (NumOperands, DescriptorBytes)
1538 CallInst(Ty, Func, Args, Bundles, NameStr, InsertBefore);
1539 }
1540
1541 static CallInst *Create(FunctionType *Ty, Value *F, const Twine &NameStr,
1542 BasicBlock *InsertAtEnd) {
1543 return new (ComputeNumOperands(0)) CallInst(Ty, F, NameStr, InsertAtEnd);
1544 }
1545
1546 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1547 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1548 return new (ComputeNumOperands(Args.size()))
1549 CallInst(Ty, Func, Args, None, NameStr, InsertAtEnd);
1550 }
1551
1552 static CallInst *Create(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1553 ArrayRef<OperandBundleDef> Bundles,
1554 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1555 const int NumOperands =
1556 ComputeNumOperands(Args.size(), CountBundleInputs(Bundles));
1557 const unsigned DescriptorBytes = Bundles.size() * sizeof(BundleOpInfo);
1558
1559 return new (NumOperands, DescriptorBytes)
1560 CallInst(Ty, Func, Args, Bundles, NameStr, InsertAtEnd);
1561 }
1562
1563 static CallInst *Create(FunctionCallee Func, const Twine &NameStr = "",
1564 Instruction *InsertBefore = nullptr) {
1565 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1566 InsertBefore);
1567 }
1568
1569 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1570 ArrayRef<OperandBundleDef> Bundles = None,
1571 const Twine &NameStr = "",
1572 Instruction *InsertBefore = nullptr) {
1573 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1574 NameStr, InsertBefore);
1575 }
1576
1577 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1578 const Twine &NameStr,
1579 Instruction *InsertBefore = nullptr) {
1580 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1581 InsertBefore);
1582 }
1583
1584 static CallInst *Create(FunctionCallee Func, const Twine &NameStr,
1585 BasicBlock *InsertAtEnd) {
1586 return Create(Func.getFunctionType(), Func.getCallee(), NameStr,
1587 InsertAtEnd);
1588 }
1589
1590 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1591 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1592 return Create(Func.getFunctionType(), Func.getCallee(), Args, NameStr,
1593 InsertAtEnd);
1594 }
1595
1596 static CallInst *Create(FunctionCallee Func, ArrayRef<Value *> Args,
1597 ArrayRef<OperandBundleDef> Bundles,
1598 const Twine &NameStr, BasicBlock *InsertAtEnd) {
1599 return Create(Func.getFunctionType(), Func.getCallee(), Args, Bundles,
1600 NameStr, InsertAtEnd);
1601 }
1602
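A minimal sketch of the FunctionCallee-based factories above (hypothetical helper; the function name "f" and its signature are made up for illustration):

  // Declare (or reuse) `i32 f(i32)` in M and emit a call to it.
  CallInst *emitCallToF(Module &M, Value *Arg, Instruction *InsertPt) {
    Type *I32 = Type::getInt32Ty(M.getContext());
    FunctionCallee F =
        M.getOrInsertFunction("f", FunctionType::get(I32, {I32}, false));
    return CallInst::Create(F, {Arg}, "call.f", InsertPt);
  }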
1603 /// Create a clone of \p CI with a different set of operand bundles and
1604 /// insert it before \p InsertPt.
1605 ///
1606  /// The returned call instruction is identical to \p CI in every way except that
1607 /// the operand bundles for the new instruction are set to the operand bundles
1608 /// in \p Bundles.
1609 static CallInst *Create(CallInst *CI, ArrayRef<OperandBundleDef> Bundles,
1610 Instruction *InsertPt = nullptr);
1611
1612 /// Generate the IR for a call to malloc:
1613 /// 1. Compute the malloc call's argument as the specified type's size,
1614 /// possibly multiplied by the array size if the array size is not
1615 /// constant 1.
1616 /// 2. Call malloc with that argument.
1617 /// 3. Bitcast the result of the malloc call to the specified type.
1618 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1619 Type *AllocTy, Value *AllocSize,
1620 Value *ArraySize = nullptr,
1621 Function *MallocF = nullptr,
1622 const Twine &Name = "");
1623 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1624 Type *AllocTy, Value *AllocSize,
1625 Value *ArraySize = nullptr,
1626 Function *MallocF = nullptr,
1627 const Twine &Name = "");
1628 static Instruction *CreateMalloc(Instruction *InsertBefore, Type *IntPtrTy,
1629 Type *AllocTy, Value *AllocSize,
1630 Value *ArraySize = nullptr,
1631 ArrayRef<OperandBundleDef> Bundles = None,
1632 Function *MallocF = nullptr,
1633 const Twine &Name = "");
1634 static Instruction *CreateMalloc(BasicBlock *InsertAtEnd, Type *IntPtrTy,
1635 Type *AllocTy, Value *AllocSize,
1636 Value *ArraySize = nullptr,
1637 ArrayRef<OperandBundleDef> Bundles = None,
1638 Function *MallocF = nullptr,
1639 const Twine &Name = "");
1640 /// Generate the IR for a call to the builtin free function.
1641 static Instruction *CreateFree(Value *Source, Instruction *InsertBefore);
1642 static Instruction *CreateFree(Value *Source, BasicBlock *InsertAtEnd);
1643 static Instruction *CreateFree(Value *Source,
1644 ArrayRef<OperandBundleDef> Bundles,
1645 Instruction *InsertBefore);
1646 static Instruction *CreateFree(Value *Source,
1647 ArrayRef<OperandBundleDef> Bundles,
1648 BasicBlock *InsertAtEnd);
1649
1650 // Note that 'musttail' implies 'tail'.
1651 enum TailCallKind : unsigned {
1652 TCK_None = 0,
1653 TCK_Tail = 1,
1654 TCK_MustTail = 2,
1655 TCK_NoTail = 3,
1656 TCK_LAST = TCK_NoTail
1657 };
1658
1659 using TailCallKindField = Bitfield::Element<TailCallKind, 0, 2, TCK_LAST>;
1660 static_assert(
1661 Bitfield::areContiguous<TailCallKindField, CallBase::CallingConvField>(),
1662 "Bitfields must be contiguous");
1663
1664 TailCallKind getTailCallKind() const {
1665 return getSubclassData<TailCallKindField>();
1666 }
1667
1668 bool isTailCall() const {
1669 TailCallKind Kind = getTailCallKind();
1670 return Kind == TCK_Tail || Kind == TCK_MustTail;
1671 }
1672
1673 bool isMustTailCall() const { return getTailCallKind() == TCK_MustTail; }
1674
1675 bool isNoTailCall() const { return getTailCallKind() == TCK_NoTail; }
1676
1677 void setTailCallKind(TailCallKind TCK) {
1678 setSubclassData<TailCallKindField>(TCK);
1679 }
1680
1681 void setTailCall(bool IsTc = true) {
1682 setTailCallKind(IsTc ? TCK_Tail : TCK_None);
1683 }
1684
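For example (hypothetical helper, given an existing CallInst):

  // 'musttail' implies 'tail' under the TailCallKind encoding above.
  void forceMustTail(CallInst *CI) {
    CI->setTailCallKind(CallInst::TCK_MustTail);
    assert(CI->isMustTailCall() && CI->isTailCall());
  }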
1685 /// Return true if the call can return twice
1686 bool canReturnTwice() const { return hasFnAttr(Attribute::ReturnsTwice); }
1687 void setCanReturnTwice() { addFnAttr(Attribute::ReturnsTwice); }
1688
1689 // Methods for support type inquiry through isa, cast, and dyn_cast:
1690 static bool classof(const Instruction *I) {
1691 return I->getOpcode() == Instruction::Call;
1692 }
1693 static bool classof(const Value *V) {
1694 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1695 }
1696
1697 /// Updates profile metadata by scaling it by \p S / \p T.
1698 void updateProfWeight(uint64_t S, uint64_t T);
1699
1700private:
1701 // Shadow Instruction::setInstructionSubclassData with a private forwarding
1702 // method so that subclasses cannot accidentally use it.
1703 template <typename Bitfield>
1704 void setSubclassData(typename Bitfield::Type Value) {
1705 Instruction::setSubclassData<Bitfield>(Value);
1706 }
1707};
1708
1709CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1710 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1711 BasicBlock *InsertAtEnd)
1712 : CallBase(Ty->getReturnType(), Instruction::Call,
1713 OperandTraits<CallBase>::op_end(this) -
1714 (Args.size() + CountBundleInputs(Bundles) + 1),
1715 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1716 InsertAtEnd) {
1717 init(Ty, Func, Args, Bundles, NameStr);
1718}
1719
1720CallInst::CallInst(FunctionType *Ty, Value *Func, ArrayRef<Value *> Args,
1721 ArrayRef<OperandBundleDef> Bundles, const Twine &NameStr,
1722 Instruction *InsertBefore)
1723 : CallBase(Ty->getReturnType(), Instruction::Call,
1724 OperandTraits<CallBase>::op_end(this) -
1725 (Args.size() + CountBundleInputs(Bundles) + 1),
1726 unsigned(Args.size() + CountBundleInputs(Bundles) + 1),
1727 InsertBefore) {
1728 init(Ty, Func, Args, Bundles, NameStr);
1729}
1730
1731//===----------------------------------------------------------------------===//
1732// SelectInst Class
1733//===----------------------------------------------------------------------===//
1734
1735/// This class represents the LLVM 'select' instruction.
1736///
1737class SelectInst : public Instruction {
1738 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1739 Instruction *InsertBefore)
1740 : Instruction(S1->getType(), Instruction::Select,
1741 &Op<0>(), 3, InsertBefore) {
1742 init(C, S1, S2);
1743 setName(NameStr);
1744 }
1745
1746 SelectInst(Value *C, Value *S1, Value *S2, const Twine &NameStr,
1747 BasicBlock *InsertAtEnd)
1748 : Instruction(S1->getType(), Instruction::Select,
1749 &Op<0>(), 3, InsertAtEnd) {
1750 init(C, S1, S2);
1751 setName(NameStr);
1752 }
1753
1754 void init(Value *C, Value *S1, Value *S2) {
1755    assert(!areInvalidOperands(C, S1, S2) && "Invalid operands for select");
1756 Op<0>() = C;
1757 Op<1>() = S1;
1758 Op<2>() = S2;
1759 }
1760
1761protected:
1762 // Note: Instruction needs to be a friend here to call cloneImpl.
1763 friend class Instruction;
1764
1765 SelectInst *cloneImpl() const;
1766
1767public:
1768 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1769 const Twine &NameStr = "",
1770 Instruction *InsertBefore = nullptr,
1771 Instruction *MDFrom = nullptr) {
1772 SelectInst *Sel = new(3) SelectInst(C, S1, S2, NameStr, InsertBefore);
1773 if (MDFrom)
1774 Sel->copyMetadata(*MDFrom);
1775 return Sel;
1776 }
1777
1778 static SelectInst *Create(Value *C, Value *S1, Value *S2,
1779 const Twine &NameStr,
1780 BasicBlock *InsertAtEnd) {
1781 return new(3) SelectInst(C, S1, S2, NameStr, InsertAtEnd);
1782 }
1783
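A usage sketch combining ICmpInst with the factory above (hypothetical helper; A and B are assumed to share the same integer type, which areInvalidOperands would otherwise reject):

  // Emit `%max = select i1 %cmp, A, B` computing the signed maximum.
  Value *emitSMax(Value *A, Value *B, Instruction *InsertPt) {
    ICmpInst *Cmp = new ICmpInst(InsertPt, ICmpInst::ICMP_SGT, A, B, "cmp");
    return SelectInst::Create(Cmp, A, B, "max", InsertPt);
  }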
1784 const Value *getCondition() const { return Op<0>(); }
1785 const Value *getTrueValue() const { return Op<1>(); }
1786 const Value *getFalseValue() const { return Op<2>(); }
1787 Value *getCondition() { return Op<0>(); }
1788 Value *getTrueValue() { return Op<1>(); }
1789 Value *getFalseValue() { return Op<2>(); }
1790
1791 void setCondition(Value *V) { Op<0>() = V; }
1792 void setTrueValue(Value *V) { Op<1>() = V; }
1793 void setFalseValue(Value *V) { Op<2>() = V; }
1794
1795 /// Swap the true and false values of the select instruction.
1796 /// This doesn't swap prof metadata.
1797 void swapValues() { Op<1>().swap(Op<2>()); }
1798
1799 /// Return a string if the specified operands are invalid
1800 /// for a select operation, otherwise return null.
1801 static const char *areInvalidOperands(Value *Cond, Value *True, Value *False);
1802
1803 /// Transparently provide more efficient getOperand methods.
1804  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1805
1806 OtherOps getOpcode() const {
1807 return static_cast<OtherOps>(Instruction::getOpcode());
1808 }
1809
1810 // Methods for support type inquiry through isa, cast, and dyn_cast:
1811 static bool classof(const Instruction *I) {
1812 return I->getOpcode() == Instruction::Select;
1813 }
1814 static bool classof(const Value *V) {
1815 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1816 }
1817};
1818
1819template <>
1820struct OperandTraits<SelectInst> : public FixedNumOperandTraits<SelectInst, 3> {
1821};
1822
1823DEFINE_TRANSPARENT_OPERAND_ACCESSORS(SelectInst, Value)
1824
1825//===----------------------------------------------------------------------===//
1826// VAArgInst Class
1827//===----------------------------------------------------------------------===//
1828
1829/// This class represents the va_arg llvm instruction, which returns
1830/// an argument of the specified type given a va_list and increments that list
1831///
1832class VAArgInst : public UnaryInstruction {
1833protected:
1834 // Note: Instruction needs to be a friend here to call cloneImpl.
1835 friend class Instruction;
1836
1837 VAArgInst *cloneImpl() const;
1838
1839public:
1840 VAArgInst(Value *List, Type *Ty, const Twine &NameStr = "",
1841 Instruction *InsertBefore = nullptr)
1842 : UnaryInstruction(Ty, VAArg, List, InsertBefore) {
1843 setName(NameStr);
1844 }
1845
1846 VAArgInst(Value *List, Type *Ty, const Twine &NameStr,
1847 BasicBlock *InsertAtEnd)
1848 : UnaryInstruction(Ty, VAArg, List, InsertAtEnd) {
1849 setName(NameStr);
1850 }
1851
1852 Value *getPointerOperand() { return getOperand(0); }
1853 const Value *getPointerOperand() const { return getOperand(0); }
1854 static unsigned getPointerOperandIndex() { return 0U; }
1855
1856 // Methods for support type inquiry through isa, cast, and dyn_cast:
1857 static bool classof(const Instruction *I) {
1858 return I->getOpcode() == VAArg;
1859 }
1860 static bool classof(const Value *V) {
1861 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1862 }
1863};
1864
1865//===----------------------------------------------------------------------===//
1866// ExtractElementInst Class
1867//===----------------------------------------------------------------------===//
1868
1869/// This instruction extracts a single (scalar)
1870/// element from a VectorType value
1871///
1872class ExtractElementInst : public Instruction {
1873 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr = "",
1874 Instruction *InsertBefore = nullptr);
1875 ExtractElementInst(Value *Vec, Value *Idx, const Twine &NameStr,
1876 BasicBlock *InsertAtEnd);
1877
1878protected:
1879 // Note: Instruction needs to be a friend here to call cloneImpl.
1880 friend class Instruction;
1881
1882 ExtractElementInst *cloneImpl() const;
1883
1884public:
1885 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1886 const Twine &NameStr = "",
1887 Instruction *InsertBefore = nullptr) {
1888 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertBefore);
1889 }
1890
1891 static ExtractElementInst *Create(Value *Vec, Value *Idx,
1892 const Twine &NameStr,
1893 BasicBlock *InsertAtEnd) {
1894 return new(2) ExtractElementInst(Vec, Idx, NameStr, InsertAtEnd);
1895 }
1896
1897 /// Return true if an extractelement instruction can be
1898 /// formed with the specified operands.
1899 static bool isValidOperands(const Value *Vec, const Value *Idx);
1900
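A defensive usage sketch (hypothetical helper): validate the operands before constructing, mirroring the check the factory expects to hold.

  // Returns nullptr instead of constructing with invalid operands.
  Value *extractLane(Value *Vec, Value *Idx, Instruction *InsertPt) {
    if (!ExtractElementInst::isValidOperands(Vec, Idx))
      return nullptr;
    return ExtractElementInst::Create(Vec, Idx, "lane", InsertPt);
  }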
1901 Value *getVectorOperand() { return Op<0>(); }
1902 Value *getIndexOperand() { return Op<1>(); }
1903 const Value *getVectorOperand() const { return Op<0>(); }
1904 const Value *getIndexOperand() const { return Op<1>(); }
1905
1906 VectorType *getVectorOperandType() const {
1907 return cast<VectorType>(getVectorOperand()->getType());
1908 }
1909
1910 /// Transparently provide more efficient getOperand methods.
1911  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1912
1913 // Methods for support type inquiry through isa, cast, and dyn_cast:
1914 static bool classof(const Instruction *I) {
1915 return I->getOpcode() == Instruction::ExtractElement;
1916 }
1917 static bool classof(const Value *V) {
1918 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1919 }
1920};
1921
1922template <>
1923struct OperandTraits<ExtractElementInst> :
1924 public FixedNumOperandTraits<ExtractElementInst, 2> {
1925};
1926
1927DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ExtractElementInst, Value)
1928
1929//===----------------------------------------------------------------------===//
1930// InsertElementInst Class
1931//===----------------------------------------------------------------------===//
1932
1933/// This instruction inserts a single (scalar)
1934/// element into a VectorType value
1935///
1936class InsertElementInst : public Instruction {
1937 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx,
1938 const Twine &NameStr = "",
1939 Instruction *InsertBefore = nullptr);
1940 InsertElementInst(Value *Vec, Value *NewElt, Value *Idx, const Twine &NameStr,
1941 BasicBlock *InsertAtEnd);
1942
1943protected:
1944 // Note: Instruction needs to be a friend here to call cloneImpl.
1945 friend class Instruction;
1946
1947 InsertElementInst *cloneImpl() const;
1948
1949public:
1950 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1951 const Twine &NameStr = "",
1952 Instruction *InsertBefore = nullptr) {
1953 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertBefore);
1954 }
1955
1956 static InsertElementInst *Create(Value *Vec, Value *NewElt, Value *Idx,
1957 const Twine &NameStr,
1958 BasicBlock *InsertAtEnd) {
1959 return new(3) InsertElementInst(Vec, NewElt, Idx, NameStr, InsertAtEnd);
1960 }
1961
1962 /// Return true if an insertelement instruction can be
1963 /// formed with the specified operands.
1964 static bool isValidOperands(const Value *Vec, const Value *NewElt,
1965 const Value *Idx);
1966
1967 /// Overload to return most specific vector type.
1968 ///
1969 VectorType *getType() const {
1970 return cast<VectorType>(Instruction::getType());
1971 }
1972
1973 /// Transparently provide more efficient getOperand methods.
1974  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
1975
1976 // Methods for support type inquiry through isa, cast, and dyn_cast:
1977 static bool classof(const Instruction *I) {
1978 return I->getOpcode() == Instruction::InsertElement;
1979 }
1980 static bool classof(const Value *V) {
1981 return isa<Instruction>(V) && classof(cast<Instruction>(V));
1982 }
1983};
1984
1985template <>
1986struct OperandTraits<InsertElementInst> :
1987 public FixedNumOperandTraits<InsertElementInst, 3> {
1988};
1989
1990DEFINE_TRANSPARENT_OPERAND_ACCESSORS(InsertElementInst, Value)
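// [Editor's sketch, not part of the original header.] Minimal hedged usage:
// inserting a scalar Elt into lane 0 of a vector Vec. Vec, Elt, Ctx, and
// InsertPt are hypothetical caller-provided values.
//   Value *Idx = ConstantInt::get(Type::getInt32Ty(Ctx), 0);
//   if (InsertElementInst::isValidOperands(Vec, Elt, Idx))
//     auto *IE = InsertElementInst::Create(Vec, Elt, Idx, "ins0", InsertPt);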
1991
1992//===----------------------------------------------------------------------===//
1993// ShuffleVectorInst Class
1994//===----------------------------------------------------------------------===//
1995
1996constexpr int UndefMaskElem = -1;
1997
1998/// This instruction constructs a fixed permutation of two
1999/// input vectors.
2000///
2001/// For each element of the result vector, the shuffle mask selects an element
2002/// from one of the input vectors to copy to the result. Non-negative elements
2003/// in the mask represent an index into the concatenated pair of input vectors.
2004/// UndefMaskElem (-1) specifies that the result element is undefined.
2005///
2006/// For scalable vectors, all the elements of the mask must be 0 or -1. This
2007/// requirement may be relaxed in the future.
2008class ShuffleVectorInst : public Instruction {
2009 SmallVector<int, 4> ShuffleMask;
2010 Constant *ShuffleMaskForBitcode;
2011
2012protected:
2013 // Note: Instruction needs to be a friend here to call cloneImpl.
2014 friend class Instruction;
2015
2016 ShuffleVectorInst *cloneImpl() const;
2017
2018public:
2019 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr = "",
2020 Instruction *InsertBefore = nullptr);
2021 ShuffleVectorInst(Value *V1, Value *Mask, const Twine &NameStr,
2022 BasicBlock *InsertAtEnd);
2023 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr = "",
2024 Instruction *InsertBefore = nullptr);
2025 ShuffleVectorInst(Value *V1, ArrayRef<int> Mask, const Twine &NameStr,
2026 BasicBlock *InsertAtEnd);
2027 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2028 const Twine &NameStr = "",
2029 Instruction *InsertBefore = nullptr);
2030 ShuffleVectorInst(Value *V1, Value *V2, Value *Mask,
2031 const Twine &NameStr, BasicBlock *InsertAtEnd);
2032 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2033 const Twine &NameStr = "",
2034 Instruction *InsertBefore = nullptr);
2035 ShuffleVectorInst(Value *V1, Value *V2, ArrayRef<int> Mask,
2036 const Twine &NameStr, BasicBlock *InsertAtEnd);
2037
2038 void *operator new(size_t S) { return User::operator new(S, 2); }
2039 void operator delete(void *Ptr) { return User::operator delete(Ptr); }
2040
2041 /// Swap the operands and adjust the mask to preserve the semantics
2042 /// of the instruction.
2043 void commute();
2044
2045 /// Return true if a shufflevector instruction can be
2046 /// formed with the specified operands.
2047 static bool isValidOperands(const Value *V1, const Value *V2,
2048 const Value *Mask);
2049 static bool isValidOperands(const Value *V1, const Value *V2,
2050 ArrayRef<int> Mask);
2051
2052 /// Overload to return most specific vector type.
2053 ///
2054 VectorType *getType() const {
2055 return cast<VectorType>(Instruction::getType());
2056 }
2057
2058 /// Transparently provide more efficient getOperand methods.
2059 DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
2060
2061 /// Return the shuffle mask value of this instruction for the given element
2062 /// index. Return UndefMaskElem if the element is undef.
2063 int getMaskValue(unsigned Elt) const { return ShuffleMask[Elt]; }
2064
2065 /// Convert the input shuffle mask operand to a vector of integers. Undefined
2066 /// elements of the mask are returned as UndefMaskElem.
2067 static void getShuffleMask(const Constant *Mask,
2068 SmallVectorImpl<int> &Result);
2069
2070 /// Return the mask for this instruction as a vector of integers. Undefined
2071 /// elements of the mask are returned as UndefMaskElem.
2072 void getShuffleMask(SmallVectorImpl<int> &Result) const {
2073 Result.assign(ShuffleMask.begin(), ShuffleMask.end());
2074 }
2075
2076 /// Return the mask for this instruction, for use in bitcode.
2077 ///
2078 /// TODO: This is temporary until we decide a new bitcode encoding for
2079 /// shufflevector.
2080 Constant *getShuffleMaskForBitcode() const { return ShuffleMaskForBitcode; }
2081
2082 static Constant *convertShuffleMaskForBitcode(ArrayRef<int> Mask,
2083 Type *ResultTy);
2084
2085 void setShuffleMask(ArrayRef<int> Mask);
2086
2087 ArrayRef<int> getShuffleMask() const { return ShuffleMask; }
2088
2089 /// Return true if this shuffle returns a vector with a different number of
2090 /// elements than its source vectors.
2091 /// Examples: shufflevector <4 x n> A, <4 x n> B, <1,2,3>
2092 /// shufflevector <4 x n> A, <4 x n> B, <1,2,3,4,5>
2093 bool changesLength() const {
2094 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2095 ->getElementCount()
2096 .getKnownMinValue();
2097 unsigned NumMaskElts = ShuffleMask.size();
2098 return NumSourceElts != NumMaskElts;
2099 }
2100
2101 /// Return true if this shuffle returns a vector with a greater number of
2102 /// elements than its source vectors.
2103 /// Example: shufflevector <2 x n> A, <2 x n> B, <1,2,3>
2104 bool increasesLength() const {
2105 unsigned NumSourceElts = cast<VectorType>(Op<0>()->getType())
2106 ->getElementCount()
2107 .getKnownMinValue();
2108 unsigned NumMaskElts = ShuffleMask.size();
2109 return NumSourceElts < NumMaskElts;
2110 }
2111
2112 /// Return true if this shuffle mask chooses elements from exactly one source
2113 /// vector.
2114 /// Example: <7,5,undef,7>
2115 /// This assumes that vector operands are the same length as the mask.
2116 static bool isSingleSourceMask(ArrayRef<int> Mask);
2117 static bool isSingleSourceMask(const Constant *Mask) {
2118 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2119 SmallVector<int, 16> MaskAsInts;
2120 getShuffleMask(Mask, MaskAsInts);
2121 return isSingleSourceMask(MaskAsInts);
2122 }
2123
2124 /// Return true if this shuffle chooses elements from exactly one source
2125 /// vector without changing the length of that vector.
2126 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,0,undef,3>
2127 /// TODO: Optionally allow length-changing shuffles.
2128 bool isSingleSource() const {
2129 return !changesLength() && isSingleSourceMask(ShuffleMask);
2130 }
2131
2132 /// Return true if this shuffle mask chooses elements from exactly one source
2133 /// vector without lane crossings. A shuffle using this mask is not
2134 /// necessarily a no-op because it may change the number of elements from its
2135 /// input vectors or it may provide demanded bits knowledge via undef lanes.
2136 /// Example: <undef,undef,2,3>
2137 static bool isIdentityMask(ArrayRef<int> Mask);
2138 static bool isIdentityMask(const Constant *Mask) {
2139 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2140
2141 // Not possible to express a shuffle mask for a scalable vector for this
2142 // case.
2143 if (isa<ScalableVectorType>(Mask->getType()))
2144 return false;
2145
2146 SmallVector<int, 16> MaskAsInts;
2147 getShuffleMask(Mask, MaskAsInts);
2148 return isIdentityMask(MaskAsInts);
2149 }
2150
2151 /// Return true if this shuffle chooses elements from exactly one source
2152 /// vector without lane crossings and does not change the number of elements
2153 /// from its input vectors.
2154 /// Example: shufflevector <4 x n> A, <4 x n> B, <4,undef,6,undef>
2155 bool isIdentity() const {
2156 // Not possible to express a shuffle mask for a scalable vector for this
2157 // case.
2158 if (isa<ScalableVectorType>(getType()))
2159 return false;
2160
2161 return !changesLength() && isIdentityMask(ShuffleMask);
2162 }
2163
2164 /// Return true if this shuffle lengthens exactly one source vector with
2165 /// undefs in the high elements.
2166 bool isIdentityWithPadding() const;
2167
2168 /// Return true if this shuffle extracts the first N elements of exactly one
2169 /// source vector.
2170 bool isIdentityWithExtract() const;
2171
2172 /// Return true if this shuffle concatenates its 2 source vectors. This
2173 /// returns false if either input is undefined. In that case, the shuffle is
2174 /// better classified as an identity with padding operation.
2175 bool isConcat() const;
2176
2177 /// Return true if this shuffle mask chooses elements from its source vectors
2178 /// without lane crossings. A shuffle using this mask would be
2179 /// equivalent to a vector select with a constant condition operand.
2180 /// Example: <4,1,6,undef>
2181 /// This returns false if the mask does not choose from both input vectors.
2182 /// In that case, the shuffle is better classified as an identity shuffle.
2183 /// This assumes that vector operands are the same length as the mask
2184 /// (a length-changing shuffle can never be equivalent to a vector select).
2185 static bool isSelectMask(ArrayRef<int> Mask);
2186 static bool isSelectMask(const Constant *Mask) {
2187 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2188 SmallVector<int, 16> MaskAsInts;
2189 getShuffleMask(Mask, MaskAsInts);
2190 return isSelectMask(MaskAsInts);
2191 }
2192
2193 /// Return true if this shuffle chooses elements from its source vectors
2194 /// without lane crossings and all operands have the same number of elements.
2195 /// In other words, this shuffle is equivalent to a vector select with a
2196 /// constant condition operand.
2197 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,1,6,3>
2198 /// This returns false if the mask does not choose from both input vectors.
2199 /// In that case, the shuffle is better classified as an identity shuffle.
2200 /// TODO: Optionally allow length-changing shuffles.
2201 bool isSelect() const {
2202 return !changesLength() && isSelectMask(ShuffleMask);
2203 }
2204
2205 /// Return true if this shuffle mask swaps the order of elements from exactly
2206 /// one source vector.
2207 /// Example: <7,6,undef,4>
2208 /// This assumes that vector operands are the same length as the mask.
2209 static bool isReverseMask(ArrayRef<int> Mask);
2210 static bool isReverseMask(const Constant *Mask) {
2211 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2212 SmallVector<int, 16> MaskAsInts;
2213 getShuffleMask(Mask, MaskAsInts);
2214 return isReverseMask(MaskAsInts);
2215 }
2216
2217 /// Return true if this shuffle swaps the order of elements from exactly
2218 /// one source vector.
2219 /// Example: shufflevector <4 x n> A, <4 x n> B, <3,undef,1,undef>
2220 /// TODO: Optionally allow length-changing shuffles.
2221 bool isReverse() const {
2222 return !changesLength() && isReverseMask(ShuffleMask);
2223 }
2224
2225 /// Return true if this shuffle mask chooses all elements with the same value
2226 /// as the first element of exactly one source vector.
2227 /// Example: <4,undef,undef,4>
2228 /// This assumes that vector operands are the same length as the mask.
2229 static bool isZeroEltSplatMask(ArrayRef<int> Mask);
2230 static bool isZeroEltSplatMask(const Constant *Mask) {
2231 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2232 SmallVector<int, 16> MaskAsInts;
2233 getShuffleMask(Mask, MaskAsInts);
2234 return isZeroEltSplatMask(MaskAsInts);
2235 }
2236
2237 /// Return true if all elements of this shuffle are the same value as the
2238 /// first element of exactly one source vector without changing the length
2239 /// of that vector.
2240 /// Example: shufflevector <4 x n> A, <4 x n> B, <undef,0,undef,0>
2241 /// TODO: Optionally allow length-changing shuffles.
2242 /// TODO: Optionally allow splats from other elements.
2243 bool isZeroEltSplat() const {
2244 return !changesLength() && isZeroEltSplatMask(ShuffleMask);
2245 }
2246
2247 /// Return true if this shuffle mask is a transpose mask.
2248 /// Transpose vector masks transpose a 2xn matrix. They read corresponding
2249 /// even- or odd-numbered vector elements from two n-dimensional source
2250 /// vectors and write each result into consecutive elements of an
2251 /// n-dimensional destination vector. Two shuffles are necessary to complete
2252 /// the transpose, one for the even elements and another for the odd elements.
2253 /// This description closely follows how the TRN1 and TRN2 AArch64
2254 /// instructions operate.
2255 ///
2256 /// For example, a simple 2x2 matrix can be transposed with:
2257 ///
2258 /// ; Original matrix
2259 /// m0 = < a, b >
2260 /// m1 = < c, d >
2261 ///
2262 /// ; Transposed matrix
2263 /// t0 = < a, c > = shufflevector m0, m1, < 0, 2 >
2264 /// t1 = < b, d > = shufflevector m0, m1, < 1, 3 >
2265 ///
2266 /// For matrices having greater than n columns, the resulting nx2 transposed
2267 /// matrix is stored in two result vectors such that one vector contains
2268 /// interleaved elements from all the even-numbered rows and the other vector
2269 /// contains interleaved elements from all the odd-numbered rows. For example,
2270 /// a 2x4 matrix can be transposed with:
2271 ///
2272 /// ; Original matrix
2273 /// m0 = < a, b, c, d >
2274 /// m1 = < e, f, g, h >
2275 ///
2276 /// ; Transposed matrix
2277 /// t0 = < a, e, c, g > = shufflevector m0, m1 < 0, 4, 2, 6 >
2278 /// t1 = < b, f, d, h > = shufflevector m0, m1 < 1, 5, 3, 7 >
2279 static bool isTransposeMask(ArrayRef<int> Mask);
2280 static bool isTransposeMask(const Constant *Mask) {
2281 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2282 SmallVector<int, 16> MaskAsInts;
2283 getShuffleMask(Mask, MaskAsInts);
2284 return isTransposeMask(MaskAsInts);
2285 }
2286
2287 /// Return true if this shuffle transposes the elements of its inputs without
2288 /// changing the length of the vectors. This operation may also be known as a
2289 /// merge or interleave. See the description for isTransposeMask() for the
2290 /// exact specification.
2291 /// Example: shufflevector <4 x n> A, <4 x n> B, <0,4,2,6>
2292 bool isTranspose() const {
2293 return !changesLength() && isTransposeMask(ShuffleMask);
2294 }
2295
2296 /// Return true if this shuffle mask is a splice mask, concatenating the two
2297 /// inputs together and then extracting an original width vector starting from
2298 /// the splice index.
2299 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2300 static bool isSpliceMask(ArrayRef<int> Mask, int &Index);
2301 static bool isSpliceMask(const Constant *Mask, int &Index) {
2302 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2303 SmallVector<int, 16> MaskAsInts;
2304 getShuffleMask(Mask, MaskAsInts);
2305 return isSpliceMask(MaskAsInts, Index);
2306 }
2307
2308 /// Return true if this shuffle splices two inputs without changing the length
2309 /// of the vectors. This operation concatenates the two inputs together and
2310 /// then extracts an original width vector starting from the splice index.
2311 /// Example: shufflevector <4 x n> A, <4 x n> B, <1,2,3,4>
2312 bool isSplice(int &Index) const {
2313 return !changesLength() && isSpliceMask(ShuffleMask, Index);
2314 }
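// [Editor's note: worked example, not part of the original header.] For
// <4 x n> inputs A and B, the mask <1,2,3,4> concatenates the operands to
// <A0,A1,A2,A3,B0,B1,B2,B3> and extracts four consecutive elements starting
// at splice index 1, yielding <A1,A2,A3,B0>.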
2315
2316 /// Return true if this shuffle mask is an extract subvector mask.
2317 /// A valid extract subvector mask returns a smaller vector from a single
2318 /// source operand. The base extraction index is returned as well.
2319 static bool isExtractSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2320 int &Index);
2321 static bool isExtractSubvectorMask(const Constant *Mask, int NumSrcElts,
2322 int &Index) {
2323 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2324 // Not possible to express a shuffle mask for a scalable vector for this
2325 // case.
2326 if (isa<ScalableVectorType>(Mask->getType()))
2327 return false;
2328 SmallVector<int, 16> MaskAsInts;
2329 getShuffleMask(Mask, MaskAsInts);
2330 return isExtractSubvectorMask(MaskAsInts, NumSrcElts, Index);
2331 }
2332
2333 /// Return true if this shuffle mask is an extract subvector mask.
2334 bool isExtractSubvectorMask(int &Index) const {
2335 // Not possible to express a shuffle mask for a scalable vector for this
2336 // case.
2337 if (isa<ScalableVectorType>(getType()))
2338 return false;
2339
2340 int NumSrcElts =
2341 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2342 return isExtractSubvectorMask(ShuffleMask, NumSrcElts, Index);
2343 }
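// [Editor's note: worked example, not part of the original header.] For a
// source with NumSrcElts = 4, the two-element mask <2,3> extracts a
// subvector from a single operand and sets Index = 2.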
2344
2345 /// Return true if this shuffle mask is an insert subvector mask.
2346 /// A valid insert subvector mask inserts the lowest elements of a second
2347 /// source operand into an in-place first source operand.
2348 /// Both the subvector width and the insertion index are returned.
2349 static bool isInsertSubvectorMask(ArrayRef<int> Mask, int NumSrcElts,
2350 int &NumSubElts, int &Index);
2351 static bool isInsertSubvectorMask(const Constant *Mask, int NumSrcElts,
2352 int &NumSubElts, int &Index) {
2353 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2354 // Not possible to express a shuffle mask for a scalable vector for this
2355 // case.
2356 if (isa<ScalableVectorType>(Mask->getType()))
2357 return false;
2358 SmallVector<int, 16> MaskAsInts;
2359 getShuffleMask(Mask, MaskAsInts);
2360 return isInsertSubvectorMask(MaskAsInts, NumSrcElts, NumSubElts, Index);
2361 }
2362
2363 /// Return true if this shuffle mask is an insert subvector mask.
2364 bool isInsertSubvectorMask(int &NumSubElts, int &Index) const {
2365 // Not possible to express a shuffle mask for a scalable vector for this
2366 // case.
2367 if (isa<ScalableVectorType>(getType()))
2368 return false;
2369
2370 int NumSrcElts =
2371 cast<FixedVectorType>(Op<0>()->getType())->getNumElements();
2372 return isInsertSubvectorMask(ShuffleMask, NumSrcElts, NumSubElts, Index);
2373 }
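// [Editor's note: worked example, not part of the original header.] With
// NumSrcElts = 4, the mask <0,4,5,3> keeps A0 and A3 in place and inserts
// B0,B1 (the lowest elements of B) at position 1, so NumSubElts = 2 and
// Index = 1.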
2374
2375 /// Return true if this shuffle mask replicates each of the \p VF elements
2376 /// in a vector \p ReplicationFactor times.
2377 /// For example, the mask for \p ReplicationFactor=3 and \p VF=4 is:
2378 /// <0,0,0,1,1,1,2,2,2,3,3,3>
2379 static bool isReplicationMask(ArrayRef<int> Mask, int &ReplicationFactor,
2380 int &VF);
2381 static bool isReplicationMask(const Constant *Mask, int &ReplicationFactor,
2382 int &VF) {
2383 assert(Mask->getType()->isVectorTy() && "Shuffle needs vector constant.");
2384 // Not possible to express a shuffle mask for a scalable vector for this
2385 // case.
2386 if (isa<ScalableVectorType>(Mask->getType()))
2387 return false;
2388 SmallVector<int, 16> MaskAsInts;
2389 getShuffleMask(Mask, MaskAsInts);
2390 return isReplicationMask(MaskAsInts, ReplicationFactor, VF);
2391 }
2392
2393 /// Return true if this shuffle mask is a replication mask.
2394 bool isReplicationMask(int &ReplicationFactor, int &VF) const;
2395
2396 /// Change values in a shuffle permute mask assuming the two vector operands
2397 /// of length InVecNumElts have swapped position.
2398 static void commuteShuffleMask(MutableArrayRef<int> Mask,
2399 unsigned InVecNumElts) {
2400 for (int &Idx : Mask) {
2401 if (Idx == -1)
2402 continue;
2403 Idx = Idx < (int)InVecNumElts ? Idx + InVecNumElts : Idx - InVecNumElts;
2404 assert(Idx >= 0 && Idx < (int)InVecNumElts * 2 &&
2405 "shufflevector mask index out of range");
2406 }
2407 }
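// [Editor's note: worked example, not part of the original header.] With
// InVecNumElts = 4, the mask <0,5,2,7> (lanes A0,B1,A2,B3) becomes
// <4,1,6,3> after commuting; once the two operands have swapped positions
// it still selects the same lanes. Entries of -1 (undef) are left alone.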
2408
2409 // Methods for support type inquiry through isa, cast, and dyn_cast:
2410 static bool classof(const Instruction *I) {
2411 return I->getOpcode() == Instruction::ShuffleVector;
2412 }
2413 static bool classof(const Value *V) {
2414 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2415 }
2416};
2417
2418template <>
2419struct OperandTraits<ShuffleVectorInst>
2420 : public FixedNumOperandTraits<ShuffleVectorInst, 2> {};
2421
2422DEFINE_TRANSPARENT_OPERAND_ACCESSORS(ShuffleVectorInst, Value)
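// [Editor's sketch, not part of the original header.] Minimal hedged usage:
// building a reverse shuffle of a <4 x i32> value V. V and InsertPt are
// hypothetical; the single-operand constructor supplies the second vector
// operand internally.
//   int ReverseMask[] = {3, 2, 1, 0};
//   auto *Shuf = new ShuffleVectorInst(V, ReverseMask, "rev", InsertPt);
//   assert(Shuf->isReverse());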
2423
2424//===----------------------------------------------------------------------===//
2425// ExtractValueInst Class
2426//===----------------------------------------------------------------------===//
2427
2428/// This instruction extracts a struct member or array
2429/// element value from an aggregate value.
2430///
2431class ExtractValueInst : public UnaryInstruction {
2432 SmallVector<unsigned, 4> Indices;
2433
2434 ExtractValueInst(const ExtractValueInst &EVI);
2435
2436 /// Constructors - Create an extractvalue instruction with a base aggregate
2437 /// value and a list of indices. The first ctor can optionally insert before
2438 /// an existing instruction, the second appends the new instruction to the
2439 /// specified BasicBlock.
2440 inline ExtractValueInst(Value *Agg,
2441 ArrayRef<unsigned> Idxs,
2442 const Twine &NameStr,
2443 Instruction *InsertBefore);
2444 inline ExtractValueInst(Value *Agg,
2445 ArrayRef<unsigned> Idxs,
2446 const Twine &NameStr, BasicBlock *InsertAtEnd);
2447
2448 void init(ArrayRef<unsigned> Idxs, const Twine &NameStr);
2449
2450protected:
2451 // Note: Instruction needs to be a friend here to call cloneImpl.
2452 friend class Instruction;
2453
2454 ExtractValueInst *cloneImpl() const;
2455
2456public:
2457 static ExtractValueInst *Create(Value *Agg,
2458 ArrayRef<unsigned> Idxs,
2459 const Twine &NameStr = "",
2460 Instruction *InsertBefore = nullptr) {
2461 return new
2462 ExtractValueInst(Agg, Idxs, NameStr, InsertBefore);
2463 }
2464
2465 static ExtractValueInst *Create(Value *Agg,
2466 ArrayRef<unsigned> Idxs,
2467 const Twine &NameStr,
2468 BasicBlock *InsertAtEnd) {
2469 return new ExtractValueInst(Agg, Idxs, NameStr, InsertAtEnd);
2470 }
2471
2472 /// Returns the type of the element that would be extracted
2473 /// with an extractvalue instruction with the specified parameters.
2474 ///
2475 /// Null is returned if the indices are invalid for the specified type.
2476 static Type *getIndexedType(Type *Agg, ArrayRef<unsigned> Idxs);
2477
2478 using idx_iterator = const unsigned*;
2479
2480 inline idx_iterator idx_begin() const { return Indices.begin(); }
2481 inline idx_iterator idx_end() const { return Indices.end(); }
2482 inline iterator_range<idx_iterator> indices() const {
2483 return make_range(idx_begin(), idx_end());
2484 }
2485
2486 Value *getAggregateOperand() {
2487 return getOperand(0);
2488 }
2489 const Value *getAggregateOperand() const {
2490 return getOperand(0);
2491 }
2492 static unsigned getAggregateOperandIndex() {
2493 return 0U; // get index for modifying correct operand
2494 }
2495
2496 ArrayRef<unsigned> getIndices() const {
2497 return Indices;
2498 }
2499
2500 unsigned getNumIndices() const {
2501 return (unsigned)Indices.size();
2502 }
2503
2504 bool hasIndices() const {
2505 return true;
2506 }
2507
2508 // Methods for support type inquiry through isa, cast, and dyn_cast:
2509 static bool classof(const Instruction *I) {
2510 return I->getOpcode() == Instruction::ExtractValue;
2511 }
2512 static bool classof(const Value *V) {
2513 return isa<Instruction>(V) && classof(cast<Instruction>(V));
2514 }
2515};
2516
2517ExtractValueInst::ExtractValueInst(Value *Agg,
2518 ArrayRef<unsigned> Idxs,
2519 const Twine &NameStr,
2520 Instruction *InsertBefore)
2521 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2522 ExtractValue, Agg, InsertBefore) {
2523 init(Idxs, NameStr);
2524}
2525
2526ExtractValueInst::ExtractValueInst(Value *Agg,
2527 ArrayRef<unsigned> Idxs,
2528 const Twine &NameStr,
2529 BasicBlock *InsertAtEnd)
2530 : UnaryInstruction(checkGEPType(getIndexedType(Agg->getType(), Idxs)),
2531 ExtractValue, Agg, InsertAtEnd) {
2532 init(Idxs, NameStr);
2533}
2534
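// [Editor's sketch, not part of the original header.] Minimal hedged usage:
// extracting field 1 of a two-field struct value Agg. Agg and InsertPt are
// hypothetical; getIndexedType() returns null for invalid index lists, so
// it doubles as a validity check before creating the instruction.
//   unsigned Idxs[] = {1};
//   if (ExtractValueInst::getIndexedType(Agg->getType(), Idxs))
//     Value *Field = ExtractValueInst::Create(Agg, Idxs, "fld", InsertPt);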
2535//===----------------------------------------------------------------------===//
2536// InsertValueInst Class
2537//===----------------------------------------------------------------------===//
2538
2539/// This instruction inserts a struct field or array element
2540/// value into an aggregate value.
2541///
2542class InsertValueInst : public Instruction {
2543 SmallVector<unsigned, 4> Indices;
2544
2545 InsertValueInst(const InsertValueInst &IVI);
2546
2547 /// Constructors - Create an insertvalue instruction with a base aggregate
2548 /// value, a value to insert, and a list of indices. The first ctor can
2549 /// optionally insert before an existing instruction, the second appends
2550 /// the new instruction to the specified BasicBlock.
2551 inline InsertValueInst(Value *Agg, Value *Val,
2552 ArrayRef<unsigned> Idxs,
2553 const Twine &NameStr,
2554 Instruction *InsertBefore);
2555 inline InsertValueInst(Value *Agg, Value *Val,
2556 ArrayRef<unsigned> Idxs,
2557 const Twine &NameStr, BasicBlock *InsertAtEnd);
2558
2559 /// Constructors - These two constructors are convenience methods because one-
2560 /// and two-index insertvalue instructions are so common.
2561 InsertValueInst(Value *Agg, Value *Val, unsigned Idx,
2562 const Twine &NameStr = "",
2563 Instruction *InsertBefore = nullptr);
2564 InsertValueInst(Value *Agg, Value *Val, unsigned Idx, const Twine &NameStr,
2565 BasicBlock *InsertAtEnd);
2566
2567 void init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
2568 const Twine &NameStr);
2569
2570protected:
2571 // Note: Instruction needs to be a friend here to call cloneImpl.
2572 friend class Instruction;
2573
2574 InsertValueInst *cloneImpl() const;
2575
2576public:
2577 // allocate space for exactly two operands
2578 void *operator new(size_t S) { return User::operator new(S, 2); }
2579 void operator delete(void *Ptr) { User::operator delete(Ptr); }
2580