Bug Summary

File: llvm/include/llvm/IR/IRBuilder.h
Warning: line 2637, column 23
Called C++ object pointer is null
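
In short: the path below shows dyn_cast<Instruction>(U.getUser()) in
replaceWithTileLoad (X86LowerAMXType.cpp, line 433) returning a null pointer
that is then passed to the IRBuilder<> constructor, which dereferences it.
Reduced to its essentials (a sketch of the flagged pattern, not the LLVM
sources themselves):

  Instruction *UserI = dyn_cast<Instruction>(U.getUser()); // null if the user is not an Instruction
  IRBuilder<> Builder(UserI); // ctor dereferences its 'IP' parameter: null dereference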

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name X86LowerAMXType.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/lib/Target/X86 -resource-dir /usr/lib/llvm-13/lib/clang/13.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/X86 -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/include -I /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-13/lib/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/build-llvm/lib/Target/X86 -fdebug-prefix-map=/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7=. -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -stack-protector 2 -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-06-13-111025-38230-1 -x c++ /build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/X86/X86LowerAMXType.cpp

/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/lib/Target/X86/X86LowerAMXType.cpp

1//===- Target/X86/X86LowerAMXType.cpp - -------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file Pass to transform <256 x i32> load/store
10/// <256 x i32> is bitcasted to x86_amx on X86, and AMX instruction set only
11/// provides simple operation on x86_amx. The basic elementwise operation
12/// is not supported by AMX. Since x86_amx is bitcast from vector <256 x i32>
13/// and only AMX intrinsics can operate on the type, we need to transform
14/// load/store <256 x i32> instructions to AMX load/store. If the bitcast
15/// cannot be combined with the load/store, we transform the bitcast to an
16/// amx load/store and a <256 x i32> store/load.
17///
18/// If the front end does not use O0 but the mid/back end uses O0 (e.g. "Clang
19/// -O2 -S -emit-llvm t.c" + "llc t.ll"), we should make sure the amx data is
20/// volatile, because that is necessary for AMX fast register allocation. (In
21/// fast register allocation, registers are allocated before spill/reload, so
22/// there is no additional register for amx to identify the step in a spill.)
23/// The volatileTileData() function handles this case.
24/// e.g.
25/// ----------------------------------------------------------
26/// | def %td = ... |
27/// | ... |
28/// | "use %td" |
29/// ----------------------------------------------------------
30/// will be transformed to -->
31/// ----------------------------------------------------------
32/// | def %td = ... |
33/// | call void @llvm.x86.tilestored64.internal(mem, %td) |
34/// | ... |
35/// | %td2 = call x86_amx @llvm.x86.tileloadd64.internal(mem)|
36/// | "use %td2" |
37/// ----------------------------------------------------------
38//
39//===----------------------------------------------------------------------===//
40//
41#include "X86.h"
42#include "llvm/ADT/PostOrderIterator.h"
43#include "llvm/ADT/SmallSet.h"
44#include "llvm/Analysis/OptimizationRemarkEmitter.h"
45#include "llvm/Analysis/TargetTransformInfo.h"
46#include "llvm/CodeGen/Passes.h"
47#include "llvm/CodeGen/TargetPassConfig.h"
48#include "llvm/CodeGen/ValueTypes.h"
49#include "llvm/IR/DataLayout.h"
50#include "llvm/IR/Function.h"
51#include "llvm/IR/IRBuilder.h"
52#include "llvm/IR/Instructions.h"
53#include "llvm/IR/IntrinsicInst.h"
54#include "llvm/IR/IntrinsicsX86.h"
55#include "llvm/IR/PatternMatch.h"
56#include "llvm/InitializePasses.h"
57#include "llvm/Pass.h"
58#include "llvm/Target/TargetMachine.h"
59
60using namespace llvm;
61using namespace PatternMatch;
62
63#define DEBUG_TYPE "lower-amx-type"
64
65static AllocaInst *createAllocaInstAtEntry(IRBuilder<> &Builder,
66 BasicBlock *BB) {
67 Function &F = *BB->getParent();
68 Module *M = BB->getModule();
69 const DataLayout &DL = M->getDataLayout();
70
71 Type *V256I32Ty = VectorType::get(Builder.getInt32Ty(), 256, false);
72 LLVMContext &Ctx = Builder.getContext();
73 auto AllocaAlignment = DL.getPrefTypeAlign(Type::getX86_AMXTy(Ctx));
74 unsigned AllocaAS = DL.getAllocaAddrSpace();
75 AllocaInst *AllocaRes =
76 new AllocaInst(V256I32Ty, AllocaAS, "", &F.getEntryBlock().front());
77 AllocaRes->setAlignment(AllocaAlignment);
78 return AllocaRes;
79}
80
81namespace {
82class X86LowerAMXType {
83 Function &Func;
84 TargetMachine *TM = nullptr;
85
86 // In AMX intrinsics we let Shape = {Row, Col}, but the
87 // RealCol = Col / ElementSize. We may use the RealCol
88 // as a new Row for other newly created AMX intrinsics.
89 std::map<Value *, Value *> Col2Row;
90
91public:
92 X86LowerAMXType(Function &F, TargetMachine *TargetM) : Func(F), TM(TargetM) {}
93 bool visit();
94 void combineLoadBitcast(LoadInst *LD, BitCastInst *Bitcast);
95 void combineBitcastStore(BitCastInst *Bitcast, StoreInst *ST);
96 bool transformBitcast(BitCastInst *Bitcast);
97 std::pair<Value *, Value *> getShape(IntrinsicInst *II, unsigned OpNo);
98 Value *getRowFromCol(Instruction *II, Value *V, unsigned Granularity);
99};
100
101Value *X86LowerAMXType::getRowFromCol(Instruction *II, Value *V,
102 unsigned Granularity) {
103 if (Col2Row.count(V))
104 return Col2Row[V];
105 IRBuilder<> Builder(&*II->getParent()->getFirstInsertionPt());
106 if (auto *I = dyn_cast<Instruction>(V)) {
107 BasicBlock::iterator Iter = I->getIterator();
108 ++Iter;
109 Builder.SetInsertPoint(&*Iter);
110 }
111 ConstantInt *Gran = Builder.getInt16(Granularity);
112 Value *RealRow = Builder.CreateUDiv(V, Gran);
113 Col2Row[V] = RealRow;
114 return RealRow;
115}
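
For illustration with hypothetical values: with Granularity = 4, the code
above emits Builder.CreateUDiv(V, Builder.getInt16(4)) immediately after V's
defining instruction (or at the block's first insertion point when V is not
an instruction), and caches the result in Col2Row so later queries for the
same column value reuse the same udiv.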
116
117std::pair<Value *, Value *> X86LowerAMXType::getShape(IntrinsicInst *II,
118 unsigned OpNo) {
119 Value *Row = nullptr, *Col = nullptr;
120 switch (II->getIntrinsicID()) {
121 default:
122 llvm_unreachable("Expect amx intrinsics");
123 case Intrinsic::x86_tileloadd64_internal:
124 case Intrinsic::x86_tileloaddt164_internal:
125 case Intrinsic::x86_tilestored64_internal: {
126 Row = II->getArgOperand(0);
127 Col = II->getArgOperand(1);
128 break;
129 }
130 // a * b + c
131 // The shape depends on which operand is queried.
132 case Intrinsic::x86_tdpbssd_internal:
133 case Intrinsic::x86_tdpbsud_internal:
134 case Intrinsic::x86_tdpbusd_internal:
135 case Intrinsic::x86_tdpbuud_internal:
136 case Intrinsic::x86_tdpbf16ps_internal: {
137 switch (OpNo) {
138 case 3:
139 Row = II->getArgOperand(0);
140 Col = II->getArgOperand(1);
141 break;
142 case 4:
143 Row = II->getArgOperand(0);
144 Col = II->getArgOperand(2);
145 break;
146 case 5:
147 Row = II->getArgOperand(2);
148 // FIXME: There is a design bug for AMX shape: the Col should be
149 // Col/4 if it will be used as a Row, but the current greedy RA can't
150 // handle this case well; it may fail if we generate a new shape
151 // definition. So let's just do it at O0 first.
152 // Row = Row / 4
153 if (TM->getOptLevel() == CodeGenOpt::None)
154 Row = getRowFromCol(II, Row, 4);
155 Col = II->getArgOperand(1);
156 break;
157 }
158 break;
159 }
160 }
161
162 return std::make_pair(Row, Col);
163}
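
Read concretely (a worked example, assuming the usual operand order
tdpbssd(m, n, k, c, a, b) for C = A * B + C): getShape(II, 3) returns
{m, n} for the accumulator c (M x N), getShape(II, 4) returns {m, k} for
a (M x K), and getShape(II, 5) returns {k, n} for b (K x N), with k
additionally rescaled via getRowFromCol at O0.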
164
165// %src = load <256 x i32>, <256 x i32>* %addr, align 64
166// %2 = bitcast <256 x i32> %src to x86_amx
167// -->
168// %2 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col,
169// i8* %addr, i64 %stride64)
170void X86LowerAMXType::combineLoadBitcast(LoadInst *LD, BitCastInst *Bitcast) {
171 Value *Row = nullptr, *Col = nullptr;
172 Use &U = *(Bitcast->use_begin());
173 unsigned OpNo = U.getOperandNo();
174 auto *II = cast<IntrinsicInst>(U.getUser());
175 std::tie(Row, Col) = getShape(II, OpNo);
176 IRBuilder<> Builder(Bitcast);
177 // Use the maximum column as the stride.
178 Value *Stride = Builder.getInt64(64);
179 Value *I8Ptr =
180 Builder.CreateBitCast(LD->getOperand(0), Builder.getInt8PtrTy());
181 std::array<Value *, 4> Args = {Row, Col, I8Ptr, Stride};
182
183 Value *NewInst =
184 Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, None, Args);
185 Bitcast->replaceAllUsesWith(NewInst);
186}
187
188// %src = call x86_amx @llvm.x86.tileloadd64.internal(%row, %col, %addr,
189// %stride);
190// %13 = bitcast x86_amx %src to <256 x i32>
191// store <256 x i32> %13, <256 x i32>* %addr, align 64
192// -->
193// call void @llvm.x86.tilestored64.internal(%row, %col, %addr,
194// %stride64, %13)
195void X86LowerAMXType::combineBitcastStore(BitCastInst *Bitcast, StoreInst *ST) {
196
197 Value *Tile = Bitcast->getOperand(0);
198 auto *II = cast<IntrinsicInst>(Tile);
199 // Tile is output from AMX intrinsic. The first operand of the
200 // intrinsic is row, the second operand of the intrinsic is column.
201 Value *Row = II->getOperand(0);
202 Value *Col = II->getOperand(1);
203 IRBuilder<> Builder(ST);
204 // Use the maximum column as the stride. It must be the same as the
205 // load stride.
206 Value *Stride = Builder.getInt64(64);
207 Value *I8Ptr =
208 Builder.CreateBitCast(ST->getOperand(1), Builder.getInt8PtrTy());
209 std::array<Value *, 5> Args = {Row, Col, I8Ptr, Stride, Tile};
210 Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, None, Args);
211 if (Bitcast->hasOneUse())
212 return;
213 // %13 = bitcast x86_amx %src to <256 x i32>
214 // store <256 x i32> %13, <256 x i32>* %addr, align 64
215 // %add = <256 x i32> %13, <256 x i32> %src2
216 // -->
217 // %13 = bitcast x86_amx %src to <256 x i32>
218 // call void @llvm.x86.tilestored64.internal(%row, %col, %addr,
219 // %stride64, %13)
220 // %14 = load <256 x i32>, %addr
221 // %add = <256 x i32> %14, <256 x i32> %src2
222 Value *Vec = Builder.CreateLoad(Bitcast->getType(), ST->getOperand(1));
223 Bitcast->replaceAllUsesWith(Vec);
224}
225
226// Transform a bitcast into <store, load> instructions.
227bool X86LowerAMXType::transformBitcast(BitCastInst *Bitcast) {
228 IRBuilder<> Builder(Bitcast);
229 AllocaInst *AllocaAddr;
230 Value *I8Ptr, *Stride;
231 auto *Src = Bitcast->getOperand(0);
232
233 auto Prepare = [&]() {
234 AllocaAddr = createAllocaInstAtEntry(Builder, Bitcast->getParent());
235 I8Ptr = Builder.CreateBitCast(AllocaAddr, Builder.getInt8PtrTy());
236 Stride = Builder.getInt64(64);
237 };
238
239 if (Bitcast->getType()->isX86_AMXTy()) {
240 // %2 = bitcast <256 x i32> %src to x86_amx
241 // -->
242 // %addr = alloca <256 x i32>, align 64
243 // store <256 x i32> %src, <256 x i32>* %addr, align 64
244 // %addr2 = bitcast <256 x i32>* to i8*
245 // %2 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col,
246 // i8* %addr2,
247 // i64 64)
248 Use &U = *(Bitcast->use_begin());
249 unsigned OpNo = U.getOperandNo();
250 auto *II = dyn_cast<IntrinsicInst>(U.getUser());
251 if (!II)
252 return false; // May be bitcast from x86amx to <256 x i32>.
253 Prepare();
254 Builder.CreateStore(Src, AllocaAddr);
255 // TODO: we can pick a constant operand for the shape.
256 Value *Row = nullptr, *Col = nullptr;
257 std::tie(Row, Col) = getShape(II, OpNo);
258 std::array<Value *, 4> Args = {Row, Col, I8Ptr, Stride};
259 Value *NewInst = Builder.CreateIntrinsic(
260 Intrinsic::x86_tileloadd64_internal, None, Args);
261 Bitcast->replaceAllUsesWith(NewInst);
262 } else {
263 // %2 = bitcast x86_amx %src to <256 x i32>
264 // -->
265 // %addr = alloca <256 x i32>, align 64
266 // %addr2 = bitcast <256 x i32>* to i8*
267 // call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col,
268 // i8* %addr2, i64 %stride)
269 // %2 = load <256 x i32>, <256 x i32>* %addr, align 64
270 auto *II = dyn_cast<IntrinsicInst>(Src);
271 if (!II)
272 return false; // May be bitcast from <256 x i32> to x86amx.
273 Prepare();
274 Value *Row = II->getOperand(0);
275 Value *Col = II->getOperand(1);
276 std::array<Value *, 5> Args = {Row, Col, I8Ptr, Stride, Src};
277 Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, None, Args);
278 Value *NewInst = Builder.CreateLoad(Bitcast->getType(), AllocaAddr);
279 Bitcast->replaceAllUsesWith(NewInst);
280 }
281
282 return true;
283}
284
285bool X86LowerAMXType::visit() {
286 SmallVector<Instruction *, 8> DeadInsts;
287 Col2Row.clear();
288
289 for (BasicBlock *BB : post_order(&Func)) {
290 for (BasicBlock::reverse_iterator II = BB->rbegin(), IE = BB->rend();
291 II != IE;) {
292 Instruction &Inst = *II++;
293 auto *Bitcast = dyn_cast<BitCastInst>(&Inst);
294 if (!Bitcast)
295 continue;
296
297 Value *Src = Bitcast->getOperand(0);
298 if (Bitcast->getType()->isX86_AMXTy()) {
299 if (Bitcast->user_empty()) {
300 DeadInsts.push_back(Bitcast);
301 continue;
302 }
303 LoadInst *LD = dyn_cast<LoadInst>(Src);
304 if (!LD) {
305 if (transformBitcast(Bitcast))
306 DeadInsts.push_back(Bitcast);
307 continue;
308 }
309 // If the load has multiple users, duplicate the vector load.
310 // %src = load <256 x i32>, <256 x i32>* %addr, align 64
311 // %2 = bitcast <256 x i32> %src to x86_amx
312 // %add = add <256 x i32> %src, <256 x i32> %src2
313 // -->
314 // %src = load <256 x i32>, <256 x i32>* %addr, align 64
315 // %2 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col,
316 // i8* %addr, i64 %stride64)
317 // %add = add <256 x i32> %src, <256 x i32> %src2
318
319 // If the load has one user, the load will be eliminated in DAG ISel.
320 // %src = load <256 x i32>, <256 x i32>* %addr, align 64
321 // %2 = bitcast <256 x i32> %src to x86_amx
322 // -->
323 // %2 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col,
324 // i8* %addr, i64 %stride64)
325 combineLoadBitcast(LD, Bitcast);
326 DeadInsts.push_back(Bitcast);
327 if (LD->hasOneUse())
328 DeadInsts.push_back(LD);
329 } else if (Src->getType()->isX86_AMXTy()) {
330 if (Bitcast->user_empty()) {
331 DeadInsts.push_back(Bitcast);
332 continue;
333 }
334 StoreInst *ST = nullptr;
335 for (auto UI = Bitcast->use_begin(), UE = Bitcast->use_end();
336 UI != UE;) {
337 Value *I = (UI++)->getUser();
338 ST = dyn_cast<StoreInst>(I);
339 if (ST)
340 break;
341 }
342 if (!ST) {
343 if (transformBitcast(Bitcast))
344 DeadInsts.push_back(Bitcast);
345 continue;
346 }
347 // If the bitcast (%13) has one use, combine bitcast and store into an amx store.
348 // %src = call x86_amx @llvm.x86.tileloadd64.internal(%row, %col, %addr,
349 // %stride);
350 // %13 = bitcast x86_amx %src to <256 x i32>
351 // store <256 x i32> %13, <256 x i32>* %addr, align 64
352 // -->
353 // call void @llvm.x86.tilestored64.internal(%row, %col, %addr,
354 // %stride64, %13)
355 //
356 // If the bitcast (%13) has multiple uses, transform it as below.
357 // %13 = bitcast x86_amx %src to <256 x i32>
358 // store <256 x i32> %13, <256 x i32>* %addr, align 64
359 // %add = <256 x i32> %13, <256 x i32> %src2
360 // -->
361 // %13 = bitcast x86_amx %src to <256 x i32>
362 // call void @llvm.x86.tilestored64.internal(%row, %col, %addr,
363 // %stride64, %13)
364 // %14 = load <256 x i32>, %addr
365 // %add = <256 x i32> %14, <256 x i32> %src2
366 //
367 combineBitcastStore(Bitcast, ST);
368 // Delete user first.
369 DeadInsts.push_back(ST);
370 DeadInsts.push_back(Bitcast);
371 }
372 }
373 }
374
375 bool C = !DeadInsts.empty();
376
377 for (auto *Inst : DeadInsts)
378 Inst->eraseFromParent();
379
380 return C;
381}
382} // anonymous namespace
383
384static Value *getAllocaPos(BasicBlock *BB) {
385 Module *M = BB->getModule();
386 Function *F = BB->getParent();
387 IRBuilder<> Builder(&F->getEntryBlock().front());
388 const DataLayout &DL = M->getDataLayout();
389 unsigned AllocaAS = DL.getAllocaAddrSpace();
390 Type *V256I32Ty = VectorType::get(Builder.getInt32Ty(), 256, false);
391 AllocaInst *AllocaRes =
392 new AllocaInst(V256I32Ty, AllocaAS, "", &F->getEntryBlock().front());
393 BasicBlock::iterator Iter = AllocaRes->getIterator();
394 ++Iter;
395 Builder.SetInsertPoint(&*Iter);
396 Value *I8Ptr = Builder.CreateBitCast(AllocaRes, Builder.getInt8PtrTy());
397 return I8Ptr;
398}
399
400static Instruction *createTileStore(Instruction *TileDef, Value *Ptr) {
401 assert(TileDef->getType()->isX86_AMXTy() && "Not define tile!");
402 auto *II = cast<IntrinsicInst>(TileDef);
403 assert(II && "Not tile intrinsic!");
404 Value *Row = II->getOperand(0);
405 Value *Col = II->getOperand(1);
406
407 BasicBlock *BB = TileDef->getParent();
408 BasicBlock::iterator Iter = TileDef->getIterator();
409 IRBuilder<> Builder(BB, ++Iter);
410 Value *Stride = Builder.getInt64(64);
411 std::array<Value *, 5> Args = {Row, Col, Ptr, Stride, TileDef};
412
413 Instruction *TileStore =
414 Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, None, Args);
415 return TileStore;
416}
417
418static void replaceWithTileLoad(Use &U, Value *Ptr, bool IsPHI = false) {
419 Value *V = U.get();
420 assert(V->getType()->isX86_AMXTy() && "Not define tile!");
14
'?' condition is true
421
422 // Get tile shape.
423 IntrinsicInst *II = nullptr;
424 if (IsPHI) {
14.1
'IsPHI' is true
15
Taking true branch
425 Value *PhiOp = dyn_cast<PHINode>(V)->getIncomingValue(0);
16
Assuming 'V' is a 'PHINode'
426 II = cast<IntrinsicInst>(PhiOp);
17
'PhiOp' is a 'IntrinsicInst'
427 } else {
428 II = cast<IntrinsicInst>(V);
429 }
430 Value *Row = II->getOperand(0);
431 Value *Col = II->getOperand(1);
432
433 Instruction *UserI = dyn_cast<Instruction>(U.getUser());
18
Assuming the object is not a 'Instruction'
19
'UserI' initialized to a null pointer value
434 IRBuilder<> Builder(UserI);
20
Passing null pointer value via 1st parameter 'IP'
21
Calling constructor for 'IRBuilder<llvm::ConstantFolder, llvm::IRBuilderDefaultInserter>'
435 Value *Stride = Builder.getInt64(64);
436 std::array<Value *, 4> Args = {Row, Col, Ptr, Stride};
437
438 Value *TileLoad =
439 Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, None, Args);
440 UserI->replaceUsesOfWith(V, TileLoad);
441}
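
The analyzer's complaint is the unchecked dyn_cast above: when U.getUser() is
not an Instruction, UserI is null and the IRBuilder<> constructor dereferences
it. A minimal defensive sketch, assuming a non-instruction user is safe to
skip (if the intended invariant is that every user here is an instruction,
cast<Instruction>, which asserts internally, would document that instead):

  Instruction *UserI = dyn_cast<Instruction>(U.getUser());
  if (!UserI)
    return;                   // hypothetical guard: nothing to rewrite
  IRBuilder<> Builder(UserI); // now known non-null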
442
443static bool isIncomingOfPHI(Instruction *I) {
444 for (Use &U : I->uses()) {
445 User *V = U.getUser();
446 if (isa<PHINode>(V))
447 return true;
448 }
449 return false;
450}
451
452// Make all AMX tile data volatile, shortening the live range
453// of each tile register before fast register allocation.
454namespace {
455class X86VolatileTileData {
456 Function &F;
457
458public:
459 X86VolatileTileData(Function &Func) : F(Func) {}
460 Value *updatePhiIncomings(BasicBlock *BB,
461 SmallVector<Instruction *, 2> &Incomings);
462 void replacePhiDefWithLoad(Instruction *PHI, Value *StorePtr);
463 bool volatileTileData();
464 void volatileTilePHI(PHINode *Inst);
465 void volatileTileNonPHI(Instruction *I);
466};
467
468Value *X86VolatileTileData::updatePhiIncomings(
469 BasicBlock *BB, SmallVector<Instruction *, 2> &Incomings) {
470 Value *I8Ptr = getAllocaPos(BB);
471
472 for (auto *I : Incomings) {
473 User *Store = createTileStore(I, I8Ptr);
474
475 // All its uses (except phi) should load from stored mem.
476 for (Use &U : I->uses()) {
477 User *V = U.getUser();
478 if (isa<PHINode>(V) || V == Store)
479 continue;
480 replaceWithTileLoad(U, I8Ptr);
481 }
482 }
483 return I8Ptr;
484}
485
486void X86VolatileTileData::replacePhiDefWithLoad(Instruction *PHI,
487 Value *StorePtr) {
488 for (Use &U : PHI->uses())
489 replaceWithTileLoad(U, StorePtr, true);
13
Calling 'replaceWithTileLoad'
490 PHI->eraseFromParent();
491}
492
493// Similar to volatileTileNonPHI, this function only handles PHI nodes
494// and their related AMX intrinsics.
495// 1) The PHI def should be changed to a tileload.
496// 2) PHI incoming values should be tilestored just after their defs.
497// 3) The mem of these tileloads and tilestores should be the same.
498// e.g.
499// ------------------------------------------------------
500// bb_dom:
501// ...
502// br i1 %bool.cond, label %if.else, label %if.then
503//
504// if.then:
505// def %t0 = ...
506// ...
507// use %t0
508// ...
509// br label %if.end
510//
511// if.else:
512// def %t1 = ...
513// br label %if.end
514//
515// if.end:
516// %td = phi x86_amx [ %t1, %if.else ], [ %t0, %if.then ]
517// ...
518// use %td
519// ------------------------------------------------------
520// -->
521// ------------------------------------------------------
522// bb_entry:
523// %mem = alloca <256 x i32>, align 1024 *
524// ...
525// bb_dom:
526// ...
527// br i1 %bool.cond, label %if.else, label %if.then
528//
529// if.then:
530// def %t0 = ...
531// call void @llvm.x86.tilestored64.internal(mem, %t0) *
532// ...
533// %t0` = call x86_amx @llvm.x86.tileloadd64.internal(mem)*
534// use %t0` *
535// ...
536// br label %if.end
537//
538// if.else:
539// def %t1 = ...
540// call void @llvm.x86.tilestored64.internal(mem, %t1) *
541// br label %if.end
542//
543// if.end:
544// ...
545// %td = call x86_amx @llvm.x86.tileloadd64.internal(mem) *
546// use %td
547// ------------------------------------------------------
548void X86VolatileTileData::volatileTilePHI(PHINode *PHI) {
549 BasicBlock *BB = PHI->getParent();
550 SmallVector<Instruction *, 2> Incomings;
551
552 for (unsigned I = 0, E = PHI->getNumIncomingValues(); I != E; ++I) {
10
Assuming 'I' is equal to 'E'
11
Loop condition is false. Execution continues on line 559
553 Value *Op = PHI->getIncomingValue(I);
554 Instruction *Inst = dyn_cast<Instruction>(Op);
555 assert(Inst && "We shouldn't fold AMX instruction!");
556 Incomings.push_back(Inst);
557 }
558
559 Value *StorePtr = updatePhiIncomings(BB, Incomings);
560 replacePhiDefWithLoad(PHI, StorePtr);
12
Calling 'X86VolatileTileData::replacePhiDefWithLoad'
561}
562
563// Store the defined tile and load it before use.
564// None of its users is a PHI.
565// e.g.
566// ------------------------------------------------------
567// def %td = ...
568// ...
569// "use %td"
570// ------------------------------------------------------
571// -->
572// ------------------------------------------------------
573// def %td = ...
574// call void @llvm.x86.tilestored64.internal(mem, %td)
575// ...
576// %td2 = call x86_amx @llvm.x86.tileloadd64.internal(mem)
577// "use %td2"
578// ------------------------------------------------------
579void X86VolatileTileData::volatileTileNonPHI(Instruction *I) {
580 BasicBlock *BB = I->getParent();
581 Value *I8Ptr = getAllocaPos(BB);
582 User *Store = createTileStore(I, I8Ptr);
583
584 // All its uses should load from stored mem.
585 for (Use &U : I->uses()) {
586 User *V = U.getUser();
587 assert(!isa<PHINode>(V) && "PHI Nodes should be excluded!");
588 if (V != Store)
589 replaceWithTileLoad(U, I8Ptr);
590 }
591}
592
593// Volatile Tile Model:
594// 1) All uses of tile data come from a tileload just in time.
595// 2) All defs of tile data are tilestored into mem immediately.
596// For example:
597// --------------------------------------------------------------------------
598// %t1 = call x86_amx @llvm.x86.tileloadd64.internal(m, k, ...) key
599// %t2 = call x86_amx @llvm.x86.tileloadd64.internal(k, n, ...)
600// %t3 = call x86_amx @llvm.x86.tileloadd64.internal(m, n, ...) amx
601// %td = tail call x86_amx @llvm.x86.tdpbssd.internal(m, n, k, t1, t2, t3)
602// call void @llvm.x86.tilestored64.internal(... td) area
603// --------------------------------------------------------------------------
604// 3) No terminator, call or other amx instructions in the key amx area.
605bool X86VolatileTileData::volatileTileData() {
606 bool Changed = false;
607 for (BasicBlock &BB : F) {
608 SmallVector<Instruction *, 2> PHIInsts;
609 SmallVector<Instruction *, 8> AMXDefInsts;
610
611 for (Instruction &I : BB) {
612 if (!I.getType()->isX86_AMXTy())
613 continue;
614 if (isa<PHINode>(&I))
615 PHIInsts.push_back(&I);
616 else
617 AMXDefInsts.push_back(&I);
618 }
619
620 // First we "volatile" the non-phi related amx intrinsics.
621 for (Instruction *I : AMXDefInsts) {
6
Assuming '__begin2' is equal to '__end2'
622 if (isIncomingOfPHI(I))
623 continue;
624 volatileTileNonPHI(I);
625 Changed = true;
626 }
627
628 for (Instruction *I : PHIInsts) {
7
Assuming '__begin2' is not equal to '__end2'
629 volatileTilePHI(dyn_cast<PHINode>(I));
8
Assuming 'I' is a 'PHINode'
9
Calling 'X86VolatileTileData::volatileTilePHI'
630 Changed = true;
631 }
632 }
633 return Changed;
634}
635
636} // anonymous namespace
637
638namespace {
639
640class X86LowerAMXTypeLegacyPass : public FunctionPass {
641public:
642 static char ID;
643
644 X86LowerAMXTypeLegacyPass() : FunctionPass(ID) {
645 initializeX86LowerAMXTypeLegacyPassPass(*PassRegistry::getPassRegistry());
646 }
647
648 bool runOnFunction(Function &F) override {
649 TargetMachine *TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
650
651 X86LowerAMXType LAT(F, TM);
652 bool C = LAT.visit();
653
654 // Prepare for fast register allocation at O0.
655 // TODO: it may be better to check the volatile model of the AMX code,
656 // not just Attribute::OptimizeNone and CodeGenOpt::None.
657 if (TM->getOptLevel() == CodeGenOpt::None) {
1
Assuming the condition is true
2
Taking true branch
658 // If the front end does not use O0 but the mid/back end uses O0
659 // (e.g. "Clang -O2 -S -emit-llvm t.c" + "llc t.ll"), we should make
660 // sure the amx data is volatile; that is necessary for AMX fast
661 // register allocation.
662 if (!F.hasFnAttribute(Attribute::OptimizeNone)) {
3
Assuming the condition is true
4
Taking true branch
663 X86VolatileTileData VTD(F);
664 C = VTD.volatileTileData() || C;
5
Calling 'X86VolatileTileData::volatileTileData'
665 }
666 }
667
668 return C;
669 }
670
671 void getAnalysisUsage(AnalysisUsage &AU) const override {
672 AU.setPreservesCFG();
673 AU.addRequired<TargetPassConfig>();
674 }
675};
676
677} // anonymous namespace
678
679static const char PassName[] = "Lower AMX type for load/store";
680char X86LowerAMXTypeLegacyPass::ID = 0;
681INITIALIZE_PASS_BEGIN(X86LowerAMXTypeLegacyPass, DEBUG_TYPE, PassName, false,
682                      false)
683INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
684INITIALIZE_PASS_END(X86LowerAMXTypeLegacyPass, DEBUG_TYPE, PassName, false,
685                    false)
686
687FunctionPass *llvm::createX86LowerAMXTypePass() {
688 return new X86LowerAMXTypeLegacyPass();
689}
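
(The report continues into IRBuilder.h below: path notes 20-21 show the null
UserI being passed as the 'IP' parameter of the IRBuilder<> constructor, which
dereferences it; this is the "Called C++ object pointer is null" warning at
line 2637, column 23 of the full header, beyond the excerpt shown here.)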

/build/llvm-toolchain-snapshot-13~++20210613111130+5be314f79ba7/llvm/include/llvm/IR/IRBuilder.h

1//===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the IRBuilder class, which is used as a convenient way
10// to create LLVM instructions with a consistent and simplified interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_IRBUILDER_H
15#define LLVM_IR_IRBUILDER_H
16
17#include "llvm-c/Types.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/StringRef.h"
22#include "llvm/ADT/Twine.h"
23#include "llvm/IR/BasicBlock.h"
24#include "llvm/IR/Constant.h"
25#include "llvm/IR/ConstantFolder.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/DebugInfoMetadata.h"
29#include "llvm/IR/DebugLoc.h"
30#include "llvm/IR/DerivedTypes.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/GlobalVariable.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/Instructions.h"
36#include "llvm/IR/IntrinsicInst.h"
37#include "llvm/IR/LLVMContext.h"
38#include "llvm/IR/Module.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/IR/Type.h"
41#include "llvm/IR/Value.h"
42#include "llvm/IR/ValueHandle.h"
43#include "llvm/Support/AtomicOrdering.h"
44#include "llvm/Support/CBindingWrapping.h"
45#include "llvm/Support/Casting.h"
46#include <cassert>
47#include <cstddef>
48#include <cstdint>
49#include <functional>
50#include <utility>
51
52namespace llvm {
53
54class APInt;
55class MDNode;
56class Use;
57
58/// This provides the default implementation of the IRBuilder
59/// 'InsertHelper' method that is called whenever an instruction is created by
60/// IRBuilder and needs to be inserted.
61///
62/// By default, this inserts the instruction at the insertion point.
63class IRBuilderDefaultInserter {
64public:
65 virtual ~IRBuilderDefaultInserter();
66
67 virtual void InsertHelper(Instruction *I, const Twine &Name,
68 BasicBlock *BB,
69 BasicBlock::iterator InsertPt) const {
70 if (BB) BB->getInstList().insert(InsertPt, I);
71 I->setName(Name);
72 }
73};
74
75/// Provides an 'InsertHelper' that calls a user-provided callback after
76/// performing the default insertion.
77class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
78 std::function<void(Instruction *)> Callback;
79
80public:
81 virtual ~IRBuilderCallbackInserter();
82
83 IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
84 : Callback(std::move(Callback)) {}
85
86 void InsertHelper(Instruction *I, const Twine &Name,
87 BasicBlock *BB,
88 BasicBlock::iterator InsertPt) const override {
89 IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
90 Callback(I);
91 }
92};
93
94/// Common base class shared among various IRBuilders.
95class IRBuilderBase {
96 /// Pairs of (metadata kind, MDNode *) that should be added to all newly
97 /// created instructions, like !dbg metadata.
98 SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy;
99
100 /// Add or update an entry (Kind, MD) in MetadataToCopy, if \p MD is not
101 /// null. If \p MD is null, remove the entry with \p Kind.
102 void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) {
103 if (!MD) {
104 erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) {
105 return KV.first == Kind;
106 });
107 return;
108 }
109
110 for (auto &KV : MetadataToCopy)
111 if (KV.first == Kind) {
112 KV.second = MD;
113 return;
114 }
115
116 MetadataToCopy.emplace_back(Kind, MD);
117 }
118
119protected:
120 BasicBlock *BB;
121 BasicBlock::iterator InsertPt;
122 LLVMContext &Context;
123 const IRBuilderFolder &Folder;
124 const IRBuilderDefaultInserter &Inserter;
125
126 MDNode *DefaultFPMathTag;
127 FastMathFlags FMF;
128
129 bool IsFPConstrained;
130 fp::ExceptionBehavior DefaultConstrainedExcept;
131 RoundingMode DefaultConstrainedRounding;
132
133 ArrayRef<OperandBundleDef> DefaultOperandBundles;
134
135public:
136 IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder,
137 const IRBuilderDefaultInserter &Inserter,
138 MDNode *FPMathTag, ArrayRef<OperandBundleDef> OpBundles)
139 : Context(context), Folder(Folder), Inserter(Inserter),
140 DefaultFPMathTag(FPMathTag), IsFPConstrained(false),
141 DefaultConstrainedExcept(fp::ebStrict),
142 DefaultConstrainedRounding(RoundingMode::Dynamic),
143 DefaultOperandBundles(OpBundles) {
144 ClearInsertionPoint();
145 }
146
147 /// Insert and return the specified instruction.
148 template<typename InstTy>
149 InstTy *Insert(InstTy *I, const Twine &Name = "") const {
150 Inserter.InsertHelper(I, Name, BB, InsertPt);
151 AddMetadataToInst(I);
152 return I;
153 }
154
155 /// No-op overload to handle constants.
156 Constant *Insert(Constant *C, const Twine& = "") const {
157 return C;
158 }
159
160 Value *Insert(Value *V, const Twine &Name = "") const {
161 if (Instruction *I = dyn_cast<Instruction>(V))
162 return Insert(I, Name);
163 assert(isa<Constant>(V));
164 return V;
165 }
166
167 //===--------------------------------------------------------------------===//
168 // Builder configuration methods
169 //===--------------------------------------------------------------------===//
170
171 /// Clear the insertion point: created instructions will not be
172 /// inserted into a block.
173 void ClearInsertionPoint() {
174 BB = nullptr;
175 InsertPt = BasicBlock::iterator();
176 }
177
178 BasicBlock *GetInsertBlock() const { return BB; }
179 BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
180 LLVMContext &getContext() const { return Context; }
181
182 /// This specifies that created instructions should be appended to the
183 /// end of the specified block.
184 void SetInsertPoint(BasicBlock *TheBB) {
185 BB = TheBB;
186 InsertPt = BB->end();
187 }
188
189 /// This specifies that created instructions should be inserted before
190 /// the specified instruction.
191 void SetInsertPoint(Instruction *I) {
192 BB = I->getParent();
193 InsertPt = I->getIterator();
194 assert(InsertPt != BB->end() && "Can't read debug loc from end()");
195 SetCurrentDebugLocation(I->getDebugLoc());
196 }
197
198 /// This specifies that created instructions should be inserted at the
199 /// specified point.
200 void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
201 BB = TheBB;
202 InsertPt = IP;
203 if (IP != TheBB->end())
204 SetCurrentDebugLocation(IP->getDebugLoc());
205 }
206
207 /// Set location information used by debugging information.
208 void SetCurrentDebugLocation(DebugLoc L) {
209 AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode());
210 }
211
212 /// Collect metadata with IDs \p MetadataKinds from \p Src which should be
213 /// added to all created instructions. Entries present in MetadataToCopy but
214 /// not on \p Src will be dropped from MetadataToCopy.
215 void CollectMetadataToCopy(Instruction *Src,
216 ArrayRef<unsigned> MetadataKinds) {
217 for (unsigned K : MetadataKinds)
218 AddOrRemoveMetadataToCopy(K, Src->getMetadata(K));
219 }
220
221 /// Get location information used by debugging information.
222 DebugLoc getCurrentDebugLocation() const {
223 for (auto &KV : MetadataToCopy)
224 if (KV.first == LLVMContext::MD_dbg)
225 return {cast<DILocation>(KV.second)};
226
227 return {};
228 }
229
230 /// If this builder has a current debug location, set it on the
231 /// specified instruction.
232 void SetInstDebugLocation(Instruction *I) const {
233 for (const auto &KV : MetadataToCopy)
234 if (KV.first == LLVMContext::MD_dbg) {
235 I->setDebugLoc(DebugLoc(KV.second));
236 return;
237 }
238 }
239
240 /// Add all entries in MetadataToCopy to \p I.
241 void AddMetadataToInst(Instruction *I) const {
242 for (auto &KV : MetadataToCopy)
243 I->setMetadata(KV.first, KV.second);
244 }
245
246 /// Get the return type of the current function that we're emitting
247 /// into.
248 Type *getCurrentFunctionReturnType() const;
249
250 /// InsertPoint - A saved insertion point.
251 class InsertPoint {
252 BasicBlock *Block = nullptr;
253 BasicBlock::iterator Point;
254
255 public:
256 /// Creates a new insertion point which doesn't point to anything.
257 InsertPoint() = default;
258
259 /// Creates a new insertion point at the given location.
260 InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
261 : Block(InsertBlock), Point(InsertPoint) {}
262
263 /// Returns true if this insert point is set.
264 bool isSet() const { return (Block != nullptr); }
265
266 BasicBlock *getBlock() const { return Block; }
267 BasicBlock::iterator getPoint() const { return Point; }
268 };
269
270 /// Returns the current insert point.
271 InsertPoint saveIP() const {
272 return InsertPoint(GetInsertBlock(), GetInsertPoint());
273 }
274
275 /// Returns the current insert point, clearing it in the process.
276 InsertPoint saveAndClearIP() {
277 InsertPoint IP(GetInsertBlock(), GetInsertPoint());
278 ClearInsertionPoint();
279 return IP;
280 }
281
282 /// Sets the current insert point to a previously-saved location.
283 void restoreIP(InsertPoint IP) {
284 if (IP.isSet())
285 SetInsertPoint(IP.getBlock(), IP.getPoint());
286 else
287 ClearInsertionPoint();
288 }
289
290 /// Get the floating point math metadata being used.
291 MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }
292
293 /// Get the flags to be applied to created floating point ops
294 FastMathFlags getFastMathFlags() const { return FMF; }
295
296 FastMathFlags &getFastMathFlags() { return FMF; }
297
298 /// Clear the fast-math flags.
299 void clearFastMathFlags() { FMF.clear(); }
300
301 /// Set the floating point math metadata to be used.
302 void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }
303
304 /// Set the fast-math flags to be used with generated fp-math operators
305 void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }
306
307 /// Enable/Disable use of constrained floating point math. When
308 /// enabled the CreateF<op>() calls instead create constrained
309 /// floating point intrinsic calls. Fast math flags are unaffected
310 /// by this setting.
311 void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }
312
313 /// Query for the use of constrained floating point math
314 bool getIsFPConstrained() { return IsFPConstrained; }
315
316 /// Set the exception handling to be used with constrained floating point
317 void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
318#ifndef NDEBUG
319 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(NewExcept);
320 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
321#endif
322 DefaultConstrainedExcept = NewExcept;
323 }
324
325 /// Set the rounding mode handling to be used with constrained floating point
326 void setDefaultConstrainedRounding(RoundingMode NewRounding) {
327#ifndef NDEBUG
328 Optional<StringRef> RoundingStr = RoundingModeToStr(NewRounding);
329 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
330#endif
331 DefaultConstrainedRounding = NewRounding;
332 }
333
334 /// Get the exception handling used with constrained floating point
335 fp::ExceptionBehavior getDefaultConstrainedExcept() {
336 return DefaultConstrainedExcept;
337 }
338
339 /// Get the rounding mode handling used with constrained floating point
340 RoundingMode getDefaultConstrainedRounding() {
341 return DefaultConstrainedRounding;
342 }
343
344 void setConstrainedFPFunctionAttr() {
345 assert(BB && "Must have a basic block to set any function attributes!");
346
347 Function *F = BB->getParent();
348 if (!F->hasFnAttribute(Attribute::StrictFP)) {
349 F->addFnAttr(Attribute::StrictFP);
350 }
351 }
352
353 void setConstrainedFPCallAttr(CallBase *I) {
354 I->addAttribute(AttributeList::FunctionIndex, Attribute::StrictFP);
355 }
356
357 void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
358 DefaultOperandBundles = OpBundles;
359 }
360
361 //===--------------------------------------------------------------------===//
362 // RAII helpers.
363 //===--------------------------------------------------------------------===//
364
365 // RAII object that stores the current insertion point and restores it
366 // when the object is destroyed. This includes the debug location.
367 class InsertPointGuard {
368 IRBuilderBase &Builder;
369 AssertingVH<BasicBlock> Block;
370 BasicBlock::iterator Point;
371 DebugLoc DbgLoc;
372
373 public:
374 InsertPointGuard(IRBuilderBase &B)
375 : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
376 DbgLoc(B.getCurrentDebugLocation()) {}
377
378 InsertPointGuard(const InsertPointGuard &) = delete;
379 InsertPointGuard &operator=(const InsertPointGuard &) = delete;
380
381 ~InsertPointGuard() {
382 Builder.restoreIP(InsertPoint(Block, Point));
383 Builder.SetCurrentDebugLocation(DbgLoc);
384 }
385 };
386
387 // RAII object that stores the current fast math settings and restores
388 // them when the object is destroyed.
389 class FastMathFlagGuard {
390 IRBuilderBase &Builder;
391 FastMathFlags FMF;
392 MDNode *FPMathTag;
393 bool IsFPConstrained;
394 fp::ExceptionBehavior DefaultConstrainedExcept;
395 RoundingMode DefaultConstrainedRounding;
396
397 public:
398 FastMathFlagGuard(IRBuilderBase &B)
399 : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag),
400 IsFPConstrained(B.IsFPConstrained),
401 DefaultConstrainedExcept(B.DefaultConstrainedExcept),
402 DefaultConstrainedRounding(B.DefaultConstrainedRounding) {}
403
404 FastMathFlagGuard(const FastMathFlagGuard &) = delete;
405 FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;
406
407 ~FastMathFlagGuard() {
408 Builder.FMF = FMF;
409 Builder.DefaultFPMathTag = FPMathTag;
410 Builder.IsFPConstrained = IsFPConstrained;
411 Builder.DefaultConstrainedExcept = DefaultConstrainedExcept;
412 Builder.DefaultConstrainedRounding = DefaultConstrainedRounding;
413 }
414 };
415
416 // RAII object that stores the current default operand bundles and restores
417 // them when the object is destroyed.
418 class OperandBundlesGuard {
419 IRBuilderBase &Builder;
420 ArrayRef<OperandBundleDef> DefaultOperandBundles;
421
422 public:
423 OperandBundlesGuard(IRBuilderBase &B)
424 : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {}
425
426 OperandBundlesGuard(const OperandBundlesGuard &) = delete;
427 OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete;
428
429 ~OperandBundlesGuard() {
430 Builder.DefaultOperandBundles = DefaultOperandBundles;
431 }
432 };
433
434
435 //===--------------------------------------------------------------------===//
436 // Miscellaneous creation methods.
437 //===--------------------------------------------------------------------===//
438
439 /// Make a new global variable with initializer type i8*
440 ///
441 /// Make a new global variable with an initializer that has array of i8 type
442 /// filled in with the null terminated string value specified. The new global
443 /// variable will be marked mergable with any others of the same contents. If
444 /// Name is specified, it is the name of the global variable created.
445 ///
446 /// If no module is given via \p M, it is taken from the insertion point basic
447 /// block.
448 GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
449 unsigned AddressSpace = 0,
450 Module *M = nullptr);
451
452 /// Get a constant value representing either true or false.
453 ConstantInt *getInt1(bool V) {
454 return ConstantInt::get(getInt1Ty(), V);
455 }
456
457 /// Get the constant value for i1 true.
458 ConstantInt *getTrue() {
459 return ConstantInt::getTrue(Context);
460 }
461
462 /// Get the constant value for i1 false.
463 ConstantInt *getFalse() {
464 return ConstantInt::getFalse(Context);
465 }
466
467 /// Get a constant 8-bit value.
468 ConstantInt *getInt8(uint8_t C) {
469 return ConstantInt::get(getInt8Ty(), C);
470 }
471
472 /// Get a constant 16-bit value.
473 ConstantInt *getInt16(uint16_t C) {
474 return ConstantInt::get(getInt16Ty(), C);
475 }
476
477 /// Get a constant 32-bit value.
478 ConstantInt *getInt32(uint32_t C) {
479 return ConstantInt::get(getInt32Ty(), C);
480 }
481
482 /// Get a constant 64-bit value.
483 ConstantInt *getInt64(uint64_t C) {
484 return ConstantInt::get(getInt64Ty(), C);
485 }
486
487 /// Get a constant N-bit value, zero extended or truncated from
488 /// a 64-bit value.
489 ConstantInt *getIntN(unsigned N, uint64_t C) {
490 return ConstantInt::get(getIntNTy(N), C);
491 }
492
493 /// Get a constant integer value.
494 ConstantInt *getInt(const APInt &AI) {
495 return ConstantInt::get(Context, AI);
496 }
497
498 //===--------------------------------------------------------------------===//
499 // Type creation methods
500 //===--------------------------------------------------------------------===//
501
502 /// Fetch the type representing a single bit
503 IntegerType *getInt1Ty() {
504 return Type::getInt1Ty(Context);
505 }
506
507 /// Fetch the type representing an 8-bit integer.
508 IntegerType *getInt8Ty() {
509 return Type::getInt8Ty(Context);
510 }
511
512 /// Fetch the type representing a 16-bit integer.
513 IntegerType *getInt16Ty() {
514 return Type::getInt16Ty(Context);
515 }
516
517 /// Fetch the type representing a 32-bit integer.
518 IntegerType *getInt32Ty() {
519 return Type::getInt32Ty(Context);
520 }
521
522 /// Fetch the type representing a 64-bit integer.
523 IntegerType *getInt64Ty() {
524 return Type::getInt64Ty(Context);
525 }
526
527 /// Fetch the type representing a 128-bit integer.
528 IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }
529
530 /// Fetch the type representing an N-bit integer.
531 IntegerType *getIntNTy(unsigned N) {
532 return Type::getIntNTy(Context, N);
533 }
534
535 /// Fetch the type representing a 16-bit floating point value.
536 Type *getHalfTy() {
537 return Type::getHalfTy(Context);
538 }
539
540 /// Fetch the type representing a 16-bit brain floating point value.
541 Type *getBFloatTy() {
542 return Type::getBFloatTy(Context);
543 }
544
545 /// Fetch the type representing a 32-bit floating point value.
546 Type *getFloatTy() {
547 return Type::getFloatTy(Context);
548 }
549
550 /// Fetch the type representing a 64-bit floating point value.
551 Type *getDoubleTy() {
552 return Type::getDoubleTy(Context);
553 }
554
555 /// Fetch the type representing void.
556 Type *getVoidTy() {
557 return Type::getVoidTy(Context);
558 }
559
560 /// Fetch the type representing a pointer to an 8-bit integer value.
561 PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
562 return Type::getInt8PtrTy(Context, AddrSpace);
563 }
564
565 /// Fetch the type representing a pointer to an integer value.
566 IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
567 return DL.getIntPtrType(Context, AddrSpace);
568 }
569
570 //===--------------------------------------------------------------------===//
571 // Intrinsic creation methods
572 //===--------------------------------------------------------------------===//
573
574 /// Create and insert a memset to the specified pointer and the
575 /// specified value.
576 ///
577 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
578 /// specified, it will be added to the instruction. Likewise with alias.scope
579 /// and noalias tags.
580 CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size,
581 MaybeAlign Align, bool isVolatile = false,
582 MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
583 MDNode *NoAliasTag = nullptr) {
584 return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
585 TBAATag, ScopeTag, NoAliasTag);
586 }
587
588 CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align,
589 bool isVolatile = false, MDNode *TBAATag = nullptr,
590 MDNode *ScopeTag = nullptr,
591 MDNode *NoAliasTag = nullptr);
592
593 /// Create and insert an element unordered-atomic memset of the region of
594 /// memory starting at the given pointer to the given value.
595 ///
596 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
597 /// specified, it will be added to the instruction. Likewise with alias.scope
598 /// and noalias tags.
599 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
600 uint64_t Size, Align Alignment,
601 uint32_t ElementSize,
602 MDNode *TBAATag = nullptr,
603 MDNode *ScopeTag = nullptr,
604 MDNode *NoAliasTag = nullptr) {
605 return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size),
606 Align(Alignment), ElementSize,
607 TBAATag, ScopeTag, NoAliasTag);
608 }
609
610 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
611 Value *Size, Align Alignment,
612 uint32_t ElementSize,
613 MDNode *TBAATag = nullptr,
614 MDNode *ScopeTag = nullptr,
615 MDNode *NoAliasTag = nullptr);
616
617 /// Create and insert a memcpy between the specified pointers.
618 ///
619 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
620 /// specified, it will be added to the instruction. Likewise with alias.scope
621 /// and noalias tags.
622 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
623 MaybeAlign SrcAlign, uint64_t Size,
624 bool isVolatile = false, MDNode *TBAATag = nullptr,
625 MDNode *TBAAStructTag = nullptr,
626 MDNode *ScopeTag = nullptr,
627 MDNode *NoAliasTag = nullptr) {
628 return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
629 isVolatile, TBAATag, TBAAStructTag, ScopeTag,
630 NoAliasTag);
631 }
632
633 CallInst *CreateMemTransferInst(
634 Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
635 MaybeAlign SrcAlign, Value *Size, bool isVolatile = false,
636 MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
637 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
638
639 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
640 MaybeAlign SrcAlign, Value *Size,
641 bool isVolatile = false, MDNode *TBAATag = nullptr,
642 MDNode *TBAAStructTag = nullptr,
643 MDNode *ScopeTag = nullptr,
644 MDNode *NoAliasTag = nullptr) {
645 return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src,
646 SrcAlign, Size, isVolatile, TBAATag,
647 TBAAStructTag, ScopeTag, NoAliasTag);
648 }
649
650 CallInst *CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src,
651 MaybeAlign SrcAlign, Value *Size);
652
653 /// Create and insert an element unordered-atomic memcpy between the
654 /// specified pointers.
655 ///
656 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively.
657 ///
658 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
659 /// specified, it will be added to the instruction. Likewise with alias.scope
660 /// and noalias tags.
661 CallInst *CreateElementUnorderedAtomicMemCpy(
662 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
663 uint32_t ElementSize, MDNode *TBAATag = nullptr,
664 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
665 MDNode *NoAliasTag = nullptr);
666
667 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
668 MaybeAlign SrcAlign, uint64_t Size,
669 bool isVolatile = false, MDNode *TBAATag = nullptr,
670 MDNode *ScopeTag = nullptr,
671 MDNode *NoAliasTag = nullptr) {
672 return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
673 isVolatile, TBAATag, ScopeTag, NoAliasTag);
674 }
675
676 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
677 MaybeAlign SrcAlign, Value *Size,
678 bool isVolatile = false, MDNode *TBAATag = nullptr,
679 MDNode *ScopeTag = nullptr,
680 MDNode *NoAliasTag = nullptr);
681
682 /// \brief Create and insert an element unordered-atomic memmove between the
683 /// specified pointers.
684 ///
685 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers,
686 /// respectively.
687 ///
688 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
689 /// specified, it will be added to the instruction. Likewise with alias.scope
690 /// and noalias tags.
691 CallInst *CreateElementUnorderedAtomicMemMove(
692 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
693 uint32_t ElementSize, MDNode *TBAATag = nullptr,
694 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
695 MDNode *NoAliasTag = nullptr);
696
697 /// Create a vector fadd reduction intrinsic of the source vector.
698 /// The first parameter is a scalar accumulator value for ordered reductions.
699 CallInst *CreateFAddReduce(Value *Acc, Value *Src);
700
701 /// Create a vector fmul reduction intrinsic of the source vector.
702 /// The first parameter is a scalar accumulator value for ordered reductions.
703 CallInst *CreateFMulReduce(Value *Acc, Value *Src);
704
705 /// Create a vector int add reduction intrinsic of the source vector.
706 CallInst *CreateAddReduce(Value *Src);
707
708 /// Create a vector int mul reduction intrinsic of the source vector.
709 CallInst *CreateMulReduce(Value *Src);
710
711 /// Create a vector int AND reduction intrinsic of the source vector.
712 CallInst *CreateAndReduce(Value *Src);
713
714 /// Create a vector int OR reduction intrinsic of the source vector.
715 CallInst *CreateOrReduce(Value *Src);
716
717 /// Create a vector int XOR reduction intrinsic of the source vector.
718 CallInst *CreateXorReduce(Value *Src);
719
720 /// Create a vector integer max reduction intrinsic of the source
721 /// vector.
722 CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false);
723
724 /// Create a vector integer min reduction intrinsic of the source
725 /// vector.
726 CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false);
727
728 /// Create a vector float max reduction intrinsic of the source
729 /// vector.
730 CallInst *CreateFPMaxReduce(Value *Src);
731
732 /// Create a vector float min reduction intrinsic of the source
733 /// vector.
734 CallInst *CreateFPMinReduce(Value *Src);
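// Usage sketch for the reduction helpers (assumes an IRBuilder<> named
// Builder plus integer/FP vector values IntVec and FPVec; names are
// hypothetical). The fadd form takes a scalar start value for the ordered
// reduction:
//
//   Value *Sum  = Builder.CreateAddReduce(IntVec); // llvm.vector.reduce.add
//   Value *FAcc = ConstantFP::get(Builder.getFloatTy(), 0.0);
//   Value *FSum = Builder.CreateFAddReduce(FAcc, FPVec);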
735
736 /// Create a lifetime.start intrinsic.
737 ///
738 /// If the pointer isn't i8* it will be converted.
739 CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);
740
741 /// Create a lifetime.end intrinsic.
742 ///
743 /// If the pointer isn't i8* it will be converted.
744 CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
745
746 /// Create a call to invariant.start intrinsic.
747 ///
748 /// If the pointer isn't i8* it will be converted.
749 CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
750
751 /// Create a call to Masked Load intrinsic
752 CallInst *CreateMaskedLoad(Value *Ptr, Align Alignment, Value *Mask,
753 Value *PassThru = nullptr, const Twine &Name = "");
754
755 /// Create a call to Masked Store intrinsic
756 CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
757 Value *Mask);
758
759 /// Create a call to Masked Gather intrinsic
760 CallInst *CreateMaskedGather(Value *Ptrs, Align Alignment,
761 Value *Mask = nullptr, Value *PassThru = nullptr,
762 const Twine &Name = "");
763
764 /// Create a call to Masked Scatter intrinsic
765 CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
766 Value *Mask = nullptr);
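// Usage sketch for the masked memory helpers (assumes a pointer VecPtr to a
// <4 x i32> vector and a pass-through vector PassThru; names hypothetical).
// Lanes with a false mask bit read the corresponding PassThru element:
//
//   Value *Mask = Builder.CreateVectorSplat(4, Builder.getTrue());
//   CallInst *Ld = Builder.CreateMaskedLoad(VecPtr, Align(16), Mask, PassThru);
//   Builder.CreateMaskedStore(Ld, VecPtr, Align(16), Mask);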
767
768 /// Create an assume intrinsic call that allows the optimizer to
769 /// assume that the provided condition will be true.
770 ///
771 /// The optional argument \p OpBundles specifies operand bundles that are
772 /// added to the call instruction.
773 CallInst *CreateAssumption(Value *Cond,
774 ArrayRef<OperandBundleDef> OpBundles = llvm::None);
775
776 /// Create a llvm.experimental.noalias.scope.decl intrinsic call.
777 Instruction *CreateNoAliasScopeDeclaration(Value *Scope);
778 Instruction *CreateNoAliasScopeDeclaration(MDNode *ScopeTag) {
779 return CreateNoAliasScopeDeclaration(
780 MetadataAsValue::get(Context, ScopeTag));
781 }
782
783 /// Create a call to the experimental.gc.statepoint intrinsic to
784 /// start a new statepoint sequence.
785 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
786 Value *ActualCallee,
787 ArrayRef<Value *> CallArgs,
788 Optional<ArrayRef<Value *>> DeoptArgs,
789 ArrayRef<Value *> GCArgs,
790 const Twine &Name = "");
791
792 /// Create a call to the experimental.gc.statepoint intrinsic to
793 /// start a new statepoint sequence.
794 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
795 Value *ActualCallee, uint32_t Flags,
796 ArrayRef<Value *> CallArgs,
797 Optional<ArrayRef<Use>> TransitionArgs,
798 Optional<ArrayRef<Use>> DeoptArgs,
799 ArrayRef<Value *> GCArgs,
800 const Twine &Name = "");
801
802 /// Convenience function for the common case when CallArgs are filled
803 /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be
804 /// .get()'ed to get the Value pointer.
805 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
806 Value *ActualCallee, ArrayRef<Use> CallArgs,
807 Optional<ArrayRef<Value *>> DeoptArgs,
808 ArrayRef<Value *> GCArgs,
809 const Twine &Name = "");
810
811 /// Create an invoke to the experimental.gc.statepoint intrinsic to
812 /// start a new statepoint sequence.
813 InvokeInst *
814 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
815 Value *ActualInvokee, BasicBlock *NormalDest,
816 BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
817 Optional<ArrayRef<Value *>> DeoptArgs,
818 ArrayRef<Value *> GCArgs, const Twine &Name = "");
819
820 /// Create an invoke to the experimental.gc.statepoint intrinsic to
821 /// start a new statepoint sequence.
822 InvokeInst *CreateGCStatepointInvoke(
823 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
824 BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
825 ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
826 Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
827 const Twine &Name = "");
828
829 // Convenience function for the common case when CallArgs are filled in using
830 // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
831 // get the Value *.
832 InvokeInst *
833 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
834 Value *ActualInvokee, BasicBlock *NormalDest,
835 BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
836 Optional<ArrayRef<Value *>> DeoptArgs,
837 ArrayRef<Value *> GCArgs, const Twine &Name = "");
838
839 /// Create a call to the experimental.gc.result intrinsic to extract
840 /// the result from a call wrapped in a statepoint.
841 CallInst *CreateGCResult(Instruction *Statepoint,
842 Type *ResultType,
843 const Twine &Name = "");
844
845 /// Create a call to the experimental.gc.relocate intrinsics to
846 /// project the relocated value of one pointer from the statepoint.
847 CallInst *CreateGCRelocate(Instruction *Statepoint,
848 int BaseOffset,
849 int DerivedOffset,
850 Type *ResultType,
851 const Twine &Name = "");
852
853 /// Create a call to the experimental.gc.pointer.base intrinsic to get the
854 /// base pointer for the specified derived pointer.
855 CallInst *CreateGCGetPointerBase(Value *DerivedPtr, const Twine &Name = "");
856
857 /// Create a call to the experimental.gc.get.pointer.offset intrinsic to get
858 /// the offset of the specified derived pointer from its base.
859 CallInst *CreateGCGetPointerOffset(Value *DerivedPtr, const Twine &Name = "");
860
861 /// Create a call to llvm.vscale, multiplied by \p Scaling. The type of VScale
862 /// will be the same type as that of \p Scaling.
863 Value *CreateVScale(Constant *Scaling, const Twine &Name = "");
864
865 /// Creates a vector of type \p DstType with the linear sequence <0, 1, ...>
866 Value *CreateStepVector(Type *DstType, const Twine &Name = "");
867
868 /// Create a call to intrinsic \p ID with 1 operand which is mangled on its
869 /// type.
870 CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
871 Instruction *FMFSource = nullptr,
872 const Twine &Name = "");
873
874 /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
875 /// first type.
876 CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
877 Instruction *FMFSource = nullptr,
878 const Twine &Name = "");
879
880 /// Create a call to intrinsic \p ID with \p args, mangled using \p Types. If
881 /// \p FMFSource is provided, copy fast-math-flags from that instruction to
882 /// the intrinsic.
883 CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
884 ArrayRef<Value *> Args,
885 Instruction *FMFSource = nullptr,
886 const Twine &Name = "");
887
888 /// Create call to the minnum intrinsic.
889 CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
890 return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name);
891 }
892
893 /// Create call to the maxnum intrinsic.
894 CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
895 return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name);
896 }
897
898 /// Create call to the minimum intrinsic.
899 CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
900 return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
901 }
902
903 /// Create call to the maximum intrinsic.
904 CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
905 return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
906 }
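// Usage sketch (assumes float values X and Y). Note the semantic split:
// minnum/maxnum return the non-NaN operand when exactly one input is a quiet
// NaN, while minimum/maximum propagate NaN:
//
//   Value *M = Builder.CreateMinNum(X, Y); // llvm.minnum.f32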
907
908 /// Create a call to the experimental.vector.extract intrinsic.
909 CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx,
910 const Twine &Name = "") {
911 return CreateIntrinsic(Intrinsic::experimental_vector_extract,
912 {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr,
913 Name);
914 }
915
916 /// Create a call to the experimental.vector.insert intrinsic.
917 CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec,
918 Value *Idx, const Twine &Name = "") {
919 return CreateIntrinsic(Intrinsic::experimental_vector_insert,
920 {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx},
921 nullptr, Name);
922 }
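// Usage sketch: extracting a fixed-width subvector from a scalable vector
// (assumes a <vscale x 4 x float> value ScalableVec; names hypothetical).
// The index must be a constant multiple of the result's element count:
//
//   Type *FixedTy = FixedVectorType::get(Builder.getFloatTy(), 4);
//   CallInst *Sub = Builder.CreateExtractVector(FixedTy, ScalableVec,
//                                               Builder.getInt64(0));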
923
924private:
925 /// Create a call to a masked intrinsic with given Id.
926 CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
927 ArrayRef<Type *> OverloadedTypes,
928 const Twine &Name = "");
929
930 Value *getCastedInt8PtrValue(Value *Ptr);
931
932 //===--------------------------------------------------------------------===//
933 // Instruction creation methods: Terminators
934 //===--------------------------------------------------------------------===//
935
936private:
937 /// Helper to add branch weight and unpredictable metadata onto an
938 /// instruction.
939 /// \returns The annotated instruction.
940 template <typename InstTy>
941 InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
942 if (Weights)
943 I->setMetadata(LLVMContext::MD_prof, Weights);
944 if (Unpredictable)
945 I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
946 return I;
947 }
948
949public:
950 /// Create a 'ret void' instruction.
951 ReturnInst *CreateRetVoid() {
952 return Insert(ReturnInst::Create(Context));
953 }
954
955 /// Create a 'ret <val>' instruction.
956 ReturnInst *CreateRet(Value *V) {
957 return Insert(ReturnInst::Create(Context, V));
958 }
959
960 /// Create a sequence of N insertvalue instructions,
961 /// with one Value from the retVals array each, that build an aggregate
962 /// return value one value at a time, and a ret instruction to return
963 /// the resulting aggregate value.
964 ///
965 /// This is a convenience function for code that uses aggregate return values
966 /// as a vehicle for having multiple return values.
967 ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
968 Value *V = UndefValue::get(getCurrentFunctionReturnType());
969 for (unsigned i = 0; i != N; ++i)
970 V = CreateInsertValue(V, retVals[i], i, "mrv");
971 return Insert(ReturnInst::Create(Context, V));
972 }
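// Usage sketch (assumes the current function returns a two-element aggregate
// such as {i32, i32}, with values X and Y of the matching member types):
//
//   Value *RVs[] = {X, Y};
//   Builder.CreateAggregateRet(RVs, 2);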
973
974 /// Create an unconditional 'br label X' instruction.
975 BranchInst *CreateBr(BasicBlock *Dest) {
976 return Insert(BranchInst::Create(Dest));
977 }
978
979 /// Create a conditional 'br Cond, TrueDest, FalseDest'
980 /// instruction.
981 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
982 MDNode *BranchWeights = nullptr,
983 MDNode *Unpredictable = nullptr) {
984 return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
985 BranchWeights, Unpredictable));
986 }
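// Usage sketch: attaching profile weights built with MDBuilder (from
// llvm/IR/MDBuilder.h); assumes an i1 Cond and blocks LikelyBB/UnlikelyBB:
//
//   MDNode *W = MDBuilder(Builder.getContext()).createBranchWeights(95, 5);
//   Builder.CreateCondBr(Cond, LikelyBB, UnlikelyBB, W);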
987
988 /// Create a conditional 'br Cond, TrueDest, FalseDest'
989 /// instruction. Copy branch metadata if available.
990 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
991 Instruction *MDSrc) {
992 BranchInst *Br = BranchInst::Create(True, False, Cond);
993 if (MDSrc) {
994 unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
995 LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
996 Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4));
997 }
998 return Insert(Br);
999 }
1000
1001 /// Create a switch instruction with the specified value, default dest,
1002 /// and with a hint for the number of cases that will be added (for efficient
1003 /// allocation).
1004 SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
1005 MDNode *BranchWeights = nullptr,
1006 MDNode *Unpredictable = nullptr) {
1007 return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
1008 BranchWeights, Unpredictable));
1009 }
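// Usage sketch (assumes an i32 value V and blocks DefaultBB, BB0, BB1);
// cases are added on the returned instruction:
//
//   SwitchInst *SI = Builder.CreateSwitch(V, DefaultBB, /*NumCases=*/2);
//   SI->addCase(Builder.getInt32(0), BB0);
//   SI->addCase(Builder.getInt32(1), BB1);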
1010
1011 /// Create an indirect branch instruction with the specified address
1012 /// operand, with an optional hint for the number of destinations that will be
1013 /// added (for efficient allocation).
1014 IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
1015 return Insert(IndirectBrInst::Create(Addr, NumDests));
1016 }
1017
1018 /// Create an invoke instruction.
1019 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1020 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1021 ArrayRef<Value *> Args,
1022 ArrayRef<OperandBundleDef> OpBundles,
1023 const Twine &Name = "") {
1024 InvokeInst *II =
1025 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles);
1026 if (IsFPConstrained)
1027 setConstrainedFPCallAttr(II);
1028 return Insert(II, Name);
1029 }
1030 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1031 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1032 ArrayRef<Value *> Args = None,
1033 const Twine &Name = "") {
1034 InvokeInst *II =
1035 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args);
1036 if (IsFPConstrained)
1037 setConstrainedFPCallAttr(II);
1038 return Insert(II, Name);
1039 }
1040
1041 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1042 BasicBlock *UnwindDest, ArrayRef<Value *> Args,
1043 ArrayRef<OperandBundleDef> OpBundles,
1044 const Twine &Name = "") {
1045 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1046 NormalDest, UnwindDest, Args, OpBundles, Name);
1047 }
1048
1049 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1050 BasicBlock *UnwindDest,
1051 ArrayRef<Value *> Args = None,
1052 const Twine &Name = "") {
1053 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1054 NormalDest, UnwindDest, Args, Name);
1055 }
1056
1057 /// \brief Create a callbr instruction.
1058 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1059 BasicBlock *DefaultDest,
1060 ArrayRef<BasicBlock *> IndirectDests,
1061 ArrayRef<Value *> Args = None,
1062 const Twine &Name = "") {
1063 return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests,
1064 Args), Name);
1065 }
1066 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1067 BasicBlock *DefaultDest,
1068 ArrayRef<BasicBlock *> IndirectDests,
1069 ArrayRef<Value *> Args,
1070 ArrayRef<OperandBundleDef> OpBundles,
1071 const Twine &Name = "") {
1072 return Insert(
1073 CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
1074 OpBundles), Name);
1075 }
1076
1077 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1078 ArrayRef<BasicBlock *> IndirectDests,
1079 ArrayRef<Value *> Args = None,
1080 const Twine &Name = "") {
1081 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1082 DefaultDest, IndirectDests, Args, Name);
1083 }
1084 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1085 ArrayRef<BasicBlock *> IndirectDests,
1086 ArrayRef<Value *> Args,
1087 ArrayRef<OperandBundleDef> OpBundles,
1088 const Twine &Name = "") {
1089 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1090 DefaultDest, IndirectDests, Args, OpBundles, Name);
1091 }
1092
1093 ResumeInst *CreateResume(Value *Exn) {
1094 return Insert(ResumeInst::Create(Exn));
1095 }
1096
1097 CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
1098 BasicBlock *UnwindBB = nullptr) {
1099 return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
1100 }
1101
1102 CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
1103 unsigned NumHandlers,
1104 const Twine &Name = "") {
1105 return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
1106 Name);
1107 }
1108
1109 CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
1110 const Twine &Name = "") {
1111 return Insert(CatchPadInst::Create(ParentPad, Args), Name);
1112 }
1113
1114 CleanupPadInst *CreateCleanupPad(Value *ParentPad,
1115 ArrayRef<Value *> Args = None,
1116 const Twine &Name = "") {
1117 return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
1118 }
1119
1120 CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
1121 return Insert(CatchReturnInst::Create(CatchPad, BB));
1122 }
1123
1124 UnreachableInst *CreateUnreachable() {
1125 return Insert(new UnreachableInst(Context));
1126 }
1127
1128 //===--------------------------------------------------------------------===//
1129 // Instruction creation methods: Binary Operators
1130 //===--------------------------------------------------------------------===//
1131private:
1132 BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
1133 Value *LHS, Value *RHS,
1134 const Twine &Name,
1135 bool HasNUW, bool HasNSW) {
1136 BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
1137 if (HasNUW) BO->setHasNoUnsignedWrap();
1138 if (HasNSW) BO->setHasNoSignedWrap();
1139 return BO;
1140 }
1141
1142 Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
1143 FastMathFlags FMF) const {
1144 if (!FPMD)
1145 FPMD = DefaultFPMathTag;
1146 if (FPMD)
1147 I->setMetadata(LLVMContext::MD_fpmath, FPMD);
1148 I->setFastMathFlags(FMF);
1149 return I;
1150 }
1151
1152 Value *foldConstant(Instruction::BinaryOps Opc, Value *L,
1153 Value *R, const Twine &Name) const {
1154 auto *LC = dyn_cast<Constant>(L);
1155 auto *RC = dyn_cast<Constant>(R);
1156 return (LC && RC) ? Insert(Folder.CreateBinOp(Opc, LC, RC), Name) : nullptr;
1157 }
1158
1159 Value *getConstrainedFPRounding(Optional<RoundingMode> Rounding) {
1160 RoundingMode UseRounding = DefaultConstrainedRounding;
1161
1162 if (Rounding.hasValue())
1163 UseRounding = Rounding.getValue();
1164
1165 Optional<StringRef> RoundingStr = RoundingModeToStr(UseRounding);
1166 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
1167 auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue());
1168
1169 return MetadataAsValue::get(Context, RoundingMDS);
1170 }
1171
1172 Value *getConstrainedFPExcept(Optional<fp::ExceptionBehavior> Except) {
1173 fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept;
1174
1175 if (Except.hasValue())
1176 UseExcept = Except.getValue();
1177
1178 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(UseExcept);
1179 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
1180 auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue());
1181
1182 return MetadataAsValue::get(Context, ExceptMDS);
1183 }
1184
1185 Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) {
1186 assert(CmpInst::isFPPredicate(Predicate) &&
1187 Predicate != CmpInst::FCMP_FALSE &&
1188 Predicate != CmpInst::FCMP_TRUE &&
1189 "Invalid constrained FP comparison predicate!");
1190
1191 StringRef PredicateStr = CmpInst::getPredicateName(Predicate);
1192 auto *PredicateMDS = MDString::get(Context, PredicateStr);
1193
1194 return MetadataAsValue::get(Context, PredicateMDS);
1195 }
1196
1197public:
1198 Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
1199 bool HasNUW = false, bool HasNSW = false) {
1200 if (auto *LC = dyn_cast<Constant>(LHS))
1201 if (auto *RC = dyn_cast<Constant>(RHS))
1202 return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
1203 return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
1204 HasNUW, HasNSW);
1205 }
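// Usage sketch (assumes i32 values A and B). With two Constant operands the
// folder returns a folded constant and nothing is inserted:
//
//   Value *S = Builder.CreateAdd(A, B, "sum", /*HasNUW=*/false,
//                                /*HasNSW=*/true); // emits 'add nsw i32'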
1206
1207 Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1208 return CreateAdd(LHS, RHS, Name, false, true);
1209 }
1210
1211 Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1212 return CreateAdd(LHS, RHS, Name, true, false);
1213 }
1214
1215 Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
1216 bool HasNUW = false, bool HasNSW = false) {
1217 if (auto *LC = dyn_cast<Constant>(LHS))
1218 if (auto *RC = dyn_cast<Constant>(RHS))
1219 return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
1220 return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
1221 HasNUW, HasNSW);
1222 }
1223
1224 Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1225 return CreateSub(LHS, RHS, Name, false, true);
1226 }
1227
1228 Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1229 return CreateSub(LHS, RHS, Name, true, false);
1230 }
1231
1232 Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
1233 bool HasNUW = false, bool HasNSW = false) {
1234 if (auto *LC = dyn_cast<Constant>(LHS))
1235 if (auto *RC = dyn_cast<Constant>(RHS))
1236 return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
1237 return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
1238 HasNUW, HasNSW);
1239 }
1240
1241 Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1242 return CreateMul(LHS, RHS, Name, false, true);
1243 }
1244
1245 Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1246 return CreateMul(LHS, RHS, Name, true, false);
1247 }
1248
1249 Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1250 bool isExact = false) {
1251 if (auto *LC = dyn_cast<Constant>(LHS))
1252 if (auto *RC = dyn_cast<Constant>(RHS))
1253 return Insert(Folder.CreateUDiv(LC, RC, isExact), Name);
1254 if (!isExact)
1255 return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
1256 return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
1257 }
1258
1259 Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1260 return CreateUDiv(LHS, RHS, Name, true);
1261 }
1262
1263 Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1264 bool isExact = false) {
1265 if (auto *LC = dyn_cast<Constant>(LHS))
1266 if (auto *RC = dyn_cast<Constant>(RHS))
1267 return Insert(Folder.CreateSDiv(LC, RC, isExact), Name);
1268 if (!isExact)
1269 return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
1270 return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
1271 }
1272
1273 Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1274 return CreateSDiv(LHS, RHS, Name, true);
1275 }
1276
1277 Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
1278 if (Value *V = foldConstant(Instruction::URem, LHS, RHS, Name)) return V;
1279 return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
1280 }
1281
1282 Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
1283 if (Value *V = foldConstant(Instruction::SRem, LHS, RHS, Name)) return V;
1284 return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
1285 }
1286
1287 Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
1288 bool HasNUW = false, bool HasNSW = false) {
1289 if (auto *LC = dyn_cast<Constant>(LHS))
1290 if (auto *RC = dyn_cast<Constant>(RHS))
1291 return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
1292 return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
1293 HasNUW, HasNSW);
1294 }
1295
1296 Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
1297 bool HasNUW = false, bool HasNSW = false) {
1298 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1299 HasNUW, HasNSW);
1300 }
1301
1302 Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
1303 bool HasNUW = false, bool HasNSW = false) {
1304 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1305 HasNUW, HasNSW);
1306 }
1307
1308 Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
1309 bool isExact = false) {
1310 if (auto *LC = dyn_cast<Constant>(LHS))
1311 if (auto *RC = dyn_cast<Constant>(RHS))
1312 return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
1313 if (!isExact)
1314 return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
1315 return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
1316 }
1317
1318 Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1319 bool isExact = false) {
1320 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1321 }
1322
1323 Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1324 bool isExact = false) {
1325 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1326 }
1327
1328 Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
1329 bool isExact = false) {
1330 if (auto *LC = dyn_cast<Constant>(LHS))
1331 if (auto *RC = dyn_cast<Constant>(RHS))
1332 return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
1333 if (!isExact)
1334 return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
1335 return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
1336 }
1337
1338 Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1339 bool isExact = false) {
1340 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1341 }
1342
1343 Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1344 bool isExact = false) {
1345 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1346 }
1347
1348 Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
1349 if (auto *RC = dyn_cast<Constant>(RHS)) {
1350 if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne())
1351 return LHS; // LHS & -1 -> LHS
1352 if (auto *LC = dyn_cast<Constant>(LHS))
1353 return Insert(Folder.CreateAnd(LC, RC), Name);
1354 }
1355 return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
1356 }
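// Usage sketch: an all-ones right-hand side folds away entirely, so this
// returns X itself rather than inserting an instruction (X is an i32 here):
//
//   Value *Same = Builder.CreateAnd(X, Builder.getInt32(~0u));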
1357
1358 Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1359 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1360 }
1361
1362 Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1363 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1364 }
1365
1366 Value *CreateAnd(ArrayRef<Value*> Ops) {
1367 assert(!Ops.empty());
1368 Value *Accum = Ops[0];
1369 for (unsigned i = 1; i < Ops.size(); i++)
1370 Accum = CreateAnd(Accum, Ops[i]);
1371 return Accum;
1372 }
1373
1374 Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
1375 if (auto *RC = dyn_cast<Constant>(RHS)) {
1376 if (RC->isNullValue())
1377 return LHS; // LHS | 0 -> LHS
1378 if (auto *LC = dyn_cast<Constant>(LHS))
1379 return Insert(Folder.CreateOr(LC, RC), Name);
1380 }
1381 return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
1382 }
1383
1384 Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1385 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1386 }
1387
1388 Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1389 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1390 }
1391
1392 Value *CreateOr(ArrayRef<Value*> Ops) {
1393 assert(!Ops.empty());
1394 Value *Accum = Ops[0];
1395 for (unsigned i = 1; i < Ops.size(); i++)
1396 Accum = CreateOr(Accum, Ops[i]);
1397 return Accum;
1398 }
1399
1400 Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
1401 if (Value *V = foldConstant(Instruction::Xor, LHS, RHS, Name)) return V;
1402 return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
1403 }
1404
1405 Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1406 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1407 }
1408
1409 Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1410 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1411 }
1412
1413 Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
1414 MDNode *FPMD = nullptr) {
1415 if (IsFPConstrained)
1416 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1417 L, R, nullptr, Name, FPMD);
1418
1419 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1420 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
1421 return Insert(I, Name);
1422 }
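// Usage sketch: fast-math flags set on the builder apply to subsequently
// created FP operations (assumes float values A and B):
//
//   FastMathFlags FMF;
//   FMF.setFast();
//   Builder.setFastMathFlags(FMF);
//   Value *Sum = Builder.CreateFAdd(A, B); // emits 'fadd fast float'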
1423
1424 /// Copy fast-math-flags from an instruction rather than using the builder's
1425 /// default FMF.
1426 Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
1427 const Twine &Name = "") {
1428 if (IsFPConstrained)
1429 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1430 L, R, FMFSource, Name);
1431
1432 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1433 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr,
1434 FMFSource->getFastMathFlags());
1435 return Insert(I, Name);
1436 }
1437
1438 Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
1439 MDNode *FPMD = nullptr) {
1440 if (IsFPConstrained)
1441 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1442 L, R, nullptr, Name, FPMD);
1443
1444 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1445 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
1446 return Insert(I, Name);
1447 }
1448
1449 /// Copy fast-math-flags from an instruction rather than using the builder's
1450 /// default FMF.
1451 Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
1452 const Twine &Name = "") {
1453 if (IsFPConstrained)
1454 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1455 L, R, FMFSource, Name);
1456
1457 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1458 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr,
1459 FMFSource->getFastMathFlags());
1460 return Insert(I, Name);
1461 }
1462
1463 Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
1464 MDNode *FPMD = nullptr) {
1465 if (IsFPConstrained)
1466 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1467 L, R, nullptr, Name, FPMD);
1468
1469 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1470 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
1471 return Insert(I, Name);
1472 }
1473
1474 /// Copy fast-math-flags from an instruction rather than using the builder's
1475 /// default FMF.
1476 Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
1477 const Twine &Name = "") {
1478 if (IsFPConstrained)
1479 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1480 L, R, FMFSource, Name);
1481
1482 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1483 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr,
1484 FMFSource->getFastMathFlags());
1485 return Insert(I, Name);
1486 }
1487
1488 Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
1489 MDNode *FPMD = nullptr) {
1490 if (IsFPConstrained)
1491 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1492 L, R, nullptr, Name, FPMD);
1493
1494 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1495 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
1496 return Insert(I, Name);
1497 }
1498
1499 /// Copy fast-math-flags from an instruction rather than using the builder's
1500 /// default FMF.
1501 Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
1502 const Twine &Name = "") {
1503 if (IsFPConstrained)
1504 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1505 L, R, FMFSource, Name);
1506
1507 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1508 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr,
1509 FMFSource->getFastMathFlags());
1510 return Insert(I, Name);
1511 }
1512
1513 Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
1514 MDNode *FPMD = nullptr) {
1515 if (IsFPConstrained)
1516 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1517 L, R, nullptr, Name, FPMD);
1518
1519 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1520 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
1521 return Insert(I, Name);
1522 }
1523
1524 /// Copy fast-math-flags from an instruction rather than using the builder's
1525 /// default FMF.
1526 Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
1527 const Twine &Name = "") {
1528 if (IsFPConstrained)
1529 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1530 L, R, FMFSource, Name);
1531
1532 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1533 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr,
1534 FMFSource->getFastMathFlags());
1535 return Insert(I, Name);
1536 }
1537
1538 Value *CreateBinOp(Instruction::BinaryOps Opc,
1539 Value *LHS, Value *RHS, const Twine &Name = "",
1540 MDNode *FPMathTag = nullptr) {
1541 if (Value *V = foldConstant(Opc, LHS, RHS, Name)) return V;
1542 Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
1543 if (isa<FPMathOperator>(BinOp))
1544 setFPAttrs(BinOp, FPMathTag, FMF);
1545 return Insert(BinOp, Name);
1546 }
1547
1548 Value *CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name = "") {
1549 assert(Cond2->getType()->isIntOrIntVectorTy(1));
1550 return CreateSelect(Cond1, Cond2,
1551 ConstantInt::getNullValue(Cond2->getType()), Name);
1552 }
1553
1554 Value *CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name = "") {
1555 assert(Cond2->getType()->isIntOrIntVectorTy(1));
1556 return CreateSelect(Cond1, ConstantInt::getAllOnesValue(Cond2->getType()),
1557 Cond2, Name);
1558 }
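// Usage sketch (assumes i1 values A and B). This emits
// 'select i1 A, i1 B, i1 false', which, unlike 'and i1 A, B', does not
// propagate poison from B when A is false:
//
//   Value *L = Builder.CreateLogicalAnd(A, B);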
1559
1560 CallInst *CreateConstrainedFPBinOp(
1561 Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
1562 const Twine &Name = "", MDNode *FPMathTag = nullptr,
1563 Optional<RoundingMode> Rounding = None,
1564 Optional<fp::ExceptionBehavior> Except = None);
1565
1566 Value *CreateNeg(Value *V, const Twine &Name = "",
1567 bool HasNUW = false, bool HasNSW = false) {
1568 if (auto *VC = dyn_cast<Constant>(V))
1569 return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
1570 BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
1571 if (HasNUW) BO->setHasNoUnsignedWrap();
1572 if (HasNSW) BO->setHasNoSignedWrap();
1573 return BO;
1574 }
1575
1576 Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
1577 return CreateNeg(V, Name, false, true);
1578 }
1579
1580 Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
1581 return CreateNeg(V, Name, true, false);
1582 }
1583
1584 Value *CreateFNeg(Value *V, const Twine &Name = "",
1585 MDNode *FPMathTag = nullptr) {
1586 if (auto *VC = dyn_cast<Constant>(V))
1587 return Insert(Folder.CreateFNeg(VC), Name);
1588 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
1589 Name);
1590 }
1591
1592 /// Copy fast-math-flags from an instruction rather than using the builder's
1593 /// default FMF.
1594 Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
1595 const Twine &Name = "") {
1596 if (auto *VC = dyn_cast<Constant>(V))
1597 return Insert(Folder.CreateFNeg(VC), Name);
1598 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr,
1599 FMFSource->getFastMathFlags()),
1600 Name);
1601 }
1602
1603 Value *CreateNot(Value *V, const Twine &Name = "") {
1604 if (auto *VC = dyn_cast<Constant>(V))
1605 return Insert(Folder.CreateNot(VC), Name);
1606 return Insert(BinaryOperator::CreateNot(V), Name);
1607 }
1608
1609 Value *CreateUnOp(Instruction::UnaryOps Opc,
1610 Value *V, const Twine &Name = "",
1611 MDNode *FPMathTag = nullptr) {
1612 if (auto *VC = dyn_cast<Constant>(V))
1613 return Insert(Folder.CreateUnOp(Opc, VC), Name);
1614 Instruction *UnOp = UnaryOperator::Create(Opc, V);
1615 if (isa<FPMathOperator>(UnOp))
1616 setFPAttrs(UnOp, FPMathTag, FMF);
1617 return Insert(UnOp, Name);
1618 }
1619
1620 /// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
1621 /// The correct number of operands must be passed accordingly.
1622 Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
1623 const Twine &Name = "", MDNode *FPMathTag = nullptr);
1624
1625 //===--------------------------------------------------------------------===//
1626 // Instruction creation methods: Memory Instructions
1627 //===--------------------------------------------------------------------===//
1628
1629 AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
1630 Value *ArraySize = nullptr, const Twine &Name = "") {
1631 const DataLayout &DL = BB->getModule()->getDataLayout();
1632 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1633 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1634 }
1635
1636 AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
1637 const Twine &Name = "") {
1638 const DataLayout &DL = BB->getModule()->getDataLayout();
1639 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1640 unsigned AddrSpace = DL.getAllocaAddrSpace();
1641 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1642 }
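// Usage sketch: a stack slot in the module's alloca address space, aligned
// to the DataLayout's preferred alignment for the type:
//
//   AllocaInst *Tmp = Builder.CreateAlloca(Builder.getInt32Ty(), nullptr,
//                                          "tmp");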
1643
1644 /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
1645 /// converting the string to 'bool' for the isVolatile parameter.
1646 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
1647 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
1648 }
1649
1650 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
1651 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
1652 }
1653
1654 LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
1655 const Twine &Name = "") {
1656 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name);
1657 }
1658
1659 // Deprecated [opaque pointer types]
1660 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,
1661 const char *Name),
1662 "Use the version that explicitly specifies the "
1663 "loaded type instead") {
1664 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1665 }
1666
1667 // Deprecated [opaque pointer types]
1668 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,
1669 const Twine &Name = ""),
1670 "Use the version that explicitly specifies the "
1671 "loaded type instead") {
1672 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1673 }
1674
1675 // Deprecated [opaque pointer types]
1676 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,
1677 bool isVolatile,
1678 const Twine &Name = ""),
1679 "Use the version that explicitly specifies the "
1680 "loaded type instead") {
1681 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, isVolatile,
1682 Name);
1683 }
1684
1685 StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
1686 return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile);
1687 }
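// Usage sketch (assumes i32* values Ptr and Ptr2). The explicitly typed
// forms are preferred over the deprecated pointee-derived overloads below:
//
//   Value *V = Builder.CreateLoad(Builder.getInt32Ty(), Ptr, "v");
//   Builder.CreateStore(V, Ptr2);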
1688
1689 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1690 const char *Name) {
1691 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1692 }
1693
1694 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1695 const Twine &Name = "") {
1696 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1697 }
1698
1699 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1700 bool isVolatile, const Twine &Name = "") {
1701 if (!Align) {
1702 const DataLayout &DL = BB->getModule()->getDataLayout();
1703 Align = DL.getABITypeAlign(Ty);
1704 }
1705 return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name);
1706 }
1707
1708 // Deprecated [opaque pointer types]
1709 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1710 MaybeAlign Align,
1711 const char *Name),
1712 "Use the version that explicitly specifies the "
1713 "loaded type instead") {
1714 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1715 Align, Name);
1716 }
1717 // Deprecated [opaque pointer types]
1718 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1719 MaybeAlign Align,
1720 const Twine &Name = ""),
1721 "Use the version that explicitly specifies the "
1722 "loaded type instead") {
1723 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1724 Align, Name);
1725 }
1726 // Deprecated [opaque pointer types]
1727 LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1728 MaybeAlign Align,
1729 bool isVolatile,
1730 const Twine &Name = ""),
1731 "Use the version that explicitly specifies the "
1732 "loaded type instead") {
1733 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1734 Align, isVolatile, Name);
1735 }
1736
1737 StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align,
1738 bool isVolatile = false) {
1739 if (!Align) {
1740 const DataLayout &DL = BB->getModule()->getDataLayout();
1741 Align = DL.getABITypeAlign(Val->getType());
1742 }
1743 return Insert(new StoreInst(Val, Ptr, isVolatile, *Align));
1744 }
1745 FenceInst *CreateFence(AtomicOrdering Ordering,
1746 SyncScope::ID SSID = SyncScope::System,
1747 const Twine &Name = "") {
1748 return Insert(new FenceInst(Context, Ordering, SSID), Name);
1749 }
1750
1751 AtomicCmpXchgInst *
1752 CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align,
1753 AtomicOrdering SuccessOrdering,
1754 AtomicOrdering FailureOrdering,
1755 SyncScope::ID SSID = SyncScope::System) {
1756 if (!Align) {
1757 const DataLayout &DL = BB->getModule()->getDataLayout();
1758 Align = llvm::Align(DL.getTypeStoreSize(New->getType()));
1759 }
1760
1761 return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, *Align, SuccessOrdering,
1762 FailureOrdering, SSID));
1763 }
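// Usage sketch (assumes an i32* Ptr and i32 values Expected and Desired);
// cmpxchg yields an {i32, i1} pair of the loaded value and a success bit:
//
//   AtomicCmpXchgInst *CX = Builder.CreateAtomicCmpXchg(
//       Ptr, Expected, Desired, MaybeAlign(),
//       AtomicOrdering::SequentiallyConsistent,
//       AtomicOrdering::SequentiallyConsistent);
//   Value *Loaded  = Builder.CreateExtractValue(CX, 0);
//   Value *Success = Builder.CreateExtractValue(CX, 1);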
1764
1765 AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr,
1766 Value *Val, MaybeAlign Align,
1767 AtomicOrdering Ordering,
1768 SyncScope::ID SSID = SyncScope::System) {
1769 if (!Align) {
1770 const DataLayout &DL = BB->getModule()->getDataLayout();
1771 Align = llvm::Align(DL.getTypeStoreSize(Val->getType()));
1772 }
1773
1774 return Insert(new AtomicRMWInst(Op, Ptr, Val, *Align, Ordering, SSID));
1775 }
1776
1777 Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1778 const Twine &Name = "") {
1779 return CreateGEP(nullptr, Ptr, IdxList, Name);
1780 }
1781
1782 Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1783 const Twine &Name = "") {
1784 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1785 // Every index must be constant.
1786 size_t i, e;
1787 for (i = 0, e = IdxList.size(); i != e; ++i)
1788 if (!isa<Constant>(IdxList[i]))
1789 break;
1790 if (i == e)
1791 return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name);
1792 }
1793 return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
1794 }
1795
1796 Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1797 const Twine &Name = "") {
1798 return CreateInBoundsGEP(nullptr, Ptr, IdxList, Name);
1799 }
1800
1801 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1802 const Twine &Name = "") {
1803 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1804 // Every index must be constant.
1805 size_t i, e;
1806 for (i = 0, e = IdxList.size(); i != e; ++i)
1807 if (!isa<Constant>(IdxList[i]))
1808 break;
1809 if (i == e)
1810 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList),
1811 Name);
1812 }
1813 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
1814 }
1815
1816 Value *CreateGEP(Value *Ptr, Value *Idx, const Twine &Name = "") {
1817 return CreateGEP(nullptr, Ptr, Idx, Name);
1818 }
1819
1820 Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
1821 if (auto *PC = dyn_cast<Constant>(Ptr))
1822 if (auto *IC = dyn_cast<Constant>(Idx))
1823 return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
1824 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1825 }
1826
1827 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
1828 const Twine &Name = "") {
1829 if (auto *PC = dyn_cast<Constant>(Ptr))
1830 if (auto *IC = dyn_cast<Constant>(Idx))
1831 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
1832 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1833 }
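// Usage sketch: computing &Arr[0][5] with an explicit source element type
// (assumes Arr has type [10 x i32]*; names hypothetical):
//
//   Type *ArrTy = ArrayType::get(Builder.getInt32Ty(), 10);
//   Value *Elt = Builder.CreateInBoundsGEP(
//       ArrTy, Arr, {Builder.getInt64(0), Builder.getInt64(5)});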
1834
1835 Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0, const Twine &Name = "") {
1836 return CreateConstGEP1_32(nullptr, Ptr, Idx0, Name);
1837 }
1838
1839 Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1840 const Twine &Name = "") {
1841 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1842
1843 if (auto *PC = dyn_cast<Constant>(Ptr))
1844 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1845
1846 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1847 }
1848
1849 Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1850 const Twine &Name = "") {
1851 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1852
1853 if (auto *PC = dyn_cast<Constant>(Ptr))
1854 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1855
1856 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1857 }
1858
1859 Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
1860 const Twine &Name = "") {
1861 Value *Idxs[] = {
1862 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1863 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1864 };
1865
1866 if (auto *PC = dyn_cast<Constant>(Ptr))
1867 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1868
1869 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1870 }
1871
1872 Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
1873 unsigned Idx1, const Twine &Name = "") {
1874 Value *Idxs[] = {
1875 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1876 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1877 };
1878
1879 if (auto *PC = dyn_cast<Constant>(Ptr))
1880 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1881
1882 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1883 }
1884
1885 Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1886 const Twine &Name = "") {
1887 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1888
1889 if (auto *PC = dyn_cast<Constant>(Ptr))
1890 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1891
1892 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1893 }
1894
1895 Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0, const Twine &Name = "") {
1896 return CreateConstGEP1_64(nullptr, Ptr, Idx0, Name);
1897 }
1898
1899 Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1900 const Twine &Name = "") {
1901 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1902
1903 if (auto *PC = dyn_cast<Constant>(Ptr))
1904 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1905
1906 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1907 }
1908
1909 Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
1910 const Twine &Name = "") {
1911 return CreateConstInBoundsGEP1_64(nullptr, Ptr, Idx0, Name);
1912 }
1913
1914 Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1915 const Twine &Name = "") {
1916 Value *Idxs[] = {
1917 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1918 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1919 };
1920
1921 if (auto *PC = dyn_cast<Constant>(Ptr))
1922 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1923
1924 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1925 }
1926
1927 Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1928 const Twine &Name = "") {
1929 return CreateConstGEP2_64(nullptr, Ptr, Idx0, Idx1, Name);
1930 }
1931
1932 Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1933 uint64_t Idx1, const Twine &Name = "") {
1934 Value *Idxs[] = {
1935 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1936 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1937 };
1938
1939 if (auto *PC = dyn_cast<Constant>(Ptr))
1940 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1941
1942 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1943 }
1944
1945 Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1946 const Twine &Name = "") {
1947 return CreateConstInBoundsGEP2_64(nullptr, Ptr, Idx0, Idx1, Name);
1948 }
1949
1950 Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
1951 const Twine &Name = "") {
1952 return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
1953 }
1954
1955 Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine &Name = "") {
1956 return CreateConstInBoundsGEP2_32(nullptr, Ptr, 0, Idx, Name);
1957 }
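Note: the struct-GEP helpers above fold in the leading zero index for you. A hedged usage sketch (B, STy, and P are illustrative assumptions: B is an IRBuilder with a valid insertion point, STy some {i32, float} struct type, and P a value of type STy*):

  // Pointer to field #1, i.e. 'getelementptr inbounds STy, STy* P, i32 0, i32 1'.
  llvm::Value *FieldPtr = B.CreateStructGEP(STy, P, 1);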
1958
1959 /// Same as CreateGlobalString, but returns a pointer with "i8*" type
1960 /// instead of a pointer to an array of i8.
1961 ///
1962 /// If no module is given via \p M, it is taken from the insertion point
1963 /// basic block.
1964 Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
1965 unsigned AddressSpace = 0,
1966 Module *M = nullptr) {
1967 GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M);
1968 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
1969 Constant *Indices[] = {Zero, Zero};
1970 return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
1971 Indices);
1972 }
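Note: a hedged usage sketch of CreateGlobalStringPtr (the emitHello function and the puts wiring are illustrative assumptions, not part of this header; B must already be positioned inside a function of M):

  void emitHello(llvm::Module &M, llvm::IRBuilder<> &B) {
    llvm::FunctionCallee Puts = M.getOrInsertFunction(
        "puts", B.getInt32Ty(), llvm::Type::getInt8PtrTy(M.getContext()));
    // The result is an i8* into the new private global, not a [6 x i8]*.
    llvm::Constant *Str = B.CreateGlobalStringPtr("hello");
    B.CreateCall(Puts, {Str});
  }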
1973
1974 //===--------------------------------------------------------------------===//
1975 // Instruction creation methods: Cast/Conversion Operators
1976 //===--------------------------------------------------------------------===//
1977
1978 Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
1979 return CreateCast(Instruction::Trunc, V, DestTy, Name);
1980 }
1981
1982 Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
1983 return CreateCast(Instruction::ZExt, V, DestTy, Name);
1984 }
1985
1986 Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
1987 return CreateCast(Instruction::SExt, V, DestTy, Name);
1988 }
1989
1990 /// Create a ZExt or Trunc from the integer value V to DestTy. Return
1991 /// the value untouched if the type of V is already DestTy.
1992 Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
1993 const Twine &Name = "") {
1994 assert(V->getType()->isIntOrIntVectorTy() &&
1995        DestTy->isIntOrIntVectorTy() &&
1996        "Can only zero extend/truncate integers!");
1997 Type *VTy = V->getType();
1998 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
1999 return CreateZExt(V, DestTy, Name);
2000 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2001 return CreateTrunc(V, DestTy, Name);
2002 return V;
2003 }
2004
2005 /// Create a SExt or Trunc from the integer value V to DestTy. Return
2006 /// the value untouched if the type of V is already DestTy.
2007 Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
2008 const Twine &Name = "") {
2009 assert(V->getType()->isIntOrIntVectorTy() &&
2010        DestTy->isIntOrIntVectorTy() &&
2011        "Can only sign extend/truncate integers!");
2012 Type *VTy = V->getType();
2013 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2014 return CreateSExt(V, DestTy, Name);
2015 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2016 return CreateTrunc(V, DestTy, Name);
2017 return V;
2018 }
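Note: a minimal sketch of both resize helpers, assuming a builder B with a valid insertion point and a Value *V of type i64:

  llvm::Value *Narrow = B.CreateZExtOrTrunc(V, B.getInt32Ty());      // i64 -> i32: emits trunc
  llvm::Value *Same   = B.CreateSExtOrTrunc(Narrow, B.getInt32Ty()); // i32 -> i32: returns Narrow itself
  llvm::Value *Wide   = B.CreateSExtOrTrunc(Same, B.getInt64Ty());   // i32 -> i64: emits sext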
2019
2020 Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
2021 if (IsFPConstrained)
2022 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
2023 V, DestTy, nullptr, Name);
2024 return CreateCast(Instruction::FPToUI, V, DestTy, Name);
2025 }
2026
2027 Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
2028 if (IsFPConstrained)
2029 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
2030 V, DestTy, nullptr, Name);
2031 return CreateCast(Instruction::FPToSI, V, DestTy, Name);
2032 }
2033
2034 Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = "") {
2035 if (IsFPConstrained)
2036 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp,
2037 V, DestTy, nullptr, Name);
2038 return CreateCast(Instruction::UIToFP, V, DestTy, Name);
2039 }
2040
2041 Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = "") {
2042 if (IsFPConstrained)
2043 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp,
2044 V, DestTy, nullptr, Name);
2045 return CreateCast(Instruction::SIToFP, V, DestTy, Name);
2046 }
2047
2048 Value *CreateFPTrunc(Value *V, Type *DestTy,
2049 const Twine &Name = "") {
2050 if (IsFPConstrained)
2051 return CreateConstrainedFPCast(
2052 Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
2053 Name);
2054 return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
2055 }
2056
2057 Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
2058 if (IsFPConstrained)
2059 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
2060 V, DestTy, nullptr, Name);
2061 return CreateCast(Instruction::FPExt, V, DestTy, Name);
2062 }
2063
2064 Value *CreatePtrToInt(Value *V, Type *DestTy,
2065 const Twine &Name = "") {
2066 return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
2067 }
2068
2069 Value *CreateIntToPtr(Value *V, Type *DestTy,
2070 const Twine &Name = "") {
2071 return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
2072 }
2073
2074 Value *CreateBitCast(Value *V, Type *DestTy,
2075 const Twine &Name = "") {
2076 return CreateCast(Instruction::BitCast, V, DestTy, Name);
2077 }
2078
2079 Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
2080 const Twine &Name = "") {
2081 return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
2082 }
2083
2084 Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
2085 const Twine &Name = "") {
2086 if (V->getType() == DestTy)
2087 return V;
2088 if (auto *VC = dyn_cast<Constant>(V))
2089 return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
2090 return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
2091 }
2092
2093 Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
2094 const Twine &Name = "") {
2095 if (V->getType() == DestTy)
2096 return V;
2097 if (auto *VC = dyn_cast<Constant>(V))
2098 return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
2099 return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
2100 }
2101
2102 Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
2103 const Twine &Name = "") {
2104 if (V->getType() == DestTy)
2105 return V;
2106 if (auto *VC = dyn_cast<Constant>(V))
2107 return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
2108 return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
2109 }
2110
2111 Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
2112 const Twine &Name = "") {
2113 if (V->getType() == DestTy)
2114 return V;
2115 if (auto *VC = dyn_cast<Constant>(V))
2116 return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
2117 return Insert(CastInst::Create(Op, V, DestTy), Name);
2118 }
2119
2120 Value *CreatePointerCast(Value *V, Type *DestTy,
2121 const Twine &Name = "") {
2122 if (V->getType() == DestTy)
2123 return V;
2124 if (auto *VC = dyn_cast<Constant>(V))
2125 return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
2126 return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
2127 }
2128
2129 Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
2130 const Twine &Name = "") {
2131 if (V->getType() == DestTy)
2132 return V;
2133
2134 if (auto *VC = dyn_cast<Constant>(V)) {
2135 return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
2136 Name);
2137 }
2138
2139 return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
2140 Name);
2141 }
2142
2143 Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
2144 const Twine &Name = "") {
2145 if (V->getType() == DestTy)
2146 return V;
2147 if (auto *VC = dyn_cast<Constant>(V))
2148 return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
2149 return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
2150 }
2151
2152 Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
2153 const Twine &Name = "") {
2154 if (V->getType() == DestTy)
2155 return V;
2156 if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
2157 return CreatePtrToInt(V, DestTy, Name);
2158 if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
2159 return CreateIntToPtr(V, DestTy, Name);
2160
2161 return CreateBitCast(V, DestTy, Name);
2162 }
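Note: CreateBitOrPointerCast picks the cast opcode from the operand and destination types. A hedged sketch (assuming builder B and a Value *P of pointer type, on a target with 64-bit pointers):

  llvm::Value *AsInt = B.CreateBitOrPointerCast(P, B.getInt64Ty());   // emits ptrtoint
  llvm::Value *Back  = B.CreateBitOrPointerCast(AsInt, P->getType()); // emits inttoptr
  llvm::Value *AsFP  = B.CreateBitOrPointerCast(B.getInt32(7), B.getFloatTy()); // plain bitcast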
2163
2164 Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
2165 if (V->getType() == DestTy)
2166 return V;
2167 if (auto *VC = dyn_cast<Constant>(V))
2168 return Insert(Folder.CreateFPCast(VC, DestTy), Name);
2169 return Insert(CastInst::CreateFPCast(V, DestTy), Name);
2170 }
2171
2172 CallInst *CreateConstrainedFPCast(
2173 Intrinsic::ID ID, Value *V, Type *DestTy,
2174 Instruction *FMFSource = nullptr, const Twine &Name = "",
2175 MDNode *FPMathTag = nullptr,
2176 Optional<RoundingMode> Rounding = None,
2177 Optional<fp::ExceptionBehavior> Except = None);
2178
2179 // Provided so that 'CreateIntCast(Ptr, Ptr, "...")' is a compile-time
2180 // error instead of silently converting the string literal to bool for
2181 // the isSigned parameter.
2182 Value *CreateIntCast(Value *, Type *, const char *) = delete;
2183
2184 //===--------------------------------------------------------------------===//
2185 // Instruction creation methods: Compare Instructions
2186 //===--------------------------------------------------------------------===//
2187
2188 Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
2189 return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
2190 }
2191
2192 Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
2193 return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
2194 }
2195
2196 Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2197 return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
2198 }
2199
2200 Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2201 return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
2202 }
2203
2204 Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
2205 return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
2206 }
2207
2208 Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
2209 return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
2210 }
2211
2212 Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2213 return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
2214 }
2215
2216 Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2217 return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
2218 }
2219
2220 Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
2221 return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
2222 }
2223
2224 Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
2225 return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
2226 }
2227
2228 Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2229 MDNode *FPMathTag = nullptr) {
2230 return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
2231 }
2232
2233 Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
2234 MDNode *FPMathTag = nullptr) {
2235 return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
2236 }
2237
2238 Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
2239 MDNode *FPMathTag = nullptr) {
2240 return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
2241 }
2242
2243 Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
2244 MDNode *FPMathTag = nullptr) {
2245 return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
2246 }
2247
2248 Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
2249 MDNode *FPMathTag = nullptr) {
2250 return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
2251 }
2252
2253 Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
2254 MDNode *FPMathTag = nullptr) {
2255 return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
2256 }
2257
2258 Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
2259 MDNode *FPMathTag = nullptr) {
2260 return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
2261 }
2262
2263 Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
2264 MDNode *FPMathTag = nullptr) {
2265 return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
2266 }
2267
2268 Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2269 MDNode *FPMathTag = nullptr) {
2270 return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
2271 }
2272
2273 Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
2274 MDNode *FPMathTag = nullptr) {
2275 return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
2276 }
2277
2278 Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
2279 MDNode *FPMathTag = nullptr) {
2280 return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
2281 }
2282
2283 Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
2284 MDNode *FPMathTag = nullptr) {
2285 return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
2286 }
2287
2288 Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
2289 MDNode *FPMathTag = nullptr) {
2290 return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
2291 }
2292
2293 Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
2294 MDNode *FPMathTag = nullptr) {
2295 return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
2296 }
2297
2298 Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2299 const Twine &Name = "") {
2300 if (auto *LC = dyn_cast<Constant>(LHS))
2301 if (auto *RC = dyn_cast<Constant>(RHS))
2302 return Insert(Folder.CreateICmp(P, LC, RC), Name);
2303 return Insert(new ICmpInst(P, LHS, RHS), Name);
2304 }
2305
2306 // Create a quiet floating-point comparison (i.e. one that raises an FP
2307 // exception only in the case where an input is a signaling NaN).
2308 // Note that this differs from CreateFCmpS only if IsFPConstrained is true.
2309 Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2310 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2311 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false);
2312 }
2313
2314 Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
2315 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2316 return CmpInst::isFPPredicate(Pred)
2317 ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag)
2318 : CreateICmp(Pred, LHS, RHS, Name);
2319 }
2320
2321 // Create a signaling floating-point comparison (i.e. one that raises an FP
2322 // exception whenever an input is any NaN, signaling or quiet).
2323 // Note that this differs from CreateFCmp only if IsFPConstrained is true.
2324 Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
2325 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2326 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true);
2327 }
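Note: the quiet/signaling distinction above only becomes observable under strict FP. A hedged sketch (B is an assumed builder; L and R are assumed float values):

  llvm::Value *Quiet = B.CreateFCmp(llvm::CmpInst::FCMP_OLT, L, R);   // ordinary fcmp
  B.setIsFPConstrained(true);
  // Now lowers to llvm.experimental.constrained.fcmps, which raises an FP
  // exception on any NaN input, signaling or quiet.
  llvm::Value *Strict = B.CreateFCmpS(llvm::CmpInst::FCMP_OLT, L, R);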
2328
2329private:
2330 // Helper routine to create either a signaling or a quiet FP comparison.
2331 Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS,
2332 const Twine &Name, MDNode *FPMathTag,
2333 bool IsSignaling);
2334
2335public:
2336 CallInst *CreateConstrainedFPCmp(
2337 Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
2338 const Twine &Name = "", Optional<fp::ExceptionBehavior> Except = None);
2339
2340 //===--------------------------------------------------------------------===//
2341 // Instruction creation methods: Other Instructions
2342 //===--------------------------------------------------------------------===//
2343
2344 PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
2345 const Twine &Name = "") {
2346 PHINode *Phi = PHINode::Create(Ty, NumReservedValues);
2347 if (isa<FPMathOperator>(Phi))
2348 setFPAttrs(Phi, nullptr /* MDNode* */, FMF);
2349 return Insert(Phi, Name);
2350 }
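Note: NumReservedValues is only a capacity hint; incoming values are attached afterwards. A hedged sketch (assumes B is positioned at the top of a join block whose predecessors ThenBB and ElseBB produce i32 values ThenV and ElseV):

  llvm::PHINode *Phi = B.CreatePHI(B.getInt32Ty(), /*NumReservedValues=*/2);
  Phi->addIncoming(ThenV, ThenBB);
  Phi->addIncoming(ElseV, ElseBB);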
2351
2352 CallInst *CreateCall(FunctionType *FTy, Value *Callee,
2353 ArrayRef<Value *> Args = None, const Twine &Name = "",
2354 MDNode *FPMathTag = nullptr) {
2355 CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
2356 if (IsFPConstrained)
2357 setConstrainedFPCallAttr(CI);
2358 if (isa<FPMathOperator>(CI))
2359 setFPAttrs(CI, FPMathTag, FMF);
2360 return Insert(CI, Name);
2361 }
2362
2363 CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
2364 ArrayRef<OperandBundleDef> OpBundles,
2365 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2366 CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
2367 if (IsFPConstrained)
2368 setConstrainedFPCallAttr(CI);
2369 if (isa<FPMathOperator>(CI))
2370 setFPAttrs(CI, FPMathTag, FMF);
2371 return Insert(CI, Name);
2372 }
2373
2374 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = None,
2375 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2376 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
2377 FPMathTag);
2378 }
2379
2380 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
2381 ArrayRef<OperandBundleDef> OpBundles,
2382 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2383 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args,
2384 OpBundles, Name, FPMathTag);
2385 }
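Note: the FunctionCallee overloads simply forward the callee's function type and pointer to the primary overloads. A hedged sketch (assumes a Module M and builder B; the name "my_hook" is a made-up example):

  llvm::FunctionCallee Hook = M.getOrInsertFunction(
      "my_hook", B.getVoidTy(), B.getInt32Ty());
  B.CreateCall(Hook, {B.getInt32(42)});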
2386
2387 CallInst *CreateConstrainedFPCall(
2388 Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
2389 Optional<RoundingMode> Rounding = None,
2390 Optional<fp::ExceptionBehavior> Except = None);
2391
2392 Value *CreateSelect(Value *C, Value *True, Value *False,
2393 const Twine &Name = "", Instruction *MDFrom = nullptr);
2394
2395 VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
2396 return Insert(new VAArgInst(List, Ty), Name);
2397 }
2398
2399 Value *CreateExtractElement(Value *Vec, Value *Idx,
2400 const Twine &Name = "") {
2401 if (auto *VC = dyn_cast<Constant>(Vec))
2402 if (auto *IC = dyn_cast<Constant>(Idx))
2403 return Insert(Folder.CreateExtractElement(VC, IC), Name);
2404 return Insert(ExtractElementInst::Create(Vec, Idx), Name);
2405 }
2406
2407 Value *CreateExtractElement(Value *Vec, uint64_t Idx,
2408 const Twine &Name = "") {
2409 return CreateExtractElement(Vec, getInt64(Idx), Name);
2410 }
2411
2412 Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
2413 const Twine &Name = "") {
2414 if (auto *VC = dyn_cast<Constant>(Vec))
2415 if (auto *NC = dyn_cast<Constant>(NewElt))
2416 if (auto *IC = dyn_cast<Constant>(Idx))
2417 return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
2418 return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
2419 }
2420
2421 Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
2422 const Twine &Name = "") {
2423 return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
2424 }
2425
2426 Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
2427 const Twine &Name = "") {
2428 SmallVector<int, 16> IntMask;
2429 ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask);
2430 return CreateShuffleVector(V1, V2, IntMask, Name);
2431 }
2432
2433 LLVM_ATTRIBUTE_DEPRECATED(Value *CreateShuffleVector(Value *V1, Value *V2,
2434                                                      ArrayRef<uint32_t> Mask,
2435                                                      const Twine &Name = ""),
2436                           "Pass indices as 'int' instead") {
2437 SmallVector<int, 16> IntMask;
2438 IntMask.assign(Mask.begin(), Mask.end());
2439 return CreateShuffleVector(V1, V2, IntMask, Name);
2440 }
2441
2442 /// See class ShuffleVectorInst for a description of the mask representation.
2443 Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask,
2444 const Twine &Name = "") {
2445 if (auto *V1C = dyn_cast<Constant>(V1))
2446 if (auto *V2C = dyn_cast<Constant>(V2))
2447 return Insert(Folder.CreateShuffleVector(V1C, V2C, Mask), Name);
2448 return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
2449 }
2450
2451 /// Create a unary shuffle. The second vector operand of the IR instruction
2452 /// is poison.
2453 Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask,
2454 const Twine &Name = "") {
2455 return CreateShuffleVector(V, PoisonValue::get(V->getType()), Mask, Name);
2456 }
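Note: a hedged sketch of the mask-based shuffles (Vec is an assumed <4 x i32> value):

  llvm::Value *Rev = B.CreateShuffleVector(Vec, {3, 2, 1, 0});      // unary form: V2 is poison
  llvm::Value *Mix = B.CreateShuffleVector(Vec, Vec, {0, 0, 4, 4}); // indices 4..7 select from V2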
2457
2458 Value *CreateExtractValue(Value *Agg,
2459 ArrayRef<unsigned> Idxs,
2460 const Twine &Name = "") {
2461 if (auto *AggC = dyn_cast<Constant>(Agg))
2462 return Insert(Folder.CreateExtractValue(AggC, Idxs), Name);
2463 return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
2464 }
2465
2466 Value *CreateInsertValue(Value *Agg, Value *Val,
2467 ArrayRef<unsigned> Idxs,
2468 const Twine &Name = "") {
2469 if (auto *AggC = dyn_cast<Constant>(Agg))
2470 if (auto *ValC = dyn_cast<Constant>(Val))
2471 return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name);
2472 return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
2473 }
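Note: these fold when the aggregate (and, for insertion, the value) are constants; otherwise they emit the corresponding instructions. A hedged sketch (Agg is an assumed value of type {i32, [2 x float]}):

  llvm::Value *Elt  = B.CreateExtractValue(Agg, {1, 0});            // Agg.field1[0]
  llvm::Value *Agg2 = B.CreateInsertValue(Agg, B.getInt32(5), {0}); // replace field0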
2474
2475 LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
2476 const Twine &Name = "") {
2477 return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
2478 }
2479
2480 Value *CreateFreeze(Value *V, const Twine &Name = "") {
2481 return Insert(new FreezeInst(V), Name);
2482 }
2483
2484 //===--------------------------------------------------------------------===//
2485 // Utility creation methods
2486 //===--------------------------------------------------------------------===//
2487
2488 /// Return an i1 value testing if \p Arg is null.
2489 Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
2490 return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()),
2491 Name);
2492 }
2493
2494 /// Return an i1 value testing if \p Arg is not null.
2495 Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
2496 return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()),
2497 Name);
2498 }
2499
2500 /// Return the i64 difference between two pointer values, dividing out
2501 /// the size of the pointed-to objects.
2502 ///
2503 /// This is intended to implement C-style pointer subtraction. As such, the
2504 /// pointers must be appropriately aligned for their element types and
2505 /// point into the same object.
2506 Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "");
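Note: a hedged sketch (P and Q are assumed i32* values into the same array); the helper emits the ptrtoint subtraction and an exact division by the element size, yielding an i64 element count:

  llvm::Value *NumElts = B.CreatePtrDiff(P, Q); // (P - Q) / sizeof(i32), as i64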
2507
2508 /// Create a launder.invariant.group intrinsic call. If the type of Ptr is
2509 /// not pointer to i8, it is cast to pointer to i8 in the same address
2510 /// space before the call and cast back to the type of Ptr afterwards.
2511 Value *CreateLaunderInvariantGroup(Value *Ptr);
2512
2513 /// Create a strip.invariant.group intrinsic call. If the type of Ptr is
2514 /// not pointer to i8, it is cast to pointer to i8 in the same address
2515 /// space before the call and cast back to the type of Ptr afterwards.
2516 Value *CreateStripInvariantGroup(Value *Ptr);
2517
2518 /// Return a vector value that contains the vector V reversed.
2519 Value *CreateVectorReverse(Value *V, const Twine &Name = "");
2520
2521 /// Return a vector splice intrinsic if using scalable vectors, otherwise
2522 /// return a shufflevector. If the immediate is positive, a vector is
2523 /// extracted from concat(V1, V2), starting at Imm. If the immediate is
2524 /// negative, we extract -Imm elements from V1 and the remaining elements
2525 /// from V2. Imm is a signed integer in the range -VL <= Imm < VL (where
2526 /// VL is the runtime vector length of the source/result vector).
2528 Value *CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
2529 const Twine &Name = "");
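Note: a worked sketch of the immediate's sign convention (V1 and V2 are assumed <4 x i32> values):

  // Imm = 1: elements 1..4 of concat(V1, V2) -> {v1[1], v1[2], v1[3], v2[0]}
  llvm::Value *S1 = B.CreateVectorSplice(V1, V2, 1);
  // Imm = -1: the last element of V1, then fill from V2 -> {v1[3], v2[0], v2[1], v2[2]}
  llvm::Value *S2 = B.CreateVectorSplice(V1, V2, -1);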
2530
2531 /// Return a vector value that contains \arg V broadcast to \p
2532 /// NumElts elements.
2533 Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");
2534
2535 /// Return a vector value that contains \arg V broadcast to \p
2536 /// EC elements.
2537 Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = "");
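Note: a hedged sketch of both splat overloads (B is an assumed builder):

  llvm::Value *Ones = B.CreateVectorSplat(8, B.getInt32(1)); // <8 x i32> of all-ones elements
  // The ElementCount overload also covers scalable vectors, e.g. <vscale x 4 x i32>:
  llvm::Value *SVOnes =
      B.CreateVectorSplat(llvm::ElementCount::getScalable(4), B.getInt32(1));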
2538
2539 /// Return a value that has been extracted from a larger integer type.
2540 Value *CreateExtractInteger(const DataLayout &DL, Value *From,
2541 IntegerType *ExtractedTy, uint64_t Offset,
2542 const Twine &Name);
2543
2544 Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
2545 unsigned Dimension, unsigned LastIndex,
2546 MDNode *DbgInfo);
2547
2548 Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
2549 MDNode *DbgInfo);
2550
2551 Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
2552 unsigned Index, unsigned FieldIndex,
2553 MDNode *DbgInfo);
2554
2555private:
2556 /// Helper function that creates an assume intrinsic call that
2557 /// represents an alignment assumption on the provided pointer \p PtrValue
2558 /// with offset \p OffsetValue and alignment value \p AlignValue.
2559 CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
2560 Value *PtrValue, Value *AlignValue,
2561 Value *OffsetValue);
2562
2563public:
2564 /// Create an assume intrinsic call that represents an alignment
2565 /// assumption on the provided pointer.
2566 ///
2567 /// An optional offset can be provided; if so, it must be subtracted from
2568 /// the provided pointer to obtain the pointer with the specified
2569 /// alignment.
2570 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2571 unsigned Alignment,
2572 Value *OffsetValue = nullptr);
2573
2574 /// Create an assume intrinsic call that represents an alignment
2575 /// assumption on the provided pointer.
2576 ///
2577 /// An optional offset can be provided; if so, it must be subtracted from
2578 /// the provided pointer to obtain the pointer with the specified
2579 /// alignment.
2580 ///
2581 /// This overload handles the case where the alignment depends on an
2582 /// existing value rather than being a static value.
2583 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2584 Value *Alignment,
2585 Value *OffsetValue = nullptr);
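Note: a hedged sketch (DL and an i8* value Ptr are assumed); this emits an llvm.assume call carrying an "align" operand bundle that later passes can exploit:

  B.CreateAlignmentAssumption(DL, Ptr, /*Alignment=*/16);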
2586};
2587
2588/// This provides a uniform API for creating instructions and inserting
2589/// them into a basic block: either at the end of a BasicBlock, or at a specific
2590/// iterator location in a block.
2591///
2592/// Note that the builder does not expose the full generality of LLVM
2593/// instructions. For access to extra instruction properties, use the mutators
2594/// (e.g. setVolatile) on the instructions after they have been
2595/// created. Convenience state exists to specify fast-math flags and fp-math
2596/// tags.
2597///
2598/// The first template argument specifies a class to use for creating constants.
2599/// This defaults to creating minimally folded constants. The second template
2600/// argument allows clients to specify custom insertion hooks that are called on
2601 /// every newly created instruction.
2602template <typename FolderTy = ConstantFolder,
2603 typename InserterTy = IRBuilderDefaultInserter>
2604class IRBuilder : public IRBuilderBase {
2605private:
2606 FolderTy Folder;
2607 InserterTy Inserter;
2608
2609public:
2610 IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(),
2611 MDNode *FPMathTag = nullptr,
2612 ArrayRef<OperandBundleDef> OpBundles = None)
2613 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles),
2614 Folder(Folder), Inserter(Inserter) {}
2615
2616 explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
2617 ArrayRef<OperandBundleDef> OpBundles = None)
2618 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}
2619
2620 explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
2621 MDNode *FPMathTag = nullptr,
2622 ArrayRef<OperandBundleDef> OpBundles = None)
2623 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2624 FPMathTag, OpBundles), Folder(Folder) {
2625 SetInsertPoint(TheBB);
2626 }
2627
2628 explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
2629 ArrayRef<OperandBundleDef> OpBundles = None)
2630 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2631 FPMathTag, OpBundles) {
2632 SetInsertPoint(TheBB);
2633 }
2634
2635 explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
2636 ArrayRef<OperandBundleDef> OpBundles = None)
2637 : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter,
22. Called C++ object pointer is null
2638 FPMathTag, OpBundles) {
2639 SetInsertPoint(IP);
2640 }
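Note: this constructor is where the report's warning lands. IP->getContext() is evaluated in the member initializer list, before SetInsertPoint(IP) runs, so a null IP is dereferenced immediately. A hedged sketch of the hazard in hypothetical caller code (buildAfterLoad is illustrative, not from this header):

  void buildAfterLoad(llvm::Value *V) {
    // dyn_cast yields nullptr whenever V is not a LoadInst ...
    llvm::IRBuilder<> Bad(llvm::dyn_cast<llvm::LoadInst>(V)); // null deref if V isn't a load
  }
  void buildAfterLoadSafe(llvm::Value *V) {
    if (auto *LI = llvm::dyn_cast<llvm::LoadInst>(V))
      llvm::IRBuilder<> B(LI); // safe: LI is provably non-null here
  }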
2641
2642 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
2643 MDNode *FPMathTag = nullptr,
2644 ArrayRef<OperandBundleDef> OpBundles = None)
2645 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2646 FPMathTag, OpBundles), Folder(Folder) {
2647 SetInsertPoint(TheBB, IP);
2648 }
2649
2650 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
2651 MDNode *FPMathTag = nullptr,
2652 ArrayRef<OperandBundleDef> OpBundles = None)
2653 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2654 FPMathTag, OpBundles) {
2655 SetInsertPoint(TheBB, IP);
2656 }
2657
2658 /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
2659 /// or FastMathFlagGuard instead.
2660 IRBuilder(const IRBuilder &) = delete;
2661
2662 InserterTy &getInserter() { return Inserter; }
2663};
2664
2665// Create wrappers for C Binding types (see CBindingWrapping.h).
2666 DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)
2667
2668} // end namespace llvm
2669
2670#endif // LLVM_IR_IRBUILDER_H