Bug Summary

File: llvm/include/llvm/IR/IRBuilder.h
Warning: line 2680, column 23
Called C++ object pointer is null
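
In short: in X86LowerAMXType.cpp, replaceWithTileLoad() obtains the user of a
tile-typed Use with dyn_cast<Instruction>(U.getUser()), which yields null when
that user is not an Instruction, and the null pointer is then handed straight
to the IRBuilder constructor. A distilled view of the flagged pattern
(paraphrased from steps [18]-[21] of the path below; not verbatim source):

    Instruction *UserI = dyn_cast<Instruction>(U.getUser()); // may be null
    IRBuilder<> Builder(UserI); // constructor dereferences 'IP' at IRBuilder.h:2680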

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-pc-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name X86LowerAMXType.cpp -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model pic -pic-level 2 -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -ffunction-sections -fdata-sections -fcoverage-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/build-llvm -resource-dir /usr/lib/llvm-14/lib/clang/14.0.0 -D _DEBUG -D _GNU_SOURCE -D __STDC_CONSTANT_MACROS -D __STDC_FORMAT_MACROS -D __STDC_LIMIT_MACROS -I lib/Target/X86 -I /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/X86 -I include -I /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include -D NDEBUG -U NDEBUG -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/x86_64-linux-gnu/c++/10 -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../include/c++/10/backward -internal-isystem /usr/lib/llvm-14/lib/clang/14.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/lib/gcc/x86_64-linux-gnu/10/../../../../x86_64-linux-gnu/include -internal-externc-isystem /usr/include/x86_64-linux-gnu -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-unused-command-line-argument -Wno-unknown-warning-option -Wno-unused-parameter -Wwrite-strings -Wno-missing-field-initializers -Wno-long-long -Wno-maybe-uninitialized -Wno-class-memaccess -Wno-redundant-move -Wno-pessimizing-move -Wno-noexcept-type -Wno-comment -std=c++14 -fdeprecated-macro -fdebug-compilation-dir=/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/build-llvm -ferror-limit 19 -fvisibility hidden -fvisibility-inlines-hidden -fgnuc-version=4.2.1 -fcolor-diagnostics -vectorize-loops -vectorize-slp -analyzer-output=html -analyzer-config stable-report-filename=true -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /tmp/scan-build-2021-09-26-234817-15343-1 -x c++ /build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/X86/X86LowerAMXType.cpp

/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/lib/Target/X86/X86LowerAMXType.cpp

1//===- Target/X86/X86LowerAMXType.cpp - -------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9/// \file Pass to transform <256 x i32> load/store
10/// <256 x i32> is bitcasted to x86_amx on X86, and the AMX instruction set
11/// only provides simple operations on x86_amx. Basic elementwise operations
12/// are not supported by AMX. Since x86_amx is bitcasted from vector <256 x i32>
13/// and only AMX intrinsics can operate on the type, we need to transform
14/// load/store <256 x i32> instructions to AMX load/store. If the bitcast
15/// cannot be combined with the load/store, we transform the bitcast to an amx
16/// load/store and a <256 x i32> store/load.
17///
18/// If the front end does not use O0 but the mid/back end uses O0 (e.g. "Clang
19/// -O2 -S -emit-llvm t.c" + "llc t.ll"), we should make sure the amx data is
20/// volatile, because that is necessary for AMX fast register allocation. (In
21/// fast register allocation, registers are allocated before spill/reload, so
22/// there is no additional register for amx to identify the step in spill.)
23/// The volatileTileData() will handle this case.
24/// e.g.
25/// ----------------------------------------------------------
26/// | def %td = ... |
27/// | ... |
28/// | "use %td" |
29/// ----------------------------------------------------------
30/// will transfer to -->
31/// ----------------------------------------------------------
32/// | def %td = ... |
33/// | call void @llvm.x86.tilestored64.internal(mem, %td) |
34/// | ... |
35/// | %td2 = call x86_amx @llvm.x86.tileloadd64.internal(mem)|
36/// | "use %td2" |
37/// ----------------------------------------------------------
38//
39//===----------------------------------------------------------------------===//
40//
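
For concreteness, the simplest combine this pass performs looks like this at
the IR level (a sketch assembled from the transformation comments later in
this file; %row and %col stand for the shape operands recovered by getShape):

    ; before
    %src = load <256 x i32>, <256 x i32>* %addr, align 64
    %2   = bitcast <256 x i32> %src to x86_amx
    ; after
    %2   = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col,
                                                       i8* %addr, i64 64)
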
41#include "X86.h"
42#include "llvm/ADT/PostOrderIterator.h"
43#include "llvm/ADT/SetVector.h"
44#include "llvm/ADT/SmallSet.h"
45#include "llvm/Analysis/OptimizationRemarkEmitter.h"
46#include "llvm/Analysis/TargetLibraryInfo.h"
47#include "llvm/Analysis/TargetTransformInfo.h"
48#include "llvm/CodeGen/Passes.h"
49#include "llvm/CodeGen/TargetPassConfig.h"
50#include "llvm/CodeGen/ValueTypes.h"
51#include "llvm/IR/DataLayout.h"
52#include "llvm/IR/Function.h"
53#include "llvm/IR/IRBuilder.h"
54#include "llvm/IR/Instructions.h"
55#include "llvm/IR/IntrinsicInst.h"
56#include "llvm/IR/IntrinsicsX86.h"
57#include "llvm/IR/PatternMatch.h"
58#include "llvm/InitializePasses.h"
59#include "llvm/Pass.h"
60#include "llvm/Target/TargetMachine.h"
61#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
62#include "llvm/Transforms/Utils/Local.h"
63
64using namespace llvm;
65using namespace PatternMatch;
66
67#define DEBUG_TYPE "lower-amx-type"
68
69static bool isAMXCast(Instruction *II) {
70 return match(II,
71 m_Intrinsic<Intrinsic::x86_cast_vector_to_tile>(m_Value())) ||
72 match(II, m_Intrinsic<Intrinsic::x86_cast_tile_to_vector>(m_Value()));
73}
74
75static AllocaInst *createAllocaInstAtEntry(IRBuilder<> &Builder, BasicBlock *BB,
76 Type *Ty) {
77 Function &F = *BB->getParent();
78 Module *M = BB->getModule();
79 const DataLayout &DL = M->getDataLayout();
80
81 LLVMContext &Ctx = Builder.getContext();
82 auto AllocaAlignment = DL.getPrefTypeAlign(Type::getX86_AMXTy(Ctx));
83 unsigned AllocaAS = DL.getAllocaAddrSpace();
84 AllocaInst *AllocaRes =
85 new AllocaInst(Ty, AllocaAS, "", &F.getEntryBlock().front());
86 AllocaRes->setAlignment(AllocaAlignment);
87 return AllocaRes;
88}
89
90static Instruction *getFirstNonAllocaInTheEntryBlock(Function &F) {
91 for (Instruction &I : F.getEntryBlock())
92 if (!isa<AllocaInst>(&I))
93 return &I;
94 llvm_unreachable("No terminator in the entry block!");
95}
96
97static std::pair<Value *, Value *> getShape(IntrinsicInst *II, unsigned OpNo) {
98 IRBuilder<> Builder(II);
99 Value *Row = nullptr, *Col = nullptr;
100 switch (II->getIntrinsicID()) {
101 default:
102 llvm_unreachable("Expect amx intrinsics");
103 case Intrinsic::x86_tileloadd64_internal:
104 case Intrinsic::x86_tileloaddt164_internal:
105 case Intrinsic::x86_tilestored64_internal: {
106 Row = II->getArgOperand(0);
107 Col = II->getArgOperand(1);
108 break;
109 }
110 // a * b + c
111 // The shape depends on which operand.
112 case Intrinsic::x86_tdpbssd_internal:
113 case Intrinsic::x86_tdpbsud_internal:
114 case Intrinsic::x86_tdpbusd_internal:
115 case Intrinsic::x86_tdpbuud_internal:
116 case Intrinsic::x86_tdpbf16ps_internal: {
117 switch (OpNo) {
118 case 3:
119 Row = II->getArgOperand(0);
120 Col = II->getArgOperand(1);
121 break;
122 case 4:
123 Row = II->getArgOperand(0);
124 Col = II->getArgOperand(2);
125 break;
126 case 5:
127 if (isa<ConstantInt>(II->getArgOperand(2)))
128 Row = Builder.getInt16(
129 (cast<ConstantInt>(II->getOperand(2))->getSExtValue()) / 4);
130 else if (isa<Instruction>(II->getArgOperand(2))) {
131 // When it is not a const value and it is not a function argument, we
132 // create Row after the definition of II->getOperand(2) instead of
133 // before II. For example, II is %118, and we try to get the shape for %117:
134 // %117 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x
135 // i32> %115).
136 // %118 = call x86_amx @llvm.x86.tdpbf16ps.internal(i16
137 // %104, i16 %105, i16 %106, x86_amx %110, x86_amx %114, x86_amx
138 // %117).
139 // If we create %row = udiv i16 %106, 4 before %118 (aka II), then its
140 // definition is after its user (the new tileload for %117).
141 // So, the best choice is to create %row right after the definition of
142 // %106.
143 Builder.SetInsertPoint(cast<Instruction>(II->getOperand(2)));
144 Row = Builder.CreateUDiv(II->getOperand(2), Builder.getInt16(4));
145 cast<Instruction>(Row)->moveAfter(cast<Instruction>(II->getOperand(2)));
146 } else {
147 // When it is not a const value and it is a function argument, we create
148 // Row at the entry bb.
149 IRBuilder<> NewBuilder(
150 getFirstNonAllocaInTheEntryBlock(*II->getFunction()));
151 Row = NewBuilder.CreateUDiv(II->getOperand(2), NewBuilder.getInt16(4));
152 }
153 Col = II->getArgOperand(1);
154 break;
155 }
156 break;
157 }
158 }
159
160 return std::make_pair(Row, Col);
161}
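
For quick reference, the shape selection above for the tdpb* (a*b+c)
intrinsics distills to the following (derived from the switch; opN denotes
the intrinsic's N-th argument, counting from 0):

    // OpNo 3 (accumulator c): Row = op0 (m),   Col = op1 (n)
    // OpNo 4 (operand a):     Row = op0 (m),   Col = op2 (k)
    // OpNo 5 (operand b):     Row = op2 / 4,   Col = op1 (n)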
162
163namespace {
164class X86LowerAMXType {
165 Function &Func;
166
167 // In AMX intrinsics we let Shape = {Row, Col}, but the
168 // RealCol = Col / ElementSize. We may use the RealCol
169 // as a new Row for other newly created AMX intrinsics.
170 std::map<Value *, Value *> Col2Row;
171
172public:
173 X86LowerAMXType(Function &F) : Func(F) {}
174 bool visit();
175 void combineLoadBitcast(LoadInst *LD, BitCastInst *Bitcast);
176 void combineBitcastStore(BitCastInst *Bitcast, StoreInst *ST);
177 bool transformBitcast(BitCastInst *Bitcast);
178};
179
180// %src = load <256 x i32>, <256 x i32>* %addr, align 64
181// %2 = bitcast <256 x i32> %src to x86_amx
182// -->
183// %2 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col,
184// i8* %addr, i64 %stride64)
185void X86LowerAMXType::combineLoadBitcast(LoadInst *LD, BitCastInst *Bitcast) {
186 Value *Row = nullptr, *Col = nullptr;
187 Use &U = *(Bitcast->use_begin());
188 unsigned OpNo = U.getOperandNo();
189 auto *II = cast<IntrinsicInst>(U.getUser());
190 std::tie(Row, Col) = getShape(II, OpNo);
191 IRBuilder<> Builder(Bitcast);
192 // Use the maximum column as stride.
193 Value *Stride = Builder.getInt64(64);
194 Value *I8Ptr =
195 Builder.CreateBitCast(LD->getOperand(0), Builder.getInt8PtrTy());
196 std::array<Value *, 4> Args = {Row, Col, I8Ptr, Stride};
197
198 Value *NewInst =
199 Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, None, Args);
200 Bitcast->replaceAllUsesWith(NewInst);
201}
202
203// %src = call x86_amx @llvm.x86.tileloadd64.internal(%row, %col, %addr,
204// %stride);
205// %13 = bitcast x86_amx %src to <256 x i32>
206// store <256 x i32> %13, <256 x i32>* %addr, align 64
207// -->
208// call void @llvm.x86.tilestored64.internal(%row, %col, %addr,
209// %stride64, %13)
210void X86LowerAMXType::combineBitcastStore(BitCastInst *Bitcast, StoreInst *ST) {
211
212 Value *Tile = Bitcast->getOperand(0);
213 auto *II = cast<IntrinsicInst>(Tile);
214 // Tile is output from AMX intrinsic. The first operand of the
215 // intrinsic is row, the second operand of the intrinsic is column.
216 Value *Row = II->getOperand(0);
217 Value *Col = II->getOperand(1);
218 IRBuilder<> Builder(ST);
219 // Use the maximum column as stride. It must be the same as the load
220 // stride.
221 Value *Stride = Builder.getInt64(64);
222 Value *I8Ptr =
223 Builder.CreateBitCast(ST->getOperand(1), Builder.getInt8PtrTy());
224 std::array<Value *, 5> Args = {Row, Col, I8Ptr, Stride, Tile};
225 Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, None, Args);
226 if (Bitcast->hasOneUse())
227 return;
228 // %13 = bitcast x86_amx %src to <256 x i32>
229 // store <256 x i32> %13, <256 x i32>* %addr, align 64
230 // %add = <256 x i32> %13, <256 x i32> %src2
231 // -->
232 // %13 = bitcast x86_amx %src to <256 x i32>
233 // call void @llvm.x86.tilestored64.internal(%row, %col, %addr,
234 // %stride64, %13)
235 // %14 = load <256 x i32>, %addr
236 // %add = <256 x i32> %14, <256 x i32> %src2
237 Value *Vec = Builder.CreateLoad(Bitcast->getType(), ST->getOperand(1));
238 Bitcast->replaceAllUsesWith(Vec);
239}
240
241// Transform a bitcast into <store, load> instructions.
242bool X86LowerAMXType::transformBitcast(BitCastInst *Bitcast) {
243 IRBuilder<> Builder(Bitcast);
244 AllocaInst *AllocaAddr;
245 Value *I8Ptr, *Stride;
246 auto *Src = Bitcast->getOperand(0);
247
248 auto Prepare = [&](Type *MemTy) {
249 AllocaAddr = createAllocaInstAtEntry(Builder, Bitcast->getParent(), MemTy);
250 I8Ptr = Builder.CreateBitCast(AllocaAddr, Builder.getInt8PtrTy());
251 Stride = Builder.getInt64(64);
252 };
253
254 if (Bitcast->getType()->isX86_AMXTy()) {
255 // %2 = bitcast <256 x i32> %src to x86_amx
256 // -->
257 // %addr = alloca <256 x i32>, align 64
258 // store <256 x i32> %src, <256 x i32>* %addr, align 64
259 // %addr2 = bitcast <256 x i32>* to i8*
260 // %2 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col,
261 // i8* %addr2,
262 // i64 64)
263 Use &U = *(Bitcast->use_begin());
264 unsigned OpNo = U.getOperandNo();
265 auto *II = dyn_cast<IntrinsicInst>(U.getUser());
266 if (!II)
267 return false; // May be bitcast from x86amx to <256 x i32>.
268 Prepare(Bitcast->getOperand(0)->getType());
269 Builder.CreateStore(Src, AllocaAddr);
270 // TODO: we can pick a constant operand for the shape.
271 Value *Row = nullptr, *Col = nullptr;
272 std::tie(Row, Col) = getShape(II, OpNo);
273 std::array<Value *, 4> Args = {Row, Col, I8Ptr, Stride};
274 Value *NewInst = Builder.CreateIntrinsic(
275 Intrinsic::x86_tileloadd64_internal, None, Args);
276 Bitcast->replaceAllUsesWith(NewInst);
277 } else {
278 // %2 = bitcast x86_amx %src to <256 x i32>
279 // -->
280 // %addr = alloca <256 x i32>, align 64
281 // %addr2 = bitcast <256 x i32>* to i8*
282 // call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col,
283 // i8* %addr2, i64 %stride)
284 // %2 = load <256 x i32>, <256 x i32>* %addr, align 64
285 auto *II = dyn_cast<IntrinsicInst>(Src);
286 if (!II)
287 return false; // May be bitcast from <256 x i32> to x86amx.
288 Prepare(Bitcast->getType());
289 Value *Row = II->getOperand(0);
290 Value *Col = II->getOperand(1);
291 std::array<Value *, 5> Args = {Row, Col, I8Ptr, Stride, Src};
292 Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, None, Args);
293 Value *NewInst = Builder.CreateLoad(Bitcast->getType(), AllocaAddr);
294 Bitcast->replaceAllUsesWith(NewInst);
295 }
296
297 return true;
298}
299
300bool X86LowerAMXType::visit() {
301 SmallVector<Instruction *, 8> DeadInsts;
302 Col2Row.clear();
303
304 for (BasicBlock *BB : post_order(&Func)) {
305 for (BasicBlock::reverse_iterator II = BB->rbegin(), IE = BB->rend();
306 II != IE;) {
307 Instruction &Inst = *II++;
308 auto *Bitcast = dyn_cast<BitCastInst>(&Inst);
309 if (!Bitcast)
310 continue;
311
312 Value *Src = Bitcast->getOperand(0);
313 if (Bitcast->getType()->isX86_AMXTy()) {
314 if (Bitcast->user_empty()) {
315 DeadInsts.push_back(Bitcast);
316 continue;
317 }
318 LoadInst *LD = dyn_cast<LoadInst>(Src);
319 if (!LD) {
320 if (transformBitcast(Bitcast))
321 DeadInsts.push_back(Bitcast);
322 continue;
323 }
324 // If the load has multiple users, duplicate the vector load.
325 // %src = load <256 x i32>, <256 x i32>* %addr, align 64
326 // %2 = bitcast <256 x i32> %src to x86_amx
327 // %add = add <256 x i32> %src, <256 x i32> %src2
328 // -->
329 // %src = load <256 x i32>, <256 x i32>* %addr, align 64
330 // %2 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col,
331 // i8* %addr, i64 %stride64)
332 // %add = add <256 x i32> %src, <256 x i32> %src2
333
334 // If the load has one user, the load will be eliminated in DAG ISel.
335 // %src = load <256 x i32>, <256 x i32>* %addr, align 64
336 // %2 = bitcast <256 x i32> %src to x86_amx
337 // -->
338 // %2 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col,
339 // i8* %addr, i64 %stride64)
340 combineLoadBitcast(LD, Bitcast);
341 DeadInsts.push_back(Bitcast);
342 if (LD->hasOneUse())
343 DeadInsts.push_back(LD);
344 } else if (Src->getType()->isX86_AMXTy()) {
345 if (Bitcast->user_empty()) {
346 DeadInsts.push_back(Bitcast);
347 continue;
348 }
349 StoreInst *ST = nullptr;
350 for (auto UI = Bitcast->use_begin(), UE = Bitcast->use_end();
351 UI != UE;) {
352 Value *I = (UI++)->getUser();
353 ST = dyn_cast<StoreInst>(I);
354 if (ST)
355 break;
356 }
357 if (!ST) {
358 if (transformBitcast(Bitcast))
359 DeadInsts.push_back(Bitcast);
360 continue;
361 }
362 // If bitcast (%13) has one use, combine the bitcast and store into an amx store.
363 // %src = call x86_amx @llvm.x86.tileloadd64.internal(%row, %col, %addr,
364 // %stride);
365 // %13 = bitcast x86_amx %src to <256 x i32>
366 // store <256 x i32> %13, <256 x i32>* %addr, align 64
367 // -->
368 // call void @llvm.x86.tilestored64.internal(%row, %col, %addr,
369 // %stride64, %13)
370 //
371 // If bitcast (%13) has multiple uses, transform it as below.
372 // %13 = bitcast x86_amx %src to <256 x i32>
373 // store <256 x i32> %13, <256 x i32>* %addr, align 64
374 // %add = <256 x i32> %13, <256 x i32> %src2
375 // -->
376 // %13 = bitcast x86_amx %src to <256 x i32>
377 // call void @llvm.x86.tilestored64.internal(%row, %col, %addr,
378 // %stride64, %13)
379 // %14 = load <256 x i32>, %addr
380 // %add = <256 x i32> %14, <256 x i32> %src2
381 //
382 combineBitcastStore(Bitcast, ST);
383 // Delete user first.
384 DeadInsts.push_back(ST);
385 DeadInsts.push_back(Bitcast);
386 }
387 }
388 }
389
390 bool C = !DeadInsts.empty();
391
392 for (auto *Inst : DeadInsts)
393 Inst->eraseFromParent();
394
395 return C;
396}
397} // anonymous namespace
398
399static Value *getAllocaPos(BasicBlock *BB) {
400 Module *M = BB->getModule();
401 Function *F = BB->getParent();
402 IRBuilder<> Builder(&F->getEntryBlock().front());
403 const DataLayout &DL = M->getDataLayout();
404 unsigned AllocaAS = DL.getAllocaAddrSpace();
405 Type *V256I32Ty = VectorType::get(Builder.getInt32Ty(), 256, false);
406 AllocaInst *AllocaRes =
407 new AllocaInst(V256I32Ty, AllocaAS, "", &F->getEntryBlock().front());
408 BasicBlock::iterator Iter = AllocaRes->getIterator();
409 ++Iter;
410 Builder.SetInsertPoint(&*Iter);
411 Value *I8Ptr = Builder.CreateBitCast(AllocaRes, Builder.getInt8PtrTy());
412 return I8Ptr;
413}
414
415static Instruction *createTileStore(Instruction *TileDef, Value *Ptr) {
416 assert(TileDef->getType()->isX86_AMXTy() && "Not define tile!");
417 auto *II = cast<IntrinsicInst>(TileDef);
418 assert(II && "Not tile intrinsic!");
419 Value *Row = II->getOperand(0);
420 Value *Col = II->getOperand(1);
421
422 BasicBlock *BB = TileDef->getParent();
423 BasicBlock::iterator Iter = TileDef->getIterator();
424 IRBuilder<> Builder(BB, ++Iter);
425 Value *Stride = Builder.getInt64(64);
426 std::array<Value *, 5> Args = {Row, Col, Ptr, Stride, TileDef};
427
428 Instruction *TileStore =
429 Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, None, Args);
430 return TileStore;
431}
432
433static void replaceWithTileLoad(Use &U, Value *Ptr, bool IsPHI = false) {
434 Value *V = U.get();
435 assert(V->getType()->isX86_AMXTy() && "Not define tile!");
[14] '?' condition is true
436
437 // Get tile shape.
438 IntrinsicInst *II = nullptr;
439 if (IsPHI) {
[14.1] 'IsPHI' is true
[15] Taking true branch
440 Value *PhiOp = dyn_cast<PHINode>(V)->getIncomingValue(0);
[16] Assuming 'V' is a 'PHINode'
441 II = cast<IntrinsicInst>(PhiOp);
[17] 'PhiOp' is a 'IntrinsicInst'
442 } else {
443 II = cast<IntrinsicInst>(V);
444 }
445 Value *Row = II->getOperand(0);
446 Value *Col = II->getOperand(1);
447
448 Instruction *UserI = dyn_cast<Instruction>(U.getUser());
[18] Assuming the object is not a 'Instruction'
[19] 'UserI' initialized to a null pointer value
449 IRBuilder<> Builder(UserI);
[20] Passing null pointer value via 1st parameter 'IP'
[21] Calling constructor for 'IRBuilder<llvm::ConstantFolder, llvm::IRBuilderDefaultInserter>'
450 Value *Stride = Builder.getInt64(64);
451 std::array<Value *, 4> Args = {Row, Col, Ptr, Stride};
452
453 Value *TileLoad =
454 Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, None, Args);
455 UserI->replaceUsesOfWith(V, TileLoad);
456}
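
Editorial note: the path above enters this function with a Use whose user is
not an Instruction, so UserI is null by the time the IRBuilder constructor
dereferences it. One possible hardening (a sketch only, assuming every tile
use reaching this point is in fact expected to be an Instruction) is to make
that assumption explicit instead of silently producing null:

    // cast<> asserts on a type mismatch in assert-enabled builds instead of
    // returning null like dyn_cast<> does.
    Instruction *UserI = cast<Instruction>(U.getUser());
    IRBuilder<> Builder(UserI);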
457
458static bool isIncomingOfPHI(Instruction *I) {
459 for (Use &U : I->uses()) {
460 User *V = U.getUser();
461 if (isa<PHINode>(V))
462 return true;
463 }
464 return false;
465}
466
467// Let all AMX tile data become volatile data and shorten the live range
468// of each tile register before fast register allocation.
469namespace {
470class X86VolatileTileData {
471 Function &F;
472
473public:
474 X86VolatileTileData(Function &Func) : F(Func) {}
475 Value *updatePhiIncomings(BasicBlock *BB,
476 SmallVector<Instruction *, 2> &Incomings);
477 void replacePhiDefWithLoad(Instruction *PHI, Value *StorePtr);
478 bool volatileTileData();
479 void volatileTilePHI(PHINode *Inst);
480 void volatileTileNonPHI(Instruction *I);
481};
482
483Value *X86VolatileTileData::updatePhiIncomings(
484 BasicBlock *BB, SmallVector<Instruction *, 2> &Incomings) {
485 Value *I8Ptr = getAllocaPos(BB);
486
487 for (auto *I : Incomings) {
488 User *Store = createTileStore(I, I8Ptr);
489
490 // All its uses (except phi) should load from stored mem.
491 for (Use &U : I->uses()) {
492 User *V = U.getUser();
493 if (isa<PHINode>(V) || V == Store)
494 continue;
495 replaceWithTileLoad(U, I8Ptr);
496 }
497 }
498 return I8Ptr;
499}
500
501void X86VolatileTileData::replacePhiDefWithLoad(Instruction *PHI,
502 Value *StorePtr) {
503 for (Use &U : PHI->uses())
504 replaceWithTileLoad(U, StorePtr, true);
[13] Calling 'replaceWithTileLoad'
505 PHI->eraseFromParent();
506}
507
508// Similar to volatileTileNonPHI, this function only handles PHI nodes
509// and their related AMX intrinsics.
510// 1) The PHI def should change to a tileload.
511// 2) The PHI incoming values should be tilestored just after their defs.
512// 3) The mem of these tileloads and tilestores should be the same.
513// e.g.
514// ------------------------------------------------------
515// bb_dom:
516// ...
517// br i1 %bool.cond, label %if.else, label %if.then
518//
519// if.then:
520// def %t0 = ...
521// ...
522// use %t0
523// ...
524// br label %if.end
525//
526// if.else:
527// def %t1 = ...
528// br label %if.end
529//
530// if.end:
531// %td = phi x86_amx [ %t1, %if.else ], [ %t0, %if.then ]
532// ...
533// use %td
534// ------------------------------------------------------
535// -->
536// ------------------------------------------------------
537// bb_entry:
538// %mem = alloca <256 x i32>, align 1024 *
539// ...
540// bb_dom:
541// ...
542// br i1 %bool.cond, label %if.else, label %if.then
543//
544// if.then:
545// def %t0 = ...
546// call void @llvm.x86.tilestored64.internal(mem, %t0) *
547// ...
548// %t0` = call x86_amx @llvm.x86.tileloadd64.internal(mem)*
549// use %t0` *
550// ...
551// br label %if.end
552//
553// if.else:
554// def %t1 = ...
555// call void @llvm.x86.tilestored64.internal(mem, %t1) *
556// br label %if.end
557//
558// if.end:
559// ...
560// %td = call x86_amx @llvm.x86.tileloadd64.internal(mem) *
561// use %td
562// ------------------------------------------------------
563void X86VolatileTileData::volatileTilePHI(PHINode *PHI) {
564 BasicBlock *BB = PHI->getParent();
565 SmallVector<Instruction *, 2> Incomings;
566
567 for (unsigned I = 0, E = PHI->getNumIncomingValues(); I != E; ++I) {
[10] Assuming 'I' is equal to 'E'
[11] Loop condition is false. Execution continues on line 574
568 Value *Op = PHI->getIncomingValue(I);
569 Instruction *Inst = dyn_cast<Instruction>(Op);
570 assert(Inst && "We shouldn't fold AMX instrution!");
571 Incomings.push_back(Inst);
572 }
573
574 Value *StorePtr = updatePhiIncomings(BB, Incomings);
575 replacePhiDefWithLoad(PHI, StorePtr);
[12] Calling 'X86VolatileTileData::replacePhiDefWithLoad'
576}
577
578// Store the defined tile and load it before each use.
579// None of its users are PHIs.
580// e.g.
581// ------------------------------------------------------
582// def %td = ...
583// ...
584// "use %td"
585// ------------------------------------------------------
586// -->
587// ------------------------------------------------------
588// def %td = ...
589// call void @llvm.x86.tilestored64.internal(mem, %td)
590// ...
591// %td2 = call x86_amx @llvm.x86.tileloadd64.internal(mem)
592// "use %td2"
593// ------------------------------------------------------
594void X86VolatileTileData::volatileTileNonPHI(Instruction *I) {
595 BasicBlock *BB = I->getParent();
596 Value *I8Ptr = getAllocaPos(BB);
597 User *Store = createTileStore(I, I8Ptr);
598
599 // All its uses should load from stored mem.
600 for (Use &U : I->uses()) {
601 User *V = U.getUser();
602 assert(!isa<PHINode>(V) && "PHI Nodes should be excluded!");
603 if (V != Store)
604 replaceWithTileLoad(U, I8Ptr);
605 }
606}
607
608// Volatile Tile Model:
609// 1) All uses of tile data come from a tileload in time.
610// 2) All defs of tile data are tilestored into mem immediately.
611// For example:
612// --------------------------------------------------------------------------
613// %t1 = call x86_amx @llvm.x86.tileloadd64.internal(m, k, ...) key
614// %t2 = call x86_amx @llvm.x86.tileloadd64.internal(k, n, ...)
615// %t3 = call x86_amx @llvm.x86.tileloadd64.internal(m, n, ...) amx
616// %td = tail call x86_amx @llvm.x86.tdpbssd.internal(m, n, k, t1, t2, t3)
617// call void @llvm.x86.tilestored64.internal(... td) area
618// --------------------------------------------------------------------------
619// 3) No terminator, call or other amx instructions in the key amx area.
620bool X86VolatileTileData::volatileTileData() {
621 bool Changed = false;
622 for (BasicBlock &BB : F) {
623 SmallVector<Instruction *, 2> PHIInsts;
624 SmallVector<Instruction *, 8> AMXDefInsts;
625
626 for (Instruction &I : BB) {
627 if (!I.getType()->isX86_AMXTy())
628 continue;
629 if (isa<PHINode>(&I))
630 PHIInsts.push_back(&I);
631 else
632 AMXDefInsts.push_back(&I);
633 }
634
635 // First we "volatile" the non-phi related amx intrinsics.
636 for (Instruction *I : AMXDefInsts) {
[6] Assuming '__begin2' is equal to '__end2'
637 if (isIncomingOfPHI(I))
638 continue;
639 volatileTileNonPHI(I);
640 Changed = true;
641 }
642
643 for (Instruction *I : PHIInsts) {
[7] Assuming '__begin2' is not equal to '__end2'
644 volatileTilePHI(dyn_cast<PHINode>(I));
[8] Assuming 'I' is a 'PHINode'
[9] Calling 'X86VolatileTileData::volatileTilePHI'
645 Changed = true;
646 }
647 }
648 return Changed;
649}
650
651} // anonymous namespace
652
653namespace {
654
655class X86LowerAMXCast {
656 Function &Func;
657
658public:
659 X86LowerAMXCast(Function &F) : Func(F) {}
660 bool combineAMXcast(TargetLibraryInfo *TLI);
661 bool transformAMXCast(IntrinsicInst *AMXCast);
662 bool transformAllAMXCast();
663 bool optimizeAMXCastFromPhi(IntrinsicInst *CI, PHINode *PN,
664 SmallSetVector<Instruction *, 16> &DeadInst);
665};
666
667static bool DCEInstruction(Instruction *I,
668 SmallSetVector<Instruction *, 16> &WorkList,
669 const TargetLibraryInfo *TLI) {
670 if (isInstructionTriviallyDead(I, TLI)) {
671 salvageDebugInfo(*I);
672 salvageKnowledge(I);
673
674 // Null out all of the instruction's operands to see if any operand becomes
675 // dead as we go.
676 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
677 Value *OpV = I->getOperand(i);
678 I->setOperand(i, nullptr);
679
680 if (!OpV->use_empty() || I == OpV)
681 continue;
682
683 // If the operand is an instruction that became dead as we nulled out the
684 // operand, and if it is 'trivially' dead, delete it in a future loop
685 // iteration.
686 if (Instruction *OpI = dyn_cast<Instruction>(OpV)) {
687 if (isInstructionTriviallyDead(OpI, TLI)) {
688 WorkList.insert(OpI);
689 }
690 }
691 }
692 I->eraseFromParent();
693 return true;
694 }
695 return false;
696}
697
698/// This function handles the following case
699///
700/// A -> B amxcast
701/// PHI
702/// B -> A amxcast
703///
704/// All the related PHI nodes can be replaced by new PHI nodes with type A.
705/// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
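
A before/after sketch of that rewrite (hypothetical value names; A is the
vector type, B is x86_amx):

    %b = amxcast A %a to B      ; A -> B
    %p = phi B [ %b, ... ]
    %r = amxcast B %p to A      ; B -> A, this is CI
  becomes
    %p2 = phi A [ %a, ... ]     ; new PHI with type A
    ; all uses of %r are rewired to %p2
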
706bool X86LowerAMXCast::optimizeAMXCastFromPhi(
707 IntrinsicInst *CI, PHINode *PN,
708 SmallSetVector<Instruction *, 16> &DeadInst) {
709 IRBuilder<> Builder(CI);
710 Value *Src = CI->getOperand(0);
711 Type *SrcTy = Src->getType(); // Type B
712 Type *DestTy = CI->getType(); // Type A
713
714 SmallVector<PHINode *, 4> PhiWorklist;
715 SmallSetVector<PHINode *, 4> OldPhiNodes;
716
717 // Find all of the A->B casts and PHI nodes.
718// We need to inspect all related PHI nodes, but PHIs can be cyclic, so
719// OldPhiNodes is used to track all known PHI nodes; before adding a new
720// PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
721 PhiWorklist.push_back(PN);
722 OldPhiNodes.insert(PN);
723 while (!PhiWorklist.empty()) {
724 auto *OldPN = PhiWorklist.pop_back_val();
725 for (Value *IncValue : OldPN->incoming_values()) {
726 // TODO: currently, we ignore cases where it is a const. In the future, we
727 // might support const.
728 if (isa<Constant>(IncValue))
729 return false;
730
731 if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
732 if (OldPhiNodes.insert(PNode))
733 PhiWorklist.push_back(PNode);
734 continue;
735 }
736 Instruction *ACI = dyn_cast<Instruction>(IncValue);
737 if (ACI && isAMXCast(ACI)) {
738 // Verify it's an A->B cast.
739 Type *TyA = ACI->getOperand(0)->getType();
740 Type *TyB = ACI->getType();
741 if (TyA != DestTy || TyB != SrcTy)
742 return false;
743 continue;
744 }
745 return false;
746 }
747 }
748
749 // Check that each user of each old PHI node is something that we can
750 // rewrite, so that all of the old PHI nodes can be cleaned up afterwards.
751 for (auto *OldPN : OldPhiNodes) {
752 for (User *V : OldPN->users()) {
753 Instruction *ACI = dyn_cast<Instruction>(V);
754 if (ACI && isAMXCast(ACI)) {
755 // Verify it's a B->A cast.
756 Type *TyB = ACI->getOperand(0)->getType();
757 Type *TyA = ACI->getType();
758 if (TyA != DestTy || TyB != SrcTy)
759 return false;
760 } else if (auto *PHI = dyn_cast<PHINode>(V)) {
761 // If the user is another old PHI node, then even if we don't
762 // rewrite it, the PHI web we're considering won't have any users
763 // outside itself, so it'll be dead.
764 // example:
765 // bb.0:
766 // %0 = amxcast ...
767 // bb.1:
768 // %1 = amxcast ...
769 // bb.2:
770 // %goodphi = phi %0, %1
771 // %3 = amxcast %goodphi
772 // bb.3:
773 // %goodphi2 = phi %0, %goodphi
774 // %4 = amxcast %goodphi2
775 // When optimizeAMXCastFromPhi processes %3 and %goodphi, %goodphi2 is
776 // outside the phi-web, so the combination stops. When
777 // optimizeAMXCastFromPhi processes %4 and %goodphi2, the optimization
778 // will be done.
779 if (OldPhiNodes.count(PHI) == 0)
780 return false;
781 } else
782 return false;
783 }
784 }
785
786 // For each old PHI node, create a corresponding new PHI node with type A.
787 SmallDenseMap<PHINode *, PHINode *> NewPNodes;
788 for (auto *OldPN : OldPhiNodes) {
789 Builder.SetInsertPoint(OldPN);
790 PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
791 NewPNodes[OldPN] = NewPN;
792 }
793
794 // Fill in the operands of new PHI nodes.
795 for (auto *OldPN : OldPhiNodes) {
796 PHINode *NewPN = NewPNodes[OldPN];
797 for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
798 Value *V = OldPN->getOperand(j);
799 Value *NewV = nullptr;
800 Instruction *ACI = dyn_cast<Instruction>(V);
801 // There should not be an AMXcast from a const.
802 if (ACI && isAMXCast(ACI))
803 NewV = ACI->getOperand(0);
804 else if (auto *PrevPN = dyn_cast<PHINode>(V))
805 NewV = NewPNodes[PrevPN];
806 assert(NewV);
807 NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
808 }
809 }
810
811 // Traverse all accumulated PHI nodes and process their users,
812 // which are stores and bitcasts. Without this processing,
813 // NewPHI nodes could be replicated and could lead to extra
814 // moves generated after DeSSA.
815 // If there is a store with type B, change it to type A.
816
817 // Replace users of BitCast B->A with NewPHI. These will help
818 // later to get rid of a closure formed by OldPHI nodes.
819 for (auto *OldPN : OldPhiNodes) {
820 PHINode *NewPN = NewPNodes[OldPN];
821 for (User *V : make_early_inc_range(OldPN->users())) {
822 Instruction *ACI = dyn_cast<Instruction>(V);
823 if (ACI && isAMXCast(ACI)) {
824 Type *TyB = ACI->getOperand(0)->getType();
825 Type *TyA = ACI->getType();
826 assert(TyA == DestTy && TyB == SrcTy);
827 (void)TyA;
828 (void)TyB;
829 ACI->replaceAllUsesWith(NewPN);
830 DeadInst.insert(ACI);
831 } else if (auto *PHI = dyn_cast<PHINode>(V)) {
832 // We don't need to push PHINodes into DeadInst since they are operands
833 // of rootPN; DCE can safely delete rootPN's operands if rootPN is dead.
834 assert(OldPhiNodes.contains(PHI));
835 (void)PHI;
836 } else
837 llvm_unreachable("all uses should be handled");
838 }
839 }
840 return true;
841}
842
843bool X86LowerAMXCast::combineAMXcast(TargetLibraryInfo *TLI) {
844 bool Change = false;
845 // Collect tile cast instructions.
846 SmallVector<Instruction *, 8> Vec2TileInsts;
847 SmallVector<Instruction *, 8> Tile2VecInsts;
848 SmallVector<Instruction *, 8> PhiCastWorkList;
849 SmallSetVector<Instruction *, 16> DeadInst;
850 for (BasicBlock &BB : Func) {
851 for (Instruction &I : BB) {
852 Value *Vec;
853 if (match(&I,
854 m_Intrinsic<Intrinsic::x86_cast_vector_to_tile>(m_Value(Vec))))
855 Vec2TileInsts.push_back(&I);
856 else if (match(&I, m_Intrinsic<Intrinsic::x86_cast_tile_to_vector>(
857 m_Value(Vec))))
858 Tile2VecInsts.push_back(&I);
859 }
860 }
861
862 auto Convert = [&](SmallVectorImpl<Instruction *> &Insts, Intrinsic::ID IID) {
863 for (auto *Inst : Insts) {
864 for (User *U : Inst->users()) {
865 IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
866 if (!II || II->getIntrinsicID() != IID)
867 continue;
868 // T1 = vec2tile V0
869 // V2 = tile2vec T1
870 // V3 = OP V2
871 // -->
872 // T1 = vec2tile V0
873 // V2 = tile2vec T1
874 // V3 = OP V0
875 II->replaceAllUsesWith(Inst->getOperand(0));
876 Change = true;
877 }
878 }
879 };
880
881 Convert(Vec2TileInsts, Intrinsic::x86_cast_tile_to_vector);
882 Convert(Tile2VecInsts, Intrinsic::x86_cast_vector_to_tile);
883
884 auto EraseInst = [&](SmallVectorImpl<Instruction *> &Insts) {
885 for (auto *Inst : Insts) {
886 if (Inst->use_empty()) {
887 Inst->eraseFromParent();
888 Change = true;
889 }
890 }
891 };
892
893 EraseInst(Vec2TileInsts);
894 EraseInst(Tile2VecInsts);
895
896 // Handle the A->B->A cast, and there is an intervening PHI node.
897 for (BasicBlock &BB : Func) {
898 for (Instruction &I : BB) {
899 if (isAMXCast(&I)) {
900 if (isa<PHINode>(I.getOperand(0)))
901 PhiCastWorkList.push_back(&I);
902 }
903 }
904 }
905 for (auto *I : PhiCastWorkList) {
906 // We skip the dead Amxcast.
907 if (DeadInst.contains(I))
908 continue;
909 PHINode *PN = cast<PHINode>(I->getOperand(0));
910 if (optimizeAMXCastFromPhi(cast<IntrinsicInst>(I), PN, DeadInst)) {
911 DeadInst.insert(PN);
912 Change = true;
913 }
914 }
915
916 // Since we create new phis and merge AMXCasts, some old phis and AMXCasts
917 // might have no uses. We do some DeadCodeElimination for them.
918 while (!DeadInst.empty()) {
919 Instruction *I = DeadInst.pop_back_val();
920 Change |= DCEInstruction(I, DeadInst, TLI);
921 }
922 return Change;
923}
924
925// There might be remaining AMXcasts after combineAMXcast, and they should be
926// handled elegantly.
927bool X86LowerAMXCast::transformAMXCast(IntrinsicInst *AMXCast) {
928 IRBuilder<> Builder(AMXCast);
929 AllocaInst *AllocaAddr;
930 Value *I8Ptr, *Stride;
931 auto *Src = AMXCast->getOperand(0);
932
933 auto Prepare = [&](Type *MemTy) {
934 AllocaAddr = createAllocaInstAtEntry(Builder, AMXCast->getParent(), MemTy);
935 I8Ptr = Builder.CreateBitCast(AllocaAddr, Builder.getInt8PtrTy());
936 Stride = Builder.getInt64(64);
937 };
938
939 if (AMXCast->getType()->isX86_AMXTy()) {
940 // %2 = amxcast <225 x i32> %src to x86_amx
941 // call void @llvm.x86.tilestored64.internal(i16 15, i16 60,
942 // i8* %addr3, i64 60, x86_amx %2)
943 // -->
944 // %addr = alloca <225 x i32>, align 64
945 // store <225 x i32> %src, <225 x i32>* %addr, align 64
946 // %addr2 = bitcast <225 x i32>* %addr to i8*
947 // %2 = call x86_amx @llvm.x86.tileloadd64.internal(i16 15, i16 60,
948 // i8* %addr2,
949 // i64 60)
950 // call void @llvm.x86.tilestored64.internal(i16 15, i16 60,
951 // i8* %addr3, i64 60, x86_amx %2)
952 Use &U = *(AMXCast->use_begin());
953 unsigned OpNo = U.getOperandNo();
954 auto *II = dyn_cast<IntrinsicInst>(U.getUser());
955 if (!II)
956 return false; // May be bitcast from x86amx to <256 x i32>.
957 Prepare(AMXCast->getOperand(0)->getType());
958 Builder.CreateStore(Src, AllocaAddr);
959 // TODO: we can pick a constant operand for the shape.
960 Value *Row = nullptr, *Col = nullptr;
961 std::tie(Row, Col) = getShape(II, OpNo);
962 std::array<Value *, 4> Args = {
963 Row, Col, I8Ptr, Builder.CreateSExt(Col, Builder.getInt64Ty())};
964 Value *NewInst = Builder.CreateIntrinsic(
965 Intrinsic::x86_tileloadd64_internal, None, Args);
966 AMXCast->replaceAllUsesWith(NewInst);
967 AMXCast->eraseFromParent();
968 } else {
969 // %2 = amxcast x86_amx %src to <225 x i32>
970 // -->
971 // %addr = alloca <225 x i32>, align 64
972 // %addr2 = bitcast <225 x i32>* to i8*
973 // call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col,
974 // i8* %addr2, i64 %stride)
975 // %2 = load <225 x i32>, <225 x i32>* %addr, align 64
976 auto *II = dyn_cast<IntrinsicInst>(Src);
977 if (!II)
978 return false; // May be bitcast from <256 x i32> to x86amx.
979 Prepare(AMXCast->getType());
980 Value *Row = II->getOperand(0);
981 Value *Col = II->getOperand(1);
982 std::array<Value *, 5> Args = {
983 Row, Col, I8Ptr, Builder.CreateSExt(Col, Builder.getInt64Ty()), Src};
984 Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, None, Args);
985 Value *NewInst = Builder.CreateLoad(AMXCast->getType(), AllocaAddr);
986 AMXCast->replaceAllUsesWith(NewInst);
987 AMXCast->eraseFromParent();
988 }
989
990 return true;
991}
992
993bool X86LowerAMXCast::transformAllAMXCast() {
994 bool Change = false;
996 // Collect tile cast instructions.
996 SmallVector<Instruction *, 8> WorkLists;
997 for (BasicBlock &BB : Func) {
998 for (Instruction &I : BB) {
999 if (isAMXCast(&I))
1000 WorkLists.push_back(&I);
1001 }
1002 }
1003
1004 for (auto *Inst : WorkLists) {
1005 Change |= transformAMXCast(cast<IntrinsicInst>(Inst));
1006 }
1007
1008 return Change;
1009}
1010
1011} // anonymous namespace
1012
1013namespace {
1014
1015class X86LowerAMXTypeLegacyPass : public FunctionPass {
1016public:
1017 static char ID;
1018
1019 X86LowerAMXTypeLegacyPass() : FunctionPass(ID) {
1020 initializeX86LowerAMXTypeLegacyPassPass(*PassRegistry::getPassRegistry());
1021 }
1022
1023 bool runOnFunction(Function &F) override {
1024 bool C = false;
1025 TargetMachine *TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
1026 TargetLibraryInfo *TLI =
1027 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1028 X86LowerAMXCast LAC(F);
1029 C |= LAC.combineAMXcast(TLI);
1030 // There might be remaining AMXcasts after combineAMXcast, and they should
1031 // be handled elegantly.
1032 C |= LAC.transformAllAMXCast();
1033
1034 X86LowerAMXType LAT(F);
1035 C |= LAT.visit();
1036
1037 // Prepare for fast register allocation at O0.
1038 // Todo: It may be better to check the volatile model of the AMX code,
1039 // not just Attribute::OptimizeNone and CodeGenOpt::None.
1040 if (TM->getOptLevel() == CodeGenOpt::None) {
[1] Assuming the condition is true
[2] Taking true branch
1041 // If the front end does not use O0 but the mid/back end uses O0 (e.g.
1042 // "Clang -O2 -S -emit-llvm t.c" + "llc t.ll"), we should make
1043 // sure the amx data is volatile; that is necessary for AMX fast
1044 // register allocation.
1045 if (!F.hasFnAttribute(Attribute::OptimizeNone)) {
[3] Assuming the condition is true
[4] Taking true branch
1046 X86VolatileTileData VTD(F);
1047 C = VTD.volatileTileData() || C;
[5] Calling 'X86VolatileTileData::volatileTileData'
1048 }
1049 }
1050
1051 return C;
1052 }
1053
1054 void getAnalysisUsage(AnalysisUsage &AU) const override {
1055 AU.setPreservesCFG();
1056 AU.addRequired<TargetPassConfig>();
1057 AU.addRequired<TargetLibraryInfoWrapperPass>();
1058 }
1059};
1060
1061} // anonymous namespace
1062
1063static const char PassName[] = "Lower AMX type for load/store";
1064char X86LowerAMXTypeLegacyPass::ID = 0;
1065INITIALIZE_PASS_BEGIN(X86LowerAMXTypeLegacyPass, DEBUG_TYPE, PassName, false,
1066                    false)
1067INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
1068INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
1069INITIALIZE_PASS_END(X86LowerAMXTypeLegacyPass, DEBUG_TYPE, PassName, false,
1070                  false)
1071
1072FunctionPass *llvm::createX86LowerAMXTypePass() {
1073 return new X86LowerAMXTypeLegacyPass();
1074}

/build/llvm-toolchain-snapshot-14~++20210926122410+d23fd8ae8906/llvm/include/llvm/IR/IRBuilder.h

1//===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file defines the IRBuilder class, which is used as a convenient way
10// to create LLVM instructions with a consistent and simplified interface.
11//
12//===----------------------------------------------------------------------===//
13
14#ifndef LLVM_IR_IRBUILDER_H
15#define LLVM_IR_IRBUILDER_H
16
17#include "llvm-c/Types.h"
18#include "llvm/ADT/ArrayRef.h"
19#include "llvm/ADT/None.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/StringRef.h"
22#include "llvm/ADT/Twine.h"
23#include "llvm/IR/BasicBlock.h"
24#include "llvm/IR/Constant.h"
25#include "llvm/IR/ConstantFolder.h"
26#include "llvm/IR/Constants.h"
27#include "llvm/IR/DataLayout.h"
28#include "llvm/IR/DebugInfoMetadata.h"
29#include "llvm/IR/DebugLoc.h"
30#include "llvm/IR/DerivedTypes.h"
31#include "llvm/IR/Function.h"
32#include "llvm/IR/GlobalVariable.h"
33#include "llvm/IR/InstrTypes.h"
34#include "llvm/IR/Instruction.h"
35#include "llvm/IR/Instructions.h"
36#include "llvm/IR/IntrinsicInst.h"
37#include "llvm/IR/LLVMContext.h"
38#include "llvm/IR/Module.h"
39#include "llvm/IR/Operator.h"
40#include "llvm/IR/Type.h"
41#include "llvm/IR/Value.h"
42#include "llvm/IR/ValueHandle.h"
43#include "llvm/Support/AtomicOrdering.h"
44#include "llvm/Support/CBindingWrapping.h"
45#include "llvm/Support/Casting.h"
46#include <cassert>
47#include <cstddef>
48#include <cstdint>
49#include <functional>
50#include <utility>
51
52namespace llvm {
53
54class APInt;
55class MDNode;
56class Use;
57
58/// This provides the default implementation of the IRBuilder
59/// 'InsertHelper' method that is called whenever an instruction is created by
60/// IRBuilder and needs to be inserted.
61///
62/// By default, this inserts the instruction at the insertion point.
63class IRBuilderDefaultInserter {
64public:
65 virtual ~IRBuilderDefaultInserter();
66
67 virtual void InsertHelper(Instruction *I, const Twine &Name,
68 BasicBlock *BB,
69 BasicBlock::iterator InsertPt) const {
70 if (BB) BB->getInstList().insert(InsertPt, I);
71 I->setName(Name);
72 }
73};
74
75/// Provides an 'InsertHelper' that calls a user-provided callback after
76/// performing the default insertion.
77class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
78 std::function<void(Instruction *)> Callback;
79
80public:
81 virtual ~IRBuilderCallbackInserter();
82
83 IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
84 : Callback(std::move(Callback)) {}
85
86 void InsertHelper(Instruction *I, const Twine &Name,
87 BasicBlock *BB,
88 BasicBlock::iterator InsertPt) const override {
89 IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
90 Callback(I);
91 }
92};
93
94/// Common base class shared among various IRBuilders.
95class IRBuilderBase {
96 /// Pairs of (metadata kind, MDNode *) that should be added to all newly
97 /// created instructions, like !dbg metadata.
98 SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy;
99
100 /// Add or update an entry (Kind, MD) in MetadataToCopy, if \p MD is not
101 /// null. If \p MD is null, remove the entry with \p Kind.
102 void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) {
103 if (!MD) {
104 erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) {
105 return KV.first == Kind;
106 });
107 return;
108 }
109
110 for (auto &KV : MetadataToCopy)
111 if (KV.first == Kind) {
112 KV.second = MD;
113 return;
114 }
115
116 MetadataToCopy.emplace_back(Kind, MD);
117 }
118
119protected:
120 BasicBlock *BB;
121 BasicBlock::iterator InsertPt;
122 LLVMContext &Context;
123 const IRBuilderFolder &Folder;
124 const IRBuilderDefaultInserter &Inserter;
125
126 MDNode *DefaultFPMathTag;
127 FastMathFlags FMF;
128
129 bool IsFPConstrained;
130 fp::ExceptionBehavior DefaultConstrainedExcept;
131 RoundingMode DefaultConstrainedRounding;
132
133 ArrayRef<OperandBundleDef> DefaultOperandBundles;
134
135public:
136 IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder,
137 const IRBuilderDefaultInserter &Inserter,
138 MDNode *FPMathTag, ArrayRef<OperandBundleDef> OpBundles)
139 : Context(context), Folder(Folder), Inserter(Inserter),
140 DefaultFPMathTag(FPMathTag), IsFPConstrained(false),
141 DefaultConstrainedExcept(fp::ebStrict),
142 DefaultConstrainedRounding(RoundingMode::Dynamic),
143 DefaultOperandBundles(OpBundles) {
144 ClearInsertionPoint();
145 }
146
147 /// Insert and return the specified instruction.
148 template<typename InstTy>
149 InstTy *Insert(InstTy *I, const Twine &Name = "") const {
150 Inserter.InsertHelper(I, Name, BB, InsertPt);
151 AddMetadataToInst(I);
152 return I;
153 }
154
155 /// No-op overload to handle constants.
156 Constant *Insert(Constant *C, const Twine& = "") const {
157 return C;
158 }
159
160 Value *Insert(Value *V, const Twine &Name = "") const {
161 if (Instruction *I = dyn_cast<Instruction>(V))
162 return Insert(I, Name);
163 assert(isa<Constant>(V));
164 return V;
165 }
166
167 //===--------------------------------------------------------------------===//
168 // Builder configuration methods
169 //===--------------------------------------------------------------------===//
170
171 /// Clear the insertion point: created instructions will not be
172 /// inserted into a block.
173 void ClearInsertionPoint() {
174 BB = nullptr;
175 InsertPt = BasicBlock::iterator();
176 }
177
178 BasicBlock *GetInsertBlock() const { return BB; }
179 BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
180 LLVMContext &getContext() const { return Context; }
181
182 /// This specifies that created instructions should be appended to the
183 /// end of the specified block.
184 void SetInsertPoint(BasicBlock *TheBB) {
185 BB = TheBB;
186 InsertPt = BB->end();
187 }
188
189 /// This specifies that created instructions should be inserted before
190 /// the specified instruction.
191 void SetInsertPoint(Instruction *I) {
192 BB = I->getParent();
193 InsertPt = I->getIterator();
194 assert(InsertPt != BB->end() && "Can't read debug loc from end()");
195 SetCurrentDebugLocation(I->getDebugLoc());
196 }
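
Editorial note (reconstructed from the LLVM 14 headers, since this excerpt
ends well before line 2680): the constructor selected by the flagged
'IRBuilder<> Builder(UserI)' call looks essentially like this, and a null
'IP' is dereferenced both in the base-class initializer and, as shown above,
in SetInsertPoint:

    explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
                       ArrayRef<OperandBundleDef> OpBundles = None)
        : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter,
                        FPMathTag, OpBundles) {
      SetInsertPoint(IP); // SetInsertPoint(Instruction *) reads I->getParent()
    }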
197
198 /// This specifies that created instructions should be inserted at the
199 /// specified point.
200 void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) {
201 BB = TheBB;
202 InsertPt = IP;
203 if (IP != TheBB->end())
204 SetCurrentDebugLocation(IP->getDebugLoc());
205 }
206
207 /// Set location information used by debugging information.
208 void SetCurrentDebugLocation(DebugLoc L) {
209 AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode());
210 }
211
212 /// Collect metadata with IDs \p MetadataKinds from \p Src which should be
213 /// added to all created instructions. Entries present in MetadataToCopy but
214 /// not on \p Src will be dropped from MetadataToCopy.
215 void CollectMetadataToCopy(Instruction *Src,
216 ArrayRef<unsigned> MetadataKinds) {
217 for (unsigned K : MetadataKinds)
218 AddOrRemoveMetadataToCopy(K, Src->getMetadata(K));
219 }
220
221 /// Get location information used by debugging information.
222 DebugLoc getCurrentDebugLocation() const {
223 for (auto &KV : MetadataToCopy)
224 if (KV.first == LLVMContext::MD_dbg)
225 return {cast<DILocation>(KV.second)};
226
227 return {};
228 }
229
230 /// If this builder has a current debug location, set it on the
231 /// specified instruction.
232 void SetInstDebugLocation(Instruction *I) const {
233 for (const auto &KV : MetadataToCopy)
234 if (KV.first == LLVMContext::MD_dbg) {
235 I->setDebugLoc(DebugLoc(KV.second));
236 return;
237 }
238 }
239
240 /// Add all entries in MetadataToCopy to \p I.
241 void AddMetadataToInst(Instruction *I) const {
242 for (auto &KV : MetadataToCopy)
243 I->setMetadata(KV.first, KV.second);
244 }
245
246 /// Get the return type of the current function that we're emitting
247 /// into.
248 Type *getCurrentFunctionReturnType() const;
249
250 /// InsertPoint - A saved insertion point.
251 class InsertPoint {
252 BasicBlock *Block = nullptr;
253 BasicBlock::iterator Point;
254
255 public:
256 /// Creates a new insertion point which doesn't point to anything.
257 InsertPoint() = default;
258
259 /// Creates a new insertion point at the given location.
260 InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint)
261 : Block(InsertBlock), Point(InsertPoint) {}
262
263 /// Returns true if this insert point is set.
264 bool isSet() const { return (Block != nullptr); }
265
266 BasicBlock *getBlock() const { return Block; }
267 BasicBlock::iterator getPoint() const { return Point; }
268 };
269
270 /// Returns the current insert point.
271 InsertPoint saveIP() const {
272 return InsertPoint(GetInsertBlock(), GetInsertPoint());
273 }
274
275 /// Returns the current insert point, clearing it in the process.
276 InsertPoint saveAndClearIP() {
277 InsertPoint IP(GetInsertBlock(), GetInsertPoint());
278 ClearInsertionPoint();
279 return IP;
280 }
281
282 /// Sets the current insert point to a previously-saved location.
283 void restoreIP(InsertPoint IP) {
284 if (IP.isSet())
285 SetInsertPoint(IP.getBlock(), IP.getPoint());
286 else
287 ClearInsertionPoint();
288 }
289
290 /// Get the floating point math metadata being used.
291 MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }
292
293 /// Get the flags to be applied to created floating point ops
294 FastMathFlags getFastMathFlags() const { return FMF; }
295
296 FastMathFlags &getFastMathFlags() { return FMF; }
297
298 /// Clear the fast-math flags.
299 void clearFastMathFlags() { FMF.clear(); }
300
301 /// Set the floating point math metadata to be used.
302 void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }
303
304 /// Set the fast-math flags to be used with generated fp-math operators
305 void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }
306
307 /// Enable/Disable use of constrained floating point math. When
308 /// enabled the CreateF<op>() calls instead create constrained
309 /// floating point intrinsic calls. Fast math flags are unaffected
310 /// by this setting.
311 void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }
312
313 /// Query for the use of constrained floating point math
314 bool getIsFPConstrained() { return IsFPConstrained; }
315
316 /// Set the exception handling to be used with constrained floating point
317 void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) {
318#ifndef NDEBUG
319 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(NewExcept);
320 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
321#endif
322 DefaultConstrainedExcept = NewExcept;
323 }
324
325 /// Set the rounding mode handling to be used with constrained floating point
326 void setDefaultConstrainedRounding(RoundingMode NewRounding) {
327#ifndef NDEBUG
328 Optional<StringRef> RoundingStr = RoundingModeToStr(NewRounding);
329 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
330#endif
331 DefaultConstrainedRounding = NewRounding;
332 }
333
334 /// Get the exception handling used with constrained floating point
335 fp::ExceptionBehavior getDefaultConstrainedExcept() {
336 return DefaultConstrainedExcept;
337 }
338
339 /// Get the rounding mode handling used with constrained floating point
340 RoundingMode getDefaultConstrainedRounding() {
341 return DefaultConstrainedRounding;
342 }
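  // Illustrative usage sketch (not part of this header): put a builder `B`
  // into strict-FP mode; `X` and `Y` are hypothetical float values.
  B.setIsFPConstrained(true);
  B.setDefaultConstrainedExcept(fp::ebStrict);
  B.setDefaultConstrainedRounding(RoundingMode::NearestTiesToEven);
  // CreateFAdd now emits llvm.experimental.constrained.fadd, not fadd.
  Value *Sum = B.CreateFAdd(X, Y, "sum");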
343
344 void setConstrainedFPFunctionAttr() {
345 assert(BB && "Must have a basic block to set any function attributes!");
346
347 Function *F = BB->getParent();
348 if (!F->hasFnAttribute(Attribute::StrictFP)) {
349 F->addFnAttr(Attribute::StrictFP);
350 }
351 }
352
353 void setConstrainedFPCallAttr(CallBase *I) {
354 I->addFnAttr(Attribute::StrictFP);
355 }
356
357 void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) {
358 DefaultOperandBundles = OpBundles;
359 }
360
361 //===--------------------------------------------------------------------===//
362 // RAII helpers.
363 //===--------------------------------------------------------------------===//
364
365 // RAII object that stores the current insertion point and restores it
366 // when the object is destroyed. This includes the debug location.
367 class InsertPointGuard {
368 IRBuilderBase &Builder;
369 AssertingVH<BasicBlock> Block;
370 BasicBlock::iterator Point;
371 DebugLoc DbgLoc;
372
373 public:
374 InsertPointGuard(IRBuilderBase &B)
375 : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
376 DbgLoc(B.getCurrentDebugLocation()) {}
377
378 InsertPointGuard(const InsertPointGuard &) = delete;
379 InsertPointGuard &operator=(const InsertPointGuard &) = delete;
380
381 ~InsertPointGuard() {
382 Builder.restoreIP(InsertPoint(Block, Point));
383 Builder.SetCurrentDebugLocation(DbgLoc);
384 }
385 };
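  // Illustrative usage sketch (not part of this header): the RAII form of
  // the save/restore pattern above. `B` and `TmpBB` are hypothetical.
  {
    IRBuilderBase::InsertPointGuard Guard(B); // saves IP and debug location
    B.SetInsertPoint(TmpBB);
    B.CreateUnreachable();
  } // Guard's destructor restores the original insertion point here.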
386
387 // RAII object that stores the current fast math settings and restores
388 // them when the object is destroyed.
389 class FastMathFlagGuard {
390 IRBuilderBase &Builder;
391 FastMathFlags FMF;
392 MDNode *FPMathTag;
393 bool IsFPConstrained;
394 fp::ExceptionBehavior DefaultConstrainedExcept;
395 RoundingMode DefaultConstrainedRounding;
396
397 public:
398 FastMathFlagGuard(IRBuilderBase &B)
399 : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag),
400 IsFPConstrained(B.IsFPConstrained),
401 DefaultConstrainedExcept(B.DefaultConstrainedExcept),
402 DefaultConstrainedRounding(B.DefaultConstrainedRounding) {}
403
404 FastMathFlagGuard(const FastMathFlagGuard &) = delete;
405 FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;
406
407 ~FastMathFlagGuard() {
408 Builder.FMF = FMF;
409 Builder.DefaultFPMathTag = FPMathTag;
410 Builder.IsFPConstrained = IsFPConstrained;
411 Builder.DefaultConstrainedExcept = DefaultConstrainedExcept;
412 Builder.DefaultConstrainedRounding = DefaultConstrainedRounding;
413 }
414 };
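  // Illustrative usage sketch (not part of this header): temporarily enable
  // fast-math for one operation; `B`, `X`, and `Y` are hypothetical.
  {
    IRBuilderBase::FastMathFlagGuard FMFGuard(B);
    FastMathFlags FMF;
    FMF.setFast(); // reassoc + nnan + ninf + nsz + arcp + contract + afn
    B.setFastMathFlags(FMF);
    Value *Prod = B.CreateFMul(X, Y, "prod"); // carries the fast flags
  } // previous FMF, FP math tag, and constrained-FP state restored here.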
415
416 // RAII object that stores the current default operand bundles and restores
417 // them when the object is destroyed.
418 class OperandBundlesGuard {
419 IRBuilderBase &Builder;
420 ArrayRef<OperandBundleDef> DefaultOperandBundles;
421
422 public:
423 OperandBundlesGuard(IRBuilderBase &B)
424 : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {}
425
426 OperandBundlesGuard(const OperandBundlesGuard &) = delete;
427 OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete;
428
429 ~OperandBundlesGuard() {
430 Builder.DefaultOperandBundles = DefaultOperandBundles;
431 }
432 };
433
434
435 //===--------------------------------------------------------------------===//
436 // Miscellaneous creation methods.
437 //===--------------------------------------------------------------------===//
438
439 /// Make a new global variable with initializer type i8*
440 ///
441 /// Make a new global variable with an initializer that has array of i8 type
442 /// filled in with the null terminated string value specified. The new global
443 /// variable will be marked mergeable with any others of the same contents. If
444 /// Name is specified, it is the name of the global variable created.
445 ///
446 /// If no module is given via \p M, it is taken from the insertion point basic
447 /// block.
448 GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
449 unsigned AddressSpace = 0,
450 Module *M = nullptr);
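  // Illustrative usage sketch (not part of this header): materialize a C
  // string as a private unnamed_addr [6 x i8] global on a hypothetical
  // builder `B`; "greeting" is a hypothetical name.
  GlobalVariable *GV = B.CreateGlobalString("hello", "greeting");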
451
452 /// Get a constant value representing either true or false.
453 ConstantInt *getInt1(bool V) {
454 return ConstantInt::get(getInt1Ty(), V);
455 }
456
457 /// Get the constant value for i1 true.
458 ConstantInt *getTrue() {
459 return ConstantInt::getTrue(Context);
460 }
461
462 /// Get the constant value for i1 false.
463 ConstantInt *getFalse() {
464 return ConstantInt::getFalse(Context);
465 }
466
467 /// Get a constant 8-bit value.
468 ConstantInt *getInt8(uint8_t C) {
469 return ConstantInt::get(getInt8Ty(), C);
470 }
471
472 /// Get a constant 16-bit value.
473 ConstantInt *getInt16(uint16_t C) {
474 return ConstantInt::get(getInt16Ty(), C);
475 }
476
477 /// Get a constant 32-bit value.
478 ConstantInt *getInt32(uint32_t C) {
479 return ConstantInt::get(getInt32Ty(), C);
480 }
481
482 /// Get a constant 64-bit value.
483 ConstantInt *getInt64(uint64_t C) {
484 return ConstantInt::get(getInt64Ty(), C);
485 }
486
487 /// Get a constant N-bit value, zero extended or truncated from
488 /// a 64-bit value.
489 ConstantInt *getIntN(unsigned N, uint64_t C) {
490 return ConstantInt::get(getIntNTy(N), C);
491 }
492
493 /// Get a constant integer value.
494 ConstantInt *getInt(const APInt &AI) {
495 return ConstantInt::get(Context, AI);
496 }
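  // Illustrative usage sketch (not part of this header): the constant
  // helpers above, on a hypothetical builder `B`.
  ConstantInt *T    = B.getTrue();      // i1 true
  ConstantInt *Byte = B.getInt8(0xFF);  // i8 255
  ConstantInt *Word = B.getInt32(42);   // i32 42
  ConstantInt *Odd  = B.getIntN(24, 7); // i24 7, taken from a 64-bit value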
497
498 //===--------------------------------------------------------------------===//
499 // Type creation methods
500 //===--------------------------------------------------------------------===//
501
502 /// Fetch the type representing a single bit
503 IntegerType *getInt1Ty() {
504 return Type::getInt1Ty(Context);
505 }
506
507 /// Fetch the type representing an 8-bit integer.
508 IntegerType *getInt8Ty() {
509 return Type::getInt8Ty(Context);
510 }
511
512 /// Fetch the type representing a 16-bit integer.
513 IntegerType *getInt16Ty() {
514 return Type::getInt16Ty(Context);
515 }
516
517 /// Fetch the type representing a 32-bit integer.
518 IntegerType *getInt32Ty() {
519 return Type::getInt32Ty(Context);
520 }
521
522 /// Fetch the type representing a 64-bit integer.
523 IntegerType *getInt64Ty() {
524 return Type::getInt64Ty(Context);
525 }
526
527 /// Fetch the type representing a 128-bit integer.
528 IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }
529
530 /// Fetch the type representing an N-bit integer.
531 IntegerType *getIntNTy(unsigned N) {
532 return Type::getIntNTy(Context, N);
533 }
534
535 /// Fetch the type representing a 16-bit floating point value.
536 Type *getHalfTy() {
537 return Type::getHalfTy(Context);
538 }
539
540 /// Fetch the type representing a 16-bit brain floating point value.
541 Type *getBFloatTy() {
542 return Type::getBFloatTy(Context);
543 }
544
545 /// Fetch the type representing a 32-bit floating point value.
546 Type *getFloatTy() {
547 return Type::getFloatTy(Context);
548 }
549
550 /// Fetch the type representing a 64-bit floating point value.
551 Type *getDoubleTy() {
552 return Type::getDoubleTy(Context);
553 }
554
555 /// Fetch the type representing void.
556 Type *getVoidTy() {
557 return Type::getVoidTy(Context);
558 }
559
560 /// Fetch the type representing a pointer to an 8-bit integer value.
561 PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
562 return Type::getInt8PtrTy(Context, AddrSpace);
563 }
564
565 /// Fetch the type representing a pointer to an integer value.
566 IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
567 return DL.getIntPtrType(Context, AddrSpace);
568 }
569
570 //===--------------------------------------------------------------------===//
571 // Intrinsic creation methods
572 //===--------------------------------------------------------------------===//
573
574 /// Create and insert a memset to the specified pointer and the
575 /// specified value.
576 ///
577 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
578 /// specified, it will be added to the instruction. Likewise with alias.scope
579 /// and noalias tags.
580 CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size,
581 MaybeAlign Align, bool isVolatile = false,
582 MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr,
583 MDNode *NoAliasTag = nullptr) {
584 return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile,
585 TBAATag, ScopeTag, NoAliasTag);
586 }
587
588 CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align,
589 bool isVolatile = false, MDNode *TBAATag = nullptr,
590 MDNode *ScopeTag = nullptr,
591 MDNode *NoAliasTag = nullptr);
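  // Illustrative usage sketch (not part of this header): zero 128 bytes at
  // a hypothetical pointer `Ptr` with known 16-byte alignment.
  B.CreateMemSet(Ptr, B.getInt8(0), /*Size=*/128, MaybeAlign(16));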
592
593 /// Create and insert an element unordered-atomic memset of the region of
594 /// memory starting at the given pointer to the given value.
595 ///
596 /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is
597 /// specified, it will be added to the instruction. Likewise with alias.scope
598 /// and noalias tags.
599 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
600 uint64_t Size, Align Alignment,
601 uint32_t ElementSize,
602 MDNode *TBAATag = nullptr,
603 MDNode *ScopeTag = nullptr,
604 MDNode *NoAliasTag = nullptr) {
605 return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size),
606 Align(Alignment), ElementSize,
607 TBAATag, ScopeTag, NoAliasTag);
608 }
609
610 CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val,
611 Value *Size, Align Alignment,
612 uint32_t ElementSize,
613 MDNode *TBAATag = nullptr,
614 MDNode *ScopeTag = nullptr,
615 MDNode *NoAliasTag = nullptr);
616
617 /// Create and insert a memcpy between the specified pointers.
618 ///
619 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
620 /// specified, it will be added to the instruction. Likewise with alias.scope
621 /// and noalias tags.
622 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
623 MaybeAlign SrcAlign, uint64_t Size,
624 bool isVolatile = false, MDNode *TBAATag = nullptr,
625 MDNode *TBAAStructTag = nullptr,
626 MDNode *ScopeTag = nullptr,
627 MDNode *NoAliasTag = nullptr) {
628 return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
629 isVolatile, TBAATag, TBAAStructTag, ScopeTag,
630 NoAliasTag);
631 }
632
633 CallInst *CreateMemTransferInst(
634 Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
635 MaybeAlign SrcAlign, Value *Size, bool isVolatile = false,
636 MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
637 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
638
639 CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
640 MaybeAlign SrcAlign, Value *Size,
641 bool isVolatile = false, MDNode *TBAATag = nullptr,
642 MDNode *TBAAStructTag = nullptr,
643 MDNode *ScopeTag = nullptr,
644 MDNode *NoAliasTag = nullptr) {
645 return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src,
646 SrcAlign, Size, isVolatile, TBAATag,
647 TBAAStructTag, ScopeTag, NoAliasTag);
648 }
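  // Illustrative usage sketch (not part of this header): copy 64 bytes
  // between hypothetical pointers `Dst` and `Src`, both 8-byte aligned.
  B.CreateMemCpy(Dst, MaybeAlign(8), Src, MaybeAlign(8), /*Size=*/64);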
649
650 CallInst *
651 CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src,
652 MaybeAlign SrcAlign, Value *Size, bool IsVolatile = false,
653 MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
654 MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
655
656 /// Create and insert an element unordered-atomic memcpy between the
657 /// specified pointers.
658 ///
659 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively.
660 ///
661 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
662 /// specified, it will be added to the instruction. Likewise with alias.scope
663 /// and noalias tags.
664 CallInst *CreateElementUnorderedAtomicMemCpy(
665 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
666 uint32_t ElementSize, MDNode *TBAATag = nullptr,
667 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
668 MDNode *NoAliasTag = nullptr);
669
670 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
671 MaybeAlign SrcAlign, uint64_t Size,
672 bool isVolatile = false, MDNode *TBAATag = nullptr,
673 MDNode *ScopeTag = nullptr,
674 MDNode *NoAliasTag = nullptr) {
675 return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
676 isVolatile, TBAATag, ScopeTag, NoAliasTag);
677 }
678
679 CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
680 MaybeAlign SrcAlign, Value *Size,
681 bool isVolatile = false, MDNode *TBAATag = nullptr,
682 MDNode *ScopeTag = nullptr,
683 MDNode *NoAliasTag = nullptr);
684
685 /// \brief Create and insert an element unordered-atomic memmove between the
686 /// specified pointers.
687 ///
688 /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers,
689 /// respectively.
690 ///
691 /// If the pointers aren't i8*, they will be converted. If a TBAA tag is
692 /// specified, it will be added to the instruction. Likewise with alias.scope
693 /// and noalias tags.
694 CallInst *CreateElementUnorderedAtomicMemMove(
695 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
696 uint32_t ElementSize, MDNode *TBAATag = nullptr,
697 MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr,
698 MDNode *NoAliasTag = nullptr);
699
700 /// Create a sequential vector fadd reduction intrinsic of the source vector.
701 /// The first parameter is a scalar accumulator value. An unordered reduction
702 /// can be created by adding the reassoc fast-math flag to the resulting
703 /// sequential reduction.
704 CallInst *CreateFAddReduce(Value *Acc, Value *Src);
705
706 /// Create a sequential vector fmul reduction intrinsic of the source vector.
707 /// The first parameter is a scalar accumulator value. An unordered reduction
708 /// can be created by adding the reassoc fast-math flag to the resulting
709 /// sequential reduction.
710 CallInst *CreateFMulReduce(Value *Acc, Value *Src);
711
712 /// Create a vector int add reduction intrinsic of the source vector.
713 CallInst *CreateAddReduce(Value *Src);
714
715 /// Create a vector int mul reduction intrinsic of the source vector.
716 CallInst *CreateMulReduce(Value *Src);
717
718 /// Create a vector int AND reduction intrinsic of the source vector.
719 CallInst *CreateAndReduce(Value *Src);
720
721 /// Create a vector int OR reduction intrinsic of the source vector.
722 CallInst *CreateOrReduce(Value *Src);
723
724 /// Create a vector int XOR reduction intrinsic of the source vector.
725 CallInst *CreateXorReduce(Value *Src);
726
727 /// Create a vector integer max reduction intrinsic of the source
728 /// vector.
729 CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false);
730
731 /// Create a vector integer min reduction intrinsic of the source
732 /// vector.
733 CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false);
734
735 /// Create a vector float max reduction intrinsic of the source
736 /// vector.
737 CallInst *CreateFPMaxReduce(Value *Src);
738
739 /// Create a vector float min reduction intrinsic of the source
740 /// vector.
741 CallInst *CreateFPMinReduce(Value *Src);
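  // Illustrative usage sketch (not part of this header): reduce a
  // hypothetical <4 x i32> `IVec` and a <4 x float> `FVec` (the latter with
  // a hypothetical scalar float accumulator `Acc`).
  Value *ISum = B.CreateAddReduce(IVec);       // llvm.vector.reduce.add
  Value *FSum = B.CreateFAddReduce(Acc, FVec); // sequential fadd reduction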
742
743 /// Create a lifetime.start intrinsic.
744 ///
745 /// If the pointer isn't i8* it will be converted.
746 CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr);
747
748 /// Create a lifetime.end intrinsic.
749 ///
750 /// If the pointer isn't i8* it will be converted.
751 CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr);
752
753 /// Create a call to invariant.start intrinsic.
754 ///
755 /// If the pointer isn't i8* it will be converted.
756 CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr);
757
758 /// Create a call to Masked Load intrinsic
759 CallInst *CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask,
760 Value *PassThru = nullptr, const Twine &Name = "");
761
762 /// Create a call to Masked Store intrinsic
763 CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment,
764 Value *Mask);
765
766 /// Create a call to Masked Gather intrinsic
767 CallInst *CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment,
768 Value *Mask = nullptr, Value *PassThru = nullptr,
769 const Twine &Name = "");
770
771 /// Create a call to Masked Scatter intrinsic
772 CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment,
773 Value *Mask = nullptr);
774
775 /// Create an assume intrinsic call that allows the optimizer to
776 /// assume that the provided condition will be true.
777 ///
778 /// The optional argument \p OpBundles specifies operand bundles that are
779 /// added to the call instruction.
780 CallInst *CreateAssumption(Value *Cond,
781 ArrayRef<OperandBundleDef> OpBundles = llvm::None);
782
783 /// Create a llvm.experimental.noalias.scope.decl intrinsic call.
784 Instruction *CreateNoAliasScopeDeclaration(Value *Scope);
785 Instruction *CreateNoAliasScopeDeclaration(MDNode *ScopeTag) {
786 return CreateNoAliasScopeDeclaration(
787 MetadataAsValue::get(Context, ScopeTag));
788 }
789
790 /// Create a call to the experimental.gc.statepoint intrinsic to
791 /// start a new statepoint sequence.
792 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
793 Value *ActualCallee,
794 ArrayRef<Value *> CallArgs,
795 Optional<ArrayRef<Value *>> DeoptArgs,
796 ArrayRef<Value *> GCArgs,
797 const Twine &Name = "");
798
799 /// Create a call to the experimental.gc.statepoint intrinsic to
800 /// start a new statepoint sequence.
801 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
802 Value *ActualCallee, uint32_t Flags,
803 ArrayRef<Value *> CallArgs,
804 Optional<ArrayRef<Use>> TransitionArgs,
805 Optional<ArrayRef<Use>> DeoptArgs,
806 ArrayRef<Value *> GCArgs,
807 const Twine &Name = "");
808
809 /// Convenience function for the common case when CallArgs are filled
810 /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be
811 /// .get()'ed to get the Value pointer.
812 CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
813 Value *ActualCallee, ArrayRef<Use> CallArgs,
814 Optional<ArrayRef<Value *>> DeoptArgs,
815 ArrayRef<Value *> GCArgs,
816 const Twine &Name = "");
817
818 /// Create an invoke to the experimental.gc.statepoint intrinsic to
819 /// start a new statepoint sequence.
820 InvokeInst *
821 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
822 Value *ActualInvokee, BasicBlock *NormalDest,
823 BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
824 Optional<ArrayRef<Value *>> DeoptArgs,
825 ArrayRef<Value *> GCArgs, const Twine &Name = "");
826
827 /// Create an invoke to the experimental.gc.statepoint intrinsic to
828 /// start a new statepoint sequence.
829 InvokeInst *CreateGCStatepointInvoke(
830 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
831 BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
832 ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
833 Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
834 const Twine &Name = "");
835
836 // Convenience function for the common case when CallArgs are filled in using
837 // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
838 // get the Value *.
839 InvokeInst *
840 CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
841 Value *ActualInvokee, BasicBlock *NormalDest,
842 BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
843 Optional<ArrayRef<Value *>> DeoptArgs,
844 ArrayRef<Value *> GCArgs, const Twine &Name = "");
845
846 /// Create a call to the experimental.gc.result intrinsic to extract
847 /// the result from a call wrapped in a statepoint.
848 CallInst *CreateGCResult(Instruction *Statepoint,
849 Type *ResultType,
850 const Twine &Name = "");
851
852 /// Create a call to the experimental.gc.relocate intrinsics to
853 /// project the relocated value of one pointer from the statepoint.
854 CallInst *CreateGCRelocate(Instruction *Statepoint,
855 int BaseOffset,
856 int DerivedOffset,
857 Type *ResultType,
858 const Twine &Name = "");
859
860 /// Create a call to the experimental.gc.pointer.base intrinsic to get the
861 /// base pointer for the specified derived pointer.
862 CallInst *CreateGCGetPointerBase(Value *DerivedPtr, const Twine &Name = "");
863
864 /// Create a call to the experimental.gc.get.pointer.offset intrinsic to get
865 /// the offset of the specified derived pointer from its base.
866 CallInst *CreateGCGetPointerOffset(Value *DerivedPtr, const Twine &Name = "");
867
868 /// Create a call to llvm.vscale, multiplied by \p Scaling. The type of VScale
869 /// will be the same type as that of \p Scaling.
870 Value *CreateVScale(Constant *Scaling, const Twine &Name = "");
871
872 /// Creates a vector of type \p DstType with the linear sequence <0, 1, ...>
873 Value *CreateStepVector(Type *DstType, const Twine &Name = "");
874
875 /// Create a call to intrinsic \p ID with 1 operand which is mangled on its
876 /// type.
877 CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
878 Instruction *FMFSource = nullptr,
879 const Twine &Name = "");
880
881 /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
882 /// first type.
883 CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
884 Instruction *FMFSource = nullptr,
885 const Twine &Name = "");
886
887 /// Create a call to intrinsic \p ID with \p args, mangled using \p Types. If
888 /// \p FMFSource is provided, copy fast-math-flags from that instruction to
889 /// the intrinsic.
890 CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
891 ArrayRef<Value *> Args,
892 Instruction *FMFSource = nullptr,
893 const Twine &Name = "");
894
895 /// Create call to the minnum intrinsic.
896 CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
897 return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name);
898 }
899
900 /// Create call to the maxnum intrinsic.
901 CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
902 return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name);
903 }
904
905 /// Create call to the minimum intrinsic.
906 CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
907 return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
908 }
909
910 /// Create call to the maximum intrinsic.
911 CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
912 return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
913 }
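  // Illustrative usage sketch (not part of this header): `X` and `Y` are
  // hypothetical float values.
  Value *M1 = B.CreateMinNum(X, Y, "m1"); // llvm.minnum
  // The maxnum counterpart, spelled through the generic helper:
  Value *M2 = B.CreateBinaryIntrinsic(Intrinsic::maxnum, X, Y, nullptr, "m2");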
914
915 /// Create a call to the arithmetic_fence intrinsic.
916 CallInst *CreateArithmeticFence(Value *Val, Type *DstType,
917 const Twine &Name = "") {
918 return CreateIntrinsic(Intrinsic::arithmetic_fence, DstType, Val, nullptr,
919 Name);
920 }
921
922 /// Create a call to the experimental.vector.extract intrinsic.
923 CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx,
924 const Twine &Name = "") {
925 return CreateIntrinsic(Intrinsic::experimental_vector_extract,
926 {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr,
927 Name);
928 }
929
930 /// Create a call to the experimental.vector.insert intrinsic.
931 CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec,
932 Value *Idx, const Twine &Name = "") {
933 return CreateIntrinsic(Intrinsic::experimental_vector_insert,
934 {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx},
935 nullptr, Name);
936 }
937
938private:
939 /// Create a call to a masked intrinsic with given Id.
940 CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
941 ArrayRef<Type *> OverloadedTypes,
942 const Twine &Name = "");
943
944 Value *getCastedInt8PtrValue(Value *Ptr);
945
946 //===--------------------------------------------------------------------===//
947 // Instruction creation methods: Terminators
948 //===--------------------------------------------------------------------===//
949
950private:
951 /// Helper to add branch weight and unpredictable metadata onto an
952 /// instruction.
953 /// \returns The annotated instruction.
954 template <typename InstTy>
955 InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) {
956 if (Weights)
957 I->setMetadata(LLVMContext::MD_prof, Weights);
958 if (Unpredictable)
959 I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable);
960 return I;
961 }
962
963public:
964 /// Create a 'ret void' instruction.
965 ReturnInst *CreateRetVoid() {
966 return Insert(ReturnInst::Create(Context));
967 }
968
969 /// Create a 'ret <val>' instruction.
970 ReturnInst *CreateRet(Value *V) {
971 return Insert(ReturnInst::Create(Context, V));
972 }
973
974 /// Create a sequence of N insertvalue instructions,
975 /// with one Value from the retVals array each, that build an aggregate
976 /// return value one value at a time, and a ret instruction to return
977 /// the resulting aggregate value.
978 ///
979 /// This is a convenience function for code that uses aggregate return values
980 /// as a vehicle for having multiple return values.
981 ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) {
982 Value *V = UndefValue::get(getCurrentFunctionReturnType());
983 for (unsigned i = 0; i != N; ++i)
984 V = CreateInsertValue(V, retVals[i], i, "mrv");
985 return Insert(ReturnInst::Create(Context, V));
986 }
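  // Illustrative usage sketch (not part of this header): return two
  // hypothetical values `A` and `C` from a function whose return type is a
  // two-element aggregate.
  Value *RetVals[] = {A, C};
  B.CreateAggregateRet(RetVals, 2);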
987
988 /// Create an unconditional 'br label X' instruction.
989 BranchInst *CreateBr(BasicBlock *Dest) {
990 return Insert(BranchInst::Create(Dest));
991 }
992
993 /// Create a conditional 'br Cond, TrueDest, FalseDest'
994 /// instruction.
995 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
996 MDNode *BranchWeights = nullptr,
997 MDNode *Unpredictable = nullptr) {
998 return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
999 BranchWeights, Unpredictable));
1000 }
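  // Illustrative usage sketch (not part of this header): a branch annotated
  // as 90% taken. Requires llvm/IR/MDBuilder.h; `Cond`, `ThenBB`, and
  // `ElseBB` are hypothetical.
  MDNode *Weights = MDBuilder(B.getContext()).createBranchWeights(90, 10);
  B.CreateCondBr(Cond, ThenBB, ElseBB, Weights);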
1001
1002 /// Create a conditional 'br Cond, TrueDest, FalseDest'
1003 /// instruction. Copy branch metadata if available.
1004 BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
1005 Instruction *MDSrc) {
1006 BranchInst *Br = BranchInst::Create(True, False, Cond);
1007 if (MDSrc) {
1008 unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable,
1009 LLVMContext::MD_make_implicit, LLVMContext::MD_dbg};
1010 Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4));
1011 }
1012 return Insert(Br);
1013 }
1014
1015 /// Create a switch instruction with the specified value, default dest,
1016 /// and with a hint for the number of cases that will be added (for efficient
1017 /// allocation).
1018 SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
1019 MDNode *BranchWeights = nullptr,
1020 MDNode *Unpredictable = nullptr) {
1021 return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
1022 BranchWeights, Unpredictable));
1023 }
1024
1025 /// Create an indirect branch instruction with the specified address
1026 /// operand, with an optional hint for the number of destinations that will be
1027 /// added (for efficient allocation).
1028 IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
1029 return Insert(IndirectBrInst::Create(Addr, NumDests));
1030 }
1031
1032 /// Create an invoke instruction.
1033 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1034 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1035 ArrayRef<Value *> Args,
1036 ArrayRef<OperandBundleDef> OpBundles,
1037 const Twine &Name = "") {
1038 InvokeInst *II =
1039 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles);
1040 if (IsFPConstrained)
1041 setConstrainedFPCallAttr(II);
1042 return Insert(II, Name);
1043 }
1044 InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
1045 BasicBlock *NormalDest, BasicBlock *UnwindDest,
1046 ArrayRef<Value *> Args = None,
1047 const Twine &Name = "") {
1048 InvokeInst *II =
1049 InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args);
1050 if (IsFPConstrained)
1051 setConstrainedFPCallAttr(II);
1052 return Insert(II, Name);
1053 }
1054
1055 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1056 BasicBlock *UnwindDest, ArrayRef<Value *> Args,
1057 ArrayRef<OperandBundleDef> OpBundles,
1058 const Twine &Name = "") {
1059 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1060 NormalDest, UnwindDest, Args, OpBundles, Name);
1061 }
1062
1063 InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
1064 BasicBlock *UnwindDest,
1065 ArrayRef<Value *> Args = None,
1066 const Twine &Name = "") {
1067 return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
1068 NormalDest, UnwindDest, Args, Name);
1069 }
1070
1071 /// \brief Create a callbr instruction.
1072 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1073 BasicBlock *DefaultDest,
1074 ArrayRef<BasicBlock *> IndirectDests,
1075 ArrayRef<Value *> Args = None,
1076 const Twine &Name = "") {
1077 return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests,
1078 Args), Name);
1079 }
1080 CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
1081 BasicBlock *DefaultDest,
1082 ArrayRef<BasicBlock *> IndirectDests,
1083 ArrayRef<Value *> Args,
1084 ArrayRef<OperandBundleDef> OpBundles,
1085 const Twine &Name = "") {
1086 return Insert(
1087 CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
1088 OpBundles), Name);
1089 }
1090
1091 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1092 ArrayRef<BasicBlock *> IndirectDests,
1093 ArrayRef<Value *> Args = None,
1094 const Twine &Name = "") {
1095 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1096 DefaultDest, IndirectDests, Args, Name);
1097 }
1098 CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
1099 ArrayRef<BasicBlock *> IndirectDests,
1100 ArrayRef<Value *> Args,
1101 ArrayRef<OperandBundleDef> OpBundles,
1102 const Twine &Name = "") {
1103 return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
1104 DefaultDest, IndirectDests, Args, Name);
1105 }
1106
1107 ResumeInst *CreateResume(Value *Exn) {
1108 return Insert(ResumeInst::Create(Exn));
1109 }
1110
1111 CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
1112 BasicBlock *UnwindBB = nullptr) {
1113 return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
1114 }
1115
1116 CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
1117 unsigned NumHandlers,
1118 const Twine &Name = "") {
1119 return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
1120 Name);
1121 }
1122
1123 CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
1124 const Twine &Name = "") {
1125 return Insert(CatchPadInst::Create(ParentPad, Args), Name);
1126 }
1127
1128 CleanupPadInst *CreateCleanupPad(Value *ParentPad,
1129 ArrayRef<Value *> Args = None,
1130 const Twine &Name = "") {
1131 return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
1132 }
1133
1134 CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
1135 return Insert(CatchReturnInst::Create(CatchPad, BB));
1136 }
1137
1138 UnreachableInst *CreateUnreachable() {
1139 return Insert(new UnreachableInst(Context));
1140 }
1141
1142 //===--------------------------------------------------------------------===//
1143 // Instruction creation methods: Binary Operators
1144 //===--------------------------------------------------------------------===//
1145private:
1146 BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc,
1147 Value *LHS, Value *RHS,
1148 const Twine &Name,
1149 bool HasNUW, bool HasNSW) {
1150 BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name);
1151 if (HasNUW) BO->setHasNoUnsignedWrap();
1152 if (HasNSW) BO->setHasNoSignedWrap();
1153 return BO;
1154 }
1155
1156 Instruction *setFPAttrs(Instruction *I, MDNode *FPMD,
1157 FastMathFlags FMF) const {
1158 if (!FPMD)
1159 FPMD = DefaultFPMathTag;
1160 if (FPMD)
1161 I->setMetadata(LLVMContext::MD_fpmath, FPMD);
1162 I->setFastMathFlags(FMF);
1163 return I;
1164 }
1165
1166 Value *foldConstant(Instruction::BinaryOps Opc, Value *L,
1167 Value *R, const Twine &Name) const {
1168 auto *LC = dyn_cast<Constant>(L);
1169 auto *RC = dyn_cast<Constant>(R);
1170 return (LC && RC) ? Insert(Folder.CreateBinOp(Opc, LC, RC), Name) : nullptr;
1171 }
1172
1173 Value *getConstrainedFPRounding(Optional<RoundingMode> Rounding) {
1174 RoundingMode UseRounding = DefaultConstrainedRounding;
1175
1176 if (Rounding.hasValue())
1177 UseRounding = Rounding.getValue();
1178
1179 Optional<StringRef> RoundingStr = RoundingModeToStr(UseRounding);
1180 assert(RoundingStr.hasValue() && "Garbage strict rounding mode!");
1181 auto *RoundingMDS = MDString::get(Context, RoundingStr.getValue());
1182
1183 return MetadataAsValue::get(Context, RoundingMDS);
1184 }
1185
1186 Value *getConstrainedFPExcept(Optional<fp::ExceptionBehavior> Except) {
1187 fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept;
1188
1189 if (Except.hasValue())
1190 UseExcept = Except.getValue();
1191
1192 Optional<StringRef> ExceptStr = ExceptionBehaviorToStr(UseExcept);
1193 assert(ExceptStr.hasValue() && "Garbage strict exception behavior!");
1194 auto *ExceptMDS = MDString::get(Context, ExceptStr.getValue());
1195
1196 return MetadataAsValue::get(Context, ExceptMDS);
1197 }
1198
1199 Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) {
1200 assert(CmpInst::isFPPredicate(Predicate) &&
1201        Predicate != CmpInst::FCMP_FALSE &&
1202        Predicate != CmpInst::FCMP_TRUE &&
1203        "Invalid constrained FP comparison predicate!");
1204
1205 StringRef PredicateStr = CmpInst::getPredicateName(Predicate);
1206 auto *PredicateMDS = MDString::get(Context, PredicateStr);
1207
1208 return MetadataAsValue::get(Context, PredicateMDS);
1209 }
1210
1211public:
1212 Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "",
1213 bool HasNUW = false, bool HasNSW = false) {
1214 if (auto *LC = dyn_cast<Constant>(LHS))
1215 if (auto *RC = dyn_cast<Constant>(RHS))
1216 return Insert(Folder.CreateAdd(LC, RC, HasNUW, HasNSW), Name);
1217 return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name,
1218 HasNUW, HasNSW);
1219 }
1220
1221 Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1222 return CreateAdd(LHS, RHS, Name, false, true);
1223 }
1224
1225 Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
1226 return CreateAdd(LHS, RHS, Name, true, false);
1227 }
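  // Illustrative usage sketch (not part of this header): `X` and `Y` are
  // hypothetical i32 values.
  Value *S1 = B.CreateAdd(X, Y, "s1", /*HasNUW=*/false, /*HasNSW=*/true);
  Value *S2 = B.CreateNSWAdd(X, Y, "s2"); // shorthand for the line above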
1228
1229 Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "",
1230 bool HasNUW = false, bool HasNSW = false) {
1231 if (auto *LC = dyn_cast<Constant>(LHS))
1232 if (auto *RC = dyn_cast<Constant>(RHS))
1233 return Insert(Folder.CreateSub(LC, RC, HasNUW, HasNSW), Name);
1234 return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name,
1235 HasNUW, HasNSW);
1236 }
1237
1238 Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1239 return CreateSub(LHS, RHS, Name, false, true);
1240 }
1241
1242 Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
1243 return CreateSub(LHS, RHS, Name, true, false);
1244 }
1245
1246 Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
1247 bool HasNUW = false, bool HasNSW = false) {
1248 if (auto *LC = dyn_cast<Constant>(LHS))
1249 if (auto *RC = dyn_cast<Constant>(RHS))
1250 return Insert(Folder.CreateMul(LC, RC, HasNUW, HasNSW), Name);
1251 return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name,
1252 HasNUW, HasNSW);
1253 }
1254
1255 Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1256 return CreateMul(LHS, RHS, Name, false, true);
1257 }
1258
1259 Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
1260 return CreateMul(LHS, RHS, Name, true, false);
1261 }
1262
1263 Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1264 bool isExact = false) {
1265 if (auto *LC = dyn_cast<Constant>(LHS))
1266 if (auto *RC = dyn_cast<Constant>(RHS))
1267 return Insert(Folder.CreateUDiv(LC, RC, isExact), Name);
1268 if (!isExact)
1269 return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name);
1270 return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name);
1271 }
1272
1273 Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1274 return CreateUDiv(LHS, RHS, Name, true);
1275 }
1276
1277 Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
1278 bool isExact = false) {
1279 if (auto *LC = dyn_cast<Constant>(LHS))
1280 if (auto *RC = dyn_cast<Constant>(RHS))
1281 return Insert(Folder.CreateSDiv(LC, RC, isExact), Name);
1282 if (!isExact)
1283 return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
1284 return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
1285 }
1286
1287 Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
1288 return CreateSDiv(LHS, RHS, Name, true);
1289 }
1290
1291 Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
1292 if (Value *V = foldConstant(Instruction::URem, LHS, RHS, Name)) return V;
1293 return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
1294 }
1295
1296 Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
1297 if (Value *V = foldConstant(Instruction::SRem, LHS, RHS, Name)) return V;
1298 return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
1299 }
1300
1301 Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
1302 bool HasNUW = false, bool HasNSW = false) {
1303 if (auto *LC = dyn_cast<Constant>(LHS))
1304 if (auto *RC = dyn_cast<Constant>(RHS))
1305 return Insert(Folder.CreateShl(LC, RC, HasNUW, HasNSW), Name);
1306 return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
1307 HasNUW, HasNSW);
1308 }
1309
1310 Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
1311 bool HasNUW = false, bool HasNSW = false) {
1312 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1313 HasNUW, HasNSW);
1314 }
1315
1316 Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
1317 bool HasNUW = false, bool HasNSW = false) {
1318 return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
1319 HasNUW, HasNSW);
1320 }
1321
1322 Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
1323 bool isExact = false) {
1324 if (auto *LC = dyn_cast<Constant>(LHS))
1325 if (auto *RC = dyn_cast<Constant>(RHS))
1326 return Insert(Folder.CreateLShr(LC, RC, isExact), Name);
1327 if (!isExact)
1328 return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
1329 return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
1330 }
1331
1332 Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1333 bool isExact = false) {
1334 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1335 }
1336
1337 Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1338 bool isExact = false) {
1339 return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1340 }
1341
1342 Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
1343 bool isExact = false) {
1344 if (auto *LC = dyn_cast<Constant>(LHS))
1345 if (auto *RC = dyn_cast<Constant>(RHS))
1346 return Insert(Folder.CreateAShr(LC, RC, isExact), Name);
1347 if (!isExact)
1348 return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
1349 return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
1350 }
1351
1352 Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
1353 bool isExact = false) {
1354 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1355 }
1356
1357 Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
1358 bool isExact = false) {
1359 return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name, isExact);
1360 }
1361
1362 Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
1363 if (auto *RC = dyn_cast<Constant>(RHS)) {
1364 if (isa<ConstantInt>(RC) && cast<ConstantInt>(RC)->isMinusOne())
1365 return LHS; // LHS & -1 -> LHS
1366 if (auto *LC = dyn_cast<Constant>(LHS))
1367 return Insert(Folder.CreateAnd(LC, RC), Name);
1368 }
1369 return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
1370 }
1371
1372 Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1373 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1374 }
1375
1376 Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1377 return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1378 }
1379
1380 Value *CreateAnd(ArrayRef<Value*> Ops) {
1381 assert(!Ops.empty());
1382 Value *Accum = Ops[0];
1383 for (unsigned i = 1; i < Ops.size(); i++)
1384 Accum = CreateAnd(Accum, Ops[i]);
1385 return Accum;
1386 }
1387
1388 Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
1389 if (auto *RC = dyn_cast<Constant>(RHS)) {
1390 if (RC->isNullValue())
1391 return LHS; // LHS | 0 -> LHS
1392 if (auto *LC = dyn_cast<Constant>(LHS))
1393 return Insert(Folder.CreateOr(LC, RC), Name);
1394 }
1395 return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
1396 }
1397
1398 Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1399 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1400 }
1401
1402 Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1403 return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1404 }
1405
1406 Value *CreateOr(ArrayRef<Value*> Ops) {
1407 assert(!Ops.empty());
1408 Value *Accum = Ops[0];
1409 for (unsigned i = 1; i < Ops.size(); i++)
1410 Accum = CreateOr(Accum, Ops[i]);
1411 return Accum;
1412 }
1413
1414 Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
1415 if (Value *V = foldConstant(Instruction::Xor, LHS, RHS, Name)) return V;
1416 return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
1417 }
1418
1419 Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
1420 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1421 }
1422
1423 Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
1424 return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
1425 }
1426
1427 Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "",
1428 MDNode *FPMD = nullptr) {
1429 if (IsFPConstrained)
1430 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1431 L, R, nullptr, Name, FPMD);
1432
1433 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1434 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF);
1435 return Insert(I, Name);
1436 }
1437
1438 /// Copy fast-math-flags from an instruction rather than using the builder's
1439 /// default FMF.
1440 Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
1441 const Twine &Name = "") {
1442 if (IsFPConstrained)
1443 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
1444 L, R, FMFSource, Name);
1445
1446 if (Value *V = foldConstant(Instruction::FAdd, L, R, Name)) return V;
1447 Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr,
1448 FMFSource->getFastMathFlags());
1449 return Insert(I, Name);
1450 }
1451
1452 Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
1453 MDNode *FPMD = nullptr) {
1454 if (IsFPConstrained)
1455 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1456 L, R, nullptr, Name, FPMD);
1457
1458 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1459 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
1460 return Insert(I, Name);
1461 }
1462
1463 /// Copy fast-math-flags from an instruction rather than using the builder's
1464 /// default FMF.
1465 Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
1466 const Twine &Name = "") {
1467 if (IsFPConstrained)
1468 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
1469 L, R, FMFSource, Name);
1470
1471 if (Value *V = foldConstant(Instruction::FSub, L, R, Name)) return V;
1472 Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr,
1473 FMFSource->getFastMathFlags());
1474 return Insert(I, Name);
1475 }
1476
1477 Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
1478 MDNode *FPMD = nullptr) {
1479 if (IsFPConstrained)
1480 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1481 L, R, nullptr, Name, FPMD);
1482
1483 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1484 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
1485 return Insert(I, Name);
1486 }
1487
1488 /// Copy fast-math-flags from an instruction rather than using the builder's
1489 /// default FMF.
1490 Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
1491 const Twine &Name = "") {
1492 if (IsFPConstrained)
1493 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
1494 L, R, FMFSource, Name);
1495
1496 if (Value *V = foldConstant(Instruction::FMul, L, R, Name)) return V;
1497 Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr,
1498 FMFSource->getFastMathFlags());
1499 return Insert(I, Name);
1500 }
1501
1502 Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
1503 MDNode *FPMD = nullptr) {
1504 if (IsFPConstrained)
1505 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1506 L, R, nullptr, Name, FPMD);
1507
1508 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1509 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
1510 return Insert(I, Name);
1511 }
1512
1513 /// Copy fast-math-flags from an instruction rather than using the builder's
1514 /// default FMF.
1515 Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource,
1516 const Twine &Name = "") {
1517 if (IsFPConstrained)
1518 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
1519 L, R, FMFSource, Name);
1520
1521 if (Value *V = foldConstant(Instruction::FDiv, L, R, Name)) return V;
1522 Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr,
1523 FMFSource->getFastMathFlags());
1524 return Insert(I, Name);
1525 }
1526
1527 Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
1528 MDNode *FPMD = nullptr) {
1529 if (IsFPConstrained)
1530 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1531 L, R, nullptr, Name, FPMD);
1532
1533 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1534 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
1535 return Insert(I, Name);
1536 }
1537
1538 /// Copy fast-math-flags from an instruction rather than using the builder's
1539 /// default FMF.
1540 Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
1541 const Twine &Name = "") {
1542 if (IsFPConstrained)
1543 return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
1544 L, R, FMFSource, Name);
1545
1546 if (Value *V = foldConstant(Instruction::FRem, L, R, Name)) return V;
1547 Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr,
1548 FMFSource->getFastMathFlags());
1549 return Insert(I, Name);
1550 }
1551
1552 Value *CreateBinOp(Instruction::BinaryOps Opc,
1553 Value *LHS, Value *RHS, const Twine &Name = "",
1554 MDNode *FPMathTag = nullptr) {
1555 if (Value *V = foldConstant(Opc, LHS, RHS, Name)) return V;
1556 Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS);
1557 if (isa<FPMathOperator>(BinOp))
1558 setFPAttrs(BinOp, FPMathTag, FMF);
1559 return Insert(BinOp, Name);
1560 }
1561
1562 Value *CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name = "") {
1563 assert(Cond2->getType()->isIntOrIntVectorTy(1));
1564 return CreateSelect(Cond1, Cond2,
1565 ConstantInt::getNullValue(Cond2->getType()), Name);
1566 }
1567
1568 Value *CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name = "") {
1569 assert(Cond2->getType()->isIntOrIntVectorTy(1));
1570 return CreateSelect(Cond1, ConstantInt::getAllOnesValue(Cond2->getType()),
1571 Cond2, Name);
1572 }
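  // Illustrative usage sketch (not part of this header): unlike CreateAnd /
  // CreateOr, these lower to selects, so poison in `C2` does not leak when
  // `C1` already decides the result. `C1` and `C2` are hypothetical i1s.
  Value *Both   = B.CreateLogicalAnd(C1, C2); // select C1, C2, false
  Value *Either = B.CreateLogicalOr(C1, C2);  // select C1, true, C2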
1573
1574 CallInst *CreateConstrainedFPBinOp(
1575 Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
1576 const Twine &Name = "", MDNode *FPMathTag = nullptr,
1577 Optional<RoundingMode> Rounding = None,
1578 Optional<fp::ExceptionBehavior> Except = None);
1579
1580 Value *CreateNeg(Value *V, const Twine &Name = "",
1581 bool HasNUW = false, bool HasNSW = false) {
1582 if (auto *VC = dyn_cast<Constant>(V))
1583 return Insert(Folder.CreateNeg(VC, HasNUW, HasNSW), Name);
1584 BinaryOperator *BO = Insert(BinaryOperator::CreateNeg(V), Name);
1585 if (HasNUW) BO->setHasNoUnsignedWrap();
1586 if (HasNSW) BO->setHasNoSignedWrap();
1587 return BO;
1588 }
1589
1590 Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
1591 return CreateNeg(V, Name, false, true);
1592 }
1593
1594 Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
1595 return CreateNeg(V, Name, true, false);
1596 }
1597
1598 Value *CreateFNeg(Value *V, const Twine &Name = "",
1599 MDNode *FPMathTag = nullptr) {
1600 if (auto *VC = dyn_cast<Constant>(V))
1601 return Insert(Folder.CreateFNeg(VC), Name);
1602 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
1603 Name);
1604 }
1605
1606 /// Copy fast-math-flags from an instruction rather than using the builder's
1607 /// default FMF.
1608 Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
1609 const Twine &Name = "") {
1610 if (auto *VC = dyn_cast<Constant>(V))
1611 return Insert(Folder.CreateFNeg(VC), Name);
1612 return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr,
1613 FMFSource->getFastMathFlags()),
1614 Name);
1615 }
1616
1617 Value *CreateNot(Value *V, const Twine &Name = "") {
1618 if (auto *VC = dyn_cast<Constant>(V))
1619 return Insert(Folder.CreateNot(VC), Name);
1620 return Insert(BinaryOperator::CreateNot(V), Name);
1621 }
1622
1623 Value *CreateUnOp(Instruction::UnaryOps Opc,
1624 Value *V, const Twine &Name = "",
1625 MDNode *FPMathTag = nullptr) {
1626 if (auto *VC = dyn_cast<Constant>(V))
1627 return Insert(Folder.CreateUnOp(Opc, VC), Name);
1628 Instruction *UnOp = UnaryOperator::Create(Opc, V);
1629 if (isa<FPMathOperator>(UnOp))
1630 setFPAttrs(UnOp, FPMathTag, FMF);
1631 return Insert(UnOp, Name);
1632 }
1633
1634 /// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
1635 /// The correct number of operands must be passed accordingly.
1636 Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
1637 const Twine &Name = "", MDNode *FPMathTag = nullptr);
1638
1639 //===--------------------------------------------------------------------===//
1640 // Instruction creation methods: Memory Instructions
1641 //===--------------------------------------------------------------------===//
1642
1643 AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
1644 Value *ArraySize = nullptr, const Twine &Name = "") {
1645 const DataLayout &DL = BB->getModule()->getDataLayout();
1646 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1647 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1648 }
1649
1650 AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
1651 const Twine &Name = "") {
1652 const DataLayout &DL = BB->getModule()->getDataLayout();
1653 Align AllocaAlign = DL.getPrefTypeAlign(Ty);
1654 unsigned AddrSpace = DL.getAllocaAddrSpace();
1655 return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
1656 }
1657
1658 /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
1659 /// converting the string to 'bool' for the isVolatile parameter.
1660 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
1661 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
1662 }
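The overload above matters because a string literal converts to bool (a standard conversion) more eagerly than to const Twine& (a user-defined conversion). A sketch of the call it keeps well-formed, with B an IRBuilder<> and Ptr a pointer Value assumed:

    // Binds the literal to 'const char *Name' rather than to an
    // isVolatile bool parameter of another overload.
    LoadInst *L = B.CreateLoad(B.getInt32Ty(), Ptr, "val");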
1663
1664 LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
1665 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
1666 }
1667
1668 LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
1669 const Twine &Name = "") {
1670 return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name);
1671 }
1672
1673 // Deprecated [opaque pointer types]
1674   LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,
1675                                                  const char *Name),
1676                             "Use the version that explicitly specifies the "
1677                             "loaded type instead") {
1678 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1679 }
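The migration implied by the deprecation is mechanical: supply the element type instead of deriving it from the pointee. A sketch, assuming an IRBuilder<> B and a Value P of type i64*:

    // Deprecated: LoadInst *Old = B.CreateLoad(P, "v");    // type from pointee
    LoadInst *New = B.CreateLoad(B.getInt64Ty(), P, "v");   // type stated explicitly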
1680
1681 // Deprecated [opaque pointer types]
1682   LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,
1683                                                  const Twine &Name = ""),
1684                             "Use the version that explicitly specifies the "
1685                             "loaded type instead") {
1686 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, Name);
1687 }
1688
1689 // Deprecated [opaque pointer types]
1690   LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateLoad(Value *Ptr,
1691                                                  bool isVolatile,
1692                                                  const Twine &Name = ""),
1693                             "Use the version that explicitly specifies the "
1694                             "loaded type instead") {
1695 return CreateLoad(Ptr->getType()->getPointerElementType(), Ptr, isVolatile,
1696 Name);
1697 }
1698
1699 StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
1700 return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile);
1701 }
1702
1703 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1704 const char *Name) {
1705 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1706 }
1707
1708 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1709 const Twine &Name = "") {
1710 return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
1711 }
1712
1713 LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
1714 bool isVolatile, const Twine &Name = "") {
1715 if (!Align) {
1716 const DataLayout &DL = BB->getModule()->getDataLayout();
1717 Align = DL.getABITypeAlign(Ty);
1718 }
1719 return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name);
1720 }
1721
1722 // Deprecated [opaque pointer types]
1723   LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1724                                                         MaybeAlign Align,
1725                                                         const char *Name),
1726                             "Use the version that explicitly specifies the "
1727                             "loaded type instead") {
1728 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1729 Align, Name);
1730 }
1731 // Deprecated [opaque pointer types]
1732   LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1733                                                         MaybeAlign Align,
1734                                                         const Twine &Name = ""),
1735                             "Use the version that explicitly specifies the "
1736                             "loaded type instead") {
1737 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1738 Align, Name);
1739 }
1740 // Deprecated [opaque pointer types]
1741   LLVM_ATTRIBUTE_DEPRECATED(LoadInst *CreateAlignedLoad(Value *Ptr,
1742                                                         MaybeAlign Align,
1743                                                         bool isVolatile,
1744                                                         const Twine &Name = ""),
1745                             "Use the version that explicitly specifies the "
1746                             "loaded type instead") {
1747 return CreateAlignedLoad(Ptr->getType()->getPointerElementType(), Ptr,
1748 Align, isVolatile, Name);
1749 }
1750
1751 StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align,
1752 bool isVolatile = false) {
1753 if (!Align) {
1754 const DataLayout &DL = BB->getModule()->getDataLayout();
1755 Align = DL.getABITypeAlign(Val->getType());
1756 }
1757 return Insert(new StoreInst(Val, Ptr, isVolatile, *Align));
1758 }
1759 FenceInst *CreateFence(AtomicOrdering Ordering,
1760 SyncScope::ID SSID = SyncScope::System,
1761 const Twine &Name = "") {
1762 return Insert(new FenceInst(Context, Ordering, SSID), Name);
1763 }
1764
1765 AtomicCmpXchgInst *
1766 CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align,
1767 AtomicOrdering SuccessOrdering,
1768 AtomicOrdering FailureOrdering,
1769 SyncScope::ID SSID = SyncScope::System) {
1770 if (!Align) {
1771 const DataLayout &DL = BB->getModule()->getDataLayout();
1772 Align = llvm::Align(DL.getTypeStoreSize(New->getType()));
1773 }
1774
1775 return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, *Align, SuccessOrdering,
1776 FailureOrdering, SSID));
1777 }
1778
1779 AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr,
1780 Value *Val, MaybeAlign Align,
1781 AtomicOrdering Ordering,
1782 SyncScope::ID SSID = SyncScope::System) {
1783 if (!Align) {
1784 const DataLayout &DL = BB->getModule()->getDataLayout();
1785 Align = llvm::Align(DL.getTypeStoreSize(Val->getType()));
1786 }
1787
1788 return Insert(new AtomicRMWInst(Op, Ptr, Val, *Align, Ordering, SSID));
1789 }
1790
1791   LLVM_ATTRIBUTE_DEPRECATED(
1792       Value *CreateGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1793                        const Twine &Name = ""),
1794       "Use the version with explicit element type instead") {
1795 return CreateGEP(Ptr->getType()->getScalarType()->getPointerElementType(),
1796 Ptr, IdxList, Name);
1797 }
1798
1799 Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1800 const Twine &Name = "") {
1801 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1802 // Every index must be constant.
1803 size_t i, e;
1804 for (i = 0, e = IdxList.size(); i != e; ++i)
1805 if (!isa<Constant>(IdxList[i]))
1806 break;
1807 if (i == e)
1808 return Insert(Folder.CreateGetElementPtr(Ty, PC, IdxList), Name);
1809 }
1810 return Insert(GetElementPtrInst::Create(Ty, Ptr, IdxList), Name);
1811 }
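As the fast path above shows, a GEP whose base pointer and indices are all Constants folds to a ConstantExpr and emits no instruction; any dynamic operand produces a real getelementptr. A sketch, where ArrTy (an array type), global G, and runtime i64 index I are assumed:

    // All-constant operands: folded, nothing inserted.
    Value *E0 = B.CreateGEP(ArrTy, G, {B.getInt64(0), B.getInt64(0)});
    // Dynamic index: a getelementptr instruction is inserted.
    Value *EI = B.CreateGEP(ArrTy, G, {B.getInt64(0), I});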
1812
1813   LLVM_ATTRIBUTE_DEPRECATED(
1814       Value *CreateInBoundsGEP(Value *Ptr, ArrayRef<Value *> IdxList,
1815                                const Twine &Name = ""),
1816       "Use the version with explicit element type instead") {
1817 return CreateInBoundsGEP(
1818 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, IdxList,
1819 Name);
1820 }
1821
1822 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
1823 const Twine &Name = "") {
1824 if (auto *PC = dyn_cast<Constant>(Ptr)) {
1825 // Every index must be constant.
1826 size_t i, e;
1827 for (i = 0, e = IdxList.size(); i != e; ++i)
1828 if (!isa<Constant>(IdxList[i]))
1829 break;
1830 if (i == e)
1831 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IdxList),
1832 Name);
1833 }
1834 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList), Name);
1835 }
1836
1837 Value *CreateGEP(Type *Ty, Value *Ptr, Value *Idx, const Twine &Name = "") {
1838 if (auto *PC = dyn_cast<Constant>(Ptr))
1839 if (auto *IC = dyn_cast<Constant>(Idx))
1840 return Insert(Folder.CreateGetElementPtr(Ty, PC, IC), Name);
1841 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1842 }
1843
1844 Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, Value *Idx,
1845 const Twine &Name = "") {
1846 if (auto *PC = dyn_cast<Constant>(Ptr))
1847 if (auto *IC = dyn_cast<Constant>(Idx))
1848 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, IC), Name);
1849 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1850 }
1851
1852   LLVM_ATTRIBUTE_DEPRECATED(
1853       Value *CreateConstGEP1_32(Value *Ptr, unsigned Idx0,
1854                                 const Twine &Name = ""),
1855       "Use the version with explicit element type instead") {
1856 return CreateConstGEP1_32(
1857 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1858 Name);
1859 }
1860
1861 Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1862 const Twine &Name = "") {
1863 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1864
1865 if (auto *PC = dyn_cast<Constant>(Ptr))
1866 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1867
1868 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1869 }
1870
1871 Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
1872 const Twine &Name = "") {
1873 Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);
1874
1875 if (auto *PC = dyn_cast<Constant>(Ptr))
1876 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1877
1878 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1879 }
1880
1881 Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
1882 const Twine &Name = "") {
1883 Value *Idxs[] = {
1884 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1885 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1886 };
1887
1888 if (auto *PC = dyn_cast<Constant>(Ptr))
1889 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1890
1891 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1892 }
1893
1894 Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
1895 unsigned Idx1, const Twine &Name = "") {
1896 Value *Idxs[] = {
1897 ConstantInt::get(Type::getInt32Ty(Context), Idx0),
1898 ConstantInt::get(Type::getInt32Ty(Context), Idx1)
1899 };
1900
1901 if (auto *PC = dyn_cast<Constant>(Ptr))
1902 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1903
1904 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1905 }
1906
1907 Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1908 const Twine &Name = "") {
1909 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1910
1911 if (auto *PC = dyn_cast<Constant>(Ptr))
1912 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idx), Name);
1913
1914 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
1915 }
1916
1917   LLVM_ATTRIBUTE_DEPRECATED(
1918       Value *CreateConstGEP1_64(Value *Ptr, uint64_t Idx0,
1919                                 const Twine &Name = ""),
1920       "Use the version with explicit element type instead") {
1921 return CreateConstGEP1_64(
1922 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1923 Name);
1924 }
1925
1926 Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1927 const Twine &Name = "") {
1928 Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);
1929
1930 if (auto *PC = dyn_cast<Constant>(Ptr))
1931 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idx), Name);
1932
1933 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
1934 }
1935
1936   LLVM_ATTRIBUTE_DEPRECATED(
1937       Value *CreateConstInBoundsGEP1_64(Value *Ptr, uint64_t Idx0,
1938                                         const Twine &Name = ""),
1939       "Use the version with explicit element type instead") {
1940 return CreateConstInBoundsGEP1_64(
1941 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1942 Name);
1943 }
1944
1945 Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1946 const Twine &Name = "") {
1947 Value *Idxs[] = {
1948 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1949 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1950 };
1951
1952 if (auto *PC = dyn_cast<Constant>(Ptr))
1953 return Insert(Folder.CreateGetElementPtr(Ty, PC, Idxs), Name);
1954
1955 return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
1956 }
1957
1958   LLVM_ATTRIBUTE_DEPRECATED(
1959       Value *CreateConstGEP2_64(Value *Ptr, uint64_t Idx0, uint64_t Idx1,
1960                                 const Twine &Name = ""),
1961       "Use the version with explicit element type instead") {
1962 return CreateConstGEP2_64(
1963 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1964 Idx1, Name);
1965 }
1966
1967 Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
1968 uint64_t Idx1, const Twine &Name = "") {
1969 Value *Idxs[] = {
1970 ConstantInt::get(Type::getInt64Ty(Context), Idx0),
1971 ConstantInt::get(Type::getInt64Ty(Context), Idx1)
1972 };
1973
1974 if (auto *PC = dyn_cast<Constant>(Ptr))
1975 return Insert(Folder.CreateInBoundsGetElementPtr(Ty, PC, Idxs), Name);
1976
1977 return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
1978 }
1979
1980   LLVM_ATTRIBUTE_DEPRECATED(
1981       Value *CreateConstInBoundsGEP2_64(Value *Ptr, uint64_t Idx0,
1982                                         uint64_t Idx1, const Twine &Name = ""),
1983       "Use the version with explicit element type instead") {
1984 return CreateConstInBoundsGEP2_64(
1985 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, Idx0,
1986 Idx1, Name);
1987 }
1988
1989 Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
1990 const Twine &Name = "") {
1991 return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
1992 }
1993
1994   LLVM_ATTRIBUTE_DEPRECATED(
1995       Value *CreateStructGEP(Value *Ptr, unsigned Idx, const Twine &Name = ""),
1996       "Use the version with explicit element type instead") {
1997 return CreateConstInBoundsGEP2_32(
1998 Ptr->getType()->getScalarType()->getPointerElementType(), Ptr, 0, Idx,
1999 Name);
2000 }
2001
2002 /// Same as CreateGlobalString, but return a pointer with "i8*" type
2003 /// instead of a pointer to array of i8.
2004 ///
2005 /// If no module is given via \p M, it is taken from the insertion point
2006 /// basic block.
2007 Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
2008 unsigned AddressSpace = 0,
2009 Module *M = nullptr) {
2010 GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M);
2011 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
2012 Constant *Indices[] = {Zero, Zero};
2013 return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
2014 Indices);
2015 }
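Usage sketch, with B an IRBuilder<> that has a valid insertion point:

    // Creates a private [4 x i8] global holding "hi!\0" and returns an i8*
    // (an inbounds GEP to element (0, 0)), ready for C-style callees.
    Constant *Msg = B.CreateGlobalStringPtr("hi!", "msg");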
2016
2017 //===--------------------------------------------------------------------===//
2018 // Instruction creation methods: Cast/Conversion Operators
2019 //===--------------------------------------------------------------------===//
2020
2021 Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
2022 return CreateCast(Instruction::Trunc, V, DestTy, Name);
2023 }
2024
2025 Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
2026 return CreateCast(Instruction::ZExt, V, DestTy, Name);
2027 }
2028
2029 Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
2030 return CreateCast(Instruction::SExt, V, DestTy, Name);
2031 }
2032
2033 /// Create a ZExt or Trunc from the integer value V to DestTy. Return
2034 /// the value untouched if the type of V is already DestTy.
2035 Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
2036 const Twine &Name = "") {
2037     assert(V->getType()->isIntOrIntVectorTy() &&
2038            DestTy->isIntOrIntVectorTy() &&
2039            "Can only zero extend/truncate integers!");
2040 Type *VTy = V->getType();
2041 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2042 return CreateZExt(V, DestTy, Name);
2043 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2044 return CreateTrunc(V, DestTy, Name);
2045 return V;
2046 }
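Behavior sketch, with B an IRBuilder<> and X an i32 Value assumed:

    Value *W = B.CreateZExtOrTrunc(X, B.getInt64Ty()); // wider dest: emits zext
    Value *N = B.CreateZExtOrTrunc(X, B.getInt16Ty()); // narrower dest: emits trunc
    Value *S = B.CreateZExtOrTrunc(X, B.getInt32Ty()); // same width: returns X as-is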
2047
2048 /// Create a SExt or Trunc from the integer value V to DestTy. Return
2049 /// the value untouched if the type of V is already DestTy.
2050 Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
2051 const Twine &Name = "") {
2052     assert(V->getType()->isIntOrIntVectorTy() &&
2053            DestTy->isIntOrIntVectorTy() &&
2054            "Can only sign extend/truncate integers!");
2055 Type *VTy = V->getType();
2056 if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
2057 return CreateSExt(V, DestTy, Name);
2058 if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
2059 return CreateTrunc(V, DestTy, Name);
2060 return V;
2061 }
2062
2063 Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
2064 if (IsFPConstrained)
2065 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
2066 V, DestTy, nullptr, Name);
2067 return CreateCast(Instruction::FPToUI, V, DestTy, Name);
2068 }
2069
2070 Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
2071 if (IsFPConstrained)
2072 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
2073 V, DestTy, nullptr, Name);
2074 return CreateCast(Instruction::FPToSI, V, DestTy, Name);
2075 }
2076
2077 Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2078 if (IsFPConstrained)
2079 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp,
2080 V, DestTy, nullptr, Name);
2081 return CreateCast(Instruction::UIToFP, V, DestTy, Name);
2082 }
2083
2084 Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
2085 if (IsFPConstrained)
2086 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp,
2087 V, DestTy, nullptr, Name);
2088 return CreateCast(Instruction::SIToFP, V, DestTy, Name);
2089 }
2090
2091 Value *CreateFPTrunc(Value *V, Type *DestTy,
2092 const Twine &Name = "") {
2093 if (IsFPConstrained)
2094 return CreateConstrainedFPCast(
2095 Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
2096 Name);
2097 return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
2098 }
2099
2100 Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
2101 if (IsFPConstrained)
2102 return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
2103 V, DestTy, nullptr, Name);
2104 return CreateCast(Instruction::FPExt, V, DestTy, Name);
2105 }
2106
2107 Value *CreatePtrToInt(Value *V, Type *DestTy,
2108 const Twine &Name = "") {
2109 return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
2110 }
2111
2112 Value *CreateIntToPtr(Value *V, Type *DestTy,
2113 const Twine &Name = "") {
2114 return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
2115 }
2116
2117 Value *CreateBitCast(Value *V, Type *DestTy,
2118 const Twine &Name = "") {
2119 return CreateCast(Instruction::BitCast, V, DestTy, Name);
2120 }
2121
2122 Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
2123 const Twine &Name = "") {
2124 return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
2125 }
2126
2127 Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
2128 const Twine &Name = "") {
2129 if (V->getType() == DestTy)
2130 return V;
2131 if (auto *VC = dyn_cast<Constant>(V))
2132 return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
2133 return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
2134 }
2135
2136 Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
2137 const Twine &Name = "") {
2138 if (V->getType() == DestTy)
2139 return V;
2140 if (auto *VC = dyn_cast<Constant>(V))
2141 return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
2142 return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
2143 }
2144
2145 Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
2146 const Twine &Name = "") {
2147 if (V->getType() == DestTy)
2148 return V;
2149 if (auto *VC = dyn_cast<Constant>(V))
2150 return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
2151 return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
2152 }
2153
2154 Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
2155 const Twine &Name = "") {
2156 if (V->getType() == DestTy)
2157 return V;
2158 if (auto *VC = dyn_cast<Constant>(V))
2159 return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
2160 return Insert(CastInst::Create(Op, V, DestTy), Name);
2161 }
2162
2163 Value *CreatePointerCast(Value *V, Type *DestTy,
2164 const Twine &Name = "") {
2165 if (V->getType() == DestTy)
2166 return V;
2167 if (auto *VC = dyn_cast<Constant>(V))
2168 return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
2169 return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
2170 }
2171
2172 Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
2173 const Twine &Name = "") {
2174 if (V->getType() == DestTy)
2175 return V;
2176
2177 if (auto *VC = dyn_cast<Constant>(V)) {
2178 return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
2179 Name);
2180 }
2181
2182 return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
2183 Name);
2184 }
2185
2186 Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
2187 const Twine &Name = "") {
2188 if (V->getType() == DestTy)
2189 return V;
2190 if (auto *VC = dyn_cast<Constant>(V))
2191 return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
2192 return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
2193 }
2194
2195 Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
2196 const Twine &Name = "") {
2197 if (V->getType() == DestTy)
2198 return V;
2199 if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
2200 return CreatePtrToInt(V, DestTy, Name);
2201 if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
2202 return CreateIntToPtr(V, DestTy, Name);
2203
2204 return CreateBitCast(V, DestTy, Name);
2205 }
2206
2207 Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
2208 if (V->getType() == DestTy)
2209 return V;
2210 if (auto *VC = dyn_cast<Constant>(V))
2211 return Insert(Folder.CreateFPCast(VC, DestTy), Name);
2212 return Insert(CastInst::CreateFPCast(V, DestTy), Name);
2213 }
2214
2215 CallInst *CreateConstrainedFPCast(
2216 Intrinsic::ID ID, Value *V, Type *DestTy,
2217 Instruction *FMFSource = nullptr, const Twine &Name = "",
2218 MDNode *FPMathTag = nullptr,
2219 Optional<RoundingMode> Rounding = None,
2220 Optional<fp::ExceptionBehavior> Except = None);
2221
2222   // Deleted so that 'CreateIntCast(Ptr, Ptr, "...")' is a compile-time
2223   // error, instead of silently converting the string to bool for the
2224   // isSigned parameter.
2225 Value *CreateIntCast(Value *, Type *, const char *) = delete;
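A sketch of the mistake this blocks, with B, V, and DestTy assumed:

    // B.CreateIntCast(V, DestTy, "tmp");         // rejected: overload is deleted
    B.CreateIntCast(V, DestTy, /*isSigned=*/true, "tmp"); // signedness is explicit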
2226
2227 //===--------------------------------------------------------------------===//
2228 // Instruction creation methods: Compare Instructions
2229 //===--------------------------------------------------------------------===//
2230
2231 Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
2232 return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
2233 }
2234
2235 Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
2236 return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
2237 }
2238
2239 Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2240 return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
2241 }
2242
2243 Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2244 return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
2245 }
2246
2247 Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
2248 return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
2249 }
2250
2251 Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
2252 return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
2253 }
2254
2255 Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
2256 return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
2257 }
2258
2259 Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
2260 return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
2261 }
2262
2263 Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
2264 return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
2265 }
2266
2267 Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
2268 return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
2269 }
2270
2271 Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2272 MDNode *FPMathTag = nullptr) {
2273 return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
2274 }
2275
2276 Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
2277 MDNode *FPMathTag = nullptr) {
2278 return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
2279 }
2280
2281 Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
2282 MDNode *FPMathTag = nullptr) {
2283 return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
2284 }
2285
2286 Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
2287 MDNode *FPMathTag = nullptr) {
2288 return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
2289 }
2290
2291 Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
2292 MDNode *FPMathTag = nullptr) {
2293 return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
2294 }
2295
2296 Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
2297 MDNode *FPMathTag = nullptr) {
2298 return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
2299 }
2300
2301 Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
2302 MDNode *FPMathTag = nullptr) {
2303 return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
2304 }
2305
2306 Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
2307 MDNode *FPMathTag = nullptr) {
2308 return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
2309 }
2310
2311 Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
2312 MDNode *FPMathTag = nullptr) {
2313 return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
2314 }
2315
2316 Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
2317 MDNode *FPMathTag = nullptr) {
2318 return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
2319 }
2320
2321 Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
2322 MDNode *FPMathTag = nullptr) {
2323 return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
2324 }
2325
2326 Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
2327 MDNode *FPMathTag = nullptr) {
2328 return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
2329 }
2330
2331 Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
2332 MDNode *FPMathTag = nullptr) {
2333 return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
2334 }
2335
2336 Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
2337 MDNode *FPMathTag = nullptr) {
2338 return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
2339 }
2340
2341 Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2342 const Twine &Name = "") {
2343 if (auto *LC = dyn_cast<Constant>(LHS))
2344 if (auto *RC = dyn_cast<Constant>(RHS))
2345 return Insert(Folder.CreateICmp(P, LC, RC), Name);
2346 return Insert(new ICmpInst(P, LHS, RHS), Name);
2347 }
2348
2349 // Create a quiet floating-point comparison (i.e. one that raises an FP
2350 // exception only in the case where an input is a signaling NaN).
2351 // Note that this differs from CreateFCmpS only if IsFPConstrained is true.
2352 Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
2353 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2354 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false);
2355 }
2356
2357 Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
2358 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2359 return CmpInst::isFPPredicate(Pred)
2360 ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag)
2361 : CreateICmp(Pred, LHS, RHS, Name);
2362 }
2363
2364 // Create a signaling floating-point comparison (i.e. one that raises an FP
2365 // exception whenever an input is any NaN, signaling or quiet).
2366 // Note that this differs from CreateFCmp only if IsFPConstrained is true.
2367 Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
2368 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2369 return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true);
2370 }
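The two entry points differ only when the builder is in constrained-FP mode. A sketch, with B an IRBuilder<> and fp Values A and X assumed:

    B.setIsFPConstrained(true);
    // Quiet: raises an FP exception only for signaling NaNs
    // (lowers to llvm.experimental.constrained.fcmp).
    Value *Q = B.CreateFCmp(FCmpInst::FCMP_OLT, A, X);
    // Signaling: raises an FP exception for any NaN
    // (lowers to llvm.experimental.constrained.fcmps).
    Value *S = B.CreateFCmpS(FCmpInst::FCMP_OLT, A, X);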
2371
2372private:
2373 // Helper routine to create either a signaling or a quiet FP comparison.
2374 Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS,
2375 const Twine &Name, MDNode *FPMathTag,
2376 bool IsSignaling);
2377
2378public:
2379 CallInst *CreateConstrainedFPCmp(
2380 Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
2381 const Twine &Name = "", Optional<fp::ExceptionBehavior> Except = None);
2382
2383 //===--------------------------------------------------------------------===//
2384 // Instruction creation methods: Other Instructions
2385 //===--------------------------------------------------------------------===//
2386
2387 PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
2388 const Twine &Name = "") {
2389 PHINode *Phi = PHINode::Create(Ty, NumReservedValues);
2390 if (isa<FPMathOperator>(Phi))
2391 setFPAttrs(Phi, nullptr /* MDNode* */, FMF);
2392 return Insert(Phi, Name);
2393 }
2394
2395 CallInst *CreateCall(FunctionType *FTy, Value *Callee,
2396 ArrayRef<Value *> Args = None, const Twine &Name = "",
2397 MDNode *FPMathTag = nullptr) {
2398 CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
2399 if (IsFPConstrained)
2400 setConstrainedFPCallAttr(CI);
2401 if (isa<FPMathOperator>(CI))
2402 setFPAttrs(CI, FPMathTag, FMF);
2403 return Insert(CI, Name);
2404 }
2405
2406 CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args,
2407 ArrayRef<OperandBundleDef> OpBundles,
2408 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2409 CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles);
2410 if (IsFPConstrained)
2411 setConstrainedFPCallAttr(CI);
2412 if (isa<FPMathOperator>(CI))
2413 setFPAttrs(CI, FPMathTag, FMF);
2414 return Insert(CI, Name);
2415 }
2416
2417 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = None,
2418 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2419 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
2420 FPMathTag);
2421 }
2422
2423 CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
2424 ArrayRef<OperandBundleDef> OpBundles,
2425 const Twine &Name = "", MDNode *FPMathTag = nullptr) {
2426 return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args,
2427 OpBundles, Name, FPMathTag);
2428 }
2429
2430 CallInst *CreateConstrainedFPCall(
2431 Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
2432 Optional<RoundingMode> Rounding = None,
2433 Optional<fp::ExceptionBehavior> Except = None);
2434
2435 Value *CreateSelect(Value *C, Value *True, Value *False,
2436 const Twine &Name = "", Instruction *MDFrom = nullptr);
2437
2438 VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
2439 return Insert(new VAArgInst(List, Ty), Name);
2440 }
2441
2442 Value *CreateExtractElement(Value *Vec, Value *Idx,
2443 const Twine &Name = "") {
2444 if (auto *VC = dyn_cast<Constant>(Vec))
2445 if (auto *IC = dyn_cast<Constant>(Idx))
2446 return Insert(Folder.CreateExtractElement(VC, IC), Name);
2447 return Insert(ExtractElementInst::Create(Vec, Idx), Name);
2448 }
2449
2450 Value *CreateExtractElement(Value *Vec, uint64_t Idx,
2451 const Twine &Name = "") {
2452 return CreateExtractElement(Vec, getInt64(Idx), Name);
2453 }
2454
2455 Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
2456 const Twine &Name = "") {
2457 if (auto *VC = dyn_cast<Constant>(Vec))
2458 if (auto *NC = dyn_cast<Constant>(NewElt))
2459 if (auto *IC = dyn_cast<Constant>(Idx))
2460 return Insert(Folder.CreateInsertElement(VC, NC, IC), Name);
2461 return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
2462 }
2463
2464 Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
2465 const Twine &Name = "") {
2466 return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
2467 }
2468
2469 Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
2470 const Twine &Name = "") {
2471 SmallVector<int, 16> IntMask;
2472 ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask);
2473 return CreateShuffleVector(V1, V2, IntMask, Name);
2474 }
2475
2476   LLVM_ATTRIBUTE_DEPRECATED(Value *CreateShuffleVector(Value *V1, Value *V2,
2477                                                        ArrayRef<uint32_t> Mask,
2478                                                        const Twine &Name = ""),
2479                             "Pass indices as 'int' instead") {
2480 SmallVector<int, 16> IntMask;
2481 IntMask.assign(Mask.begin(), Mask.end());
2482 return CreateShuffleVector(V1, V2, IntMask, Name);
2483 }
2484
2485 /// See class ShuffleVectorInst for a description of the mask representation.
2486 Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask,
2487 const Twine &Name = "") {
2488 if (auto *V1C = dyn_cast<Constant>(V1))
2489 if (auto *V2C = dyn_cast<Constant>(V2))
2490 return Insert(Folder.CreateShuffleVector(V1C, V2C, Mask), Name);
2491 return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
2492 }
2493
2494 /// Create a unary shuffle. The second vector operand of the IR instruction
2495 /// is poison.
2496 Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask,
2497 const Twine &Name = "") {
2498 return CreateShuffleVector(V, PoisonValue::get(V->getType()), Mask, Name);
2499 }
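Mask sketch: each mask element selects a lane of concat(V1, V2). With V1 and V2 assumed to be <4 x float> Values:

    // Interleave the low halves: lanes 0,1 of V1 with lanes 0,1 of V2.
    Value *Lo = B.CreateShuffleVector(V1, V2, {0, 4, 1, 5});
    // Unary form: reverse V1; the omitted second operand is poison.
    Value *Rev = B.CreateShuffleVector(V1, {3, 2, 1, 0});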
2500
2501 Value *CreateExtractValue(Value *Agg,
2502 ArrayRef<unsigned> Idxs,
2503 const Twine &Name = "") {
2504 if (auto *AggC = dyn_cast<Constant>(Agg))
2505 return Insert(Folder.CreateExtractValue(AggC, Idxs), Name);
2506 return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
2507 }
2508
2509 Value *CreateInsertValue(Value *Agg, Value *Val,
2510 ArrayRef<unsigned> Idxs,
2511 const Twine &Name = "") {
2512 if (auto *AggC = dyn_cast<Constant>(Agg))
2513 if (auto *ValC = dyn_cast<Constant>(Val))
2514 return Insert(Folder.CreateInsertValue(AggC, ValC, Idxs), Name);
2515 return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
2516 }
2517
2518 LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
2519 const Twine &Name = "") {
2520 return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
2521 }
2522
2523 Value *CreateFreeze(Value *V, const Twine &Name = "") {
2524 return Insert(new FreezeInst(V), Name);
2525 }
2526
2527 //===--------------------------------------------------------------------===//
2528 // Utility creation methods
2529 //===--------------------------------------------------------------------===//
2530
2531 /// Return an i1 value testing if \p Arg is null.
2532 Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
2533 return CreateICmpEQ(Arg, Constant::getNullValue(Arg->getType()),
2534 Name);
2535 }
2536
2537 /// Return an i1 value testing if \p Arg is not null.
2538 Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
2539 return CreateICmpNE(Arg, Constant::getNullValue(Arg->getType()),
2540 Name);
2541 }
2542
2543 /// Return the i64 difference between two pointer values, dividing out
2544 /// the size of the pointed-to objects.
2545 ///
2546 /// This is intended to implement C-style pointer subtraction. As such, the
2547 /// pointers must be appropriately aligned for their element types and
2548 /// pointing into the same object.
2549 Value *CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name = "");
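Semantics sketch, with P and Q assumed to be i32* Values into the same array:

    // Computes (ptrtoint P - ptrtoint Q), exact-divided by the element size
    // (4 here): the element-count distance, as in C pointer subtraction.
    Value *D = B.CreatePtrDiff(P, Q, "idx.diff");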
2550
2551 /// Create a launder.invariant.group intrinsic call. If \p Ptr is not an
2552 /// i8 pointer, it is cast to an i8 pointer in the same address space
2553 /// before the call and cast back to its original type afterwards.
2554 Value *CreateLaunderInvariantGroup(Value *Ptr);
2555
2556 /// \brief Create a strip.invariant.group intrinsic call. If \p Ptr is not
2557 /// an i8 pointer, it is cast to an i8 pointer in the same address space
2558 /// before the call and cast back to its original type afterwards.
2559 Value *CreateStripInvariantGroup(Value *Ptr);
2560
2561 /// Return a vector value that contains the vector \p V reversed.
2562 Value *CreateVectorReverse(Value *V, const Twine &Name = "");
2563
2564 /// Return a vector splice intrinsic if using scalable vectors, otherwise
2565 /// return a shufflevector. If the immediate is positive, a vector is
2566 /// extracted from concat(V1, V2), starting at Imm. If the immediate
2567 /// is negative, we extract -Imm elements from V1 and the remaining
2568 /// elements from V2. Imm is a signed integer in the range
2569 /// -VL <= Imm < VL (where VL is the runtime vector length of the
2570 /// source/result vector).
2571 Value *CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
2572 const Twine &Name = "");
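Immediate semantics sketch, with V1 and V2 assumed to be <4 x i32> Values:

    // Imm = 1: elements 1..4 of concat(V1, V2) -> {V1[1], V1[2], V1[3], V2[0]}.
    Value *S1 = B.CreateVectorSplice(V1, V2, 1);
    // Imm = -1: the last element of V1 followed by the first three of V2.
    Value *S2 = B.CreateVectorSplice(V1, V2, -1);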
2573
2574 /// Return a vector value that contains \arg V broadcast to \p
2575 /// NumElts elements.
2576 Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");
2577
2578 /// Return a vector value that contains \arg V broadcast to \p
2579 /// EC elements.
2580 Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = "");
2581
2582 /// Return a value that has been extracted from a larger integer type.
2583 Value *CreateExtractInteger(const DataLayout &DL, Value *From,
2584 IntegerType *ExtractedTy, uint64_t Offset,
2585 const Twine &Name);
2586
2587 Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
2588 unsigned Dimension, unsigned LastIndex,
2589 MDNode *DbgInfo);
2590
2591 Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
2592 MDNode *DbgInfo);
2593
2594 Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
2595 unsigned Index, unsigned FieldIndex,
2596 MDNode *DbgInfo);
2597
2598private:
2599 /// Helper function that creates an assume intrinsic call that
2600 /// represents an alignment assumption on the provided pointer \p PtrValue
2601 /// with offset \p OffsetValue and alignment value \p AlignValue.
2602 CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
2603 Value *PtrValue, Value *AlignValue,
2604 Value *OffsetValue);
2605
2606public:
2607 /// Create an assume intrinsic call that represents an alignment
2608 /// assumption on the provided pointer.
2609 ///
2610 /// An optional offset can be provided, and if it is provided, the offset
2611 /// must be subtracted from the provided pointer to get the pointer with the
2612 /// specified alignment.
2613 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2614 unsigned Alignment,
2615 Value *OffsetValue = nullptr);
2616
2617 /// Create an assume intrinsic call that represents an alignment
2618 /// assumption on the provided pointer.
2619 ///
2620 /// An optional offset can be provided, and if it is provided, the offset
2621 /// must be subtracted from the provided pointer to get the pointer with the
2622 /// specified alignment.
2623 ///
2624 /// This overload handles the case where the alignment is a runtime
2625 /// value rather than a compile-time constant.
2626 CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
2627 Value *Alignment,
2628 Value *OffsetValue = nullptr);
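Usage sketch for the static-alignment overload, with B, DL, and pointer Ptr assumed (the emitted IR form is my understanding of the current lowering, an llvm.assume with an "align" operand bundle):

    // Tells the optimizer that Ptr is 32-byte aligned.
    CallInst *A = B.CreateAlignmentAssumption(DL, Ptr, 32);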
2629};
2630
2631/// This provides a uniform API for creating instructions and inserting
2632/// them into a basic block: either at the end of a BasicBlock, or at a specific
2633/// iterator location in a block.
2634///
2635/// Note that the builder does not expose the full generality of LLVM
2636/// instructions. For access to extra instruction properties, use the mutators
2637/// (e.g. setVolatile) on the instructions after they have been
2638/// created. Convenience state exists to specify fast-math flags and fp-math
2639/// tags.
2640///
2641/// The first template argument specifies a class to use for creating constants.
2642/// This defaults to creating minimally folded constants. The second template
2643/// argument allows clients to specify custom insertion hooks that are called on
2644 /// every newly created instruction as it is inserted.
2645template <typename FolderTy = ConstantFolder,
2646 typename InserterTy = IRBuilderDefaultInserter>
2647class IRBuilder : public IRBuilderBase {
2648private:
2649 FolderTy Folder;
2650 InserterTy Inserter;
2651
2652public:
2653 IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(),
2654 MDNode *FPMathTag = nullptr,
2655 ArrayRef<OperandBundleDef> OpBundles = None)
2656 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles),
2657 Folder(Folder), Inserter(Inserter) {}
2658
2659 explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
2660 ArrayRef<OperandBundleDef> OpBundles = None)
2661 : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}
2662
2663 explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
2664 MDNode *FPMathTag = nullptr,
2665 ArrayRef<OperandBundleDef> OpBundles = None)
2666 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2667 FPMathTag, OpBundles), Folder(Folder) {
2668 SetInsertPoint(TheBB);
2669 }
2670
2671 explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
2672 ArrayRef<OperandBundleDef> OpBundles = None)
2673 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2674 FPMathTag, OpBundles) {
2675 SetInsertPoint(TheBB);
2676 }
2677
2678 explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
2679 ArrayRef<OperandBundleDef> OpBundles = None)
2680 : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter,
    22. Called C++ object pointer is null
2681 FPMathTag, OpBundles) {
2682 SetInsertPoint(IP);
2683 }
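This constructor is where the reported defect fires: IP->getContext() runs before SetInsertPoint, so a null Instruction* (as in the path the analyzer traced through X86LowerAMXType.cpp) is dereferenced immediately. A caller-side guard sketch, with BB assumed to be a block that may still be empty:

    // Never construct IRBuilder<> from a null insertion point:
    // IP->getContext() above dereferences it before anything else.
    if (Instruction *InsertPt = BB->getFirstNonPHI()) {
      IRBuilder<> Builder(InsertPt); // safe: InsertPt is non-null here
      // ... emit code ...
    }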
2684
2685 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
2686 MDNode *FPMathTag = nullptr,
2687 ArrayRef<OperandBundleDef> OpBundles = None)
2688 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2689 FPMathTag, OpBundles), Folder(Folder) {
2690 SetInsertPoint(TheBB, IP);
2691 }
2692
2693 IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
2694 MDNode *FPMathTag = nullptr,
2695 ArrayRef<OperandBundleDef> OpBundles = None)
2696 : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
2697 FPMathTag, OpBundles) {
2698 SetInsertPoint(TheBB, IP);
2699 }
2700
2701 /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
2702 /// or FastMathFlagGuard instead.
2703 IRBuilder(const IRBuilder &) = delete;
2704
2705 InserterTy &getInserter() { return Inserter; }
2706};
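A sketch of the two template parameters in action, using the real llvm::NoFolder from llvm/IR/NoFolder.h (set an insertion point before emitting into a function; it is omitted here):

    LLVMContext Ctx;
    IRBuilder<> B(Ctx);           // ConstantFolder: constants fold eagerly
    IRBuilder<NoFolder> NB(Ctx);  // NoFolder: every operation becomes an instruction
    Value *Two = B.CreateAdd(B.getInt32(1), B.getInt32(1));    // folded: i32 2
    Value *Add = NB.CreateAdd(NB.getInt32(1), NB.getInt32(1)); // a real 'add'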
2707
2708// Create wrappers for C Binding types (see CBindingWrapping.h).
2709 DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)
2710
2711} // end namespace llvm
2712
2713#endif // LLVM_IR_IRBUILDER_H