File: | build/source/llvm/include/llvm/IR/IRBuilder.h |
Warning: | line 2558, column 23 Called C++ object pointer is null |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | //===- Target/X86/X86LowerAMXType.cpp - -------------------------*- C++ -*-===// | ||||
2 | // | ||||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | ||||
4 | // See https://llvm.org/LICENSE.txt for license information. | ||||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | ||||
6 | // | ||||
7 | //===----------------------------------------------------------------------===// | ||||
8 | // | ||||
9 | /// \file Pass to transform <256 x i32> load/store | ||||
10 | /// <256 x i32> is bitcasted to x86_amx on X86, and AMX instruction set only | ||||
11 | /// provides simple operation on x86_amx. The basic elementwise operation | ||||
12 | /// is not supported by AMX. Since x86_amx is bitcasted from vector <256 x i32> | ||||
13 | /// and only AMX intrinsics can operate on the type, we need transform | ||||
14 | /// load/store <256 x i32> instruction to AMX load/store. If the bitcast can | ||||
15 | /// not be combined with load/store, we transform the bitcast to amx load/store | ||||
16 | /// and <256 x i32> store/load. | ||||
17 | /// | ||||
18 | /// If Front End not use O0 but the Mid/Back end use O0, (e.g. "Clang -O2 -S | ||||
19 | /// -emit-llvm t.c" + "llc t.ll") we should make sure the amx data is volatile, | ||||
20 | /// because that is necessary for AMX fast register allocation. (In Fast | ||||
21 | /// registera allocation, register will be allocated before spill/reload, so | ||||
22 | /// there is no additional register for amx to identify the step in spill.) | ||||
23 | /// The volatileTileData() will handle this case. | ||||
24 | /// e.g. | ||||
25 | /// ---------------------------------------------------------- | ||||
26 | /// | def %td = ... | | ||||
27 | /// | ... | | ||||
28 | /// | "use %td" | | ||||
29 | /// ---------------------------------------------------------- | ||||
30 | /// will transfer to --> | ||||
31 | /// ---------------------------------------------------------- | ||||
32 | /// | def %td = ... | | ||||
33 | /// | call void @llvm.x86.tilestored64.internal(mem, %td) | | ||||
34 | /// | ... | | ||||
35 | /// | %td2 = call x86_amx @llvm.x86.tileloadd64.internal(mem)| | ||||
36 | /// | "use %td2" | | ||||
37 | /// ---------------------------------------------------------- | ||||
38 | // | ||||
39 | //===----------------------------------------------------------------------===// | ||||
40 | // | ||||
41 | #include "X86.h" | ||||
42 | #include "llvm/ADT/PostOrderIterator.h" | ||||
43 | #include "llvm/ADT/SetVector.h" | ||||
44 | #include "llvm/ADT/SmallSet.h" | ||||
45 | #include "llvm/Analysis/OptimizationRemarkEmitter.h" | ||||
46 | #include "llvm/Analysis/TargetLibraryInfo.h" | ||||
47 | #include "llvm/Analysis/TargetTransformInfo.h" | ||||
48 | #include "llvm/CodeGen/Passes.h" | ||||
49 | #include "llvm/CodeGen/TargetPassConfig.h" | ||||
50 | #include "llvm/CodeGen/ValueTypes.h" | ||||
51 | #include "llvm/IR/DataLayout.h" | ||||
52 | #include "llvm/IR/Function.h" | ||||
53 | #include "llvm/IR/IRBuilder.h" | ||||
54 | #include "llvm/IR/Instructions.h" | ||||
55 | #include "llvm/IR/IntrinsicInst.h" | ||||
56 | #include "llvm/IR/IntrinsicsX86.h" | ||||
57 | #include "llvm/IR/PatternMatch.h" | ||||
58 | #include "llvm/InitializePasses.h" | ||||
59 | #include "llvm/Pass.h" | ||||
60 | #include "llvm/Target/TargetMachine.h" | ||||
61 | #include "llvm/Transforms/Utils/AssumeBundleBuilder.h" | ||||
62 | #include "llvm/Transforms/Utils/Local.h" | ||||
63 | |||||
64 | #include <map> | ||||
65 | |||||
66 | using namespace llvm; | ||||
67 | using namespace PatternMatch; | ||||
68 | |||||
#define DEBUG_TYPE "lower-amx-type"
70 | |||||
71 | static bool isAMXCast(Instruction *II) { | ||||
72 | return match(II, | ||||
73 | m_Intrinsic<Intrinsic::x86_cast_vector_to_tile>(m_Value())) || | ||||
74 | match(II, m_Intrinsic<Intrinsic::x86_cast_tile_to_vector>(m_Value())); | ||||
75 | } | ||||
76 | |||||
77 | static bool isAMXIntrinsic(Value *I) { | ||||
78 | auto *II = dyn_cast<IntrinsicInst>(I); | ||||
79 | if (!II) | ||||
80 | return false; | ||||
81 | if (isAMXCast(II)) | ||||
82 | return false; | ||||
83 | // Check if return type or parameter is x86_amx. If it is x86_amx | ||||
84 | // the intrinsic must be x86 amx intrinsics. | ||||
85 | if (II->getType()->isX86_AMXTy()) | ||||
86 | return true; | ||||
87 | for (Value *V : II->args()) { | ||||
88 | if (V->getType()->isX86_AMXTy()) | ||||
89 | return true; | ||||
90 | } | ||||
91 | |||||
92 | return false; | ||||
93 | } | ||||
94 | |||||
95 | static AllocaInst *createAllocaInstAtEntry(IRBuilder<> &Builder, BasicBlock *BB, | ||||
96 | Type *Ty) { | ||||
97 | Function &F = *BB->getParent(); | ||||
98 | Module *M = BB->getModule(); | ||||
99 | const DataLayout &DL = M->getDataLayout(); | ||||
100 | |||||
101 | LLVMContext &Ctx = Builder.getContext(); | ||||
102 | auto AllocaAlignment = DL.getPrefTypeAlign(Type::getX86_AMXTy(Ctx)); | ||||
103 | unsigned AllocaAS = DL.getAllocaAddrSpace(); | ||||
104 | AllocaInst *AllocaRes = | ||||
105 | new AllocaInst(Ty, AllocaAS, "", &F.getEntryBlock().front()); | ||||
106 | AllocaRes->setAlignment(AllocaAlignment); | ||||
107 | return AllocaRes; | ||||
108 | } | ||||
109 | |||||
110 | static Instruction *getFirstNonAllocaInTheEntryBlock(Function &F) { | ||||
111 | for (Instruction &I : F.getEntryBlock()) | ||||
112 | if (!isa<AllocaInst>(&I)) | ||||
113 | return &I; | ||||
114 | llvm_unreachable("No terminator in the entry block!")::llvm::llvm_unreachable_internal("No terminator in the entry block!" , "llvm/lib/Target/X86/X86LowerAMXType.cpp", 114); | ||||
115 | } | ||||
116 | |||||
117 | static std::pair<Value *, Value *> getShape(IntrinsicInst *II, unsigned OpNo) { | ||||
118 | IRBuilder<> Builder(II); | ||||
119 | Value *Row = nullptr, *Col = nullptr; | ||||
120 | switch (II->getIntrinsicID()) { | ||||
121 | default: | ||||
122 | llvm_unreachable("Expect amx intrinsics")::llvm::llvm_unreachable_internal("Expect amx intrinsics", "llvm/lib/Target/X86/X86LowerAMXType.cpp" , 122); | ||||
123 | case Intrinsic::x86_tileloadd64_internal: | ||||
124 | case Intrinsic::x86_tileloaddt164_internal: | ||||
125 | case Intrinsic::x86_tilestored64_internal: { | ||||
126 | Row = II->getArgOperand(0); | ||||
127 | Col = II->getArgOperand(1); | ||||
128 | break; | ||||
129 | } | ||||
130 | // a * b + c | ||||
131 | // The shape depends on which operand. | ||||
132 | case Intrinsic::x86_tdpbssd_internal: | ||||
133 | case Intrinsic::x86_tdpbsud_internal: | ||||
134 | case Intrinsic::x86_tdpbusd_internal: | ||||
135 | case Intrinsic::x86_tdpbuud_internal: | ||||
136 | case Intrinsic::x86_tdpbf16ps_internal: { | ||||
137 | switch (OpNo) { | ||||
138 | case 3: | ||||
139 | Row = II->getArgOperand(0); | ||||
140 | Col = II->getArgOperand(1); | ||||
141 | break; | ||||
142 | case 4: | ||||
143 | Row = II->getArgOperand(0); | ||||
144 | Col = II->getArgOperand(2); | ||||
145 | break; | ||||
146 | case 5: | ||||
147 | if (isa<ConstantInt>(II->getArgOperand(2))) | ||||
148 | Row = Builder.getInt16( | ||||
149 | (cast<ConstantInt>(II->getOperand(2))->getSExtValue()) / 4); | ||||
150 | else if (isa<Instruction>(II->getArgOperand(2))) { | ||||
151 | // When it is not a const value and it is not a function argument, we | ||||
152 | // create Row after the definition of II->getOperand(2) instead of | ||||
153 | // before II. For example, II is %118, we try to getshape for %117: | ||||
154 | // %117 = call x86_amx @llvm.x86.cast.vector.to.tile.v256i32(<256 x | ||||
155 | // i32> %115). | ||||
156 | // %118 = call x86_amx @llvm.x86.tdpbf16ps.internal(i16 | ||||
157 | // %104, i16 %105, i16 %106, x86_amx %110, x86_amx %114, x86_amx | ||||
158 | // %117). | ||||
159 | // If we create %row = udiv i16 %106, 4 before %118(aka. II), then its | ||||
160 | // definition is after its user(new tileload for %117). | ||||
161 | // So, the best choice is to create %row right after the definition of | ||||
162 | // %106. | ||||
163 | Builder.SetInsertPoint(cast<Instruction>(II->getOperand(2))); | ||||
164 | Row = Builder.CreateUDiv(II->getOperand(2), Builder.getInt16(4)); | ||||
165 | cast<Instruction>(Row)->moveAfter(cast<Instruction>(II->getOperand(2))); | ||||
166 | } else { | ||||
167 | // When it is not a const value and it is a function argument, we create | ||||
168 | // Row at the entry bb. | ||||
169 | IRBuilder<> NewBuilder( | ||||
170 | getFirstNonAllocaInTheEntryBlock(*II->getFunction())); | ||||
171 | Row = NewBuilder.CreateUDiv(II->getOperand(2), NewBuilder.getInt16(4)); | ||||
172 | } | ||||
173 | Col = II->getArgOperand(1); | ||||
174 | break; | ||||
175 | } | ||||
176 | break; | ||||
177 | } | ||||
178 | } | ||||
179 | |||||
180 | return std::make_pair(Row, Col); | ||||
181 | } | ||||
182 | |||||
// Determine the tile shape for \p Phi by walking a single use-chain forward
// until a real AMX intrinsic is found, then delegating to
// getShape(IntrinsicInst *, unsigned). Returns {nullptr, nullptr} if no
// shape could be determined along that chain.
static std::pair<Value *, Value *> getShape(PHINode *Phi) {
  Use &U = *(Phi->use_begin());
  unsigned OpNo = U.getOperandNo();
  User *V = U.getUser();
  // TODO We don't traverse all users. To make the algorithm simple, here we
  // just traverse the first user. If we can find shape, then return the shape,
  // otherwise just return nullptr and the optimization for undef/zero will be
  // abandoned.
  while (V) {
    // NOTE(review): dyn_cast yields nullptr when the user is not an
    // Instruction (e.g. a constant user); isAMXCast would then hand a null
    // pointer to PatternMatch — confirm tile users are always instructions.
    if (isAMXCast(dyn_cast<Instruction>(V))) {
      if (V->use_empty())
        break;
      // Look through the cast: continue the walk at its first use.
      Use &U = *(V->use_begin());
      OpNo = U.getOperandNo();
      V = U.getUser();
    } else if (isAMXIntrinsic(V)) {
      // Found a real AMX intrinsic; its operand position determines the shape.
      return getShape(cast<IntrinsicInst>(V), OpNo);
    } else if (isa<PHINode>(V)) {
      if (V->use_empty())
        break;
      // Look through the phi: continue at its first use (OpNo unchanged; the
      // shape-relevant operand position is established by the AMX intrinsic).
      Use &U = *(V->use_begin());
      V = U.getUser();
    } else {
      break;
    }
  }

  // No AMX intrinsic reached along the chain: no shape available.
  return std::make_pair(nullptr, nullptr);
}
212 | |||||
213 | namespace { | ||||
// Lowers bitcasts between <256 x i32> vectors and x86_amx tiles into AMX
// tile load/store intrinsics (see the file header for the transformation).
class X86LowerAMXType {
  // Function currently being transformed.
  Function &Func;

  // In AMX intrinsics we let Shape = {Row, Col}, but the
  // RealCol = Col / ElementSize. We may use the RealCol
  // as a new Row for other new created AMX intrinsics.
  std::map<Value *, Value *> Col2Row;

public:
  X86LowerAMXType(Function &F) : Func(F) {}
  // Walk the function and rewrite load/bitcast and bitcast/store pairs;
  // returns true if anything changed.
  bool visit();
  // Fuse a vector load feeding a bitcast-to-amx into a tileload intrinsic.
  void combineLoadBitcast(LoadInst *LD, BitCastInst *Bitcast);
  // Fuse a bitcast-from-amx feeding a store into a tilestore intrinsic.
  void combineBitcastStore(BitCastInst *Bitcast, StoreInst *ST);
  // Lower a lone bitcast through an alloca + tile load/store; returns true
  // on success.
  bool transformBitcast(BitCastInst *Bitcast);
};
229 | |||||
230 | // %src = load <256 x i32>, <256 x i32>* %addr, align 64 | ||||
231 | // %2 = bitcast <256 x i32> %src to x86_amx | ||||
232 | // --> | ||||
233 | // %2 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, | ||||
234 | // i8* %addr, i64 %stride64) | ||||
235 | void X86LowerAMXType::combineLoadBitcast(LoadInst *LD, BitCastInst *Bitcast) { | ||||
236 | Value *Row = nullptr, *Col = nullptr; | ||||
237 | Use &U = *(Bitcast->use_begin()); | ||||
238 | unsigned OpNo = U.getOperandNo(); | ||||
239 | auto *II = cast<IntrinsicInst>(U.getUser()); | ||||
240 | std::tie(Row, Col) = getShape(II, OpNo); | ||||
241 | IRBuilder<> Builder(Bitcast); | ||||
242 | // Use the maximun column as stride. | ||||
243 | Value *Stride = Builder.getInt64(64); | ||||
244 | Value *I8Ptr = | ||||
245 | Builder.CreateBitCast(LD->getOperand(0), Builder.getInt8PtrTy()); | ||||
246 | std::array<Value *, 4> Args = {Row, Col, I8Ptr, Stride}; | ||||
247 | |||||
248 | Value *NewInst = | ||||
249 | Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, None, Args); | ||||
250 | Bitcast->replaceAllUsesWith(NewInst); | ||||
251 | } | ||||
252 | |||||
253 | // %src = call x86_amx @llvm.x86.tileloadd64.internal(%row, %col, %addr, | ||||
254 | // %stride); | ||||
255 | // %13 = bitcast x86_amx %src to <256 x i32> | ||||
256 | // store <256 x i32> %13, <256 x i32>* %addr, align 64 | ||||
257 | // --> | ||||
258 | // call void @llvm.x86.tilestored64.internal(%row, %col, %addr, | ||||
259 | // %stride64, %13) | ||||
260 | void X86LowerAMXType::combineBitcastStore(BitCastInst *Bitcast, StoreInst *ST) { | ||||
261 | |||||
262 | Value *Tile = Bitcast->getOperand(0); | ||||
263 | auto *II = cast<IntrinsicInst>(Tile); | ||||
264 | // Tile is output from AMX intrinsic. The first operand of the | ||||
265 | // intrinsic is row, the second operand of the intrinsic is column. | ||||
266 | Value *Row = II->getOperand(0); | ||||
267 | Value *Col = II->getOperand(1); | ||||
268 | IRBuilder<> Builder(ST); | ||||
269 | // Use the maximum column as stride. It must be the same with load | ||||
270 | // stride. | ||||
271 | Value *Stride = Builder.getInt64(64); | ||||
272 | Value *I8Ptr = | ||||
273 | Builder.CreateBitCast(ST->getOperand(1), Builder.getInt8PtrTy()); | ||||
274 | std::array<Value *, 5> Args = {Row, Col, I8Ptr, Stride, Tile}; | ||||
275 | Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, None, Args); | ||||
276 | if (Bitcast->hasOneUse()) | ||||
277 | return; | ||||
278 | // %13 = bitcast x86_amx %src to <256 x i32> | ||||
279 | // store <256 x i32> %13, <256 x i32>* %addr, align 64 | ||||
280 | // %add = <256 x i32> %13, <256 x i32> %src2 | ||||
281 | // --> | ||||
282 | // %13 = bitcast x86_amx %src to <256 x i32> | ||||
283 | // call void @llvm.x86.tilestored64.internal(%row, %col, %addr, | ||||
284 | // %stride64, %13) | ||||
285 | // %14 = load <256 x i32>, %addr | ||||
286 | // %add = <256 x i32> %14, <256 x i32> %src2 | ||||
287 | Value *Vec = Builder.CreateLoad(Bitcast->getType(), ST->getOperand(1)); | ||||
288 | Bitcast->replaceAllUsesWith(Vec); | ||||
289 | } | ||||
290 | |||||
// transform bitcast to <store, load> instructions.
// A bitcast between x86_amx and <256 x i32> that cannot be fused with an
// adjacent load/store is lowered by round-tripping the value through a
// fresh entry-block alloca: the source side is written to memory and the
// destination side is read back via a tile load/store intrinsic.
// Returns false when the shape-carrying AMX intrinsic on the other side of
// the bitcast cannot be found.
bool X86LowerAMXType::transformBitcast(BitCastInst *Bitcast) {
  IRBuilder<> Builder(Bitcast);
  AllocaInst *AllocaAddr;
  Value *I8Ptr, *Stride;
  auto *Src = Bitcast->getOperand(0);

  // Set up the scratch alloca plus the i8* / stride operands shared by both
  // directions below (captures AllocaAddr, I8Ptr, Stride by reference).
  auto Prepare = [&](Type *MemTy) {
    AllocaAddr = createAllocaInstAtEntry(Builder, Bitcast->getParent(), MemTy);
    I8Ptr = Builder.CreateBitCast(AllocaAddr, Builder.getInt8PtrTy());
    Stride = Builder.getInt64(64);
  };

  if (Bitcast->getType()->isX86_AMXTy()) {
    // %2 = bitcast <256 x i32> %src to x86_amx
    // -->
    // %addr = alloca <256 x i32>, align 64
    // store <256 x i32> %src, <256 x i32>* %addr, align 64
    // %addr2 = bitcast <256 x i32>* to i8*
    // %2 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col,
    //                                                  i8* %addr2,
    //                                                  i64 64)
    Use &U = *(Bitcast->use_begin());
    unsigned OpNo = U.getOperandNo();
    auto *II = dyn_cast<IntrinsicInst>(U.getUser());
    if (!II)
      return false; // May be bitcast from x86amx to <256 x i32>.
    Prepare(Bitcast->getOperand(0)->getType());
    Builder.CreateStore(Src, AllocaAddr);
    // TODO we can pick an constant operand for the shape.
    Value *Row = nullptr, *Col = nullptr;
    std::tie(Row, Col) = getShape(II, OpNo);
    std::array<Value *, 4> Args = {Row, Col, I8Ptr, Stride};
    Value *NewInst = Builder.CreateIntrinsic(
        Intrinsic::x86_tileloadd64_internal, None, Args);
    Bitcast->replaceAllUsesWith(NewInst);
  } else {
    // %2 = bitcast x86_amx %src to <256 x i32>
    // -->
    // %addr = alloca <256 x i32>, align 64
    // %addr2 = bitcast <256 x i32>* to i8*
    // call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col,
    //                                           i8* %addr2, i64 %stride)
    // %2 = load <256 x i32>, <256 x i32>* %addr, align 64
    auto *II = dyn_cast<IntrinsicInst>(Src);
    if (!II)
      return false; // May be bitcast from <256 x i32> to x86amx.
    Prepare(Bitcast->getType());
    // The defining intrinsic carries the shape in its first two operands.
    Value *Row = II->getOperand(0);
    Value *Col = II->getOperand(1);
    std::array<Value *, 5> Args = {Row, Col, I8Ptr, Stride, Src};
    Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, None, Args);
    Value *NewInst = Builder.CreateLoad(Bitcast->getType(), AllocaAddr);
    Bitcast->replaceAllUsesWith(NewInst);
  }

  return true;
}
349 | |||||
// Scan the function (post-order over blocks, each block visited in reverse
// so users are seen before defs) for bitcasts to/from x86_amx, fuse them
// with adjacent loads/stores where possible, and collect replaced
// instructions for deletion at the end. Returns true if anything changed.
bool X86LowerAMXType::visit() {
  SmallVector<Instruction *, 8> DeadInsts;
  Col2Row.clear();

  for (BasicBlock *BB : post_order(&Func)) {
    // early_inc_range: safe to queue/erase-later while iterating in reverse.
    for (Instruction &Inst : llvm::make_early_inc_range(llvm::reverse(*BB))) {
      auto *Bitcast = dyn_cast<BitCastInst>(&Inst);
      if (!Bitcast)
        continue;

      Value *Src = Bitcast->getOperand(0);
      if (Bitcast->getType()->isX86_AMXTy()) {
        // vector -> tile direction.
        if (Bitcast->user_empty()) {
          DeadInsts.push_back(Bitcast);
          continue;
        }
        LoadInst *LD = dyn_cast<LoadInst>(Src);
        if (!LD) {
          // No feeding load: fall back to the alloca round-trip lowering.
          if (transformBitcast(Bitcast))
            DeadInsts.push_back(Bitcast);
          continue;
        }
        // If load has mutli-user, duplicate a vector load.
        // %src = load <256 x i32>, <256 x i32>* %addr, align 64
        // %2 = bitcast <256 x i32> %src to x86_amx
        // %add = add <256 x i32> %src, <256 x i32> %src2
        // -->
        // %src = load <256 x i32>, <256 x i32>* %addr, align 64
        // %2 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col,
        //                                            i8* %addr, i64 %stride64)
        // %add = add <256 x i32> %src, <256 x i32> %src2

        // If load has one user, the load will be eliminated in DAG ISel.
        // %src = load <256 x i32>, <256 x i32>* %addr, align 64
        // %2 = bitcast <256 x i32> %src to x86_amx
        // -->
        // %2 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col,
        //                                            i8* %addr, i64 %stride64)
        combineLoadBitcast(LD, Bitcast);
        DeadInsts.push_back(Bitcast);
        if (LD->hasOneUse())
          DeadInsts.push_back(LD);
      } else if (Src->getType()->isX86_AMXTy()) {
        // tile -> vector direction.
        if (Bitcast->user_empty()) {
          DeadInsts.push_back(Bitcast);
          continue;
        }
        // Find any store user of the bitcast to fuse with.
        StoreInst *ST = nullptr;
        for (Use &U : Bitcast->uses()) {
          ST = dyn_cast<StoreInst>(U.getUser());
          if (ST)
            break;
        }
        if (!ST) {
          // No store user: fall back to the alloca round-trip lowering.
          if (transformBitcast(Bitcast))
            DeadInsts.push_back(Bitcast);
          continue;
        }
        // If bitcast (%13) has one use, combine bitcast and store to amx store.
        // %src = call x86_amx @llvm.x86.tileloadd64.internal(%row, %col, %addr,
        //                                                    %stride);
        // %13 = bitcast x86_amx %src to <256 x i32>
        // store <256 x i32> %13, <256 x i32>* %addr, align 64
        // -->
        // call void @llvm.x86.tilestored64.internal(%row, %col, %addr,
        //                                           %stride64, %13)
        //
        // If bitcast (%13) has multi-use, transform as below.
        // %13 = bitcast x86_amx %src to <256 x i32>
        // store <256 x i32> %13, <256 x i32>* %addr, align 64
        // %add = <256 x i32> %13, <256 x i32> %src2
        // -->
        // %13 = bitcast x86_amx %src to <256 x i32>
        // call void @llvm.x86.tilestored64.internal(%row, %col, %addr,
        //                                           %stride64, %13)
        // %14 = load <256 x i32>, %addr
        // %add = <256 x i32> %14, <256 x i32> %src2
        //
        combineBitcastStore(Bitcast, ST);
        // Delete user first.
        DeadInsts.push_back(ST);
        DeadInsts.push_back(Bitcast);
      }
    }
  }

  bool C = !DeadInsts.empty();

  for (auto *Inst : DeadInsts)
    Inst->eraseFromParent();

  return C;
}
443 | } // anonymous namespace | ||||
444 | |||||
445 | static Value *getAllocaPos(BasicBlock *BB) { | ||||
446 | Module *M = BB->getModule(); | ||||
447 | Function *F = BB->getParent(); | ||||
448 | IRBuilder<> Builder(&F->getEntryBlock().front()); | ||||
449 | const DataLayout &DL = M->getDataLayout(); | ||||
450 | unsigned AllocaAS = DL.getAllocaAddrSpace(); | ||||
451 | Type *V256I32Ty = VectorType::get(Builder.getInt32Ty(), 256, false); | ||||
452 | AllocaInst *AllocaRes = | ||||
453 | new AllocaInst(V256I32Ty, AllocaAS, "", &F->getEntryBlock().front()); | ||||
454 | BasicBlock::iterator Iter = AllocaRes->getIterator(); | ||||
455 | ++Iter; | ||||
456 | Builder.SetInsertPoint(&*Iter); | ||||
457 | Value *I8Ptr = Builder.CreateBitCast(AllocaRes, Builder.getInt8PtrTy()); | ||||
458 | return I8Ptr; | ||||
459 | } | ||||
460 | |||||
461 | static Instruction *createTileStore(Instruction *TileDef, Value *Ptr) { | ||||
462 | assert(TileDef->getType()->isX86_AMXTy() && "Not define tile!")(static_cast <bool> (TileDef->getType()->isX86_AMXTy () && "Not define tile!") ? void (0) : __assert_fail ( "TileDef->getType()->isX86_AMXTy() && \"Not define tile!\"" , "llvm/lib/Target/X86/X86LowerAMXType.cpp", 462, __extension__ __PRETTY_FUNCTION__)); | ||||
463 | auto *II = cast<IntrinsicInst>(TileDef); | ||||
464 | assert(II && "Not tile intrinsic!")(static_cast <bool> (II && "Not tile intrinsic!" ) ? void (0) : __assert_fail ("II && \"Not tile intrinsic!\"" , "llvm/lib/Target/X86/X86LowerAMXType.cpp", 464, __extension__ __PRETTY_FUNCTION__)); | ||||
465 | Value *Row = II->getOperand(0); | ||||
466 | Value *Col = II->getOperand(1); | ||||
467 | |||||
468 | BasicBlock *BB = TileDef->getParent(); | ||||
469 | BasicBlock::iterator Iter = TileDef->getIterator(); | ||||
470 | IRBuilder<> Builder(BB, ++Iter); | ||||
471 | Value *Stride = Builder.getInt64(64); | ||||
472 | std::array<Value *, 5> Args = {Row, Col, Ptr, Stride, TileDef}; | ||||
473 | |||||
474 | Instruction *TileStore = | ||||
475 | Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, None, Args); | ||||
476 | return TileStore; | ||||
477 | } | ||||
478 | |||||
479 | static void replaceWithTileLoad(Use &U, Value *Ptr, bool IsPHI = false) { | ||||
480 | Value *V = U.get(); | ||||
481 | assert(V->getType()->isX86_AMXTy() && "Not define tile!")(static_cast <bool> (V->getType()->isX86_AMXTy() && "Not define tile!") ? void (0) : __assert_fail ("V->getType()->isX86_AMXTy() && \"Not define tile!\"" , "llvm/lib/Target/X86/X86LowerAMXType.cpp", 481, __extension__ __PRETTY_FUNCTION__)); | ||||
482 | |||||
483 | // Get tile shape. | ||||
484 | IntrinsicInst *II = nullptr; | ||||
485 | if (IsPHI
| ||||
486 | Value *PhiOp = dyn_cast<PHINode>(V)->getIncomingValue(0); | ||||
487 | II = cast<IntrinsicInst>(PhiOp); | ||||
488 | } else { | ||||
489 | II = cast<IntrinsicInst>(V); | ||||
490 | } | ||||
491 | Value *Row = II->getOperand(0); | ||||
492 | Value *Col = II->getOperand(1); | ||||
493 | |||||
494 | Instruction *UserI = dyn_cast<Instruction>(U.getUser()); | ||||
495 | IRBuilder<> Builder(UserI); | ||||
496 | Value *Stride = Builder.getInt64(64); | ||||
497 | std::array<Value *, 4> Args = {Row, Col, Ptr, Stride}; | ||||
498 | |||||
499 | Value *TileLoad = | ||||
500 | Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, None, Args); | ||||
501 | UserI->replaceUsesOfWith(V, TileLoad); | ||||
502 | } | ||||
503 | |||||
504 | static bool isIncomingOfPHI(Instruction *I) { | ||||
505 | for (Use &U : I->uses()) { | ||||
506 | User *V = U.getUser(); | ||||
507 | if (isa<PHINode>(V)) | ||||
508 | return true; | ||||
509 | } | ||||
510 | return false; | ||||
511 | } | ||||
512 | |||||
513 | // Let all AMX tile data become volatile data, shorten the life range | ||||
514 | // of each tile register before fast register allocation. | ||||
515 | namespace { | ||||
// Turns every AMX tile value into "volatile" data: each tile def is stored
// to memory right after its definition and each use reloads it, shortening
// tile live ranges before fast register allocation (see file header).
class X86VolatileTileData {
  // Function currently being transformed.
  Function &F;

public:
  X86VolatileTileData(Function &Func) : F(Func) {}
  // Spill each incoming tile of a phi to a shared alloca; returns the i8*
  // of that alloca.
  Value *updatePhiIncomings(BasicBlock *BB,
                            SmallVector<Instruction *, 2> &Incomings);
  // Replace every use of the phi def with a tile load from StorePtr, then
  // erase the phi.
  void replacePhiDefWithLoad(Instruction *PHI, Value *StorePtr);
  // Entry point; returns true if anything changed.
  bool volatileTileData();
  // Handle a phi-defined tile and its incoming values.
  void volatileTilePHI(PHINode *Inst);
  // Handle a non-phi tile def: store after def, reload before each use.
  void volatileTileNonPHI(Instruction *I);
};
528 | |||||
529 | Value *X86VolatileTileData::updatePhiIncomings( | ||||
530 | BasicBlock *BB, SmallVector<Instruction *, 2> &Incomings) { | ||||
531 | Value *I8Ptr = getAllocaPos(BB); | ||||
532 | |||||
533 | for (auto *I : Incomings) { | ||||
534 | User *Store = createTileStore(I, I8Ptr); | ||||
535 | |||||
536 | // All its uses (except phi) should load from stored mem. | ||||
537 | for (Use &U : I->uses()) { | ||||
538 | User *V = U.getUser(); | ||||
539 | if (isa<PHINode>(V) || V == Store) | ||||
540 | continue; | ||||
541 | replaceWithTileLoad(U, I8Ptr); | ||||
542 | } | ||||
543 | } | ||||
544 | return I8Ptr; | ||||
545 | } | ||||
546 | |||||
547 | void X86VolatileTileData::replacePhiDefWithLoad(Instruction *PHI, | ||||
548 | Value *StorePtr) { | ||||
549 | for (Use &U : PHI->uses()) | ||||
550 | replaceWithTileLoad(U, StorePtr, true); | ||||
551 | PHI->eraseFromParent(); | ||||
552 | } | ||||
553 | |||||
554 | // Smilar with volatileTileNonPHI, this function only handle PHI Nodes | ||||
555 | // and their related AMX intrinsics. | ||||
556 | // 1) PHI Def should change to tileload. | ||||
557 | // 2) PHI Incoming Values should tilestored in just after their def. | ||||
558 | // 3) The mem of these tileload and tilestores should be same. | ||||
559 | // e.g. | ||||
560 | // ------------------------------------------------------ | ||||
561 | // bb_dom: | ||||
562 | // ... | ||||
563 | // br i1 %bool.cond, label %if.else, label %if.then | ||||
564 | // | ||||
565 | // if.then: | ||||
566 | // def %t0 = ... | ||||
567 | // ... | ||||
568 | // use %t0 | ||||
569 | // ... | ||||
570 | // br label %if.end | ||||
571 | // | ||||
572 | // if.else: | ||||
573 | // def %t1 = ... | ||||
574 | // br label %if.end | ||||
575 | // | ||||
576 | // if.end: | ||||
577 | // %td = phi x86_amx [ %t1, %if.else ], [ %t0, %if.then ] | ||||
578 | // ... | ||||
579 | // use %td | ||||
580 | // ------------------------------------------------------ | ||||
581 | // --> | ||||
582 | // ------------------------------------------------------ | ||||
583 | // bb_entry: | ||||
584 | // %mem = alloca <256 x i32>, align 1024 * | ||||
585 | // ... | ||||
586 | // bb_dom: | ||||
587 | // ... | ||||
588 | // br i1 %bool.cond, label %if.else, label %if.then | ||||
589 | // | ||||
590 | // if.then: | ||||
591 | // def %t0 = ... | ||||
592 | // call void @llvm.x86.tilestored64.internal(mem, %t0) * | ||||
593 | // ... | ||||
594 | // %t0` = call x86_amx @llvm.x86.tileloadd64.internal(mem)* | ||||
595 | // use %t0` * | ||||
596 | // ... | ||||
597 | // br label %if.end | ||||
598 | // | ||||
599 | // if.else: | ||||
600 | // def %t1 = ... | ||||
601 | // call void @llvm.x86.tilestored64.internal(mem, %t1) * | ||||
602 | // br label %if.end | ||||
603 | // | ||||
604 | // if.end: | ||||
605 | // ... | ||||
606 | // %td = call x86_amx @llvm.x86.tileloadd64.internal(mem) * | ||||
607 | // use %td | ||||
608 | // ------------------------------------------------------ | ||||
609 | void X86VolatileTileData::volatileTilePHI(PHINode *PHI) { | ||||
610 | BasicBlock *BB = PHI->getParent(); | ||||
611 | SmallVector<Instruction *, 2> Incomings; | ||||
612 | |||||
613 | for (unsigned I = 0, E = PHI->getNumIncomingValues(); I != E; ++I) { | ||||
614 | Value *Op = PHI->getIncomingValue(I); | ||||
615 | Instruction *Inst = dyn_cast<Instruction>(Op); | ||||
616 | assert(Inst && "We shouldn't fold AMX instrution!")(static_cast <bool> (Inst && "We shouldn't fold AMX instrution!" ) ? void (0) : __assert_fail ("Inst && \"We shouldn't fold AMX instrution!\"" , "llvm/lib/Target/X86/X86LowerAMXType.cpp", 616, __extension__ __PRETTY_FUNCTION__)); | ||||
617 | Incomings.push_back(Inst); | ||||
618 | } | ||||
619 | |||||
620 | Value *StorePtr = updatePhiIncomings(BB, Incomings); | ||||
621 | replacePhiDefWithLoad(PHI, StorePtr); | ||||
622 | } | ||||
623 | |||||
624 | // Store the defined tile and load it before use. | ||||
625 | // All its users are not PHI. | ||||
626 | // e.g. | ||||
627 | // ------------------------------------------------------ | ||||
628 | // def %td = ... | ||||
629 | // ... | ||||
630 | // "use %td" | ||||
631 | // ------------------------------------------------------ | ||||
632 | // --> | ||||
633 | // ------------------------------------------------------ | ||||
634 | // def %td = ... | ||||
635 | // call void @llvm.x86.tilestored64.internal(mem, %td) | ||||
636 | // ... | ||||
637 | // %td2 = call x86_amx @llvm.x86.tileloadd64.internal(mem) | ||||
638 | // "use %td2" | ||||
639 | // ------------------------------------------------------ | ||||
640 | void X86VolatileTileData::volatileTileNonPHI(Instruction *I) { | ||||
641 | BasicBlock *BB = I->getParent(); | ||||
642 | Value *I8Ptr = getAllocaPos(BB); | ||||
643 | User *Store = createTileStore(I, I8Ptr); | ||||
644 | |||||
645 | // All its uses should load from stored mem. | ||||
646 | for (Use &U : I->uses()) { | ||||
647 | User *V = U.getUser(); | ||||
648 | assert(!isa<PHINode>(V) && "PHI Nodes should be excluded!")(static_cast <bool> (!isa<PHINode>(V) && "PHI Nodes should be excluded!" ) ? void (0) : __assert_fail ("!isa<PHINode>(V) && \"PHI Nodes should be excluded!\"" , "llvm/lib/Target/X86/X86LowerAMXType.cpp", 648, __extension__ __PRETTY_FUNCTION__)); | ||||
649 | if (V != Store) | ||||
650 | replaceWithTileLoad(U, I8Ptr); | ||||
651 | } | ||||
652 | } | ||||
653 | |||||
654 | // Volatile Tile Model: | ||||
655 | // 1) All the uses of tile data comes from tileload in time. | ||||
656 | // 2) All the defs of tile data tilestore into mem immediately. | ||||
657 | // For example: | ||||
658 | // -------------------------------------------------------------------------- | ||||
659 | // %t1 = call x86_amx @llvm.x86.tileloadd64.internal(m, k, ...) key | ||||
660 | // %t2 = call x86_amx @llvm.x86.tileloadd64.internal(k, n, ...) | ||||
661 | // %t3 = call x86_amx @llvm.x86.tileloadd64.internal(m, n, ...) amx | ||||
662 | // %td = tail call x86_amx @llvm.x86.tdpbssd.internal(m, n, k, t1, t2, t3) | ||||
663 | // call void @llvm.x86.tilestored64.internal(... td) area | ||||
664 | // -------------------------------------------------------------------------- | ||||
665 | // 3) No terminator, call or other amx instructions in the key amx area. | ||||
666 | bool X86VolatileTileData::volatileTileData() { | ||||
667 | bool Changed = false; | ||||
668 | for (BasicBlock &BB : F) { | ||||
669 | SmallVector<Instruction *, 2> PHIInsts; | ||||
670 | SmallVector<Instruction *, 8> AMXDefInsts; | ||||
671 | |||||
672 | for (Instruction &I : BB) { | ||||
673 | if (!I.getType()->isX86_AMXTy()) | ||||
674 | continue; | ||||
675 | if (isa<PHINode>(&I)) | ||||
676 | PHIInsts.push_back(&I); | ||||
677 | else | ||||
678 | AMXDefInsts.push_back(&I); | ||||
679 | } | ||||
680 | |||||
681 | // First we "volatile" the non-phi related amx intrinsics. | ||||
682 | for (Instruction *I : AMXDefInsts) { | ||||
683 | if (isIncomingOfPHI(I)) | ||||
684 | continue; | ||||
685 | volatileTileNonPHI(I); | ||||
686 | Changed = true; | ||||
687 | } | ||||
688 | |||||
689 | for (Instruction *I : PHIInsts) { | ||||
690 | volatileTilePHI(dyn_cast<PHINode>(I)); | ||||
691 | Changed = true; | ||||
692 | } | ||||
693 | } | ||||
694 | return Changed; | ||||
695 | } | ||||
696 | |||||
697 | } // anonymous namespace | ||||
698 | |||||
699 | namespace { | ||||
700 | |||||
/// Optimizes and lowers the llvm.x86.cast.vector.to.tile /
/// llvm.x86.cast.tile.to.vector intrinsics that bridge vector code and AMX
/// intrinsics: redundant cast pairs are folded, casts adjacent to
/// loads/stores become tileloadd64/tilestored64, and whatever remains is
/// lowered through a stack slot.
class X86LowerAMXCast {
  Function &Func; // The function being transformed.

public:
  X86LowerAMXCast(Function &F) : Func(F) {}
  // Fold a tile->vector cast followed by a store into tilestored64.
  void combineCastStore(IntrinsicInst *Cast, StoreInst *ST);
  // Fold a load followed by a vector->tile cast into tileloadd64.
  void combineLoadCast(IntrinsicInst *Cast, LoadInst *LD);
  // Apply the two combines above to every still-live cast in \p Casts.
  bool combineLdSt(SmallVectorImpl<Instruction *> &Casts);
  // Driver: collect casts, cancel round trips, combine with load/store,
  // handle PHI webs, then DCE what became dead. Returns true on IR change.
  bool combineAMXcast(TargetLibraryInfo *TLI);
  // Lower one remaining cast through an alloca + tile load/store.
  bool transformAMXCast(IntrinsicInst *AMXCast);
  // Lower all remaining casts; returns true if the IR changed.
  bool transformAllAMXCast();
  // Merge A->B / PHI / B->A cast chains into PHIs of type A (see definition).
  bool optimizeAMXCastFromPhi(IntrinsicInst *CI, PHINode *PN,
                              SmallSetVector<Instruction *, 16> &DeadInst);
};
715 | |||||
716 | static bool DCEInstruction(Instruction *I, | ||||
717 | SmallSetVector<Instruction *, 16> &WorkList, | ||||
718 | const TargetLibraryInfo *TLI) { | ||||
719 | if (isInstructionTriviallyDead(I, TLI)) { | ||||
720 | salvageDebugInfo(*I); | ||||
721 | salvageKnowledge(I); | ||||
722 | |||||
723 | // Null out all of the instruction's operands to see if any operand becomes | ||||
724 | // dead as we go. | ||||
725 | for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) { | ||||
726 | Value *OpV = I->getOperand(i); | ||||
727 | I->setOperand(i, nullptr); | ||||
728 | |||||
729 | if (!OpV->use_empty() || I == OpV) | ||||
730 | continue; | ||||
731 | |||||
732 | // If the operand is an instruction that became dead as we nulled out the | ||||
733 | // operand, and if it is 'trivially' dead, delete it in a future loop | ||||
734 | // iteration. | ||||
735 | if (Instruction *OpI = dyn_cast<Instruction>(OpV)) { | ||||
736 | if (isInstructionTriviallyDead(OpI, TLI)) { | ||||
737 | WorkList.insert(OpI); | ||||
738 | } | ||||
739 | } | ||||
740 | } | ||||
741 | I->eraseFromParent(); | ||||
742 | return true; | ||||
743 | } | ||||
744 | return false; | ||||
745 | } | ||||
746 | |||||
/// This function handles following case
///
///     A  ->  B    amxcast
///     PHI
///     B  ->  A    amxcast
///
/// All the related PHI nodes can be replaced by new PHI nodes with type A.
/// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
/// Returns true and records newly-dead casts in \p DeadInst on success;
/// returns false (leaving the IR untouched except for materialized tilezero
/// constants) when any incoming value or user cannot be rewritten.
bool X86LowerAMXCast::optimizeAMXCastFromPhi(
    IntrinsicInst *CI, PHINode *PN,
    SmallSetVector<Instruction *, 16> &DeadInst) {
  IRBuilder<> Builder(CI);
  Value *Src = CI->getOperand(0);
  Type *SrcTy = Src->getType(); // Type B
  Type *DestTy = CI->getType(); // Type A

  SmallVector<PHINode *, 4> PhiWorklist;
  SmallSetVector<PHINode *, 4> OldPhiNodes;

  // Find all of the A->B casts and PHI nodes.
  // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
  // OldPhiNodes is used to track all known PHI nodes, before adding a new
  // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
  PhiWorklist.push_back(PN);
  OldPhiNodes.insert(PN);
  while (!PhiWorklist.empty()) {
    auto *OldPN = PhiWorklist.pop_back_val();
    for (unsigned I = 0; I < OldPN->getNumOperands(); ++I) {
      Value *IncValue = OldPN->getIncomingValue(I);
      // TODO: currently, We ignore cases where it is a const. In the future, we
      // might support const.
      if (isa<Constant>(IncValue)) {
        auto *IncConst = dyn_cast<Constant>(IncValue);
        // Only undef and zero constants are handled (they can be
        // materialized with tilezero); any other constant blocks the
        // transformation.
        if (!isa<UndefValue>(IncValue) && !IncConst->isZeroValue())
          return false;
        Value *Row = nullptr, *Col = nullptr;
        std::tie(Row, Col) = getShape(OldPN);
        // TODO: If it is not constant the Row and Col must domoniate tilezero
        // that we are going to create.
        if (!Row || !Col || !isa<Constant>(Row) || !isa<Constant>(Col))
          return false;
        // Create tilezero at the end of incoming block.
        auto *Block = OldPN->getIncomingBlock(I);
        BasicBlock::iterator Iter = Block->getTerminator()->getIterator();
        Instruction *NewInst = Builder.CreateIntrinsic(
            Intrinsic::x86_tilezero_internal, None, {Row, Col});
        NewInst->moveBefore(&*Iter);
        NewInst = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector,
                                          {IncValue->getType()}, {NewInst});
        NewInst->moveBefore(&*Iter);
        // Replace InValue with new Value.
        OldPN->setIncomingValue(I, NewInst);
        IncValue = NewInst;
      }

      // Another PHI feeding this one: add it to the web and keep scanning.
      if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
        if (OldPhiNodes.insert(PNode))
          PhiWorklist.push_back(PNode);
        continue;
      }
      Instruction *ACI = dyn_cast<Instruction>(IncValue);
      if (ACI && isAMXCast(ACI)) {
        // Verify it's a A->B cast.
        Type *TyA = ACI->getOperand(0)->getType();
        Type *TyB = ACI->getType();
        if (TyA != DestTy || TyB != SrcTy)
          return false;
        continue;
      }
      // Any other incoming value defeats the optimization.
      return false;
    }
  }

  // Check that each user of each old PHI node is something that we can
  // rewrite, so that all of the old PHI nodes can be cleaned up afterwards.
  for (auto *OldPN : OldPhiNodes) {
    for (User *V : OldPN->users()) {
      Instruction *ACI = dyn_cast<Instruction>(V);
      if (ACI && isAMXCast(ACI)) {
        // Verify it's a B->A cast.
        Type *TyB = ACI->getOperand(0)->getType();
        Type *TyA = ACI->getType();
        if (TyA != DestTy || TyB != SrcTy)
          return false;
      } else if (auto *PHI = dyn_cast<PHINode>(V)) {
        // As long as the user is another old PHI node, then even if we don't
        // rewrite it, the PHI web we're considering won't have any users
        // outside itself, so it'll be dead.
        // example:
        //   bb.0:
        //      %0 = amxcast ...
        //   bb.1:
        //      %1 = amxcast ...
        //   bb.2:
        //      %goodphi = phi %0, %1
        //      %3 = amxcast %goodphi
        //   bb.3:
        //      %goodphi2 = phi %0, %goodphi
        //      %4 = amxcast %goodphi2
        // When optimizeAMXCastFromPhi process %3 and %goodphi, %goodphi2 is
        // outside the phi-web, so the combination stop When
        // optimizeAMXCastFromPhi process %4 and %goodphi2, the optimization
        // will be done.
        if (OldPhiNodes.count(PHI) == 0)
          return false;
      } else
        return false;
    }
  }

  // For each old PHI node, create a corresponding new PHI node with a type A.
  SmallDenseMap<PHINode *, PHINode *> NewPNodes;
  for (auto *OldPN : OldPhiNodes) {
    Builder.SetInsertPoint(OldPN);
    PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
    NewPNodes[OldPN] = NewPN;
  }

  // Fill in the operands of new PHI nodes.
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
      Value *V = OldPN->getOperand(j);
      Value *NewV = nullptr;
      Instruction *ACI = dyn_cast<Instruction>(V);
      // There should not be a AMXcast from a const.
      if (ACI && isAMXCast(ACI))
        NewV = ACI->getOperand(0);
      else if (auto *PrevPN = dyn_cast<PHINode>(V))
        NewV = NewPNodes[PrevPN];
      assert(NewV);
      NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
    }
  }

  // Traverse all accumulated PHI nodes and process its users,
  // which are Stores and BitcCasts. Without this processing
  // NewPHI nodes could be replicated and could lead to extra
  // moves generated after DeSSA.
  // If there is a store with type B, change it to type A.

  // Replace users of BitCast B->A with NewPHI. These will help
  // later to get rid of a closure formed by OldPHI nodes.
  for (auto *OldPN : OldPhiNodes) {
    PHINode *NewPN = NewPNodes[OldPN];
    for (User *V : make_early_inc_range(OldPN->users())) {
      Instruction *ACI = dyn_cast<Instruction>(V);
      if (ACI && isAMXCast(ACI)) {
        Type *TyB = ACI->getOperand(0)->getType();
        Type *TyA = ACI->getType();
        assert(TyA == DestTy && TyB == SrcTy);
        (void)TyA;
        (void)TyB;
        ACI->replaceAllUsesWith(NewPN);
        DeadInst.insert(ACI);
      } else if (auto *PHI = dyn_cast<PHINode>(V)) {
        // We don't need to push PHINode into DeadInst since they are operands
        // of rootPN DCE can safely delete rootPN's operands if rootPN is dead.
        assert(OldPhiNodes.contains(PHI));
        (void)PHI;
      } else
        llvm_unreachable("all uses should be handled");
    }
  }
  return true;
}
913 | |||||
914 | // %43 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %42) | ||||
915 | // store <256 x i32> %43, <256 x i32>* %p, align 64 | ||||
916 | // --> | ||||
917 | // call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* %p, | ||||
918 | // i64 64, x86_amx %42) | ||||
919 | void X86LowerAMXCast::combineCastStore(IntrinsicInst *Cast, StoreInst *ST) { | ||||
920 | Value *Tile = Cast->getOperand(0); | ||||
921 | // TODO: If it is cast intrinsic or phi node, we can propagate the | ||||
922 | // shape information through def-use chain. | ||||
923 | if (!isAMXIntrinsic(Tile)) | ||||
924 | return; | ||||
925 | auto *II = cast<IntrinsicInst>(Tile); | ||||
926 | // Tile is output from AMX intrinsic. The first operand of the | ||||
927 | // intrinsic is row, the second operand of the intrinsic is column. | ||||
928 | Value *Row = II->getOperand(0); | ||||
929 | Value *Col = II->getOperand(1); | ||||
930 | IRBuilder<> Builder(ST); | ||||
931 | // Use the maximum column as stride. It must be the same with load | ||||
932 | // stride. | ||||
933 | Value *Stride = Builder.getInt64(64); | ||||
934 | Value *I8Ptr = | ||||
935 | Builder.CreateBitCast(ST->getOperand(1), Builder.getInt8PtrTy()); | ||||
936 | std::array<Value *, 5> Args = {Row, Col, I8Ptr, Stride, Tile}; | ||||
937 | Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, None, Args); | ||||
938 | } | ||||
939 | |||||
940 | // %65 = load <256 x i32>, <256 x i32>* %p, align 64 | ||||
941 | // %66 = call x86_amx @llvm.x86.cast.vector.to.tile(<256 x i32> %65) | ||||
942 | // --> | ||||
943 | // %66 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, | ||||
944 | // i8* %p, i64 64) | ||||
945 | void X86LowerAMXCast::combineLoadCast(IntrinsicInst *Cast, LoadInst *LD) { | ||||
946 | Value *Row = nullptr, *Col = nullptr; | ||||
947 | Use &U = *(Cast->use_begin()); | ||||
948 | unsigned OpNo = U.getOperandNo(); | ||||
949 | auto *II = cast<IntrinsicInst>(U.getUser()); | ||||
950 | // TODO: If it is cast intrinsic or phi node, we can propagate the | ||||
951 | // shape information through def-use chain. | ||||
952 | if (!isAMXIntrinsic(II)) | ||||
953 | return; | ||||
954 | std::tie(Row, Col) = getShape(II, OpNo); | ||||
955 | IRBuilder<> Builder(LD); | ||||
956 | // Use the maximun column as stride. | ||||
957 | Value *Stride = Builder.getInt64(64); | ||||
958 | Value *I8Ptr = | ||||
959 | Builder.CreateBitCast(LD->getOperand(0), Builder.getInt8PtrTy()); | ||||
960 | std::array<Value *, 4> Args = {Row, Col, I8Ptr, Stride}; | ||||
961 | |||||
962 | Value *NewInst = | ||||
963 | Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, None, Args); | ||||
964 | Cast->replaceAllUsesWith(NewInst); | ||||
965 | } | ||||
966 | |||||
967 | bool X86LowerAMXCast::combineLdSt(SmallVectorImpl<Instruction *> &Casts) { | ||||
968 | bool Change = false; | ||||
969 | for (auto *Cast : Casts) { | ||||
970 | auto *II = cast<IntrinsicInst>(Cast); | ||||
971 | // %43 = call <256 x i32> @llvm.x86.cast.tile.to.vector(x86_amx %42) | ||||
972 | // store <256 x i32> %43, <256 x i32>* %p, align 64 | ||||
973 | // --> | ||||
974 | // call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* %p, | ||||
975 | // i64 64, x86_amx %42) | ||||
976 | if (II->getIntrinsicID() == Intrinsic::x86_cast_tile_to_vector) { | ||||
977 | SmallVector<Instruction *, 2> DeadStores; | ||||
978 | for (User *U : Cast->users()) { | ||||
979 | StoreInst *Store = dyn_cast<StoreInst>(U); | ||||
980 | if (!Store) | ||||
981 | continue; | ||||
982 | combineCastStore(cast<IntrinsicInst>(Cast), Store); | ||||
983 | DeadStores.push_back(Store); | ||||
984 | Change = true; | ||||
985 | } | ||||
986 | for (auto *Store : DeadStores) | ||||
987 | Store->eraseFromParent(); | ||||
988 | } else { // x86_cast_vector_to_tile | ||||
989 | SmallVector<Instruction *, 2> DeadLoads; | ||||
990 | auto *Load = dyn_cast<LoadInst>(Cast->getOperand(0)); | ||||
991 | if (!Load || !Load->hasOneUse()) | ||||
992 | continue; | ||||
993 | // %65 = load <256 x i32>, <256 x i32>* %p, align 64 | ||||
994 | // %66 = call x86_amx @llvm.x86.cast.vector.to.tile(<256 x i32> %65) | ||||
995 | // --> | ||||
996 | // %66 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col, | ||||
997 | // i8* %p, i64 64) | ||||
998 | combineLoadCast(cast<IntrinsicInst>(Cast), Load); | ||||
999 | // Set the operand is null so that load instruction can be erased. | ||||
1000 | Cast->setOperand(0, nullptr); | ||||
1001 | Load->eraseFromParent(); | ||||
1002 | } | ||||
1003 | } | ||||
1004 | return Change; | ||||
1005 | } | ||||
1006 | |||||
// Driver for the AMX-cast combines. Phases, in order:
//   1. collect all vector->tile and tile->vector casts,
//   2. cancel out round trips (cast A->B feeding cast B->A),
//   3. erase casts made dead; combine surviving casts with adjacent
//      loads/stores (combineLdSt),
//   4. merge casts across PHI webs (optimizeAMXCastFromPhi),
//   5. DCE everything that became dead along the way.
// Returns true if the IR changed.
bool X86LowerAMXCast::combineAMXcast(TargetLibraryInfo *TLI) {
  bool Change = false;
  // Collect tile cast instruction.
  SmallVector<Instruction *, 8> Vec2TileInsts;
  SmallVector<Instruction *, 8> Tile2VecInsts;
  SmallVector<Instruction *, 8> PhiCastWorkList;
  SmallSetVector<Instruction *, 16> DeadInst;
  for (BasicBlock &BB : Func) {
    for (Instruction &I : BB) {
      Value *Vec;
      if (match(&I,
                m_Intrinsic<Intrinsic::x86_cast_vector_to_tile>(m_Value(Vec))))
        Vec2TileInsts.push_back(&I);
      else if (match(&I, m_Intrinsic<Intrinsic::x86_cast_tile_to_vector>(
                             m_Value(Vec))))
        Tile2VecInsts.push_back(&I);
    }
  }

  // For each cast in \p Insts whose user is the opposite cast \p IID,
  // forward the original (pre-cast) value to that user's users.
  auto Convert = [&](SmallVectorImpl<Instruction *> &Insts, Intrinsic::ID IID) {
    for (auto *Inst : Insts) {
      for (User *U : Inst->users()) {
        IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
        if (!II || II->getIntrinsicID() != IID)
          continue;
        // T1 = vec2tile V0
        // V2 = tile2vec T1
        // V3 = OP V2
        // -->
        // T1 = vec2tile V0
        // V2 = tile2vec T1
        // V3 = OP V0
        II->replaceAllUsesWith(Inst->getOperand(0));
        Change = true;
      }
    }
  };

  Convert(Vec2TileInsts, Intrinsic::x86_cast_tile_to_vector);
  Convert(Tile2VecInsts, Intrinsic::x86_cast_vector_to_tile);

  // Erase casts made dead by Convert(); keep the rest for combineLdSt().
  SmallVector<Instruction *, 8> LiveCasts;
  auto EraseInst = [&](SmallVectorImpl<Instruction *> &Insts) {
    for (auto *Inst : Insts) {
      if (Inst->use_empty()) {
        Inst->eraseFromParent();
        Change = true;
      } else {
        LiveCasts.push_back(Inst);
      }
    }
  };

  EraseInst(Vec2TileInsts);
  EraseInst(Tile2VecInsts);
  Change |= combineLdSt(LiveCasts);
  EraseInst(LiveCasts);

  // Handle the A->B->A cast, and there is an intervening PHI node.
  for (BasicBlock &BB : Func) {
    for (Instruction &I : BB) {
      if (isAMXCast(&I)) {
        if (isa<PHINode>(I.getOperand(0)))
          PhiCastWorkList.push_back(&I);
      }
    }
  }
  for (auto *I : PhiCastWorkList) {
    // We skip the dead Amxcast.
    if (DeadInst.contains(I))
      continue;
    PHINode *PN = cast<PHINode>(I->getOperand(0));
    if (optimizeAMXCastFromPhi(cast<IntrinsicInst>(I), PN, DeadInst)) {
      DeadInst.insert(PN);
      Change = true;
    }
  }

  // Since we create new phi and merge AMXCast, some old phis and AMXCast might
  // have no uses. We do some DeadCodeElimination for them.
  while (!DeadInst.empty()) {
    Instruction *I = DeadInst.pop_back_val();
    Change |= DCEInstruction(I, DeadInst, TLI);
  }
  return Change;
}
1093 | |||||
1094 | // There might be remaining AMXcast after combineAMXcast and they should be | ||||
1095 | // handled elegantly. | ||||
1096 | bool X86LowerAMXCast::transformAMXCast(IntrinsicInst *AMXCast) { | ||||
1097 | IRBuilder<> Builder(AMXCast); | ||||
1098 | AllocaInst *AllocaAddr; | ||||
1099 | Value *I8Ptr, *Stride; | ||||
1100 | auto *Src = AMXCast->getOperand(0); | ||||
1101 | |||||
1102 | auto Prepare = [&](Type *MemTy) { | ||||
1103 | AllocaAddr = createAllocaInstAtEntry(Builder, AMXCast->getParent(), MemTy); | ||||
1104 | I8Ptr = Builder.CreateBitCast(AllocaAddr, Builder.getInt8PtrTy()); | ||||
1105 | Stride = Builder.getInt64(64); | ||||
1106 | }; | ||||
1107 | |||||
1108 | if (AMXCast->getType()->isX86_AMXTy()) { | ||||
1109 | // %2 = amxcast <225 x i32> %src to x86_amx | ||||
1110 | // call void @llvm.x86.tilestored64.internal(i16 15, i16 60, | ||||
1111 | // i8* %addr3, i64 60, x86_amx %2) | ||||
1112 | // --> | ||||
1113 | // %addr = alloca <225 x i32>, align 64 | ||||
1114 | // store <225 x i32> %src, <225 x i32>* %addr, align 64 | ||||
1115 | // %addr2 = bitcast <225 x i32>* %addr to i8* | ||||
1116 | // %2 = call x86_amx @llvm.x86.tileloadd64.internal(i16 15, i16 60, | ||||
1117 | // i8* %addr2, | ||||
1118 | // i64 60) | ||||
1119 | // call void @llvm.x86.tilestored64.internal(i16 15, i16 60, | ||||
1120 | // i8* %addr3, i64 60, x86_amx %2) | ||||
1121 | if (AMXCast->use_empty()) { | ||||
1122 | AMXCast->eraseFromParent(); | ||||
1123 | return true; | ||||
1124 | } | ||||
1125 | Use &U = *(AMXCast->use_begin()); | ||||
1126 | unsigned OpNo = U.getOperandNo(); | ||||
1127 | auto *II = dyn_cast<IntrinsicInst>(U.getUser()); | ||||
1128 | if (!II) | ||||
1129 | return false; // May be bitcast from x86amx to <256 x i32>. | ||||
1130 | Prepare(AMXCast->getOperand(0)->getType()); | ||||
1131 | Builder.CreateStore(Src, AllocaAddr); | ||||
1132 | // TODO we can pick an constant operand for the shape. | ||||
1133 | Value *Row = nullptr, *Col = nullptr; | ||||
1134 | std::tie(Row, Col) = getShape(II, OpNo); | ||||
1135 | std::array<Value *, 4> Args = { | ||||
1136 | Row, Col, I8Ptr, Builder.CreateSExt(Col, Builder.getInt64Ty())}; | ||||
1137 | Value *NewInst = Builder.CreateIntrinsic( | ||||
1138 | Intrinsic::x86_tileloadd64_internal, None, Args); | ||||
1139 | AMXCast->replaceAllUsesWith(NewInst); | ||||
1140 | AMXCast->eraseFromParent(); | ||||
1141 | } else { | ||||
1142 | // %2 = amxcast x86_amx %src to <225 x i32> | ||||
1143 | // --> | ||||
1144 | // %addr = alloca <225 x i32>, align 64 | ||||
1145 | // %addr2 = bitcast <225 x i32>* to i8* | ||||
1146 | // call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, | ||||
1147 | // i8* %addr2, i64 %stride) | ||||
1148 | // %2 = load <225 x i32>, <225 x i32>* %addr, align 64 | ||||
1149 | auto *II = dyn_cast<IntrinsicInst>(Src); | ||||
1150 | if (!II) | ||||
1151 | return false; // May be bitcast from <256 x i32> to x86amx. | ||||
1152 | Prepare(AMXCast->getType()); | ||||
1153 | Value *Row = II->getOperand(0); | ||||
1154 | Value *Col = II->getOperand(1); | ||||
1155 | std::array<Value *, 5> Args = { | ||||
1156 | Row, Col, I8Ptr, Builder.CreateSExt(Col, Builder.getInt64Ty()), Src}; | ||||
1157 | Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, None, Args); | ||||
1158 | Value *NewInst = Builder.CreateLoad(AMXCast->getType(), AllocaAddr); | ||||
1159 | AMXCast->replaceAllUsesWith(NewInst); | ||||
1160 | AMXCast->eraseFromParent(); | ||||
1161 | } | ||||
1162 | |||||
1163 | return true; | ||||
1164 | } | ||||
1165 | |||||
1166 | bool X86LowerAMXCast::transformAllAMXCast() { | ||||
1167 | bool Change = false; | ||||
1168 | // Collect tile cast instruction. | ||||
1169 | SmallVector<Instruction *, 8> WorkLists; | ||||
1170 | for (BasicBlock &BB : Func) { | ||||
1171 | for (Instruction &I : BB) { | ||||
1172 | if (isAMXCast(&I)) | ||||
1173 | WorkLists.push_back(&I); | ||||
1174 | } | ||||
1175 | } | ||||
1176 | |||||
1177 | for (auto *Inst : WorkLists) { | ||||
1178 | Change |= transformAMXCast(cast<IntrinsicInst>(Inst)); | ||||
1179 | } | ||||
1180 | |||||
1181 | return Change; | ||||
1182 | } | ||||
1183 | |||||
1184 | } // anonymous namespace | ||||
1185 | |||||
1186 | namespace { | ||||
1187 | |||||
/// Legacy pass-manager wrapper that runs the whole AMX type lowering
/// pipeline over a function.
class X86LowerAMXTypeLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid.

  X86LowerAMXTypeLegacyPass() : FunctionPass(ID) {
    initializeX86LowerAMXTypeLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  // Pipeline:
  //   1. combine/lower the AMX cast intrinsics (X86LowerAMXCast),
  //   2. lower <256 x i32> load/store + bitcast pairs (X86LowerAMXType),
  //   3. at CodeGenOpt::None, make tile data volatile so AMX fast register
  //      allocation works (X86VolatileTileData).
  // Returns true if the IR was modified.
  bool runOnFunction(Function &F) override {
    bool C = false;
    TargetMachine *TM = &getAnalysis<TargetPassConfig>().getTM<TargetMachine>();
    TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    X86LowerAMXCast LAC(F);
    C |= LAC.combineAMXcast(TLI);
    // There might be remaining AMXcast after combineAMXcast and they should be
    // handled elegantly.
    C |= LAC.transformAllAMXCast();

    X86LowerAMXType LAT(F);
    C |= LAT.visit();

    // Prepare for fast register allocation at O0.
    // Todo: May better check the volatile model of AMX code, not just
    // by checking Attribute::OptimizeNone and CodeGenOpt::None.
    if (TM->getOptLevel() == CodeGenOpt::None) {
      // If Front End not use O0 but the Mid/Back end use O0, (e.g.
      // "Clang -O2 -S -emit-llvm t.c" + "llc t.ll") we should make
      // sure the amx data is volatile, that is necessary for AMX fast
      // register allocation.
      if (!F.hasFnAttribute(Attribute::OptimizeNone)) {
        X86VolatileTileData VTD(F);
        C = VTD.volatileTileData() || C;
      }
    }

    return C;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<TargetPassConfig>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
};
1233 | |||||
1234 | } // anonymous namespace | ||||
1235 | |||||
// Legacy pass-manager registration, including the analyses this pass
// requires (TargetPassConfig for the opt level, TargetLibraryInfo for DCE).
static const char PassName[] = "Lower AMX type for load/store";
char X86LowerAMXTypeLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(X86LowerAMXTypeLegacyPass, DEBUG_TYPE, PassName, false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(X86LowerAMXTypeLegacyPass, DEBUG_TYPE, PassName, false,
                    false)
1244 | |||||
// Factory used by the X86 codegen pipeline to instantiate this pass.
FunctionPass *llvm::createX86LowerAMXTypePass() {
  return new X86LowerAMXTypeLegacyPass();
}
1 | //===- llvm/IRBuilder.h - Builder for LLVM Instructions ---------*- C++ -*-===// | |||
2 | // | |||
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. | |||
4 | // See https://llvm.org/LICENSE.txt for license information. | |||
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception | |||
6 | // | |||
7 | //===----------------------------------------------------------------------===// | |||
8 | // | |||
9 | // This file defines the IRBuilder class, which is used as a convenient way | |||
10 | // to create LLVM instructions with a consistent and simplified interface. | |||
11 | // | |||
12 | //===----------------------------------------------------------------------===// | |||
13 | ||||
14 | #ifndef LLVM_IR_IRBUILDER_H | |||
15 | #define LLVM_IR_IRBUILDER_H | |||
16 | ||||
17 | #include "llvm-c/Types.h" | |||
18 | #include "llvm/ADT/ArrayRef.h" | |||
19 | #include "llvm/ADT/None.h" | |||
20 | #include "llvm/ADT/STLExtras.h" | |||
21 | #include "llvm/ADT/StringRef.h" | |||
22 | #include "llvm/ADT/Twine.h" | |||
23 | #include "llvm/IR/BasicBlock.h" | |||
24 | #include "llvm/IR/Constant.h" | |||
25 | #include "llvm/IR/ConstantFolder.h" | |||
26 | #include "llvm/IR/Constants.h" | |||
27 | #include "llvm/IR/DataLayout.h" | |||
28 | #include "llvm/IR/DebugLoc.h" | |||
29 | #include "llvm/IR/DerivedTypes.h" | |||
30 | #include "llvm/IR/FPEnv.h" | |||
31 | #include "llvm/IR/Function.h" | |||
32 | #include "llvm/IR/GlobalVariable.h" | |||
33 | #include "llvm/IR/InstrTypes.h" | |||
34 | #include "llvm/IR/Instruction.h" | |||
35 | #include "llvm/IR/Instructions.h" | |||
36 | #include "llvm/IR/Intrinsics.h" | |||
37 | #include "llvm/IR/LLVMContext.h" | |||
38 | #include "llvm/IR/Module.h" | |||
39 | #include "llvm/IR/Operator.h" | |||
40 | #include "llvm/IR/Type.h" | |||
41 | #include "llvm/IR/Value.h" | |||
42 | #include "llvm/IR/ValueHandle.h" | |||
43 | #include "llvm/Support/AtomicOrdering.h" | |||
44 | #include "llvm/Support/CBindingWrapping.h" | |||
45 | #include "llvm/Support/Casting.h" | |||
46 | #include <cassert> | |||
47 | #include <cstdint> | |||
48 | #include <functional> | |||
49 | #include <utility> | |||
50 | ||||
51 | namespace llvm { | |||
52 | ||||
53 | class APInt; | |||
54 | class Use; | |||
55 | ||||
56 | /// This provides the default implementation of the IRBuilder | |||
57 | /// 'InsertHelper' method that is called whenever an instruction is created by | |||
58 | /// IRBuilder and needs to be inserted. | |||
59 | /// | |||
60 | /// By default, this inserts the instruction at the insertion point. | |||
class IRBuilderDefaultInserter {
public:
  virtual ~IRBuilderDefaultInserter();

  /// Insert \p I into \p BB at \p InsertPt and give it \p Name.
  /// When \p BB is null (the builder has no insertion point) the instruction
  /// is not inserted anywhere, but it is still named.
  virtual void InsertHelper(Instruction *I, const Twine &Name,
                            BasicBlock *BB,
                            BasicBlock::iterator InsertPt) const {
    if (BB) BB->getInstList().insert(InsertPt, I);
    I->setName(Name);
  }
};
72 | ||||
73 | /// Provides an 'InsertHelper' that calls a user-provided callback after | |||
74 | /// performing the default insertion. | |||
class IRBuilderCallbackInserter : public IRBuilderDefaultInserter {
  // User-supplied hook, invoked with each instruction after it is inserted.
  std::function<void(Instruction *)> Callback;

public:
  ~IRBuilderCallbackInserter() override;

  IRBuilderCallbackInserter(std::function<void(Instruction *)> Callback)
      : Callback(std::move(Callback)) {}

  /// Perform the default insertion, then hand the new instruction to the
  /// callback.
  void InsertHelper(Instruction *I, const Twine &Name,
                    BasicBlock *BB,
                    BasicBlock::iterator InsertPt) const override {
    IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
    Callback(I);
  }
};
91 | ||||
92 | /// Common base class shared among various IRBuilders. | |||
93 | class IRBuilderBase { | |||
94 | /// Pairs of (metadata kind, MDNode *) that should be added to all newly | |||
95 | /// created instructions, like !dbg metadata. | |||
96 | SmallVector<std::pair<unsigned, MDNode *>, 2> MetadataToCopy; | |||
97 | ||||
98 | /// Add or update the an entry (Kind, MD) to MetadataToCopy, if \p MD is not | |||
99 | /// null. If \p MD is null, remove the entry with \p Kind. | |||
100 | void AddOrRemoveMetadataToCopy(unsigned Kind, MDNode *MD) { | |||
101 | if (!MD) { | |||
102 | erase_if(MetadataToCopy, [Kind](const std::pair<unsigned, MDNode *> &KV) { | |||
103 | return KV.first == Kind; | |||
104 | }); | |||
105 | return; | |||
106 | } | |||
107 | ||||
108 | for (auto &KV : MetadataToCopy) | |||
109 | if (KV.first == Kind) { | |||
110 | KV.second = MD; | |||
111 | return; | |||
112 | } | |||
113 | ||||
114 | MetadataToCopy.emplace_back(Kind, MD); | |||
115 | } | |||
116 | ||||
protected:
  /// Block new instructions are inserted into; null when the insertion point
  /// is cleared (see ClearInsertionPoint).
  BasicBlock *BB;
  /// Position within \c BB before which new instructions are inserted.
  BasicBlock::iterator InsertPt;
  LLVMContext &Context;
  const IRBuilderFolder &Folder;
  const IRBuilderDefaultInserter &Inserter;

  /// Default !fpmath metadata attached to created FP operations (may be null).
  MDNode *DefaultFPMathTag;
  FastMathFlags FMF;

  bool IsFPConstrained = false;
  fp::ExceptionBehavior DefaultConstrainedExcept = fp::ebStrict;
  RoundingMode DefaultConstrainedRounding = RoundingMode::Dynamic;

  ArrayRef<OperandBundleDef> DefaultOperandBundles;

public:
  /// Construct a builder with no insertion point set; callers must use one of
  /// the SetInsertPoint overloads before inserting instructions.
  IRBuilderBase(LLVMContext &context, const IRBuilderFolder &Folder,
                const IRBuilderDefaultInserter &Inserter, MDNode *FPMathTag,
                ArrayRef<OperandBundleDef> OpBundles)
      : Context(context), Folder(Folder), Inserter(Inserter),
        DefaultFPMathTag(FPMathTag), DefaultOperandBundles(OpBundles) {
    // BB and InsertPt are deliberately absent from the member-init list;
    // this call is what initializes them (to "no insertion point").
    ClearInsertionPoint();
  }
141 | ||||
  /// Insert and return the specified instruction.
  template<typename InstTy>
  InstTy *Insert(InstTy *I, const Twine &Name = "") const {
    Inserter.InsertHelper(I, Name, BB, InsertPt);
    // Propagate registered metadata (e.g. !dbg) onto the new instruction.
    AddMetadataToInst(I);
    return I;
  }

  /// No-op overload to handle constants: constants are not inserted into any
  /// block and receive no metadata.
  Constant *Insert(Constant *C, const Twine& = "") const {
    return C;
  }
154 | ||||
155 | Value *Insert(Value *V, const Twine &Name = "") const { | |||
156 | if (Instruction *I = dyn_cast<Instruction>(V)) | |||
157 | return Insert(I, Name); | |||
158 | assert(isa<Constant>(V))(static_cast <bool> (isa<Constant>(V)) ? void (0) : __assert_fail ("isa<Constant>(V)", "llvm/include/llvm/IR/IRBuilder.h" , 158, __extension__ __PRETTY_FUNCTION__)); | |||
159 | return V; | |||
160 | } | |||
161 | ||||
162 | //===--------------------------------------------------------------------===// | |||
163 | // Builder configuration methods | |||
164 | //===--------------------------------------------------------------------===// | |||
165 | ||||
166 | /// Clear the insertion point: created instructions will not be | |||
167 | /// inserted into a block. | |||
168 | void ClearInsertionPoint() { | |||
169 | BB = nullptr; | |||
170 | InsertPt = BasicBlock::iterator(); | |||
171 | } | |||
172 | ||||
  /// Return the block new instructions are inserted into (null when cleared).
  BasicBlock *GetInsertBlock() const { return BB; }
  /// Return the position new instructions are inserted before.
  BasicBlock::iterator GetInsertPoint() const { return InsertPt; }
  /// Return the LLVMContext this builder creates IR in.
  LLVMContext &getContext() const { return Context; }
176 | ||||
177 | /// This specifies that created instructions should be appended to the | |||
178 | /// end of the specified block. | |||
179 | void SetInsertPoint(BasicBlock *TheBB) { | |||
180 | BB = TheBB; | |||
181 | InsertPt = BB->end(); | |||
182 | } | |||
183 | ||||
184 | /// This specifies that created instructions should be inserted before | |||
185 | /// the specified instruction. | |||
186 | void SetInsertPoint(Instruction *I) { | |||
187 | BB = I->getParent(); | |||
188 | InsertPt = I->getIterator(); | |||
189 | assert(InsertPt != BB->end() && "Can't read debug loc from end()")(static_cast <bool> (InsertPt != BB->end() && "Can't read debug loc from end()") ? void (0) : __assert_fail ("InsertPt != BB->end() && \"Can't read debug loc from end()\"" , "llvm/include/llvm/IR/IRBuilder.h", 189, __extension__ __PRETTY_FUNCTION__ )); | |||
190 | SetCurrentDebugLocation(I->getDebugLoc()); | |||
191 | } | |||
192 | ||||
193 | /// This specifies that created instructions should be inserted at the | |||
194 | /// specified point. | |||
195 | void SetInsertPoint(BasicBlock *TheBB, BasicBlock::iterator IP) { | |||
196 | BB = TheBB; | |||
197 | InsertPt = IP; | |||
198 | if (IP != TheBB->end()) | |||
199 | SetCurrentDebugLocation(IP->getDebugLoc()); | |||
200 | } | |||
201 | ||||
  /// This specifies that created instructions should be inserted at the
  /// beginning of the specified function's entry block, but after already
  /// existing static alloca instructions that are at the start.
  void SetInsertPointPastAllocas(Function *F) {
    BB = &F->getEntryBlock();
    InsertPt = BB->getFirstNonPHIOrDbgOrAlloca();
  }
209 | ||||
  /// Set location information used by debugging information.
  void SetCurrentDebugLocation(DebugLoc L) {
    // A null DebugLoc removes any previously registered !dbg attachment.
    AddOrRemoveMetadataToCopy(LLVMContext::MD_dbg, L.getAsMDNode());
  }

  /// Collect metadata with IDs \p MetadataKinds from \p Src which should be
  /// added to all created instructions. Entries present in MetadataToCopy but
  /// not on \p Src will be dropped from MetadataToCopy.
  void CollectMetadataToCopy(Instruction *Src,
                             ArrayRef<unsigned> MetadataKinds) {
    for (unsigned K : MetadataKinds)
      AddOrRemoveMetadataToCopy(K, Src->getMetadata(K));
  }
223 | ||||
224 | /// Get location information used by debugging information. | |||
225 | DebugLoc getCurrentDebugLocation() const; | |||
226 | ||||
227 | /// If this builder has a current debug location, set it on the | |||
228 | /// specified instruction. | |||
229 | void SetInstDebugLocation(Instruction *I) const; | |||
230 | ||||
231 | /// Add all entries in MetadataToCopy to \p I. | |||
232 | void AddMetadataToInst(Instruction *I) const { | |||
233 | for (const auto &KV : MetadataToCopy) | |||
234 | I->setMetadata(KV.first, KV.second); | |||
235 | } | |||
236 | ||||
237 | /// Get the return type of the current function that we're emitting | |||
238 | /// into. | |||
239 | Type *getCurrentFunctionReturnType() const; | |||
240 | ||||
241 | /// InsertPoint - A saved insertion point. | |||
242 | class InsertPoint { | |||
243 | BasicBlock *Block = nullptr; | |||
244 | BasicBlock::iterator Point; | |||
245 | ||||
246 | public: | |||
247 | /// Creates a new insertion point which doesn't point to anything. | |||
248 | InsertPoint() = default; | |||
249 | ||||
250 | /// Creates a new insertion point at the given location. | |||
251 | InsertPoint(BasicBlock *InsertBlock, BasicBlock::iterator InsertPoint) | |||
252 | : Block(InsertBlock), Point(InsertPoint) {} | |||
253 | ||||
254 | /// Returns true if this insert point is set. | |||
255 | bool isSet() const { return (Block != nullptr); } | |||
256 | ||||
257 | BasicBlock *getBlock() const { return Block; } | |||
258 | BasicBlock::iterator getPoint() const { return Point; } | |||
259 | }; | |||
260 | ||||
261 | /// Returns the current insert point. | |||
262 | InsertPoint saveIP() const { | |||
263 | return InsertPoint(GetInsertBlock(), GetInsertPoint()); | |||
264 | } | |||
265 | ||||
266 | /// Returns the current insert point, clearing it in the process. | |||
267 | InsertPoint saveAndClearIP() { | |||
268 | InsertPoint IP(GetInsertBlock(), GetInsertPoint()); | |||
269 | ClearInsertionPoint(); | |||
270 | return IP; | |||
271 | } | |||
272 | ||||
273 | /// Sets the current insert point to a previously-saved location. | |||
274 | void restoreIP(InsertPoint IP) { | |||
275 | if (IP.isSet()) | |||
276 | SetInsertPoint(IP.getBlock(), IP.getPoint()); | |||
277 | else | |||
278 | ClearInsertionPoint(); | |||
279 | } | |||
280 | ||||
  /// Get the floating point math metadata being used.
  MDNode *getDefaultFPMathTag() const { return DefaultFPMathTag; }

  /// Get the flags to be applied to created floating point ops
  FastMathFlags getFastMathFlags() const { return FMF; }

  /// Mutable access to the flags applied to created floating point ops.
  FastMathFlags &getFastMathFlags() { return FMF; }

  /// Clear the fast-math flags.
  void clearFastMathFlags() { FMF.clear(); }

  /// Set the floating point math metadata to be used.
  void setDefaultFPMathTag(MDNode *FPMathTag) { DefaultFPMathTag = FPMathTag; }

  /// Set the fast-math flags to be used with generated fp-math operators
  void setFastMathFlags(FastMathFlags NewFMF) { FMF = NewFMF; }

  /// Enable/Disable use of constrained floating point math. When
  /// enabled the CreateF<op>() calls instead create constrained
  /// floating point intrinsic calls. Fast math flags are unaffected
  /// by this setting.
  void setIsFPConstrained(bool IsCon) { IsFPConstrained = IsCon; }

  /// Query for the use of constrained floating point math
  bool getIsFPConstrained() { return IsFPConstrained; }
306 | ||||
307 | /// Set the exception handling to be used with constrained floating point | |||
308 | void setDefaultConstrainedExcept(fp::ExceptionBehavior NewExcept) { | |||
309 | #ifndef NDEBUG | |||
310 | Optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(NewExcept); | |||
311 | assert(ExceptStr && "Garbage strict exception behavior!")(static_cast <bool> (ExceptStr && "Garbage strict exception behavior!" ) ? void (0) : __assert_fail ("ExceptStr && \"Garbage strict exception behavior!\"" , "llvm/include/llvm/IR/IRBuilder.h", 311, __extension__ __PRETTY_FUNCTION__ )); | |||
312 | #endif | |||
313 | DefaultConstrainedExcept = NewExcept; | |||
314 | } | |||
315 | ||||
316 | /// Set the rounding mode handling to be used with constrained floating point | |||
317 | void setDefaultConstrainedRounding(RoundingMode NewRounding) { | |||
318 | #ifndef NDEBUG | |||
319 | Optional<StringRef> RoundingStr = convertRoundingModeToStr(NewRounding); | |||
320 | assert(RoundingStr && "Garbage strict rounding mode!")(static_cast <bool> (RoundingStr && "Garbage strict rounding mode!" ) ? void (0) : __assert_fail ("RoundingStr && \"Garbage strict rounding mode!\"" , "llvm/include/llvm/IR/IRBuilder.h", 320, __extension__ __PRETTY_FUNCTION__ )); | |||
321 | #endif | |||
322 | DefaultConstrainedRounding = NewRounding; | |||
323 | } | |||
324 | ||||
  /// Get the exception handling used with constrained floating point
  // NOTE(review): could likely be marked const — verify no callers rely on
  // the non-const signature before changing it.
  fp::ExceptionBehavior getDefaultConstrainedExcept() {
    return DefaultConstrainedExcept;
  }

  /// Get the rounding mode handling used with constrained floating point
  RoundingMode getDefaultConstrainedRounding() {
    return DefaultConstrainedRounding;
  }
334 | ||||
335 | void setConstrainedFPFunctionAttr() { | |||
336 | assert(BB && "Must have a basic block to set any function attributes!")(static_cast <bool> (BB && "Must have a basic block to set any function attributes!" ) ? void (0) : __assert_fail ("BB && \"Must have a basic block to set any function attributes!\"" , "llvm/include/llvm/IR/IRBuilder.h", 336, __extension__ __PRETTY_FUNCTION__ )); | |||
337 | ||||
338 | Function *F = BB->getParent(); | |||
339 | if (!F->hasFnAttribute(Attribute::StrictFP)) { | |||
340 | F->addFnAttr(Attribute::StrictFP); | |||
341 | } | |||
342 | } | |||
343 | ||||
344 | void setConstrainedFPCallAttr(CallBase *I) { | |||
345 | I->addFnAttr(Attribute::StrictFP); | |||
346 | } | |||
347 | ||||
348 | void setDefaultOperandBundles(ArrayRef<OperandBundleDef> OpBundles) { | |||
349 | DefaultOperandBundles = OpBundles; | |||
350 | } | |||
351 | ||||
352 | //===--------------------------------------------------------------------===// | |||
353 | // RAII helpers. | |||
354 | //===--------------------------------------------------------------------===// | |||
355 | ||||
  // RAII object that stores the current insertion point and restores it
  // when the object is destroyed. This includes the debug location.
  class InsertPointGuard {
    IRBuilderBase &Builder;
    // Saved block held via AssertingVH — presumably to catch the saved block
    // being deleted while the guard is live; see ValueHandle docs to confirm.
    AssertingVH<BasicBlock> Block;
    BasicBlock::iterator Point;
    DebugLoc DbgLoc;

  public:
    /// Snapshot the builder's insertion point and debug location.
    InsertPointGuard(IRBuilderBase &B)
        : Builder(B), Block(B.GetInsertBlock()), Point(B.GetInsertPoint()),
          DbgLoc(B.getCurrentDebugLocation()) {}

    InsertPointGuard(const InsertPointGuard &) = delete;
    InsertPointGuard &operator=(const InsertPointGuard &) = delete;

    /// Restore the insertion point first, then the debug location (restoreIP
    /// may itself overwrite the debug location via SetInsertPoint).
    ~InsertPointGuard() {
      Builder.restoreIP(InsertPoint(Block, Point));
      Builder.SetCurrentDebugLocation(DbgLoc);
    }
  };
377 | ||||
  // RAII object that stores the current fast math settings and restores
  // them when the object is destroyed.
  class FastMathFlagGuard {
    IRBuilderBase &Builder;
    // Saved copies of every FP-related builder setting.
    FastMathFlags FMF;
    MDNode *FPMathTag;
    bool IsFPConstrained;
    fp::ExceptionBehavior DefaultConstrainedExcept;
    RoundingMode DefaultConstrainedRounding;

  public:
    /// Snapshot all fast-math related state from \p B.
    FastMathFlagGuard(IRBuilderBase &B)
        : Builder(B), FMF(B.FMF), FPMathTag(B.DefaultFPMathTag),
          IsFPConstrained(B.IsFPConstrained),
          DefaultConstrainedExcept(B.DefaultConstrainedExcept),
          DefaultConstrainedRounding(B.DefaultConstrainedRounding) {}

    FastMathFlagGuard(const FastMathFlagGuard &) = delete;
    FastMathFlagGuard &operator=(const FastMathFlagGuard &) = delete;

    /// Write the saved settings back to the builder.
    ~FastMathFlagGuard() {
      Builder.FMF = FMF;
      Builder.DefaultFPMathTag = FPMathTag;
      Builder.IsFPConstrained = IsFPConstrained;
      Builder.DefaultConstrainedExcept = DefaultConstrainedExcept;
      Builder.DefaultConstrainedRounding = DefaultConstrainedRounding;
    }
  };
406 | ||||
  // RAII object that stores the current default operand bundles and restores
  // them when the object is destroyed.
  class OperandBundlesGuard {
    IRBuilderBase &Builder;
    // Saved copy of the builder's default operand bundles (an ArrayRef view;
    // the underlying storage must outlive the guard).
    ArrayRef<OperandBundleDef> DefaultOperandBundles;

  public:
    /// Snapshot the builder's default operand bundles.
    OperandBundlesGuard(IRBuilderBase &B)
        : Builder(B), DefaultOperandBundles(B.DefaultOperandBundles) {}

    OperandBundlesGuard(const OperandBundlesGuard &) = delete;
    OperandBundlesGuard &operator=(const OperandBundlesGuard &) = delete;

    /// Restore the saved default operand bundles.
    ~OperandBundlesGuard() {
      Builder.DefaultOperandBundles = DefaultOperandBundles;
    }
  };
424 | ||||
425 | ||||
426 | //===--------------------------------------------------------------------===// | |||
427 | // Miscellaneous creation methods. | |||
428 | //===--------------------------------------------------------------------===// | |||
429 | ||||
  /// Make a new global variable with initializer type i8*
  ///
  /// Make a new global variable with an initializer that has array of i8 type
  /// filled in with the null terminated string value specified. The new global
  /// variable will be marked mergable with any others of the same contents. If
  /// Name is specified, it is the name of the global variable created.
  ///
  /// If no module is given via \p M, it is taken from the insertion point
  /// basic block.
  GlobalVariable *CreateGlobalString(StringRef Str, const Twine &Name = "",
                                     unsigned AddressSpace = 0,
                                     Module *M = nullptr);
442 | ||||
  /// Get a constant value representing either true or false.
  ConstantInt *getInt1(bool V) {
    return ConstantInt::get(getInt1Ty(), V);
  }

  /// Get the constant value for i1 true.
  ConstantInt *getTrue() {
    return ConstantInt::getTrue(Context);
  }

  /// Get the constant value for i1 false.
  ConstantInt *getFalse() {
    return ConstantInt::getFalse(Context);
  }

  /// Get a constant 8-bit value.
  ConstantInt *getInt8(uint8_t C) {
    return ConstantInt::get(getInt8Ty(), C);
  }

  /// Get a constant 16-bit value.
  ConstantInt *getInt16(uint16_t C) {
    return ConstantInt::get(getInt16Ty(), C);
  }

  /// Get a constant 32-bit value.
  ConstantInt *getInt32(uint32_t C) {
    return ConstantInt::get(getInt32Ty(), C);
  }

  /// Get a constant 64-bit value.
  ConstantInt *getInt64(uint64_t C) {
    return ConstantInt::get(getInt64Ty(), C);
  }

  /// Get a constant N-bit value, zero extended or truncated from
  /// a 64-bit value.
  ConstantInt *getIntN(unsigned N, uint64_t C) {
    return ConstantInt::get(getIntNTy(N), C);
  }

  /// Get a constant integer value of arbitrary bit width from \p AI.
  ConstantInt *getInt(const APInt &AI) {
    return ConstantInt::get(Context, AI);
  }
488 | ||||
489 | //===--------------------------------------------------------------------===// | |||
490 | // Type creation methods | |||
491 | //===--------------------------------------------------------------------===// | |||
492 | ||||
  /// Fetch the type representing a single bit
  IntegerType *getInt1Ty() {
    return Type::getInt1Ty(Context);
  }

  /// Fetch the type representing an 8-bit integer.
  IntegerType *getInt8Ty() {
    return Type::getInt8Ty(Context);
  }

  /// Fetch the type representing a 16-bit integer.
  IntegerType *getInt16Ty() {
    return Type::getInt16Ty(Context);
  }

  /// Fetch the type representing a 32-bit integer.
  IntegerType *getInt32Ty() {
    return Type::getInt32Ty(Context);
  }

  /// Fetch the type representing a 64-bit integer.
  IntegerType *getInt64Ty() {
    return Type::getInt64Ty(Context);
  }

  /// Fetch the type representing a 128-bit integer.
  IntegerType *getInt128Ty() { return Type::getInt128Ty(Context); }

  /// Fetch the type representing an N-bit integer.
  IntegerType *getIntNTy(unsigned N) {
    return Type::getIntNTy(Context, N);
  }

  /// Fetch the type representing a 16-bit floating point value.
  Type *getHalfTy() {
    return Type::getHalfTy(Context);
  }

  /// Fetch the type representing a 16-bit brain floating point value.
  Type *getBFloatTy() {
    return Type::getBFloatTy(Context);
  }

  /// Fetch the type representing a 32-bit floating point value.
  Type *getFloatTy() {
    return Type::getFloatTy(Context);
  }

  /// Fetch the type representing a 64-bit floating point value.
  Type *getDoubleTy() {
    return Type::getDoubleTy(Context);
  }

  /// Fetch the type representing void.
  Type *getVoidTy() {
    return Type::getVoidTy(Context);
  }

  /// Fetch the type representing a pointer in the given address space.
  PointerType *getPtrTy(unsigned AddrSpace = 0) {
    return PointerType::get(Context, AddrSpace);
  }

  /// Fetch the type representing a pointer to an 8-bit integer value.
  PointerType *getInt8PtrTy(unsigned AddrSpace = 0) {
    return Type::getInt8PtrTy(Context, AddrSpace);
  }

  /// Fetch the type of an integer with size at least as big as that of a
  /// pointer in the given address space.
  IntegerType *getIntPtrTy(const DataLayout &DL, unsigned AddrSpace = 0) {
    return DL.getIntPtrType(Context, AddrSpace);
  }
566 | ||||
567 | //===--------------------------------------------------------------------===// | |||
568 | // Intrinsic creation methods | |||
569 | //===--------------------------------------------------------------------===// | |||
570 | ||||
571 | /// Create and insert a memset to the specified pointer and the | |||
572 | /// specified value. | |||
573 | /// | |||
574 | /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is | |||
575 | /// specified, it will be added to the instruction. Likewise with alias.scope | |||
576 | /// and noalias tags. | |||
577 | CallInst *CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, | |||
578 | MaybeAlign Align, bool isVolatile = false, | |||
579 | MDNode *TBAATag = nullptr, MDNode *ScopeTag = nullptr, | |||
580 | MDNode *NoAliasTag = nullptr) { | |||
581 | return CreateMemSet(Ptr, Val, getInt64(Size), Align, isVolatile, | |||
582 | TBAATag, ScopeTag, NoAliasTag); | |||
583 | } | |||
584 | ||||
585 | CallInst *CreateMemSet(Value *Ptr, Value *Val, Value *Size, MaybeAlign Align, | |||
586 | bool isVolatile = false, MDNode *TBAATag = nullptr, | |||
587 | MDNode *ScopeTag = nullptr, | |||
588 | MDNode *NoAliasTag = nullptr); | |||
589 | ||||
590 | CallInst *CreateMemSetInline(Value *Dst, MaybeAlign DstAlign, Value *Val, | |||
591 | Value *Size, bool IsVolatile = false, | |||
592 | MDNode *TBAATag = nullptr, | |||
593 | MDNode *ScopeTag = nullptr, | |||
594 | MDNode *NoAliasTag = nullptr); | |||
595 | ||||
596 | /// Create and insert an element unordered-atomic memset of the region of | |||
597 | /// memory starting at the given pointer to the given value. | |||
598 | /// | |||
599 | /// If the pointer isn't an i8*, it will be converted. If a TBAA tag is | |||
600 | /// specified, it will be added to the instruction. Likewise with alias.scope | |||
601 | /// and noalias tags. | |||
602 | CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val, | |||
603 | uint64_t Size, Align Alignment, | |||
604 | uint32_t ElementSize, | |||
605 | MDNode *TBAATag = nullptr, | |||
606 | MDNode *ScopeTag = nullptr, | |||
607 | MDNode *NoAliasTag = nullptr) { | |||
608 | return CreateElementUnorderedAtomicMemSet(Ptr, Val, getInt64(Size), | |||
609 | Align(Alignment), ElementSize, | |||
610 | TBAATag, ScopeTag, NoAliasTag); | |||
611 | } | |||
612 | ||||
613 | CallInst *CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val, | |||
614 | Value *Size, Align Alignment, | |||
615 | uint32_t ElementSize, | |||
616 | MDNode *TBAATag = nullptr, | |||
617 | MDNode *ScopeTag = nullptr, | |||
618 | MDNode *NoAliasTag = nullptr); | |||
619 | ||||
620 | /// Create and insert a memcpy between the specified pointers. | |||
621 | /// | |||
622 | /// If the pointers aren't i8*, they will be converted. If a TBAA tag is | |||
623 | /// specified, it will be added to the instruction. Likewise with alias.scope | |||
624 | /// and noalias tags. | |||
625 | CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src, | |||
626 | MaybeAlign SrcAlign, uint64_t Size, | |||
627 | bool isVolatile = false, MDNode *TBAATag = nullptr, | |||
628 | MDNode *TBAAStructTag = nullptr, | |||
629 | MDNode *ScopeTag = nullptr, | |||
630 | MDNode *NoAliasTag = nullptr) { | |||
631 | return CreateMemCpy(Dst, DstAlign, Src, SrcAlign, getInt64(Size), | |||
632 | isVolatile, TBAATag, TBAAStructTag, ScopeTag, | |||
633 | NoAliasTag); | |||
634 | } | |||
635 | ||||
636 | CallInst *CreateMemTransferInst( | |||
637 | Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src, | |||
638 | MaybeAlign SrcAlign, Value *Size, bool isVolatile = false, | |||
639 | MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr, | |||
640 | MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr); | |||
641 | ||||
  /// Create and insert a memcpy between the specified pointers with a
  /// runtime (Value) size.
  CallInst *CreateMemCpy(Value *Dst, MaybeAlign DstAlign, Value *Src,
                         MaybeAlign SrcAlign, Value *Size,
                         bool isVolatile = false, MDNode *TBAATag = nullptr,
                         MDNode *TBAAStructTag = nullptr,
                         MDNode *ScopeTag = nullptr,
                         MDNode *NoAliasTag = nullptr) {
    // Implemented as the generic memory-transfer builder specialized with
    // the memcpy intrinsic ID.
    return CreateMemTransferInst(Intrinsic::memcpy, Dst, DstAlign, Src,
                                 SrcAlign, Size, isVolatile, TBAATag,
                                 TBAAStructTag, ScopeTag, NoAliasTag);
  }
652 | ||||
653 | CallInst * | |||
654 | CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign, Value *Src, | |||
655 | MaybeAlign SrcAlign, Value *Size, bool IsVolatile = false, | |||
656 | MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr, | |||
657 | MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr); | |||
658 | ||||
659 | /// Create and insert an element unordered-atomic memcpy between the | |||
660 | /// specified pointers. | |||
661 | /// | |||
662 | /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, respectively. | |||
663 | /// | |||
664 | /// If the pointers aren't i8*, they will be converted. If a TBAA tag is | |||
665 | /// specified, it will be added to the instruction. Likewise with alias.scope | |||
666 | /// and noalias tags. | |||
667 | CallInst *CreateElementUnorderedAtomicMemCpy( | |||
668 | Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, | |||
669 | uint32_t ElementSize, MDNode *TBAATag = nullptr, | |||
670 | MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr, | |||
671 | MDNode *NoAliasTag = nullptr); | |||
672 | ||||
  /// Create and insert a memmove between the specified pointers with a
  /// constant size; wraps the size in an i64 and defers to the Value *Size
  /// overload.
  CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src,
                          MaybeAlign SrcAlign, uint64_t Size,
                          bool isVolatile = false, MDNode *TBAATag = nullptr,
                          MDNode *ScopeTag = nullptr,
                          MDNode *NoAliasTag = nullptr) {
    return CreateMemMove(Dst, DstAlign, Src, SrcAlign, getInt64(Size),
                         isVolatile, TBAATag, ScopeTag, NoAliasTag);
  }
681 | ||||
682 | CallInst *CreateMemMove(Value *Dst, MaybeAlign DstAlign, Value *Src, | |||
683 | MaybeAlign SrcAlign, Value *Size, | |||
684 | bool isVolatile = false, MDNode *TBAATag = nullptr, | |||
685 | MDNode *ScopeTag = nullptr, | |||
686 | MDNode *NoAliasTag = nullptr); | |||
687 | ||||
688 | /// \brief Create and insert an element unordered-atomic memmove between the | |||
689 | /// specified pointers. | |||
690 | /// | |||
691 | /// DstAlign/SrcAlign are the alignments of the Dst/Src pointers, | |||
692 | /// respectively. | |||
693 | /// | |||
694 | /// If the pointers aren't i8*, they will be converted. If a TBAA tag is | |||
695 | /// specified, it will be added to the instruction. Likewise with alias.scope | |||
696 | /// and noalias tags. | |||
697 | CallInst *CreateElementUnorderedAtomicMemMove( | |||
698 | Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, | |||
699 | uint32_t ElementSize, MDNode *TBAATag = nullptr, | |||
700 | MDNode *TBAAStructTag = nullptr, MDNode *ScopeTag = nullptr, | |||
701 | MDNode *NoAliasTag = nullptr); | |||
702 | ||||
703 | private: | |||
704 | CallInst *getReductionIntrinsic(Intrinsic::ID ID, Value *Src); | |||
705 | ||||
706 | public: | |||
707 | /// Create a sequential vector fadd reduction intrinsic of the source vector. | |||
708 | /// The first parameter is a scalar accumulator value. An unordered reduction | |||
709 | /// can be created by adding the reassoc fast-math flag to the resulting | |||
710 | /// sequential reduction. | |||
711 | CallInst *CreateFAddReduce(Value *Acc, Value *Src); | |||
712 | ||||
713 | /// Create a sequential vector fmul reduction intrinsic of the source vector. | |||
714 | /// The first parameter is a scalar accumulator value. An unordered reduction | |||
715 | /// can be created by adding the reassoc fast-math flag to the resulting | |||
716 | /// sequential reduction. | |||
717 | CallInst *CreateFMulReduce(Value *Acc, Value *Src); | |||
718 | ||||
719 | /// Create a vector int add reduction intrinsic of the source vector. | |||
720 | CallInst *CreateAddReduce(Value *Src); | |||
721 | ||||
722 | /// Create a vector int mul reduction intrinsic of the source vector. | |||
723 | CallInst *CreateMulReduce(Value *Src); | |||
724 | ||||
725 | /// Create a vector int AND reduction intrinsic of the source vector. | |||
726 | CallInst *CreateAndReduce(Value *Src); | |||
727 | ||||
728 | /// Create a vector int OR reduction intrinsic of the source vector. | |||
729 | CallInst *CreateOrReduce(Value *Src); | |||
730 | ||||
731 | /// Create a vector int XOR reduction intrinsic of the source vector. | |||
732 | CallInst *CreateXorReduce(Value *Src); | |||
733 | ||||
734 | /// Create a vector integer max reduction intrinsic of the source | |||
735 | /// vector. | |||
736 | CallInst *CreateIntMaxReduce(Value *Src, bool IsSigned = false); | |||
737 | ||||
738 | /// Create a vector integer min reduction intrinsic of the source | |||
739 | /// vector. | |||
740 | CallInst *CreateIntMinReduce(Value *Src, bool IsSigned = false); | |||
741 | ||||
742 | /// Create a vector float max reduction intrinsic of the source | |||
743 | /// vector. | |||
744 | CallInst *CreateFPMaxReduce(Value *Src); | |||
745 | ||||
746 | /// Create a vector float min reduction intrinsic of the source | |||
747 | /// vector. | |||
748 | CallInst *CreateFPMinReduce(Value *Src); | |||
749 | ||||
750 | /// Create a lifetime.start intrinsic. | |||
751 | /// | |||
752 | /// If the pointer isn't i8* it will be converted. | |||
753 | CallInst *CreateLifetimeStart(Value *Ptr, ConstantInt *Size = nullptr); | |||
754 | ||||
755 | /// Create a lifetime.end intrinsic. | |||
756 | /// | |||
757 | /// If the pointer isn't i8* it will be converted. | |||
758 | CallInst *CreateLifetimeEnd(Value *Ptr, ConstantInt *Size = nullptr); | |||
759 | ||||
760 | /// Create a call to invariant.start intrinsic. | |||
761 | /// | |||
762 | /// If the pointer isn't i8* it will be converted. | |||
763 | CallInst *CreateInvariantStart(Value *Ptr, ConstantInt *Size = nullptr); | |||
764 | ||||
765 | /// Create a call to llvm.threadlocal.address intrinsic. | |||
766 | CallInst *CreateThreadLocalAddress(Value *Ptr); | |||
767 | ||||
768 | /// Create a call to Masked Load intrinsic | |||
769 | CallInst *CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, | |||
770 | Value *PassThru = nullptr, const Twine &Name = ""); | |||
771 | ||||
772 | /// Create a call to Masked Store intrinsic | |||
773 | CallInst *CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, | |||
774 | Value *Mask); | |||
775 | ||||
776 | /// Create a call to Masked Gather intrinsic | |||
777 | CallInst *CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment, | |||
778 | Value *Mask = nullptr, Value *PassThru = nullptr, | |||
779 | const Twine &Name = ""); | |||
780 | ||||
781 | /// Create a call to Masked Scatter intrinsic | |||
782 | CallInst *CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment, | |||
783 | Value *Mask = nullptr); | |||
784 | ||||
785 | /// Create a call to Masked Expand Load intrinsic | |||
786 | CallInst *CreateMaskedExpandLoad(Type *Ty, Value *Ptr, Value *Mask = nullptr, | |||
787 | Value *PassThru = nullptr, | |||
788 | const Twine &Name = ""); | |||
789 | ||||
790 | /// Create a call to Masked Compress Store intrinsic | |||
791 | CallInst *CreateMaskedCompressStore(Value *Val, Value *Ptr, | |||
792 | Value *Mask = nullptr); | |||
793 | ||||
794 | /// Create an assume intrinsic call that allows the optimizer to | |||
795 | /// assume that the provided condition will be true. | |||
796 | /// | |||
797 | /// The optional argument \p OpBundles specifies operand bundles that are | |||
798 | /// added to the call instruction. | |||
799 | CallInst *CreateAssumption(Value *Cond, | |||
800 | ArrayRef<OperandBundleDef> OpBundles = llvm::None); | |||
801 | ||||
802 | /// Create a llvm.experimental.noalias.scope.decl intrinsic call. | |||
803 | Instruction *CreateNoAliasScopeDeclaration(Value *Scope); | |||
804 | Instruction *CreateNoAliasScopeDeclaration(MDNode *ScopeTag) { | |||
805 | return CreateNoAliasScopeDeclaration( | |||
806 | MetadataAsValue::get(Context, ScopeTag)); | |||
807 | } | |||
808 | ||||
  /// Create a call to the experimental.gc.statepoint intrinsic to
  /// start a new statepoint sequence.
  CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
                                   FunctionCallee ActualCallee,
                                   ArrayRef<Value *> CallArgs,
                                   Optional<ArrayRef<Value *>> DeoptArgs,
                                   ArrayRef<Value *> GCArgs,
                                   const Twine &Name = "");

  /// Create a call to the experimental.gc.statepoint intrinsic to
  /// start a new statepoint sequence. This overload additionally takes
  /// statepoint \p Flags and GC-transition arguments.
  CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
                                   FunctionCallee ActualCallee, uint32_t Flags,
                                   ArrayRef<Value *> CallArgs,
                                   Optional<ArrayRef<Use>> TransitionArgs,
                                   Optional<ArrayRef<Use>> DeoptArgs,
                                   ArrayRef<Value *> GCArgs,
                                   const Twine &Name = "");

  /// Convenience function for the common case when CallArgs are filled
  /// in using makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be
  /// .get()'ed to get the Value pointer.
  CallInst *CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes,
                                   FunctionCallee ActualCallee,
                                   ArrayRef<Use> CallArgs,
                                   Optional<ArrayRef<Value *>> DeoptArgs,
                                   ArrayRef<Value *> GCArgs,
                                   const Twine &Name = "");

  /// Create an invoke to the experimental.gc.statepoint intrinsic to
  /// start a new statepoint sequence.
  InvokeInst *
  CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
                           FunctionCallee ActualInvokee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest, ArrayRef<Value *> InvokeArgs,
                           Optional<ArrayRef<Value *>> DeoptArgs,
                           ArrayRef<Value *> GCArgs, const Twine &Name = "");

  /// Create an invoke to the experimental.gc.statepoint intrinsic to
  /// start a new statepoint sequence. This overload additionally takes
  /// statepoint \p Flags and GC-transition arguments.
  InvokeInst *CreateGCStatepointInvoke(
      uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
      BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
      ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
      Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
      const Twine &Name = "");

  // Convenience function for the common case when CallArgs are filled in using
  // makeArrayRef(CS.arg_begin(), CS.arg_end()); Use needs to be .get()'ed to
  // get the Value *.
  InvokeInst *
  CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes,
                           FunctionCallee ActualInvokee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
                           Optional<ArrayRef<Value *>> DeoptArgs,
                           ArrayRef<Value *> GCArgs, const Twine &Name = "");

  /// Create a call to the experimental.gc.result intrinsic to extract
  /// the result from a call wrapped in a statepoint.
  CallInst *CreateGCResult(Instruction *Statepoint,
                           Type *ResultType,
                           const Twine &Name = "");

  /// Create a call to the experimental.gc.relocate intrinsics to
  /// project the relocated value of one pointer from the statepoint.
  CallInst *CreateGCRelocate(Instruction *Statepoint,
                             int BaseOffset,
                             int DerivedOffset,
                             Type *ResultType,
                             const Twine &Name = "");

  /// Create a call to the experimental.gc.pointer.base intrinsic to get the
  /// base pointer for the specified derived pointer.
  CallInst *CreateGCGetPointerBase(Value *DerivedPtr, const Twine &Name = "");

  /// Create a call to the experimental.gc.get.pointer.offset intrinsic to get
  /// the offset of the specified derived pointer from its base.
  CallInst *CreateGCGetPointerOffset(Value *DerivedPtr, const Twine &Name = "");

  /// Create a call to llvm.vscale, multiplied by \p Scaling. The type of VScale
  /// will be the same type as that of \p Scaling.
  Value *CreateVScale(Constant *Scaling, const Twine &Name = "");

  /// Creates a vector of type \p DstType with the linear sequence <0, 1, ...>
  Value *CreateStepVector(Type *DstType, const Twine &Name = "");

  /// Create a call to intrinsic \p ID with 1 operand which is mangled on its
  /// type. If \p FMFSource is provided, fast-math-flags are copied from it.
  CallInst *CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
                                 Instruction *FMFSource = nullptr,
                                 const Twine &Name = "");

  /// Create a call to intrinsic \p ID with 2 operands which is mangled on the
  /// first type. If \p FMFSource is provided, fast-math-flags are copied.
  CallInst *CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS,
                                  Instruction *FMFSource = nullptr,
                                  const Twine &Name = "");

  /// Create a call to intrinsic \p ID with \p Args, mangled using \p Types. If
  /// \p FMFSource is provided, copy fast-math-flags from that instruction to
  /// the intrinsic.
  CallInst *CreateIntrinsic(Intrinsic::ID ID, ArrayRef<Type *> Types,
                            ArrayRef<Value *> Args,
                            Instruction *FMFSource = nullptr,
                            const Twine &Name = "");

  /// Create a call to intrinsic \p ID with \p RetTy and \p Args. If
  /// \p FMFSource is provided, copy fast-math-flags from that instruction to
  /// the intrinsic.
  CallInst *CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
                            ArrayRef<Value *> Args,
                            Instruction *FMFSource = nullptr,
                            const Twine &Name = "");
922 | ||||
  /// Create call to the minnum intrinsic (thin wrapper over
  /// CreateBinaryIntrinsic; see LangRef for the exact NaN semantics).
  CallInst *CreateMinNum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::minnum, LHS, RHS, nullptr, Name);
  }

  /// Create call to the maxnum intrinsic.
  CallInst *CreateMaxNum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::maxnum, LHS, RHS, nullptr, Name);
  }

  /// Create call to the minimum intrinsic.
  CallInst *CreateMinimum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::minimum, LHS, RHS, nullptr, Name);
  }

  /// Create call to the maximum intrinsic.
  CallInst *CreateMaximum(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateBinaryIntrinsic(Intrinsic::maximum, LHS, RHS, nullptr, Name);
  }
942 | ||||
943 | /// Create a call to the arithmetic_fence intrinsic. | |||
944 | CallInst *CreateArithmeticFence(Value *Val, Type *DstType, | |||
945 | const Twine &Name = "") { | |||
946 | return CreateIntrinsic(Intrinsic::arithmetic_fence, DstType, Val, nullptr, | |||
947 | Name); | |||
948 | } | |||
949 | ||||
950 | /// Create a call to the vector.extract intrinsic. | |||
951 | CallInst *CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx, | |||
952 | const Twine &Name = "") { | |||
953 | return CreateIntrinsic(Intrinsic::vector_extract, | |||
954 | {DstType, SrcVec->getType()}, {SrcVec, Idx}, nullptr, | |||
955 | Name); | |||
956 | } | |||
957 | ||||
958 | /// Create a call to the vector.insert intrinsic. | |||
959 | CallInst *CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec, | |||
960 | Value *Idx, const Twine &Name = "") { | |||
961 | return CreateIntrinsic(Intrinsic::vector_insert, | |||
962 | {DstType, SubVec->getType()}, {SrcVec, SubVec, Idx}, | |||
963 | nullptr, Name); | |||
964 | } | |||
965 | ||||
private:
  /// Create a call to a masked intrinsic with given Id. Shared implementation
  /// behind the public CreateMasked* helpers above.
  CallInst *CreateMaskedIntrinsic(Intrinsic::ID Id, ArrayRef<Value *> Ops,
                                  ArrayRef<Type *> OverloadedTypes,
                                  const Twine &Name = "");

  /// Bitcast \p Ptr to an i8* in its address space (helper for the
  /// memory-intrinsic builders).
  Value *getCastedInt8PtrValue(Value *Ptr);

  //===--------------------------------------------------------------------===//
  // Instruction creation methods: Terminators
  //===--------------------------------------------------------------------===//
978 | private: | |||
979 | /// Helper to add branch weight and unpredictable metadata onto an | |||
980 | /// instruction. | |||
981 | /// \returns The annotated instruction. | |||
982 | template <typename InstTy> | |||
983 | InstTy *addBranchMetadata(InstTy *I, MDNode *Weights, MDNode *Unpredictable) { | |||
984 | if (Weights) | |||
985 | I->setMetadata(LLVMContext::MD_prof, Weights); | |||
986 | if (Unpredictable) | |||
987 | I->setMetadata(LLVMContext::MD_unpredictable, Unpredictable); | |||
988 | return I; | |||
989 | } | |||
990 | ||||
public:
  /// Create a 'ret void' instruction.
  ReturnInst *CreateRetVoid() {
    return Insert(ReturnInst::Create(Context));
  }

  /// Create a 'ret <val>' instruction returning \p V.
  ReturnInst *CreateRet(Value *V) {
    return Insert(ReturnInst::Create(Context, V));
  }
1001 | ||||
1002 | /// Create a sequence of N insertvalue instructions, | |||
1003 | /// with one Value from the retVals array each, that build a aggregate | |||
1004 | /// return value one value at a time, and a ret instruction to return | |||
1005 | /// the resulting aggregate value. | |||
1006 | /// | |||
1007 | /// This is a convenience function for code that uses aggregate return values | |||
1008 | /// as a vehicle for having multiple return values. | |||
1009 | ReturnInst *CreateAggregateRet(Value *const *retVals, unsigned N) { | |||
1010 | Value *V = PoisonValue::get(getCurrentFunctionReturnType()); | |||
1011 | for (unsigned i = 0; i != N; ++i) | |||
1012 | V = CreateInsertValue(V, retVals[i], i, "mrv"); | |||
1013 | return Insert(ReturnInst::Create(Context, V)); | |||
1014 | } | |||
1015 | ||||
  /// Create an unconditional 'br label X' instruction.
  BranchInst *CreateBr(BasicBlock *Dest) {
    return Insert(BranchInst::Create(Dest));
  }

  /// Create a conditional 'br Cond, TrueDest, FalseDest'
  /// instruction, optionally annotated with !prof branch weights and
  /// !unpredictable metadata (see addBranchMetadata).
  BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False,
                           MDNode *BranchWeights = nullptr,
                           MDNode *Unpredictable = nullptr) {
    return Insert(addBranchMetadata(BranchInst::Create(True, False, Cond),
                                    BranchWeights, Unpredictable));
  }
1029 | ||||
1030 | /// Create a conditional 'br Cond, TrueDest, FalseDest' | |||
1031 | /// instruction. Copy branch meta data if available. | |||
1032 | BranchInst *CreateCondBr(Value *Cond, BasicBlock *True, BasicBlock *False, | |||
1033 | Instruction *MDSrc) { | |||
1034 | BranchInst *Br = BranchInst::Create(True, False, Cond); | |||
1035 | if (MDSrc) { | |||
1036 | unsigned WL[4] = {LLVMContext::MD_prof, LLVMContext::MD_unpredictable, | |||
1037 | LLVMContext::MD_make_implicit, LLVMContext::MD_dbg}; | |||
1038 | Br->copyMetadata(*MDSrc, makeArrayRef(&WL[0], 4)); | |||
1039 | } | |||
1040 | return Insert(Br); | |||
1041 | } | |||
1042 | ||||
  /// Create a switch instruction with the specified value, default dest,
  /// and with a hint for the number of cases that will be added (for efficient
  /// allocation). Optional !prof / !unpredictable metadata is attached via
  /// addBranchMetadata.
  SwitchInst *CreateSwitch(Value *V, BasicBlock *Dest, unsigned NumCases = 10,
                           MDNode *BranchWeights = nullptr,
                           MDNode *Unpredictable = nullptr) {
    return Insert(addBranchMetadata(SwitchInst::Create(V, Dest, NumCases),
                                    BranchWeights, Unpredictable));
  }

  /// Create an indirect branch instruction with the specified address
  /// operand, with an optional hint for the number of destinations that will be
  /// added (for efficient allocation).
  IndirectBrInst *CreateIndirectBr(Value *Addr, unsigned NumDests = 10) {
    return Insert(IndirectBrInst::Create(Addr, NumDests));
  }
1059 | ||||
  /// Create an invoke instruction with operand bundles. When the builder is
  /// in constrained-FP mode, the strictfp call attribute is added.
  InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
                           BasicBlock *NormalDest, BasicBlock *UnwindDest,
                           ArrayRef<Value *> Args,
                           ArrayRef<OperandBundleDef> OpBundles,
                           const Twine &Name = "") {
    InvokeInst *II =
        InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args, OpBundles);
    if (IsFPConstrained)
      setConstrainedFPCallAttr(II);
    return Insert(II, Name);
  }
  /// Create an invoke instruction without operand bundles.
  InvokeInst *CreateInvoke(FunctionType *Ty, Value *Callee,
                           BasicBlock *NormalDest, BasicBlock *UnwindDest,
                           ArrayRef<Value *> Args = None,
                           const Twine &Name = "") {
    InvokeInst *II =
        InvokeInst::Create(Ty, Callee, NormalDest, UnwindDest, Args);
    if (IsFPConstrained)
      setConstrainedFPCallAttr(II);
    return Insert(II, Name);
  }

  /// FunctionCallee convenience overload (with operand bundles): splits the
  /// callee into its function type and callee value.
  InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest, ArrayRef<Value *> Args,
                           ArrayRef<OperandBundleDef> OpBundles,
                           const Twine &Name = "") {
    return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
                        NormalDest, UnwindDest, Args, OpBundles, Name);
  }

  /// FunctionCallee convenience overload (no operand bundles).
  InvokeInst *CreateInvoke(FunctionCallee Callee, BasicBlock *NormalDest,
                           BasicBlock *UnwindDest,
                           ArrayRef<Value *> Args = None,
                           const Twine &Name = "") {
    return CreateInvoke(Callee.getFunctionType(), Callee.getCallee(),
                        NormalDest, UnwindDest, Args, Name);
  }
1098 | ||||
  /// \brief Create a callbr instruction.
  CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
                           BasicBlock *DefaultDest,
                           ArrayRef<BasicBlock *> IndirectDests,
                           ArrayRef<Value *> Args = None,
                           const Twine &Name = "") {
    return Insert(CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests,
                                     Args), Name);
  }
  /// \brief Create a callbr instruction with operand bundles.
  CallBrInst *CreateCallBr(FunctionType *Ty, Value *Callee,
                           BasicBlock *DefaultDest,
                           ArrayRef<BasicBlock *> IndirectDests,
                           ArrayRef<Value *> Args,
                           ArrayRef<OperandBundleDef> OpBundles,
                           const Twine &Name = "") {
    return Insert(
        CallBrInst::Create(Ty, Callee, DefaultDest, IndirectDests, Args,
                           OpBundles), Name);
  }

  /// FunctionCallee convenience overload: splits the callee into its
  /// function type and callee value.
  CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest,
                           ArrayRef<BasicBlock *> IndirectDests,
                           ArrayRef<Value *> Args = None,
                           const Twine &Name = "") {
    return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(),
                        DefaultDest, IndirectDests, Args, Name);
  }
1126 | CallBrInst *CreateCallBr(FunctionCallee Callee, BasicBlock *DefaultDest, | |||
1127 | ArrayRef<BasicBlock *> IndirectDests, | |||
1128 | ArrayRef<Value *> Args, | |||
1129 | ArrayRef<OperandBundleDef> OpBundles, | |||
1130 | const Twine &Name = "") { | |||
1131 | return CreateCallBr(Callee.getFunctionType(), Callee.getCallee(), | |||
1132 | DefaultDest, IndirectDests, Args, Name); | |||
1133 | } | |||
1134 | ||||
  /// Create a 'resume' instruction re-raising exception value \p Exn.
  ResumeInst *CreateResume(Value *Exn) {
    return Insert(ResumeInst::Create(Exn));
  }

  /// Create a 'cleanupret' from \p CleanupPad, unwinding to \p UnwindBB
  /// (caller if null).
  CleanupReturnInst *CreateCleanupRet(CleanupPadInst *CleanupPad,
                                      BasicBlock *UnwindBB = nullptr) {
    return Insert(CleanupReturnInst::Create(CleanupPad, UnwindBB));
  }

  /// Create a 'catchswitch' within \p ParentPad; \p NumHandlers is an
  /// allocation hint for the number of handlers to be added.
  CatchSwitchInst *CreateCatchSwitch(Value *ParentPad, BasicBlock *UnwindBB,
                                     unsigned NumHandlers,
                                     const Twine &Name = "") {
    return Insert(CatchSwitchInst::Create(ParentPad, UnwindBB, NumHandlers),
                  Name);
  }

  /// Create a 'catchpad' within \p ParentPad with the given arguments.
  CatchPadInst *CreateCatchPad(Value *ParentPad, ArrayRef<Value *> Args,
                               const Twine &Name = "") {
    return Insert(CatchPadInst::Create(ParentPad, Args), Name);
  }

  /// Create a 'cleanuppad' within \p ParentPad with the given arguments.
  CleanupPadInst *CreateCleanupPad(Value *ParentPad,
                                   ArrayRef<Value *> Args = None,
                                   const Twine &Name = "") {
    return Insert(CleanupPadInst::Create(ParentPad, Args), Name);
  }

  /// Create a 'catchret' from \p CatchPad continuing at \p BB.
  CatchReturnInst *CreateCatchRet(CatchPadInst *CatchPad, BasicBlock *BB) {
    return Insert(CatchReturnInst::Create(CatchPad, BB));
  }

  /// Create an 'unreachable' instruction.
  UnreachableInst *CreateUnreachable() {
    return Insert(new UnreachableInst(Context));
  }
1169 | ||||
1170 | //===--------------------------------------------------------------------===// | |||
1171 | // Instruction creation methods: Binary Operators | |||
1172 | //===--------------------------------------------------------------------===// | |||
1173 | private: | |||
1174 | BinaryOperator *CreateInsertNUWNSWBinOp(BinaryOperator::BinaryOps Opc, | |||
1175 | Value *LHS, Value *RHS, | |||
1176 | const Twine &Name, | |||
1177 | bool HasNUW, bool HasNSW) { | |||
1178 | BinaryOperator *BO = Insert(BinaryOperator::Create(Opc, LHS, RHS), Name); | |||
1179 | if (HasNUW) BO->setHasNoUnsignedWrap(); | |||
1180 | if (HasNSW) BO->setHasNoSignedWrap(); | |||
1181 | return BO; | |||
1182 | } | |||
1183 | ||||
1184 | Instruction *setFPAttrs(Instruction *I, MDNode *FPMD, | |||
1185 | FastMathFlags FMF) const { | |||
1186 | if (!FPMD) | |||
1187 | FPMD = DefaultFPMathTag; | |||
1188 | if (FPMD) | |||
1189 | I->setMetadata(LLVMContext::MD_fpmath, FPMD); | |||
1190 | I->setFastMathFlags(FMF); | |||
1191 | return I; | |||
1192 | } | |||
1193 | ||||
1194 | Value *getConstrainedFPRounding(Optional<RoundingMode> Rounding) { | |||
1195 | RoundingMode UseRounding = DefaultConstrainedRounding; | |||
1196 | ||||
1197 | if (Rounding) | |||
1198 | UseRounding = Rounding.value(); | |||
1199 | ||||
1200 | Optional<StringRef> RoundingStr = convertRoundingModeToStr(UseRounding); | |||
1201 | assert(RoundingStr && "Garbage strict rounding mode!")(static_cast <bool> (RoundingStr && "Garbage strict rounding mode!" ) ? void (0) : __assert_fail ("RoundingStr && \"Garbage strict rounding mode!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1201, __extension__ __PRETTY_FUNCTION__ )); | |||
1202 | auto *RoundingMDS = MDString::get(Context, RoundingStr.value()); | |||
1203 | ||||
1204 | return MetadataAsValue::get(Context, RoundingMDS); | |||
1205 | } | |||
1206 | ||||
1207 | Value *getConstrainedFPExcept(Optional<fp::ExceptionBehavior> Except) { | |||
1208 | fp::ExceptionBehavior UseExcept = DefaultConstrainedExcept; | |||
1209 | ||||
1210 | if (Except) | |||
1211 | UseExcept = Except.value(); | |||
1212 | ||||
1213 | Optional<StringRef> ExceptStr = convertExceptionBehaviorToStr(UseExcept); | |||
1214 | assert(ExceptStr && "Garbage strict exception behavior!")(static_cast <bool> (ExceptStr && "Garbage strict exception behavior!" ) ? void (0) : __assert_fail ("ExceptStr && \"Garbage strict exception behavior!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1214, __extension__ __PRETTY_FUNCTION__ )); | |||
1215 | auto *ExceptMDS = MDString::get(Context, ExceptStr.value()); | |||
1216 | ||||
1217 | return MetadataAsValue::get(Context, ExceptMDS); | |||
1218 | } | |||
1219 | ||||
1220 | Value *getConstrainedFPPredicate(CmpInst::Predicate Predicate) { | |||
1221 | assert(CmpInst::isFPPredicate(Predicate) &&(static_cast <bool> (CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst ::FCMP_TRUE && "Invalid constrained FP comparison predicate!" ) ? void (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1224, __extension__ __PRETTY_FUNCTION__ )) | |||
1222 | Predicate != CmpInst::FCMP_FALSE &&(static_cast <bool> (CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst ::FCMP_TRUE && "Invalid constrained FP comparison predicate!" ) ? void (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1224, __extension__ __PRETTY_FUNCTION__ )) | |||
1223 | Predicate != CmpInst::FCMP_TRUE &&(static_cast <bool> (CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst ::FCMP_TRUE && "Invalid constrained FP comparison predicate!" ) ? void (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1224, __extension__ __PRETTY_FUNCTION__ )) | |||
1224 | "Invalid constrained FP comparison predicate!")(static_cast <bool> (CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst ::FCMP_TRUE && "Invalid constrained FP comparison predicate!" ) ? void (0) : __assert_fail ("CmpInst::isFPPredicate(Predicate) && Predicate != CmpInst::FCMP_FALSE && Predicate != CmpInst::FCMP_TRUE && \"Invalid constrained FP comparison predicate!\"" , "llvm/include/llvm/IR/IRBuilder.h", 1224, __extension__ __PRETTY_FUNCTION__ )); | |||
1225 | ||||
1226 | StringRef PredicateStr = CmpInst::getPredicateName(Predicate); | |||
1227 | auto *PredicateMDS = MDString::get(Context, PredicateStr); | |||
1228 | ||||
1229 | return MetadataAsValue::get(Context, PredicateMDS); | |||
1230 | } | |||
1231 | ||||
1232 | public: | |||
1233 | Value *CreateAdd(Value *LHS, Value *RHS, const Twine &Name = "", | |||
1234 | bool HasNUW = false, bool HasNSW = false) { | |||
1235 | if (Value *V = | |||
1236 | Folder.FoldNoWrapBinOp(Instruction::Add, LHS, RHS, HasNUW, HasNSW)) | |||
1237 | return V; | |||
1238 | return CreateInsertNUWNSWBinOp(Instruction::Add, LHS, RHS, Name, HasNUW, | |||
1239 | HasNSW); | |||
1240 | } | |||
1241 | ||||
  /// Create an add with the no-signed-wrap flag set.
  Value *CreateNSWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateAdd(LHS, RHS, Name, false, true);
  }

  /// Create an add with the no-unsigned-wrap flag set.
  Value *CreateNUWAdd(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateAdd(LHS, RHS, Name, true, false);
  }
1249 | ||||
1250 | Value *CreateSub(Value *LHS, Value *RHS, const Twine &Name = "", | |||
1251 | bool HasNUW = false, bool HasNSW = false) { | |||
1252 | if (Value *V = | |||
1253 | Folder.FoldNoWrapBinOp(Instruction::Sub, LHS, RHS, HasNUW, HasNSW)) | |||
1254 | return V; | |||
1255 | return CreateInsertNUWNSWBinOp(Instruction::Sub, LHS, RHS, Name, HasNUW, | |||
1256 | HasNSW); | |||
1257 | } | |||
1258 | ||||
  /// Create a sub with the no-signed-wrap flag set.
  Value *CreateNSWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateSub(LHS, RHS, Name, false, true);
  }

  /// Create a sub with the no-unsigned-wrap flag set.
  Value *CreateNUWSub(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateSub(LHS, RHS, Name, true, false);
  }
1266 | ||||
  /// Create an integer mul, constant-folding when possible; NUW/NSW flags
  /// are applied to the emitted instruction.
  Value *CreateMul(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (Value *V =
            Folder.FoldNoWrapBinOp(Instruction::Mul, LHS, RHS, HasNUW, HasNSW))
      return V;
    return CreateInsertNUWNSWBinOp(Instruction::Mul, LHS, RHS, Name, HasNUW,
                                   HasNSW);
  }

  /// Create a mul with the no-signed-wrap flag set.
  Value *CreateNSWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateMul(LHS, RHS, Name, false, true);
  }

  /// Create a mul with the no-unsigned-wrap flag set.
  Value *CreateNUWMul(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateMul(LHS, RHS, Name, true, false);
  }
1283 | ||||
1284 | Value *CreateUDiv(Value *LHS, Value *RHS, const Twine &Name = "", | |||
1285 | bool isExact = false) { | |||
1286 | if (Value *V = Folder.FoldExactBinOp(Instruction::UDiv, LHS, RHS, isExact)) | |||
1287 | return V; | |||
1288 | if (!isExact) | |||
1289 | return Insert(BinaryOperator::CreateUDiv(LHS, RHS), Name); | |||
1290 | return Insert(BinaryOperator::CreateExactUDiv(LHS, RHS), Name); | |||
1291 | } | |||
1292 | ||||
  /// Create an exact unsigned division.
  Value *CreateExactUDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateUDiv(LHS, RHS, Name, true);
  }

  /// Create a signed division, constant-folding when possible; emits the
  /// exact form when \p isExact is set.
  Value *CreateSDiv(Value *LHS, Value *RHS, const Twine &Name = "",
                    bool isExact = false) {
    if (Value *V = Folder.FoldExactBinOp(Instruction::SDiv, LHS, RHS, isExact))
      return V;
    if (!isExact)
      return Insert(BinaryOperator::CreateSDiv(LHS, RHS), Name);
    return Insert(BinaryOperator::CreateExactSDiv(LHS, RHS), Name);
  }

  /// Create an exact signed division.
  Value *CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name = "") {
    return CreateSDiv(LHS, RHS, Name, true);
  }
1309 | ||||
  /// Create an unsigned remainder, constant-folding when possible.
  Value *CreateURem(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (Value *V = Folder.FoldBinOp(Instruction::URem, LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateURem(LHS, RHS), Name);
  }

  /// Create a signed remainder, constant-folding when possible.
  Value *CreateSRem(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (Value *V = Folder.FoldBinOp(Instruction::SRem, LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateSRem(LHS, RHS), Name);
  }
1321 | ||||
  /// Create a left shift, constant-folding when possible; NUW/NSW flags are
  /// applied to the emitted instruction.
  Value *CreateShl(Value *LHS, Value *RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    if (Value *V =
            Folder.FoldNoWrapBinOp(Instruction::Shl, LHS, RHS, HasNUW, HasNSW))
      return V;
    return CreateInsertNUWNSWBinOp(Instruction::Shl, LHS, RHS, Name,
                                   HasNUW, HasNSW);
  }

  /// Overload taking the shift amount as an APInt (materialized as a
  /// ConstantInt of LHS's type).
  Value *CreateShl(Value *LHS, const APInt &RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
                     HasNUW, HasNSW);
  }

  /// Overload taking the shift amount as a plain integer.
  Value *CreateShl(Value *LHS, uint64_t RHS, const Twine &Name = "",
                   bool HasNUW = false, bool HasNSW = false) {
    return CreateShl(LHS, ConstantInt::get(LHS->getType(), RHS), Name,
                     HasNUW, HasNSW);
  }
1342 | ||||
  /// Create a logical right shift, constant-folding when possible; emits the
  /// exact form when \p isExact is set.
  Value *CreateLShr(Value *LHS, Value *RHS, const Twine &Name = "",
                    bool isExact = false) {
    if (Value *V = Folder.FoldExactBinOp(Instruction::LShr, LHS, RHS, isExact))
      return V;
    if (!isExact)
      return Insert(BinaryOperator::CreateLShr(LHS, RHS), Name);
    return Insert(BinaryOperator::CreateExactLShr(LHS, RHS), Name);
  }

  /// Overload taking the shift amount as an APInt.
  Value *CreateLShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
  }

  /// Overload taking the shift amount as a plain integer.
  Value *CreateLShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateLShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
  }
1361 | ||||
  /// Create an arithmetic right shift, constant-folding when possible; emits
  /// the exact form when \p isExact is set.
  Value *CreateAShr(Value *LHS, Value *RHS, const Twine &Name = "",
                    bool isExact = false) {
    if (Value *V = Folder.FoldExactBinOp(Instruction::AShr, LHS, RHS, isExact))
      return V;
    if (!isExact)
      return Insert(BinaryOperator::CreateAShr(LHS, RHS), Name);
    return Insert(BinaryOperator::CreateExactAShr(LHS, RHS), Name);
  }

  /// Overload taking the shift amount as an APInt.
  Value *CreateAShr(Value *LHS, const APInt &RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
  }

  /// Overload taking the shift amount as a plain integer.
  Value *CreateAShr(Value *LHS, uint64_t RHS, const Twine &Name = "",
                    bool isExact = false) {
    return CreateAShr(LHS, ConstantInt::get(LHS->getType(), RHS), Name,isExact);
  }
1380 | ||||
  /// Create a bitwise and, constant-folding when possible.
  Value *CreateAnd(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (auto *V = Folder.FoldBinOp(Instruction::And, LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateAnd(LHS, RHS), Name);
  }

  /// Overload taking the right operand as an APInt constant.
  Value *CreateAnd(Value *LHS, const APInt &RHS, const Twine &Name = "") {
    return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  /// Overload taking the right operand as a plain integer constant.
  Value *CreateAnd(Value *LHS, uint64_t RHS, const Twine &Name = "") {
    return CreateAnd(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  /// Left-fold a non-empty list of operands with CreateAnd.
  Value *CreateAnd(ArrayRef<Value*> Ops) {
    assert(!Ops.empty());
    Value *Accum = Ops[0];
    for (unsigned i = 1; i < Ops.size(); i++)
      Accum = CreateAnd(Accum, Ops[i]);
    return Accum;
  }
1402 | ||||
  /// Create a bitwise or, constant-folding when possible.
  Value *CreateOr(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (auto *V = Folder.FoldBinOp(Instruction::Or, LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateOr(LHS, RHS), Name);
  }

  /// Overload taking the right operand as an APInt constant.
  Value *CreateOr(Value *LHS, const APInt &RHS, const Twine &Name = "") {
    return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  /// Overload taking the right operand as a plain integer constant.
  Value *CreateOr(Value *LHS, uint64_t RHS, const Twine &Name = "") {
    return CreateOr(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  /// Left-fold a non-empty list of operands with CreateOr.
  Value *CreateOr(ArrayRef<Value*> Ops) {
    assert(!Ops.empty());
    Value *Accum = Ops[0];
    for (unsigned i = 1; i < Ops.size(); i++)
      Accum = CreateOr(Accum, Ops[i]);
    return Accum;
  }
1424 | ||||
  /// Create a bitwise xor, constant-folding when possible.
  Value *CreateXor(Value *LHS, Value *RHS, const Twine &Name = "") {
    if (Value *V = Folder.FoldBinOp(Instruction::Xor, LHS, RHS))
      return V;
    return Insert(BinaryOperator::CreateXor(LHS, RHS), Name);
  }

  /// Overload taking the right operand as an APInt constant.
  Value *CreateXor(Value *LHS, const APInt &RHS, const Twine &Name = "") {
    return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }

  /// Overload taking the right operand as a plain integer constant.
  Value *CreateXor(Value *LHS, uint64_t RHS, const Twine &Name = "") {
    return CreateXor(LHS, ConstantInt::get(LHS->getType(), RHS), Name);
  }
1438 | ||||
1439 | Value *CreateFAdd(Value *L, Value *R, const Twine &Name = "", | |||
1440 | MDNode *FPMD = nullptr) { | |||
1441 | if (IsFPConstrained) | |||
1442 | return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd, | |||
1443 | L, R, nullptr, Name, FPMD); | |||
1444 | ||||
1445 | if (Value *V = Folder.FoldBinOpFMF(Instruction::FAdd, L, R, FMF)) | |||
1446 | return V; | |||
1447 | Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), FPMD, FMF); | |||
1448 | return Insert(I, Name); | |||
1449 | } | |||
1450 | ||||
  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFAddFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fadd,
                                      L, R, FMFSource, Name);

    // Local FMF shadows the builder member so folding and the new
    // instruction both use FMFSource's flags.
    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *V = Folder.FoldBinOpFMF(Instruction::FAdd, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFAdd(L, R), nullptr, FMF);
    return Insert(I, Name);
  }
1465 | ||||
  /// Create a floating-point subtract using the builder's default FMF;
  /// in strict-FP mode this emits the constrained intrinsic instead.
  Value *CreateFSub(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = Folder.FoldBinOpFMF(Instruction::FSub, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), FPMD, FMF);
    return Insert(I, Name);
  }
1477 | ||||
  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFSubFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fsub,
                                      L, R, FMFSource, Name);

    // Local FMF shadows the builder member so folding and the new
    // instruction both use FMFSource's flags.
    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *V = Folder.FoldBinOpFMF(Instruction::FSub, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFSub(L, R), nullptr, FMF);
    return Insert(I, Name);
  }
1492 | ||||
  /// Create a floating-point multiply using the builder's default FMF;
  /// in strict-FP mode this emits the constrained intrinsic instead.
  Value *CreateFMul(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = Folder.FoldBinOpFMF(Instruction::FMul, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), FPMD, FMF);
    return Insert(I, Name);
  }
1504 | ||||
  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFMulFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fmul,
                                      L, R, FMFSource, Name);

    // Local FMF shadows the builder member so folding and the new
    // instruction both use FMFSource's flags.
    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *V = Folder.FoldBinOpFMF(Instruction::FMul, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFMul(L, R), nullptr, FMF);
    return Insert(I, Name);
  }
1519 | ||||
  /// Create a floating-point divide using the builder's default FMF;
  /// in strict-FP mode this emits the constrained intrinsic instead.
  Value *CreateFDiv(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = Folder.FoldBinOpFMF(Instruction::FDiv, L, R, FMF))
      return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), FPMD, FMF);
    return Insert(I, Name);
  }
1531 | ||||
1532 | /// Copy fast-math-flags from an instruction rather than using the builder's | |||
1533 | /// default FMF. | |||
1534 | Value *CreateFDivFMF(Value *L, Value *R, Instruction *FMFSource, | |||
1535 | const Twine &Name = "") { | |||
1536 | if (IsFPConstrained) | |||
1537 | return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_fdiv, | |||
1538 | L, R, FMFSource, Name); | |||
1539 | ||||
1540 | if (Value *V = Folder.FoldBinOpFMF(Instruction::FDiv, L, R, FMF)) | |||
1541 | return V; | |||
1542 | Instruction *I = setFPAttrs(BinaryOperator::CreateFDiv(L, R), nullptr, FMF); | |||
1543 | return Insert(I, Name); | |||
1544 | } | |||
1545 | ||||
  /// Create a floating-point remainder using the builder's default FMF;
  /// in strict-FP mode this emits the constrained intrinsic instead.
  Value *CreateFRem(Value *L, Value *R, const Twine &Name = "",
                    MDNode *FPMD = nullptr) {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
                                      L, R, nullptr, Name, FPMD);

    if (Value *V = Folder.FoldBinOpFMF(Instruction::FRem, L, R, FMF)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), FPMD, FMF);
    return Insert(I, Name);
  }
1556 | ||||
  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFRemFMF(Value *L, Value *R, Instruction *FMFSource,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPBinOp(Intrinsic::experimental_constrained_frem,
                                      L, R, FMFSource, Name);

    // Local FMF shadows the builder member so folding and the new
    // instruction both use FMFSource's flags.
    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *V = Folder.FoldBinOpFMF(Instruction::FRem, L, R, FMF)) return V;
    Instruction *I = setFPAttrs(BinaryOperator::CreateFRem(L, R), nullptr, FMF);
    return Insert(I, Name);
  }
1570 | ||||
1571 | Value *CreateBinOp(Instruction::BinaryOps Opc, | |||
1572 | Value *LHS, Value *RHS, const Twine &Name = "", | |||
1573 | MDNode *FPMathTag = nullptr) { | |||
1574 | if (Value *V = Folder.FoldBinOp(Opc, LHS, RHS)) return V; | |||
1575 | Instruction *BinOp = BinaryOperator::Create(Opc, LHS, RHS); | |||
1576 | if (isa<FPMathOperator>(BinOp)) | |||
1577 | setFPAttrs(BinOp, FPMathTag, FMF); | |||
1578 | return Insert(BinOp, Name); | |||
1579 | } | |||
1580 | ||||
  /// Short-circuiting logical AND, expressed as select(Cond1, Cond2, false)
  /// so Cond2's poison does not propagate when Cond1 is false.
  Value *CreateLogicalAnd(Value *Cond1, Value *Cond2, const Twine &Name = "") {
    assert(Cond2->getType()->isIntOrIntVectorTy(1));
    return CreateSelect(Cond1, Cond2,
                        ConstantInt::getNullValue(Cond2->getType()), Name);
  }

  /// Short-circuiting logical OR, expressed as select(Cond1, true, Cond2)
  /// so Cond2's poison does not propagate when Cond1 is true.
  Value *CreateLogicalOr(Value *Cond1, Value *Cond2, const Twine &Name = "") {
    assert(Cond2->getType()->isIntOrIntVectorTy(1));
    return CreateSelect(Cond1, ConstantInt::getAllOnesValue(Cond2->getType()),
                        Cond2, Name);
  }
1592 | ||||
1593 | // NOTE: this is sequential, non-commutative, ordered reduction! | |||
1594 | Value *CreateLogicalOr(ArrayRef<Value *> Ops) { | |||
1595 | assert(!Ops.empty())(static_cast <bool> (!Ops.empty()) ? void (0) : __assert_fail ("!Ops.empty()", "llvm/include/llvm/IR/IRBuilder.h", 1595, __extension__ __PRETTY_FUNCTION__)); | |||
1596 | Value *Accum = Ops[0]; | |||
1597 | for (unsigned i = 1; i < Ops.size(); i++) | |||
1598 | Accum = CreateLogicalOr(Accum, Ops[i]); | |||
1599 | return Accum; | |||
1600 | } | |||
1601 | ||||
  /// Create a constrained-FP binary intrinsic call (defined out of line).
  CallInst *CreateConstrainedFPBinOp(
      Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource = nullptr,
      const Twine &Name = "", MDNode *FPMathTag = nullptr,
      Optional<RoundingMode> Rounding = None,
      Optional<fp::ExceptionBehavior> Except = None);

  /// Integer negation, emitted as (0 - V); optionally tags nuw/nsw.
  Value *CreateNeg(Value *V, const Twine &Name = "", bool HasNUW = false,
                   bool HasNSW = false) {
    return CreateSub(Constant::getNullValue(V->getType()), V, Name, HasNUW,
                     HasNSW);
  }

  /// Negation with the no-signed-wrap flag set.
  Value *CreateNSWNeg(Value *V, const Twine &Name = "") {
    return CreateNeg(V, Name, false, true);
  }

  /// Negation with the no-unsigned-wrap flag set.
  Value *CreateNUWNeg(Value *V, const Twine &Name = "") {
    return CreateNeg(V, Name, true, false);
  }
1621 | ||||
  /// Floating-point negation using the builder's default FMF; the folder
  /// is consulted first so constant operands fold away.
  Value *CreateFNeg(Value *V, const Twine &Name = "",
                    MDNode *FPMathTag = nullptr) {
    if (Value *Res = Folder.FoldUnOpFMF(Instruction::FNeg, V, FMF))
      return Res;
    return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), FPMathTag, FMF),
                  Name);
  }

  /// Copy fast-math-flags from an instruction rather than using the builder's
  /// default FMF.
  Value *CreateFNegFMF(Value *V, Instruction *FMFSource,
                       const Twine &Name = "") {
    // Local FMF shadows the builder member so folding and the new
    // instruction both use FMFSource's flags.
    FastMathFlags FMF = FMFSource->getFastMathFlags();
    if (Value *Res = Folder.FoldUnOpFMF(Instruction::FNeg, V, FMF))
      return Res;
    return Insert(setFPAttrs(UnaryOperator::CreateFNeg(V), nullptr, FMF),
                  Name);
  }

  /// Bitwise NOT, emitted as xor(V, all-ones).
  Value *CreateNot(Value *V, const Twine &Name = "") {
    return CreateXor(V, Constant::getAllOnesValue(V->getType()), Name);
  }
1644 | ||||
  /// Create an arbitrary unary operator. FP opcodes additionally pick up
  /// the builder's fast-math flags and the optional fpmath metadata.
  Value *CreateUnOp(Instruction::UnaryOps Opc,
                    Value *V, const Twine &Name = "",
                    MDNode *FPMathTag = nullptr) {
    if (Value *Res = Folder.FoldUnOpFMF(Opc, V, FMF))
      return Res;
    Instruction *UnOp = UnaryOperator::Create(Opc, V);
    if (isa<FPMathOperator>(UnOp))
      setFPAttrs(UnOp, FPMathTag, FMF);
    return Insert(UnOp, Name);
  }

  /// Create either a UnaryOperator or BinaryOperator depending on \p Opc.
  /// Correct number of operands must be passed accordingly.
  Value *CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                      const Twine &Name = "", MDNode *FPMathTag = nullptr);
1660 | ||||
1661 | //===--------------------------------------------------------------------===// | |||
1662 | // Instruction creation methods: Memory Instructions | |||
1663 | //===--------------------------------------------------------------------===// | |||
1664 | ||||
  /// Create an alloca in the given address space, aligned to the data
  /// layout's preferred alignment for Ty.
  /// NOTE(review): dereferences BB — the builder must have a valid
  /// insertion point; a null BB here is what the static analyzer flags.
  AllocaInst *CreateAlloca(Type *Ty, unsigned AddrSpace,
                           Value *ArraySize = nullptr, const Twine &Name = "") {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    Align AllocaAlign = DL.getPrefTypeAlign(Ty);
    return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
  }

  /// As above, but uses the data layout's default alloca address space.
  AllocaInst *CreateAlloca(Type *Ty, Value *ArraySize = nullptr,
                           const Twine &Name = "") {
    const DataLayout &DL = BB->getModule()->getDataLayout();
    Align AllocaAlign = DL.getPrefTypeAlign(Ty);
    unsigned AddrSpace = DL.getAllocaAddrSpace();
    return Insert(new AllocaInst(Ty, AddrSpace, ArraySize, AllocaAlign), Name);
  }
1679 | ||||
  /// Provided to resolve 'CreateLoad(Ty, Ptr, "...")' correctly, instead of
  /// converting the string to 'bool' for the isVolatile parameter.
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, const char *Name) {
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
  }

  /// Load with no explicit alignment (ABI alignment is chosen downstream).
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, const Twine &Name = "") {
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), Name);
  }

  /// Load with explicit volatility and no explicit alignment.
  LoadInst *CreateLoad(Type *Ty, Value *Ptr, bool isVolatile,
                       const Twine &Name = "") {
    return CreateAlignedLoad(Ty, Ptr, MaybeAlign(), isVolatile, Name);
  }

  /// Store with no explicit alignment (ABI alignment is chosen downstream).
  StoreInst *CreateStore(Value *Val, Value *Ptr, bool isVolatile = false) {
    return CreateAlignedStore(Val, Ptr, MaybeAlign(), isVolatile);
  }
1698 | ||||
  /// const char* overload, mirroring the CreateLoad disambiguation above.
  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
                              const char *Name) {
    return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
  }

  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
                              const Twine &Name = "") {
    return CreateAlignedLoad(Ty, Ptr, Align, /*isVolatile*/false, Name);
  }

  /// Create a load; when Align is empty, falls back to the data layout's
  /// ABI alignment for Ty (requires a valid insertion point for BB).
  LoadInst *CreateAlignedLoad(Type *Ty, Value *Ptr, MaybeAlign Align,
                              bool isVolatile, const Twine &Name = "") {
    if (!Align) {
      const DataLayout &DL = BB->getModule()->getDataLayout();
      Align = DL.getABITypeAlign(Ty);
    }
    return Insert(new LoadInst(Ty, Ptr, Twine(), isVolatile, *Align), Name);
  }

  /// Create a store; when Align is empty, falls back to the data layout's
  /// ABI alignment for the stored value's type.
  StoreInst *CreateAlignedStore(Value *Val, Value *Ptr, MaybeAlign Align,
                                bool isVolatile = false) {
    if (!Align) {
      const DataLayout &DL = BB->getModule()->getDataLayout();
      Align = DL.getABITypeAlign(Val->getType());
    }
    return Insert(new StoreInst(Val, Ptr, isVolatile, *Align));
  }
  /// Create a fence instruction with the given ordering and sync scope.
  FenceInst *CreateFence(AtomicOrdering Ordering,
                         SyncScope::ID SSID = SyncScope::System,
                         const Twine &Name = "") {
    return Insert(new FenceInst(Context, Ordering, SSID), Name);
  }

  /// Create a cmpxchg; when Align is empty, defaults to the type store
  /// size of the new value (natural alignment for atomics).
  AtomicCmpXchgInst *
  CreateAtomicCmpXchg(Value *Ptr, Value *Cmp, Value *New, MaybeAlign Align,
                      AtomicOrdering SuccessOrdering,
                      AtomicOrdering FailureOrdering,
                      SyncScope::ID SSID = SyncScope::System) {
    if (!Align) {
      const DataLayout &DL = BB->getModule()->getDataLayout();
      Align = llvm::Align(DL.getTypeStoreSize(New->getType()));
    }

    return Insert(new AtomicCmpXchgInst(Ptr, Cmp, New, *Align, SuccessOrdering,
                                        FailureOrdering, SSID));
  }
1745 | ||||
  /// Create an atomic read-modify-write; when Align is empty, defaults to
  /// the type store size of Val (natural alignment for atomics).
  AtomicRMWInst *CreateAtomicRMW(AtomicRMWInst::BinOp Op, Value *Ptr,
                                 Value *Val, MaybeAlign Align,
                                 AtomicOrdering Ordering,
                                 SyncScope::ID SSID = SyncScope::System) {
    if (!Align) {
      const DataLayout &DL = BB->getModule()->getDataLayout();
      Align = llvm::Align(DL.getTypeStoreSize(Val->getType()));
    }

    return Insert(new AtomicRMWInst(Op, Ptr, Val, *Align, Ordering, SSID));
  }
1757 | ||||
1758 | Value *CreateGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList, | |||
1759 | const Twine &Name = "", bool IsInBounds = false) { | |||
1760 | if (auto *V = Folder.FoldGEP(Ty, Ptr, IdxList, IsInBounds)) | |||
1761 | return V; | |||
1762 | return Insert(IsInBounds | |||
1763 | ? GetElementPtrInst::CreateInBounds(Ty, Ptr, IdxList) | |||
1764 | : GetElementPtrInst::Create(Ty, Ptr, IdxList), | |||
1765 | Name); | |||
1766 | } | |||
1767 | ||||
  /// Convenience wrapper: getelementptr with the inbounds flag set.
  Value *CreateInBoundsGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
                           const Twine &Name = "") {
    return CreateGEP(Ty, Ptr, IdxList, Name, /* IsInBounds */ true);
  }
1772 | ||||
  /// GEP with a single constant i32 index.
  Value *CreateConstGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
                            const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/false))
      return V;

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
  }

  /// Inbounds GEP with a single constant i32 index.
  Value *CreateConstInBoundsGEP1_32(Type *Ty, Value *Ptr, unsigned Idx0,
                                    const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt32Ty(Context), Idx0);

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/true))
      return V;

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
  }
1792 | ||||
  /// GEP with two constant i32 indices.
  Value *CreateConstGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0, unsigned Idx1,
                            const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt32Ty(Context), Idx0),
      ConstantInt::get(Type::getInt32Ty(Context), Idx1)
    };

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/false))
      return V;

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
  }

  /// Inbounds GEP with two constant i32 indices.
  Value *CreateConstInBoundsGEP2_32(Type *Ty, Value *Ptr, unsigned Idx0,
                                    unsigned Idx1, const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt32Ty(Context), Idx0),
      ConstantInt::get(Type::getInt32Ty(Context), Idx1)
    };

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/true))
      return V;

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
  }
1818 | ||||
  /// GEP with a single constant i64 index.
  Value *CreateConstGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
                            const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/false))
      return V;

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idx), Name);
  }

  /// Inbounds GEP with a single constant i64 index.
  Value *CreateConstInBoundsGEP1_64(Type *Ty, Value *Ptr, uint64_t Idx0,
                                    const Twine &Name = "") {
    Value *Idx = ConstantInt::get(Type::getInt64Ty(Context), Idx0);

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idx, /*IsInBounds=*/true))
      return V;

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idx), Name);
  }
1838 | ||||
  /// GEP with two constant i64 indices.
  Value *CreateConstGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0, uint64_t Idx1,
                            const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt64Ty(Context), Idx0),
      ConstantInt::get(Type::getInt64Ty(Context), Idx1)
    };

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/false))
      return V;

    return Insert(GetElementPtrInst::Create(Ty, Ptr, Idxs), Name);
  }

  /// Inbounds GEP with two constant i64 indices.
  Value *CreateConstInBoundsGEP2_64(Type *Ty, Value *Ptr, uint64_t Idx0,
                                    uint64_t Idx1, const Twine &Name = "") {
    Value *Idxs[] = {
      ConstantInt::get(Type::getInt64Ty(Context), Idx0),
      ConstantInt::get(Type::getInt64Ty(Context), Idx1)
    };

    if (auto *V = Folder.FoldGEP(Ty, Ptr, Idxs, /*IsInBounds=*/true))
      return V;

    return Insert(GetElementPtrInst::CreateInBounds(Ty, Ptr, Idxs), Name);
  }
1864 | ||||
  /// Address of struct field Idx: inbounds gep Ty, Ptr, 0, Idx.
  Value *CreateStructGEP(Type *Ty, Value *Ptr, unsigned Idx,
                         const Twine &Name = "") {
    return CreateConstInBoundsGEP2_32(Ty, Ptr, 0, Idx, Name);
  }
1869 | ||||
  /// Same as CreateGlobalString, but return a pointer with "i8*" type
  /// instead of a pointer to array of i8.
  ///
  /// If no module is given via \p M, it is take from the insertion point basic
  /// block.
  Constant *CreateGlobalStringPtr(StringRef Str, const Twine &Name = "",
                                  unsigned AddressSpace = 0,
                                  Module *M = nullptr) {
    GlobalVariable *GV = CreateGlobalString(Str, Name, AddressSpace, M);
    Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
    // gep [N x i8]* GV, 0, 0 decays the array to an element pointer.
    Constant *Indices[] = {Zero, Zero};
    return ConstantExpr::getInBoundsGetElementPtr(GV->getValueType(), GV,
                                                  Indices);
  }
1884 | ||||
1885 | //===--------------------------------------------------------------------===// | |||
1886 | // Instruction creation methods: Cast/Conversion Operators | |||
1887 | //===--------------------------------------------------------------------===// | |||
1888 | ||||
  /// Integer truncate.
  Value *CreateTrunc(Value *V, Type *DestTy, const Twine &Name = "") {
    return CreateCast(Instruction::Trunc, V, DestTy, Name);
  }

  /// Zero extend.
  Value *CreateZExt(Value *V, Type *DestTy, const Twine &Name = "") {
    return CreateCast(Instruction::ZExt, V, DestTy, Name);
  }

  /// Sign extend.
  Value *CreateSExt(Value *V, Type *DestTy, const Twine &Name = "") {
    return CreateCast(Instruction::SExt, V, DestTy, Name);
  }
1900 | ||||
  /// Create a ZExt or Trunc from the integer value V to DestTy. Return
  /// the value untouched if the type of V is already DestTy.
  Value *CreateZExtOrTrunc(Value *V, Type *DestTy,
                           const Twine &Name = "") {
    assert(V->getType()->isIntOrIntVectorTy() &&
           DestTy->isIntOrIntVectorTy() &&
           "Can only zero extend/truncate integers!");
    // Direction is decided by scalar bit width; equal widths fall through.
    Type *VTy = V->getType();
    if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
      return CreateZExt(V, DestTy, Name);
    if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
      return CreateTrunc(V, DestTy, Name);
    return V;
  }

  /// Create a SExt or Trunc from the integer value V to DestTy. Return
  /// the value untouched if the type of V is already DestTy.
  Value *CreateSExtOrTrunc(Value *V, Type *DestTy,
                           const Twine &Name = "") {
    assert(V->getType()->isIntOrIntVectorTy() &&
           DestTy->isIntOrIntVectorTy() &&
           "Can only sign extend/truncate integers!");
    Type *VTy = V->getType();
    if (VTy->getScalarSizeInBits() < DestTy->getScalarSizeInBits())
      return CreateSExt(V, DestTy, Name);
    if (VTy->getScalarSizeInBits() > DestTy->getScalarSizeInBits())
      return CreateTrunc(V, DestTy, Name);
    return V;
  }
1930 | ||||
  /// FP -> unsigned int; emits the constrained intrinsic in strict-FP mode.
  Value *CreateFPToUI(Value *V, Type *DestTy, const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptoui,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::FPToUI, V, DestTy, Name);
  }

  /// FP -> signed int; emits the constrained intrinsic in strict-FP mode.
  Value *CreateFPToSI(Value *V, Type *DestTy, const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fptosi,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::FPToSI, V, DestTy, Name);
  }

  /// Unsigned int -> FP; emits the constrained intrinsic in strict-FP mode.
  Value *CreateUIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_uitofp,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::UIToFP, V, DestTy, Name);
  }

  /// Signed int -> FP; emits the constrained intrinsic in strict-FP mode.
  Value *CreateSIToFP(Value *V, Type *DestTy, const Twine &Name = ""){
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_sitofp,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::SIToFP, V, DestTy, Name);
  }

  /// FP truncate; emits the constrained intrinsic in strict-FP mode.
  Value *CreateFPTrunc(Value *V, Type *DestTy,
                       const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(
          Intrinsic::experimental_constrained_fptrunc, V, DestTy, nullptr,
          Name);
    return CreateCast(Instruction::FPTrunc, V, DestTy, Name);
  }

  /// FP extend; emits the constrained intrinsic in strict-FP mode.
  Value *CreateFPExt(Value *V, Type *DestTy, const Twine &Name = "") {
    if (IsFPConstrained)
      return CreateConstrainedFPCast(Intrinsic::experimental_constrained_fpext,
                                     V, DestTy, nullptr, Name);
    return CreateCast(Instruction::FPExt, V, DestTy, Name);
  }
1974 | ||||
  /// Pointer -> integer cast.
  Value *CreatePtrToInt(Value *V, Type *DestTy,
                        const Twine &Name = "") {
    return CreateCast(Instruction::PtrToInt, V, DestTy, Name);
  }

  /// Integer -> pointer cast.
  Value *CreateIntToPtr(Value *V, Type *DestTy,
                        const Twine &Name = "") {
    return CreateCast(Instruction::IntToPtr, V, DestTy, Name);
  }

  /// Bit-preserving cast between same-sized types.
  Value *CreateBitCast(Value *V, Type *DestTy,
                       const Twine &Name = "") {
    return CreateCast(Instruction::BitCast, V, DestTy, Name);
  }

  /// Pointer cast between address spaces.
  Value *CreateAddrSpaceCast(Value *V, Type *DestTy,
                             const Twine &Name = "") {
    return CreateCast(Instruction::AddrSpaceCast, V, DestTy, Name);
  }
1994 | ||||
  /// ZExt or BitCast, whichever fits; returns V unchanged if already DestTy.
  Value *CreateZExtOrBitCast(Value *V, Type *DestTy,
                             const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    // Constants fold through the folder instead of emitting a cast.
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateZExtOrBitCast(VC, DestTy), Name);
    return Insert(CastInst::CreateZExtOrBitCast(V, DestTy), Name);
  }

  /// SExt or BitCast, whichever fits; returns V unchanged if already DestTy.
  Value *CreateSExtOrBitCast(Value *V, Type *DestTy,
                             const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateSExtOrBitCast(VC, DestTy), Name);
    return Insert(CastInst::CreateSExtOrBitCast(V, DestTy), Name);
  }

  /// Trunc or BitCast, whichever fits; returns V unchanged if already DestTy.
  Value *CreateTruncOrBitCast(Value *V, Type *DestTy,
                              const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateTruncOrBitCast(VC, DestTy), Name);
    return Insert(CastInst::CreateTruncOrBitCast(V, DestTy), Name);
  }

  /// Generic cast with an explicit opcode; no-op when types already match.
  Value *CreateCast(Instruction::CastOps Op, Value *V, Type *DestTy,
                    const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateCast(Op, VC, DestTy), Name);
    return Insert(CastInst::Create(Op, V, DestTy), Name);
  }
2030 | ||||
  /// Pointer cast (bitcast / ptrtoint / inttoptr as appropriate); no-op
  /// when types already match.
  Value *CreatePointerCast(Value *V, Type *DestTy,
                           const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreatePointerCast(VC, DestTy), Name);
    return Insert(CastInst::CreatePointerCast(V, DestTy), Name);
  }

  /// BitCast or AddrSpaceCast between pointer types; no-op when types match.
  Value *CreatePointerBitCastOrAddrSpaceCast(Value *V, Type *DestTy,
                                             const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;

    if (auto *VC = dyn_cast<Constant>(V)) {
      return Insert(Folder.CreatePointerBitCastOrAddrSpaceCast(VC, DestTy),
                    Name);
    }

    return Insert(CastInst::CreatePointerBitCastOrAddrSpaceCast(V, DestTy),
                  Name);
  }

  /// Integer cast honoring signedness (sext/zext/trunc); no-op when types
  /// already match.
  Value *CreateIntCast(Value *V, Type *DestTy, bool isSigned,
                       const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateIntCast(VC, DestTy, isSigned), Name);
    return Insert(CastInst::CreateIntegerCast(V, DestTy, isSigned), Name);
  }

  /// BitCast, with ptrtoint/inttoptr inserted when crossing the
  /// pointer/integer boundary.
  Value *CreateBitOrPointerCast(Value *V, Type *DestTy,
                                const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (V->getType()->isPtrOrPtrVectorTy() && DestTy->isIntOrIntVectorTy())
      return CreatePtrToInt(V, DestTy, Name);
    if (V->getType()->isIntOrIntVectorTy() && DestTy->isPtrOrPtrVectorTy())
      return CreateIntToPtr(V, DestTy, Name);

    return CreateBitCast(V, DestTy, Name);
  }

  /// FP cast (fpext/fptrunc as appropriate); no-op when types already match.
  Value *CreateFPCast(Value *V, Type *DestTy, const Twine &Name = "") {
    if (V->getType() == DestTy)
      return V;
    if (auto *VC = dyn_cast<Constant>(V))
      return Insert(Folder.CreateFPCast(VC, DestTy), Name);
    return Insert(CastInst::CreateFPCast(V, DestTy), Name);
  }
2082 | ||||
/// Create a call to the constrained-FP cast intrinsic \p ID converting
/// \p V to \p DestTy, with optional rounding-mode and exception-behavior
/// arguments (defaulted from the builder's state when None).
CallInst *CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy,
    Instruction *FMFSource = nullptr, const Twine &Name = "",
    MDNode *FPMathTag = nullptr,
    Optional<RoundingMode> Rounding = None,
    Optional<fp::ExceptionBehavior> Except = None);
2089 | ||||
// Provided to resolve 'CreateIntCast(Ptr, Ptr, "...")', giving a
// compile time error, instead of converting the string to bool for the
// isSigned parameter.
Value *CreateIntCast(Value *, Type *, const char *) = delete;
2094 | ||||
2095 | //===--------------------------------------------------------------------===// | |||
2096 | // Instruction creation methods: Compare Instructions | |||
2097 | //===--------------------------------------------------------------------===// | |||
2098 | ||||
/// Emit an integer "equal" comparison: LHS == RHS.
Value *CreateICmpEQ(Value *LHS, Value *RHS, const Twine &Name = "") {
  return CreateICmp(ICmpInst::ICMP_EQ, LHS, RHS, Name);
}
2102 | ||||
/// Emit an integer "not equal" comparison: LHS != RHS.
Value *CreateICmpNE(Value *LHS, Value *RHS, const Twine &Name = "") {
  return CreateICmp(ICmpInst::ICMP_NE, LHS, RHS, Name);
}
2106 | ||||
/// Emit an unsigned "greater than" comparison: LHS u> RHS.
Value *CreateICmpUGT(Value *LHS, Value *RHS, const Twine &Name = "") {
  return CreateICmp(ICmpInst::ICMP_UGT, LHS, RHS, Name);
}
2110 | ||||
/// Emit an unsigned "greater or equal" comparison: LHS u>= RHS.
Value *CreateICmpUGE(Value *LHS, Value *RHS, const Twine &Name = "") {
  return CreateICmp(ICmpInst::ICMP_UGE, LHS, RHS, Name);
}
2114 | ||||
/// Emit an unsigned "less than" comparison: LHS u< RHS.
Value *CreateICmpULT(Value *LHS, Value *RHS, const Twine &Name = "") {
  return CreateICmp(ICmpInst::ICMP_ULT, LHS, RHS, Name);
}
2118 | ||||
/// Emit an unsigned "less or equal" comparison: LHS u<= RHS.
Value *CreateICmpULE(Value *LHS, Value *RHS, const Twine &Name = "") {
  return CreateICmp(ICmpInst::ICMP_ULE, LHS, RHS, Name);
}
2122 | ||||
/// Emit a signed "greater than" comparison: LHS s> RHS.
Value *CreateICmpSGT(Value *LHS, Value *RHS, const Twine &Name = "") {
  return CreateICmp(ICmpInst::ICMP_SGT, LHS, RHS, Name);
}
2126 | ||||
/// Emit a signed "greater or equal" comparison: LHS s>= RHS.
Value *CreateICmpSGE(Value *LHS, Value *RHS, const Twine &Name = "") {
  return CreateICmp(ICmpInst::ICMP_SGE, LHS, RHS, Name);
}
2130 | ||||
/// Emit a signed "less than" comparison: LHS s< RHS.
Value *CreateICmpSLT(Value *LHS, Value *RHS, const Twine &Name = "") {
  return CreateICmp(ICmpInst::ICMP_SLT, LHS, RHS, Name);
}
2134 | ||||
/// Emit a signed "less or equal" comparison: LHS s<= RHS.
Value *CreateICmpSLE(Value *LHS, Value *RHS, const Twine &Name = "") {
  return CreateICmp(ICmpInst::ICMP_SLE, LHS, RHS, Name);
}
2138 | ||||
/// Emit an ordered FP "equal" comparison (false if either operand is NaN).
Value *CreateFCmpOEQ(Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
  return CreateFCmp(FCmpInst::FCMP_OEQ, LHS, RHS, Name, FPMathTag);
}
2143 | ||||
/// Emit an ordered FP "greater than" comparison (false on NaN operands).
Value *CreateFCmpOGT(Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
  return CreateFCmp(FCmpInst::FCMP_OGT, LHS, RHS, Name, FPMathTag);
}
2148 | ||||
/// Emit an ordered FP "greater or equal" comparison (false on NaN operands).
Value *CreateFCmpOGE(Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
  return CreateFCmp(FCmpInst::FCMP_OGE, LHS, RHS, Name, FPMathTag);
}
2153 | ||||
/// Emit an ordered FP "less than" comparison (false on NaN operands).
Value *CreateFCmpOLT(Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
  return CreateFCmp(FCmpInst::FCMP_OLT, LHS, RHS, Name, FPMathTag);
}
2158 | ||||
/// Emit an ordered FP "less or equal" comparison (false on NaN operands).
Value *CreateFCmpOLE(Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
  return CreateFCmp(FCmpInst::FCMP_OLE, LHS, RHS, Name, FPMathTag);
}
2163 | ||||
/// Emit an ordered FP "not equal" comparison (false on NaN operands).
Value *CreateFCmpONE(Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
  return CreateFCmp(FCmpInst::FCMP_ONE, LHS, RHS, Name, FPMathTag);
}
2168 | ||||
/// Emit an "ordered" FP comparison: true iff neither operand is NaN.
Value *CreateFCmpORD(Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
  return CreateFCmp(FCmpInst::FCMP_ORD, LHS, RHS, Name, FPMathTag);
}
2173 | ||||
/// Emit an "unordered" FP comparison: true iff either operand is NaN.
Value *CreateFCmpUNO(Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
  return CreateFCmp(FCmpInst::FCMP_UNO, LHS, RHS, Name, FPMathTag);
}
2178 | ||||
/// Emit an unordered FP "equal" comparison (true if either operand is NaN).
Value *CreateFCmpUEQ(Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
  return CreateFCmp(FCmpInst::FCMP_UEQ, LHS, RHS, Name, FPMathTag);
}
2183 | ||||
/// Emit an unordered FP "greater than" comparison (true on NaN operands).
Value *CreateFCmpUGT(Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
  return CreateFCmp(FCmpInst::FCMP_UGT, LHS, RHS, Name, FPMathTag);
}
2188 | ||||
/// Emit an unordered FP "greater or equal" comparison (true on NaN operands).
Value *CreateFCmpUGE(Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
  return CreateFCmp(FCmpInst::FCMP_UGE, LHS, RHS, Name, FPMathTag);
}
2193 | ||||
/// Emit an unordered FP "less than" comparison (true on NaN operands).
Value *CreateFCmpULT(Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
  return CreateFCmp(FCmpInst::FCMP_ULT, LHS, RHS, Name, FPMathTag);
}
2198 | ||||
/// Emit an unordered FP "less or equal" comparison (true on NaN operands).
Value *CreateFCmpULE(Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
  return CreateFCmp(FCmpInst::FCMP_ULE, LHS, RHS, Name, FPMathTag);
}
2203 | ||||
/// Emit an unordered FP "not equal" comparison (true on NaN operands).
Value *CreateFCmpUNE(Value *LHS, Value *RHS, const Twine &Name = "",
                     MDNode *FPMathTag = nullptr) {
  return CreateFCmp(FCmpInst::FCMP_UNE, LHS, RHS, Name, FPMathTag);
}
2208 | ||||
/// Create an integer comparison with predicate \p P. The folder is tried
/// first so constant operands produce a folded value instead of an
/// instruction.
Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
                  const Twine &Name = "") {
  if (auto *V = Folder.FoldICmp(P, LHS, RHS))
    return V;
  return Insert(new ICmpInst(P, LHS, RHS), Name);
}
2215 | ||||
// Create a quiet floating-point comparison (i.e. one that raises an FP
// exception only in the case where an input is a signaling NaN).
// Note that this differs from CreateFCmpS only if IsFPConstrained is true.
Value *CreateFCmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
                  const Twine &Name = "", MDNode *FPMathTag = nullptr) {
  // IsSignaling = false selects the quiet form in the shared helper.
  return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, false);
}
2223 | ||||
2224 | Value *CreateCmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS, | |||
2225 | const Twine &Name = "", MDNode *FPMathTag = nullptr) { | |||
2226 | return CmpInst::isFPPredicate(Pred) | |||
2227 | ? CreateFCmp(Pred, LHS, RHS, Name, FPMathTag) | |||
2228 | : CreateICmp(Pred, LHS, RHS, Name); | |||
2229 | } | |||
2230 | ||||
// Create a signaling floating-point comparison (i.e. one that raises an FP
// exception whenever an input is any NaN, signaling or quiet).
// Note that this differs from CreateFCmp only if IsFPConstrained is true.
Value *CreateFCmpS(CmpInst::Predicate P, Value *LHS, Value *RHS,
                   const Twine &Name = "", MDNode *FPMathTag = nullptr) {
  // IsSignaling = true selects the signaling form in the shared helper.
  return CreateFCmpHelper(P, LHS, RHS, Name, FPMathTag, true);
}
2238 | ||||
private:
  // Helper routine to create either a signaling or a quiet FP comparison;
  // shared implementation behind CreateFCmp and CreateFCmpS.
  Value *CreateFCmpHelper(CmpInst::Predicate P, Value *LHS, Value *RHS,
                          const Twine &Name, MDNode *FPMathTag,
                          bool IsSignaling);
2244 | ||||
public:
  /// Create a call to the constrained-FP compare intrinsic \p ID with
  /// predicate \p P, with an optional exception-behavior override.
  CallInst *CreateConstrainedFPCmp(
      Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
      const Twine &Name = "", Optional<fp::ExceptionBehavior> Except = None);
2249 | ||||
2250 | //===--------------------------------------------------------------------===// | |||
2251 | // Instruction creation methods: Other Instructions | |||
2252 | //===--------------------------------------------------------------------===// | |||
2253 | ||||
/// Create a PHI node of type \p Ty with room reserved for
/// \p NumReservedValues incoming edges. FP-typed PHIs pick up the
/// builder's current fast-math flags.
PHINode *CreatePHI(Type *Ty, unsigned NumReservedValues,
                   const Twine &Name = "") {
  PHINode *Phi = PHINode::Create(Ty, NumReservedValues);
  if (isa<FPMathOperator>(Phi))
    setFPAttrs(Phi, nullptr /* MDNode* */, FMF);
  return Insert(Phi, Name);
}
2261 | ||||
private:
  // Shared implementation detail for building intrinsic/function calls
  // with optional FMF source and operand bundles.
  CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                             const Twine &Name = "",
                             Instruction *FMFSource = nullptr,
                             ArrayRef<OperandBundleDef> OpBundles = {});
2267 | ||||
public:
  /// Create a call to \p Callee of type \p FTy with the builder's default
  /// operand bundles. In strict-FP mode the constrained-FP call attribute
  /// is added; FP-returning calls pick up \p FPMathTag and current FMF.
  CallInst *CreateCall(FunctionType *FTy, Value *Callee,
                       ArrayRef<Value *> Args = None, const Twine &Name = "",
                       MDNode *FPMathTag = nullptr) {
    CallInst *CI = CallInst::Create(FTy, Callee, Args, DefaultOperandBundles);
    if (IsFPConstrained)
      setConstrainedFPCallAttr(CI);
    if (isa<FPMathOperator>(CI))
      setFPAttrs(CI, FPMathTag, FMF);
    return Insert(CI, Name);
  }
2279 | ||||
2280 | CallInst *CreateCall(FunctionType *FTy, Value *Callee, ArrayRef<Value *> Args, | |||
2281 | ArrayRef<OperandBundleDef> OpBundles, | |||
2282 | const Twine &Name = "", MDNode *FPMathTag = nullptr) { | |||
2283 | CallInst *CI = CallInst::Create(FTy, Callee, Args, OpBundles); | |||
2284 | if (IsFPConstrained) | |||
2285 | setConstrainedFPCallAttr(CI); | |||
2286 | if (isa<FPMathOperator>(CI)) | |||
2287 | setFPAttrs(CI, FPMathTag, FMF); | |||
2288 | return Insert(CI, Name); | |||
2289 | } | |||
2290 | ||||
/// Convenience overload: unpack a FunctionCallee into its type and callee
/// value and forward to the primary overload.
CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args = None,
                     const Twine &Name = "", MDNode *FPMathTag = nullptr) {
  return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args, Name,
                    FPMathTag);
}
2296 | ||||
/// Convenience overload: FunctionCallee plus explicit operand bundles,
/// forwarded to the bundle-taking primary overload.
CallInst *CreateCall(FunctionCallee Callee, ArrayRef<Value *> Args,
                     ArrayRef<OperandBundleDef> OpBundles,
                     const Twine &Name = "", MDNode *FPMathTag = nullptr) {
  return CreateCall(Callee.getFunctionType(), Callee.getCallee(), Args,
                    OpBundles, Name, FPMathTag);
}
2303 | ||||
/// Create a call to a constrained-FP intrinsic, with optional
/// rounding-mode and exception-behavior overrides (defaulted from the
/// builder's state when None).
CallInst *CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name = "",
    Optional<RoundingMode> Rounding = None,
    Optional<fp::ExceptionBehavior> Except = None);

/// Create a select instruction choosing \p True or \p False based on
/// condition \p C; \p MDFrom optionally supplies metadata to copy.
Value *CreateSelect(Value *C, Value *True, Value *False,
                    const Twine &Name = "", Instruction *MDFrom = nullptr);
2311 | ||||
/// Create a va_arg instruction reading a value of type \p Ty from the
/// va_list \p List.
VAArgInst *CreateVAArg(Value *List, Type *Ty, const Twine &Name = "") {
  return Insert(new VAArgInst(List, Ty), Name);
}
2315 | ||||
/// Create an extractelement of \p Vec at index \p Idx; constant operands
/// are folded through the folder first.
Value *CreateExtractElement(Value *Vec, Value *Idx,
                            const Twine &Name = "") {
  if (Value *V = Folder.FoldExtractElement(Vec, Idx))
    return V;
  return Insert(ExtractElementInst::Create(Vec, Idx), Name);
}
2322 | ||||
/// Convenience overload taking a literal index, wrapped as an i64
/// constant.
Value *CreateExtractElement(Value *Vec, uint64_t Idx,
                            const Twine &Name = "") {
  return CreateExtractElement(Vec, getInt64(Idx), Name);
}
2327 | ||||
/// Create an insertelement into a fresh poison vector of type \p VecTy.
Value *CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx,
                           const Twine &Name = "") {
  return CreateInsertElement(PoisonValue::get(VecTy), NewElt, Idx, Name);
}
2332 | ||||
/// Create an insertelement into a fresh poison vector of type \p VecTy,
/// at a literal index.
Value *CreateInsertElement(Type *VecTy, Value *NewElt, uint64_t Idx,
                           const Twine &Name = "") {
  return CreateInsertElement(PoisonValue::get(VecTy), NewElt, Idx, Name);
}
2337 | ||||
/// Create an insertelement of \p NewElt into \p Vec at index \p Idx;
/// constant operands are folded through the folder first.
Value *CreateInsertElement(Value *Vec, Value *NewElt, Value *Idx,
                           const Twine &Name = "") {
  if (Value *V = Folder.FoldInsertElement(Vec, NewElt, Idx))
    return V;
  return Insert(InsertElementInst::Create(Vec, NewElt, Idx), Name);
}
2344 | ||||
/// Convenience overload taking a literal index, wrapped as an i64
/// constant.
Value *CreateInsertElement(Value *Vec, Value *NewElt, uint64_t Idx,
                           const Twine &Name = "") {
  return CreateInsertElement(Vec, NewElt, getInt64(Idx), Name);
}
2349 | ||||
/// Create a shufflevector from a constant mask value. \p Mask must be a
/// Constant (the cast<Constant> asserts otherwise); it is decoded into an
/// integer mask and forwarded to the ArrayRef overload.
Value *CreateShuffleVector(Value *V1, Value *V2, Value *Mask,
                           const Twine &Name = "") {
  SmallVector<int, 16> IntMask;
  ShuffleVectorInst::getShuffleMask(cast<Constant>(Mask), IntMask);
  return CreateShuffleVector(V1, V2, IntMask, Name);
}
2356 | ||||
/// See class ShuffleVectorInst for a description of the mask representation.
/// Constant operands are folded through the folder first.
Value *CreateShuffleVector(Value *V1, Value *V2, ArrayRef<int> Mask,
                           const Twine &Name = "") {
  if (Value *V = Folder.FoldShuffleVector(V1, V2, Mask))
    return V;
  return Insert(new ShuffleVectorInst(V1, V2, Mask), Name);
}
2364 | ||||
/// Create a unary shuffle. The second vector operand of the IR instruction
/// is poison.
Value *CreateShuffleVector(Value *V, ArrayRef<int> Mask,
                           const Twine &Name = "") {
  return CreateShuffleVector(V, PoisonValue::get(V->getType()), Mask, Name);
}
2371 | ||||
/// Create an extractvalue from aggregate \p Agg at index path \p Idxs;
/// constant operands are folded through the folder first.
Value *CreateExtractValue(Value *Agg, ArrayRef<unsigned> Idxs,
                          const Twine &Name = "") {
  if (auto *V = Folder.FoldExtractValue(Agg, Idxs))
    return V;
  return Insert(ExtractValueInst::Create(Agg, Idxs), Name);
}
2378 | ||||
/// Create an insertvalue of \p Val into aggregate \p Agg at index path
/// \p Idxs; constant operands are folded through the folder first.
Value *CreateInsertValue(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
                         const Twine &Name = "") {
  if (auto *V = Folder.FoldInsertValue(Agg, Val, Idxs))
    return V;
  return Insert(InsertValueInst::Create(Agg, Val, Idxs), Name);
}
2385 | ||||
/// Create a landingpad of type \p Ty with space reserved for
/// \p NumClauses clauses.
LandingPadInst *CreateLandingPad(Type *Ty, unsigned NumClauses,
                                 const Twine &Name = "") {
  return Insert(LandingPadInst::Create(Ty, NumClauses), Name);
}
2390 | ||||
/// Create a freeze instruction on \p V.
Value *CreateFreeze(Value *V, const Twine &Name = "") {
  return Insert(new FreezeInst(V), Name);
}
2394 | ||||
2395 | //===--------------------------------------------------------------------===// | |||
2396 | // Utility creation methods | |||
2397 | //===--------------------------------------------------------------------===// | |||
2398 | ||||
/// Return a boolean value testing if \p Arg == 0.
Value *CreateIsNull(Value *Arg, const Twine &Name = "") {
  return CreateICmpEQ(Arg, ConstantInt::getNullValue(Arg->getType()), Name);
}
2403 | ||||
/// Return a boolean value testing if \p Arg != 0.
Value *CreateIsNotNull(Value *Arg, const Twine &Name = "") {
  return CreateICmpNE(Arg, ConstantInt::getNullValue(Arg->getType()), Name);
}
2408 | ||||
/// Return a boolean value testing if \p Arg < 0 (signed compare with 0).
Value *CreateIsNeg(Value *Arg, const Twine &Name = "") {
  return CreateICmpSLT(Arg, ConstantInt::getNullValue(Arg->getType()), Name);
}
2413 | ||||
/// Return a boolean value testing if \p Arg > -1 (signed compare with
/// all-ones, i.e. \p Arg is non-negative).
Value *CreateIsNotNeg(Value *Arg, const Twine &Name = "") {
  return CreateICmpSGT(Arg, ConstantInt::getAllOnesValue(Arg->getType()),
                       Name);
}
2419 | ||||
/// Return the i64 difference between two pointer values, dividing out
/// the size of the pointed-to objects.
///
/// This is intended to implement C-style pointer subtraction. As such, the
/// pointers must be appropriately aligned for their element types and
/// pointing into the same object.
Value *CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
                     const Twine &Name = "");

/// Create a launder.invariant.group intrinsic call. If Ptr type is
/// different from pointer to i8, it's casted to pointer to i8 in the same
/// address space before call and casted back to Ptr type after call.
Value *CreateLaunderInvariantGroup(Value *Ptr);

/// \brief Create a strip.invariant.group intrinsic call. If Ptr type is
/// different from pointer to i8, it's casted to pointer to i8 in the same
/// address space before call and casted back to Ptr type after call.
Value *CreateStripInvariantGroup(Value *Ptr);

/// Return a vector value that contains the vector V reversed
Value *CreateVectorReverse(Value *V, const Twine &Name = "");

/// Return a vector splice intrinsic if using scalable vectors, otherwise
/// return a shufflevector. If the immediate is positive, a vector is
/// extracted from concat(V1, V2), starting at Imm. If the immediate
/// is negative, we extract -Imm elements from V1 and the remaining
/// elements from V2. Imm is a signed integer in the range
/// -VL <= Imm < VL (where VL is the runtime vector length of the
/// source/result vector)
Value *CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                          const Twine &Name = "");

/// Return a vector value that contains \arg V broadcasted to \p
/// NumElts elements.
Value *CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name = "");

/// Return a vector value that contains \arg V broadcasted to \p
/// EC elements.
Value *CreateVectorSplat(ElementCount EC, Value *V, const Twine &Name = "");

/// Return a value that has been extracted from a larger integer type.
Value *CreateExtractInteger(const DataLayout &DL, Value *From,
                            IntegerType *ExtractedTy, uint64_t Offset,
                            const Twine &Name);

/// Create a preserve.array.access.index intrinsic call (used by the BPF
/// backend to keep array access info for CO-RE relocations).
Value *CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base,
                                      unsigned Dimension, unsigned LastIndex,
                                      MDNode *DbgInfo);

/// Create a preserve.union.access.index intrinsic call.
Value *CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex,
                                      MDNode *DbgInfo);

/// Create a preserve.struct.access.index intrinsic call.
Value *CreatePreserveStructAccessIndex(Type *ElTy, Value *Base,
                                       unsigned Index, unsigned FieldIndex,
                                       MDNode *DbgInfo);
2475 | ||||
private:
  /// Helper function that creates an assume intrinsic call that
  /// represents an alignment assumption on the provided pointer \p PtrValue
  /// with offset \p OffsetValue and alignment value \p AlignValue.
  CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,
                                            Value *PtrValue, Value *AlignValue,
                                            Value *OffsetValue);

public:
  /// Create an assume intrinsic call that represents an alignment
  /// assumption on the provided pointer.
  ///
  /// An optional offset can be provided, and if it is provided, the offset
  /// must be subtracted from the provided pointer to get the pointer with the
  /// specified alignment.
  CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                      unsigned Alignment,
                                      Value *OffsetValue = nullptr);

  /// Create an assume intrinsic call that represents an alignment
  /// assumption on the provided pointer.
  ///
  /// An optional offset can be provided, and if it is provided, the offset
  /// must be subtracted from the provided pointer to get the pointer with the
  /// specified alignment.
  ///
  /// This overload handles the condition where the Alignment is dependent
  /// on an existing value rather than a static value.
  CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,
                                      Value *Alignment,
                                      Value *OffsetValue = nullptr);
};
2508 | ||||
2509 | /// This provides a uniform API for creating instructions and inserting | |||
2510 | /// them into a basic block: either at the end of a BasicBlock, or at a specific | |||
2511 | /// iterator location in a block. | |||
2512 | /// | |||
2513 | /// Note that the builder does not expose the full generality of LLVM | |||
2514 | /// instructions. For access to extra instruction properties, use the mutators | |||
2515 | /// (e.g. setVolatile) on the instructions after they have been | |||
2516 | /// created. Convenience state exists to specify fast-math flags and fp-math | |||
2517 | /// tags. | |||
2518 | /// | |||
2519 | /// The first template argument specifies a class to use for creating constants. | |||
2520 | /// This defaults to creating minimally folded constants. The second template | |||
2521 | /// argument allows clients to specify custom insertion hooks that are called on | |||
2522 | /// every newly created insertion. | |||
/// Concrete IRBuilder combining IRBuilderBase with a constant folder and
/// an insertion hook. Note that each constructor passes `this->Folder` /
/// `this->Inserter` to the base before those members are constructed;
/// this appears to rely on the base holding them by reference — the
/// references become valid once the member initializers run. TODO(review):
/// confirm against IRBuilderBase's constructor signature.
template <typename FolderTy = ConstantFolder,
          typename InserterTy = IRBuilderDefaultInserter>
class IRBuilder : public IRBuilderBase {
private:
  FolderTy Folder;
  InserterTy Inserter;

public:
  /// Build with an explicit context, folder, and inserter; no insertion
  /// point is set.
  IRBuilder(LLVMContext &C, FolderTy Folder, InserterTy Inserter = InserterTy(),
            MDNode *FPMathTag = nullptr,
            ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles),
        Folder(Folder), Inserter(Inserter) {}

  /// Build with default-constructed folder/inserter; no insertion point.
  explicit IRBuilder(LLVMContext &C, MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(C, this->Folder, this->Inserter, FPMathTag, OpBundles) {}

  /// Build inserting at the end of \p TheBB, with an explicit folder.
  /// \p TheBB must be non-null: its context is read before the insertion
  /// point is set.
  explicit IRBuilder(BasicBlock *TheBB, FolderTy Folder,
                     MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles), Folder(Folder) {
    SetInsertPoint(TheBB);
  }

  /// Build inserting at the end of \p TheBB (must be non-null).
  explicit IRBuilder(BasicBlock *TheBB, MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles) {
    SetInsertPoint(TheBB);
  }

  /// Build inserting before instruction \p IP.
  /// Precondition: \p IP must be non-null — `IP->getContext()` is called
  /// in the base-class initializer, before any null check could run.
  /// (Static analysis reports "Called C++ object pointer is null" here;
  /// callers are responsible for passing a valid instruction.)
  explicit IRBuilder(Instruction *IP, MDNode *FPMathTag = nullptr,
                     ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(IP->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles) {
    SetInsertPoint(IP);
  }

  /// Build inserting at iterator \p IP within \p TheBB (non-null), with an
  /// explicit folder.
  IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, FolderTy Folder,
            MDNode *FPMathTag = nullptr,
            ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles), Folder(Folder) {
    SetInsertPoint(TheBB, IP);
  }

  /// Build inserting at iterator \p IP within \p TheBB (non-null).
  IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP,
            MDNode *FPMathTag = nullptr,
            ArrayRef<OperandBundleDef> OpBundles = None)
      : IRBuilderBase(TheBB->getContext(), this->Folder, this->Inserter,
                      FPMathTag, OpBundles) {
    SetInsertPoint(TheBB, IP);
  }

  /// Avoid copying the full IRBuilder. Prefer using InsertPointGuard
  /// or FastMathFlagGuard instead.
  IRBuilder(const IRBuilder &) = delete;

  InserterTy &getInserter() { return Inserter; }
};
2585 | ||||
// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(IRBuilder<>, LLVMBuilderRef)
2588 | ||||
2589 | } // end namespace llvm | |||
2590 | ||||
2591 | #endif // LLVM_IR_IRBUILDER_H |