LLVM 23.0.0git
IRBuilder.cpp
Go to the documentation of this file.
1//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements the IRBuilder class, which is used as a convenient way
10// to create LLVM instructions with a consistent and simplified interface.
11//
12//===----------------------------------------------------------------------===//
13
14#include "llvm/IR/IRBuilder.h"
15#include "llvm/ADT/ArrayRef.h"
16#include "llvm/IR/Constant.h"
17#include "llvm/IR/Constants.h"
19#include "llvm/IR/Function.h"
20#include "llvm/IR/GlobalValue.h"
23#include "llvm/IR/Intrinsics.h"
24#include "llvm/IR/LLVMContext.h"
25#include "llvm/IR/Module.h"
26#include "llvm/IR/NoFolder.h"
27#include "llvm/IR/Operator.h"
29#include "llvm/IR/Statepoint.h"
30#include "llvm/IR/Type.h"
31#include "llvm/IR/Value.h"
33#include <cassert>
34#include <cstdint>
35#include <optional>
36#include <vector>
37
38using namespace llvm;
39
40/// CreateGlobalString - Make a new global variable with an initializer that
41/// has array of i8 type filled in with the nul terminated string value
42/// specified. If Name is specified, it is the name of the global variable
43/// created.
45 const Twine &Name,
46 unsigned AddressSpace,
47 Module *M, bool AddNull) {
48 Constant *StrConstant = ConstantDataArray::getString(Context, Str, AddNull);
49 if (!M)
50 M = BB->getParent()->getParent();
51 auto *GV = new GlobalVariable(
52 *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
53 StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
54 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
55 GV->setAlignment(M->getDataLayout().getPrefTypeAlign(getInt8Ty()));
56 return GV;
57}
58
60 assert(BB && BB->getParent() && "No current function!");
61 return BB->getParent()->getReturnType();
62}
63
66 // We prefer to set our current debug location if any has been set, but if
67 // our debug location is empty and I has a valid location, we shouldn't
68 // overwrite it.
69 I->setDebugLoc(StoredDL.orElse(I->getDebugLoc()));
70}
71
73 Type *SrcTy = V->getType();
74 if (SrcTy == DestTy)
75 return V;
76
77 if (SrcTy->isAggregateType()) {
78 unsigned NumElements;
79 if (SrcTy->isStructTy()) {
80 assert(DestTy->isStructTy() && "Expected StructType");
81 assert(SrcTy->getStructNumElements() == DestTy->getStructNumElements() &&
82 "Expected StructTypes with equal number of elements");
83 NumElements = SrcTy->getStructNumElements();
84 } else {
85 assert(SrcTy->isArrayTy() && DestTy->isArrayTy() && "Expected ArrayType");
86 assert(SrcTy->getArrayNumElements() == DestTy->getArrayNumElements() &&
87 "Expected ArrayTypes with equal number of elements");
88 NumElements = SrcTy->getArrayNumElements();
89 }
90
91 Value *Result = PoisonValue::get(DestTy);
92 for (unsigned I = 0; I < NumElements; ++I) {
93 Type *ElementTy = SrcTy->isStructTy() ? DestTy->getStructElementType(I)
94 : DestTy->getArrayElementType();
95 Value *Element =
97
98 Result = CreateInsertValue(Result, Element, ArrayRef(I));
99 }
100 return Result;
101 }
102
103 return CreateBitOrPointerCast(V, DestTy);
104}
105
107 Value *V, Type *NewTy) {
108 Type *OldTy = V->getType();
109
110 if (OldTy == NewTy)
111 return V;
112
113 assert(!(isa<IntegerType>(OldTy) && isa<IntegerType>(NewTy)) &&
114 "Integer types must be the exact same to convert.");
115
116 // A variant of bitcast that supports a mixture of fixed and scalable types
117 // that are know to have the same size.
118 auto CreateBitCastLike = [this](Value *In, Type *Ty) -> Value * {
119 Type *InTy = In->getType();
120 if (InTy == Ty)
121 return In;
122
124 // For vscale_range(2) expand <4 x i32> to <vscale x 4 x i16> -->
125 // <4 x i32> to <vscale x 2 x i32> to <vscale x 4 x i16>
127 return CreateBitCast(
128 CreateInsertVector(VTy, PoisonValue::get(VTy), In, getInt64(0)), Ty);
129 }
130
132 // For vscale_range(2) expand <vscale x 4 x i16> to <4 x i32> -->
133 // <vscale x 4 x i16> to <vscale x 2 x i32> to <4 x i32>
135 return CreateExtractVector(Ty, CreateBitCast(In, VTy), getInt64(0));
136 }
137
138 return CreateBitCast(In, Ty);
139 };
140
141 // See if we need inttoptr for this type pair. May require additional bitcast.
142 if (OldTy->isIntOrIntVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
143 // Expand <2 x i32> to i8* --> <2 x i32> to i64 to i8*
144 // Expand i128 to <2 x i8*> --> i128 to <2 x i64> to <2 x i8*>
145 // Expand <4 x i32> to <2 x i8*> --> <4 x i32> to <2 x i64> to <2 x i8*>
146 // Directly handle i64 to i8*
147 return CreateIntToPtr(CreateBitCastLike(V, DL.getIntPtrType(NewTy)), NewTy);
148 }
149
150 // See if we need ptrtoint for this type pair. May require additional bitcast.
151 if (OldTy->isPtrOrPtrVectorTy() && NewTy->isIntOrIntVectorTy()) {
152 // Expand <2 x i8*> to i128 --> <2 x i8*> to <2 x i64> to i128
153 // Expand i8* to <2 x i32> --> i8* to i64 to <2 x i32>
154 // Expand <2 x i8*> to <4 x i32> --> <2 x i8*> to <2 x i64> to <4 x i32>
155 // Expand i8* to i64 --> i8* to i64 to i64
156 return CreateBitCastLike(CreatePtrToInt(V, DL.getIntPtrType(OldTy)), NewTy);
157 }
158
159 if (OldTy->isPtrOrPtrVectorTy() && NewTy->isPtrOrPtrVectorTy()) {
160 unsigned OldAS = OldTy->getPointerAddressSpace();
161 unsigned NewAS = NewTy->getPointerAddressSpace();
162 // To convert pointers with different address spaces (they are already
163 // checked convertible, i.e. they have the same pointer size), so far we
164 // cannot use `bitcast` (which has restrict on the same address space) or
165 // `addrspacecast` (which is not always no-op casting). Instead, use a pair
166 // of no-op `ptrtoint`/`inttoptr` casts through an integer with the same bit
167 // size.
168 if (OldAS != NewAS) {
169 return CreateIntToPtr(
170 CreateBitCastLike(CreatePtrToInt(V, DL.getIntPtrType(OldTy)),
171 DL.getIntPtrType(NewTy)),
172 NewTy);
173 }
174 }
175
176 return CreateBitCastLike(V, NewTy);
177}
178
179CallInst *
180IRBuilderBase::createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
181 const Twine &Name, FMFSource FMFSource,
182 ArrayRef<OperandBundleDef> OpBundles) {
183 CallInst *CI = CreateCall(Callee, Ops, OpBundles, Name);
184 if (isa<FPMathOperator>(CI))
186 return CI;
187}
188
190 Value *VScale = B.CreateVScale(Ty);
191 if (Scale == 1)
192 return VScale;
193
194 return B.CreateNUWMul(VScale, ConstantInt::get(Ty, Scale));
195}
196
198 if (EC.isFixed() || EC.isZero())
199 return ConstantInt::get(Ty, EC.getKnownMinValue());
200
201 return CreateVScaleMultiple(*this, Ty, EC.getKnownMinValue());
202}
203
205 if (Size.isFixed() || Size.isZero())
206 return ConstantInt::get(Ty, Size.getKnownMinValue());
207
208 return CreateVScaleMultiple(*this, Ty, Size.getKnownMinValue());
209}
210
212 const DataLayout &DL = BB->getDataLayout();
213 TypeSize ElemSize = DL.getTypeAllocSize(AI->getAllocatedType());
214 Value *Size = CreateTypeSize(DestTy, ElemSize);
215 if (AI->isArrayAllocation())
217 return Size;
218}
219
221 Type *STy = DstType->getScalarType();
222 if (isa<ScalableVectorType>(DstType)) {
223 Type *StepVecType = DstType;
224 // TODO: We expect this special case (element type < 8 bits) to be
225 // temporary - once the intrinsic properly supports < 8 bits this code
226 // can be removed.
227 if (STy->getScalarSizeInBits() < 8)
228 StepVecType =
230 Value *Res = CreateIntrinsic(Intrinsic::stepvector, {StepVecType}, {},
231 nullptr, Name);
232 if (StepVecType != DstType)
233 Res = CreateTrunc(Res, DstType);
234 return Res;
235 }
236
237 unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();
238
239 // Create a vector of consecutive numbers from zero to VF.
240 // It's okay if the values wrap around.
242 for (unsigned i = 0; i < NumEls; ++i)
243 Indices.push_back(
244 ConstantInt::get(STy, i, /*IsSigned=*/false, /*ImplicitTrunc=*/true));
245
246 // Add the consecutive indices to the vector value.
247 return ConstantVector::get(Indices);
248}
249
251 MaybeAlign Align, bool isVolatile,
252 const AAMDNodes &AAInfo) {
253 Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
254 Type *Tys[] = {Ptr->getType(), Size->getType()};
255
256 CallInst *CI = CreateIntrinsic(Intrinsic::memset, Tys, Ops);
257
258 if (Align)
259 cast<MemSetInst>(CI)->setDestAlignment(*Align);
260 CI->setAAMetadata(AAInfo);
261 return CI;
262}
263
265 Value *Val, Value *Size,
266 bool IsVolatile,
267 const AAMDNodes &AAInfo) {
268 Value *Ops[] = {Dst, Val, Size, getInt1(IsVolatile)};
269 Type *Tys[] = {Dst->getType(), Size->getType()};
270
271 CallInst *CI = CreateIntrinsic(Intrinsic::memset_inline, Tys, Ops);
272
273 if (DstAlign)
274 cast<MemSetInst>(CI)->setDestAlignment(*DstAlign);
275 CI->setAAMetadata(AAInfo);
276 return CI;
277}
278
280 Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
281 const AAMDNodes &AAInfo) {
282
283 Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
284 Type *Tys[] = {Ptr->getType(), Size->getType()};
285
286 CallInst *CI =
287 CreateIntrinsic(Intrinsic::memset_element_unordered_atomic, Tys, Ops);
288
289 cast<AnyMemSetInst>(CI)->setDestAlignment(Alignment);
290 CI->setAAMetadata(AAInfo);
291 return CI;
292}
293
295 MaybeAlign DstAlign, Value *Src,
296 MaybeAlign SrcAlign, Value *Size,
297 bool isVolatile,
298 const AAMDNodes &AAInfo) {
299 assert((IntrID == Intrinsic::memcpy || IntrID == Intrinsic::memcpy_inline ||
300 IntrID == Intrinsic::memmove) &&
301 "Unexpected intrinsic ID");
302 Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
303 Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
304
305 CallInst *CI = CreateIntrinsic(IntrID, Tys, Ops);
306
307 auto* MCI = cast<MemTransferInst>(CI);
308 if (DstAlign)
309 MCI->setDestAlignment(*DstAlign);
310 if (SrcAlign)
311 MCI->setSourceAlignment(*SrcAlign);
312 MCI->setAAMetadata(AAInfo);
313 return CI;
314}
315
317 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
318 uint32_t ElementSize, const AAMDNodes &AAInfo) {
319 assert(DstAlign >= ElementSize &&
320 "Pointer alignment must be at least element size");
321 assert(SrcAlign >= ElementSize &&
322 "Pointer alignment must be at least element size");
323 Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
324 Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
325
326 CallInst *CI =
327 CreateIntrinsic(Intrinsic::memcpy_element_unordered_atomic, Tys, Ops);
328
329 // Set the alignment of the pointer args.
330 auto *AMCI = cast<AnyMemCpyInst>(CI);
331 AMCI->setDestAlignment(DstAlign);
332 AMCI->setSourceAlignment(SrcAlign);
333 AMCI->setAAMetadata(AAInfo);
334 return CI;
335}
336
337/// isConstantOne - Return true only if val is constant int 1
338static bool isConstantOne(const Value *Val) {
339 assert(Val && "isConstantOne does not work with nullptr Val");
340 const ConstantInt *CVal = dyn_cast<ConstantInt>(Val);
341 return CVal && CVal->isOne();
342}
343
345 Value *AllocSize, Value *ArraySize,
347 Function *MallocF, const Twine &Name) {
348 // malloc(type) becomes:
349 // i8* malloc(typeSize)
350 // malloc(type, arraySize) becomes:
351 // i8* malloc(typeSize*arraySize)
352 if (!ArraySize)
353 ArraySize = ConstantInt::get(IntPtrTy, 1);
354 else if (ArraySize->getType() != IntPtrTy)
355 ArraySize = CreateIntCast(ArraySize, IntPtrTy, false);
356
357 if (!isConstantOne(ArraySize)) {
358 if (isConstantOne(AllocSize)) {
359 AllocSize = ArraySize; // Operand * 1 = Operand
360 } else {
361 // Multiply type size by the array size...
362 AllocSize = CreateMul(ArraySize, AllocSize, "mallocsize");
363 }
364 }
365
366 assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
367 // Create the call to Malloc.
368 Module *M = BB->getParent()->getParent();
370 FunctionCallee MallocFunc = MallocF;
371 if (!MallocFunc)
372 // prototype malloc as "void *malloc(size_t)"
373 MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
374 CallInst *MCall = CreateCall(MallocFunc, AllocSize, OpB, Name);
375
376 MCall->setTailCall();
377 if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
378 MCall->setCallingConv(F->getCallingConv());
379 F->setReturnDoesNotAlias();
380 }
381
382 assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");
383
384 return MCall;
385}
386
388 Value *AllocSize, Value *ArraySize,
389 Function *MallocF, const Twine &Name) {
390
391 return CreateMalloc(IntPtrTy, AllocTy, AllocSize, ArraySize, {}, MallocF,
392 Name);
393}
394
395/// CreateFree - Generate the IR for a call to the builtin free function.
398 assert(Source->getType()->isPointerTy() &&
399 "Can not free something of nonpointer type!");
400
401 Module *M = BB->getParent()->getParent();
402
403 Type *VoidTy = Type::getVoidTy(M->getContext());
404 Type *VoidPtrTy = PointerType::getUnqual(M->getContext());
405 // prototype free as "void free(void*)"
406 FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, VoidPtrTy);
407 CallInst *Result = CreateCall(FreeFunc, Source, Bundles, "");
408 Result->setTailCall();
409 if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
410 Result->setCallingConv(F->getCallingConv());
411
412 return Result;
413}
414
416 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
417 uint32_t ElementSize, const AAMDNodes &AAInfo) {
418 assert(DstAlign >= ElementSize &&
419 "Pointer alignment must be at least element size");
420 assert(SrcAlign >= ElementSize &&
421 "Pointer alignment must be at least element size");
422 Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
423 Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
424
425 CallInst *CI =
426 CreateIntrinsic(Intrinsic::memmove_element_unordered_atomic, Tys, Ops);
427
428 // Set the alignment of the pointer args.
429 CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
430 CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));
431 CI->setAAMetadata(AAInfo);
432 return CI;
433}
434
435CallInst *IRBuilderBase::getReductionIntrinsic(Intrinsic::ID ID, Value *Src) {
436 Value *Ops[] = {Src};
437 Type *Tys[] = { Src->getType() };
438 return CreateIntrinsic(ID, Tys, Ops);
439}
440
442 Value *Ops[] = {Acc, Src};
443 return CreateIntrinsic(Intrinsic::vector_reduce_fadd, {Src->getType()}, Ops);
444}
445
447 Value *Ops[] = {Acc, Src};
448 return CreateIntrinsic(Intrinsic::vector_reduce_fmul, {Src->getType()}, Ops);
449}
450
452 return getReductionIntrinsic(Intrinsic::vector_reduce_add, Src);
453}
454
456 return getReductionIntrinsic(Intrinsic::vector_reduce_mul, Src);
457}
458
460 return getReductionIntrinsic(Intrinsic::vector_reduce_and, Src);
461}
462
464 return getReductionIntrinsic(Intrinsic::vector_reduce_or, Src);
465}
466
468 return getReductionIntrinsic(Intrinsic::vector_reduce_xor, Src);
469}
470
472 auto ID =
473 IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
474 return getReductionIntrinsic(ID, Src);
475}
476
478 auto ID =
479 IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
480 return getReductionIntrinsic(ID, Src);
481}
482
484 return getReductionIntrinsic(Intrinsic::vector_reduce_fmax, Src);
485}
486
488 return getReductionIntrinsic(Intrinsic::vector_reduce_fmin, Src);
489}
490
492 return getReductionIntrinsic(Intrinsic::vector_reduce_fmaximum, Src);
493}
494
496 return getReductionIntrinsic(Intrinsic::vector_reduce_fminimum, Src);
497}
498
501 "lifetime.start only applies to pointers.");
502 return CreateIntrinsic(Intrinsic::lifetime_start, {Ptr->getType()}, {Ptr});
503}
504
507 "lifetime.end only applies to pointers.");
508 return CreateIntrinsic(Intrinsic::lifetime_end, {Ptr->getType()}, {Ptr});
509}
510
512
514 "invariant.start only applies to pointers.");
515 if (!Size)
516 Size = getInt64(-1);
517 else
518 assert(Size->getType() == getInt64Ty() &&
519 "invariant.start requires the size to be an i64");
520
521 Value *Ops[] = {Size, Ptr};
522 // Fill in the single overloaded type: memory object type.
523 Type *ObjectPtr[1] = {Ptr->getType()};
524 return CreateIntrinsic(Intrinsic::invariant_start, ObjectPtr, Ops);
525}
526
528 if (auto *V = dyn_cast<GlobalVariable>(Ptr))
529 return V->getAlign();
530 if (auto *A = dyn_cast<GlobalAlias>(Ptr))
531 return getAlign(A->getAliaseeObject());
532 return {};
533}
534
536 assert(isa<GlobalValue>(Ptr) && cast<GlobalValue>(Ptr)->isThreadLocal() &&
537 "threadlocal_address only applies to thread local variables.");
538 CallInst *CI = CreateIntrinsic(llvm::Intrinsic::threadlocal_address,
539 {Ptr->getType()}, {Ptr});
540 if (MaybeAlign A = getAlign(Ptr)) {
543 }
544 return CI;
545}
546
547CallInst *
549 ArrayRef<OperandBundleDef> OpBundles) {
550 assert(Cond->getType() == getInt1Ty() &&
551 "an assumption condition must be of type i1");
552
553 Value *Ops[] = { Cond };
554 Module *M = BB->getParent()->getParent();
555 Function *FnAssume = Intrinsic::getOrInsertDeclaration(M, Intrinsic::assume);
556 return CreateCall(FnAssume, Ops, OpBundles);
557}
558
560 return CreateIntrinsic(Intrinsic::experimental_noalias_scope_decl, {},
561 {Scope});
562}
563
564/// Create a call to a Masked Load intrinsic.
565/// \p Ty - vector type to load
566/// \p Ptr - base pointer for the load
567/// \p Alignment - alignment of the source location
568/// \p Mask - vector of booleans which indicates what vector lanes should
569/// be accessed in memory
570/// \p PassThru - pass-through value that is used to fill the masked-off lanes
571/// of the result
572/// \p Name - name of the result variable
574 Value *Mask, Value *PassThru,
575 const Twine &Name) {
576 auto *PtrTy = cast<PointerType>(Ptr->getType());
577 assert(Ty->isVectorTy() && "Type should be vector");
578 assert(Mask && "Mask should not be all-ones (null)");
579 if (!PassThru)
580 PassThru = PoisonValue::get(Ty);
581 Type *OverloadedTypes[] = { Ty, PtrTy };
582 Value *Ops[] = {Ptr, Mask, PassThru};
583 CallInst *CI =
584 CreateMaskedIntrinsic(Intrinsic::masked_load, Ops, OverloadedTypes, Name);
585 CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), Alignment));
586 return CI;
587}
588
589/// Create a call to a Masked Store intrinsic.
590/// \p Val - data to be stored,
591/// \p Ptr - base pointer for the store
592/// \p Alignment - alignment of the destination location
593/// \p Mask - vector of booleans which indicates what vector lanes should
594/// be accessed in memory
596 Align Alignment, Value *Mask) {
597 auto *PtrTy = cast<PointerType>(Ptr->getType());
598 Type *DataTy = Val->getType();
599 assert(DataTy->isVectorTy() && "Val should be a vector");
600 assert(Mask && "Mask should not be all-ones (null)");
601 Type *OverloadedTypes[] = { DataTy, PtrTy };
602 Value *Ops[] = {Val, Ptr, Mask};
603 CallInst *CI =
604 CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
605 CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), Alignment));
606 return CI;
607}
608
609/// Create a call to a Masked intrinsic, with given intrinsic Id,
610/// an array of operands - Ops, and an array of overloaded types -
611/// OverloadedTypes.
612CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
614 ArrayRef<Type *> OverloadedTypes,
615 const Twine &Name) {
616 return CreateIntrinsic(Id, OverloadedTypes, Ops, {}, Name);
617}
618
619/// Create a call to a Masked Gather intrinsic.
620/// \p Ty - vector type to gather
621/// \p Ptrs - vector of pointers for loading
622/// \p Align - alignment for one element
623/// \p Mask - vector of booleans which indicates what vector lanes should
624/// be accessed in memory
625/// \p PassThru - pass-through value that is used to fill the masked-off lanes
626/// of the result
627/// \p Name - name of the result variable
629 Align Alignment, Value *Mask,
630 Value *PassThru,
631 const Twine &Name) {
632 auto *VecTy = cast<VectorType>(Ty);
633 ElementCount NumElts = VecTy->getElementCount();
634 auto *PtrsTy = cast<VectorType>(Ptrs->getType());
635 assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");
636
637 if (!Mask)
638 Mask = getAllOnesMask(NumElts);
639
640 if (!PassThru)
641 PassThru = PoisonValue::get(Ty);
642
643 Type *OverloadedTypes[] = {Ty, PtrsTy};
644 Value *Ops[] = {Ptrs, Mask, PassThru};
645
646 // We specify only one type when we create this intrinsic. Types of other
647 // arguments are derived from this type.
648 CallInst *CI = CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops,
649 OverloadedTypes, Name);
650 CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), Alignment));
651 return CI;
652}
653
654/// Create a call to a Masked Scatter intrinsic.
655/// \p Data - data to be stored,
656/// \p Ptrs - the vector of pointers, where the \p Data elements should be
657/// stored
658/// \p Align - alignment for one element
659/// \p Mask - vector of booleans which indicates what vector lanes should
660/// be accessed in memory
662 Align Alignment, Value *Mask) {
663 auto *PtrsTy = cast<VectorType>(Ptrs->getType());
664 auto *DataTy = cast<VectorType>(Data->getType());
665 ElementCount NumElts = PtrsTy->getElementCount();
666
667 if (!Mask)
668 Mask = getAllOnesMask(NumElts);
669
670 Type *OverloadedTypes[] = {DataTy, PtrsTy};
671 Value *Ops[] = {Data, Ptrs, Mask};
672
673 // We specify only one type when we create this intrinsic. Types of other
674 // arguments are derived from this type.
675 CallInst *CI =
676 CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
677 CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), Alignment));
678 return CI;
679}
680
681/// Create a call to Masked Expand Load intrinsic
682/// \p Ty - vector type to load
683/// \p Ptr - base pointer for the load
684/// \p Align - alignment of \p Ptr
685/// \p Mask - vector of booleans which indicates what vector lanes should
686/// be accessed in memory
687/// \p PassThru - pass-through value that is used to fill the masked-off lanes
688/// of the result
689/// \p Name - name of the result variable
691 MaybeAlign Align, Value *Mask,
692 Value *PassThru,
693 const Twine &Name) {
694 assert(Ty->isVectorTy() && "Type should be vector");
695 assert(Mask && "Mask should not be all-ones (null)");
696 if (!PassThru)
697 PassThru = PoisonValue::get(Ty);
698 Type *OverloadedTypes[] = {Ty};
699 Value *Ops[] = {Ptr, Mask, PassThru};
700 CallInst *CI = CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
701 OverloadedTypes, Name);
702 if (Align)
704 return CI;
705}
706
707/// Create a call to Masked Compress Store intrinsic
708/// \p Val - data to be stored,
709/// \p Ptr - base pointer for the store
710/// \p Align - alignment of \p Ptr
711/// \p Mask - vector of booleans which indicates what vector lanes should
712/// be accessed in memory
715 Value *Mask) {
716 Type *DataTy = Val->getType();
717 assert(DataTy->isVectorTy() && "Val should be a vector");
718 assert(Mask && "Mask should not be all-ones (null)");
719 Type *OverloadedTypes[] = {DataTy};
720 Value *Ops[] = {Val, Ptr, Mask};
721 CallInst *CI = CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
722 OverloadedTypes);
723 if (Align)
725 return CI;
726}
727
728template <typename T0>
729static std::vector<Value *>
731 Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
732 std::vector<Value *> Args;
733 Args.push_back(B.getInt64(ID));
734 Args.push_back(B.getInt32(NumPatchBytes));
735 Args.push_back(ActualCallee);
736 Args.push_back(B.getInt32(CallArgs.size()));
737 Args.push_back(B.getInt32(Flags));
738 llvm::append_range(Args, CallArgs);
739 // GC Transition and Deopt args are now always handled via operand bundle.
740 // They will be removed from the signature of gc.statepoint shortly.
741 Args.push_back(B.getInt32(0));
742 Args.push_back(B.getInt32(0));
743 // GC args are now encoded in the gc-live operand bundle
744 return Args;
745}
746
747template<typename T1, typename T2, typename T3>
748static std::vector<OperandBundleDef>
749getStatepointBundles(std::optional<ArrayRef<T1>> TransitionArgs,
750 std::optional<ArrayRef<T2>> DeoptArgs,
751 ArrayRef<T3> GCArgs) {
752 std::vector<OperandBundleDef> Rval;
753 if (DeoptArgs)
754 Rval.emplace_back("deopt", SmallVector<Value *, 16>(*DeoptArgs));
755 if (TransitionArgs)
756 Rval.emplace_back("gc-transition",
757 SmallVector<Value *, 16>(*TransitionArgs));
758 if (GCArgs.size())
759 Rval.emplace_back("gc-live", SmallVector<Value *, 16>(GCArgs));
760 return Rval;
761}
762
763template <typename T0, typename T1, typename T2, typename T3>
765 IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
766 FunctionCallee ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
767 std::optional<ArrayRef<T1>> TransitionArgs,
768 std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
769 const Twine &Name) {
770 Module *M = Builder->GetInsertBlock()->getParent()->getParent();
771 // Fill in the one generic type'd argument (the function is also vararg)
773 M, Intrinsic::experimental_gc_statepoint,
774 {ActualCallee.getCallee()->getType()});
775
776 std::vector<Value *> Args = getStatepointArgs(
777 *Builder, ID, NumPatchBytes, ActualCallee.getCallee(), Flags, CallArgs);
778
779 CallInst *CI = Builder->CreateCall(
780 FnStatepoint, Args,
781 getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
782 CI->addParamAttr(2,
783 Attribute::get(Builder->getContext(), Attribute::ElementType,
784 ActualCallee.getFunctionType()));
785 return CI;
786}
787
789 uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
790 ArrayRef<Value *> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
791 ArrayRef<Value *> GCArgs, const Twine &Name) {
793 this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
794 CallArgs, std::nullopt /* No Transition Args */, DeoptArgs, GCArgs, Name);
795}
796
798 uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
799 uint32_t Flags, ArrayRef<Value *> CallArgs,
800 std::optional<ArrayRef<Use>> TransitionArgs,
801 std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
802 const Twine &Name) {
804 this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
805 DeoptArgs, GCArgs, Name);
806}
807
809 uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
810 ArrayRef<Use> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
811 ArrayRef<Value *> GCArgs, const Twine &Name) {
813 this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
814 CallArgs, std::nullopt, DeoptArgs, GCArgs, Name);
815}
816
817template <typename T0, typename T1, typename T2, typename T3>
819 IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
820 FunctionCallee ActualInvokee, BasicBlock *NormalDest,
821 BasicBlock *UnwindDest, uint32_t Flags, ArrayRef<T0> InvokeArgs,
822 std::optional<ArrayRef<T1>> TransitionArgs,
823 std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
824 const Twine &Name) {
825 Module *M = Builder->GetInsertBlock()->getParent()->getParent();
826 // Fill in the one generic type'd argument (the function is also vararg)
828 M, Intrinsic::experimental_gc_statepoint,
829 {ActualInvokee.getCallee()->getType()});
830
831 std::vector<Value *> Args =
832 getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee.getCallee(),
833 Flags, InvokeArgs);
834
835 InvokeInst *II = Builder->CreateInvoke(
836 FnStatepoint, NormalDest, UnwindDest, Args,
837 getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
838 II->addParamAttr(2,
839 Attribute::get(Builder->getContext(), Attribute::ElementType,
840 ActualInvokee.getFunctionType()));
841 return II;
842}
843
845 uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
846 BasicBlock *NormalDest, BasicBlock *UnwindDest,
847 ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
848 ArrayRef<Value *> GCArgs, const Twine &Name) {
850 this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
851 uint32_t(StatepointFlags::None), InvokeArgs,
852 std::nullopt /* No Transition Args*/, DeoptArgs, GCArgs, Name);
853}
854
856 uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
857 BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
858 ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Use>> TransitionArgs,
859 std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
860 const Twine &Name) {
862 this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
863 InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
864}
865
867 uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
868 BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
869 std::optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs,
870 const Twine &Name) {
872 this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
873 uint32_t(StatepointFlags::None), InvokeArgs, std::nullopt, DeoptArgs,
874 GCArgs, Name);
875}
876
878 Type *ResultType, const Twine &Name) {
879 Intrinsic::ID ID = Intrinsic::experimental_gc_result;
880 Type *Types[] = {ResultType};
881
882 Value *Args[] = {Statepoint};
883 return CreateIntrinsic(ID, Types, Args, {}, Name);
884}
885
887 int BaseOffset, int DerivedOffset,
888 Type *ResultType, const Twine &Name) {
889 Type *Types[] = {ResultType};
890
891 Value *Args[] = {Statepoint, getInt32(BaseOffset), getInt32(DerivedOffset)};
892 return CreateIntrinsic(Intrinsic::experimental_gc_relocate, Types, Args, {},
893 Name);
894}
895
897 const Twine &Name) {
898 Type *PtrTy = DerivedPtr->getType();
899 return CreateIntrinsic(Intrinsic::experimental_gc_get_pointer_base,
900 {PtrTy, PtrTy}, {DerivedPtr}, {}, Name);
901}
902
904 const Twine &Name) {
905 Type *PtrTy = DerivedPtr->getType();
906 return CreateIntrinsic(Intrinsic::experimental_gc_get_pointer_offset, {PtrTy},
907 {DerivedPtr}, {}, Name);
908}
909
912 const Twine &Name) {
913 Module *M = BB->getModule();
914 Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, {V->getType()});
915 return createCallHelper(Fn, {V}, Name, FMFSource);
916}
917
920 const Twine &Name) {
921 Module *M = BB->getModule();
922 Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, {LHS->getType()});
923 if (Value *V = Folder.FoldBinaryIntrinsic(ID, LHS, RHS, Fn->getReturnType(),
924 /*FMFSource=*/nullptr))
925 return V;
926 return createCallHelper(Fn, {LHS, RHS}, Name, FMFSource);
927}
928
930 ArrayRef<Type *> Types,
933 const Twine &Name) {
934 Module *M = BB->getModule();
936 return createCallHelper(Fn, Args, Name, FMFSource);
937}
938
942 const Twine &Name) {
943 Module *M = BB->getModule();
944
945 SmallVector<Type *> ArgTys;
946 ArgTys.reserve(Args.size());
947 for (auto &I : Args)
948 ArgTys.push_back(I->getType());
949
950 Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, RetTy, ArgTys);
951 return createCallHelper(Fn, Args, Name, FMFSource);
952}
953
956 const Twine &Name, MDNode *FPMathTag, std::optional<RoundingMode> Rounding,
957 std::optional<fp::ExceptionBehavior> Except) {
958 Value *RoundingV = getConstrainedFPRounding(Rounding);
959 Value *ExceptV = getConstrainedFPExcept(Except);
960
961 FastMathFlags UseFMF = FMFSource.get(FMF);
962
963 CallInst *C = CreateIntrinsic(ID, {L->getType()},
964 {L, R, RoundingV, ExceptV}, nullptr, Name);
966 setFPAttrs(C, FPMathTag, UseFMF);
967 return C;
968}
969
972 FMFSource FMFSource, const Twine &Name, MDNode *FPMathTag,
973 std::optional<RoundingMode> Rounding,
974 std::optional<fp::ExceptionBehavior> Except) {
975 Value *RoundingV = getConstrainedFPRounding(Rounding);
976 Value *ExceptV = getConstrainedFPExcept(Except);
977
978 FastMathFlags UseFMF = FMFSource.get(FMF);
979
980 llvm::SmallVector<Value *, 5> ExtArgs(Args);
981 ExtArgs.push_back(RoundingV);
982 ExtArgs.push_back(ExceptV);
983
984 CallInst *C = CreateIntrinsic(ID, Types, ExtArgs, nullptr, Name);
986 setFPAttrs(C, FPMathTag, UseFMF);
987 return C;
988}
989
992 const Twine &Name, MDNode *FPMathTag,
993 std::optional<fp::ExceptionBehavior> Except) {
994 Value *ExceptV = getConstrainedFPExcept(Except);
995
996 FastMathFlags UseFMF = FMFSource.get(FMF);
997
998 CallInst *C =
999 CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, nullptr, Name);
1001 setFPAttrs(C, FPMathTag, UseFMF);
1002 return C;
1003}
1004
1006 const Twine &Name, MDNode *FPMathTag) {
1008 assert(Ops.size() == 2 && "Invalid number of operands!");
1009 return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
1010 Ops[0], Ops[1], Name, FPMathTag);
1011 }
1013 assert(Ops.size() == 1 && "Invalid number of operands!");
1014 return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
1015 Ops[0], Name, FPMathTag);
1016 }
1017 llvm_unreachable("Unexpected opcode!");
1018}
1019
1022 const Twine &Name, MDNode *FPMathTag, std::optional<RoundingMode> Rounding,
1023 std::optional<fp::ExceptionBehavior> Except) {
1024 Value *ExceptV = getConstrainedFPExcept(Except);
1025
1026 FastMathFlags UseFMF = FMFSource.get(FMF);
1027
1028 CallInst *C;
1030 Value *RoundingV = getConstrainedFPRounding(Rounding);
1031 C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
1032 nullptr, Name);
1033 } else
1034 C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
1035 Name);
1036
1038
1040 setFPAttrs(C, FPMathTag, UseFMF);
1041 return C;
1042}
1043
1044Value *IRBuilderBase::CreateFCmpHelper(CmpInst::Predicate P, Value *LHS,
1045 Value *RHS, const Twine &Name,
1046 MDNode *FPMathTag, FMFSource FMFSource,
1047 bool IsSignaling) {
1048 if (IsFPConstrained) {
1049 auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
1050 : Intrinsic::experimental_constrained_fcmp;
1051 return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
1052 }
1053
1054 if (auto *V = Folder.FoldCmp(P, LHS, RHS))
1055 return V;
1056 return Insert(
1057 setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMFSource.get(FMF)),
1058 Name);
1059}
1060
1063 const Twine &Name, std::optional<fp::ExceptionBehavior> Except) {
1064 Value *PredicateV = getConstrainedFPPredicate(P);
1065 Value *ExceptV = getConstrainedFPExcept(Except);
1066
1067 CallInst *C = CreateIntrinsic(ID, {L->getType()},
1068 {L, R, PredicateV, ExceptV}, nullptr, Name);
1070 return C;
1071}
1072
1074 Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
1075 std::optional<RoundingMode> Rounding,
1076 std::optional<fp::ExceptionBehavior> Except) {
1077 llvm::SmallVector<Value *, 6> UseArgs(Args);
1078
1079 if (Intrinsic::hasConstrainedFPRoundingModeOperand(Callee->getIntrinsicID()))
1080 UseArgs.push_back(getConstrainedFPRounding(Rounding));
1081 UseArgs.push_back(getConstrainedFPExcept(Except));
1082
1083 CallInst *C = CreateCall(Callee, UseArgs, Name);
1085 return C;
1086}
1087
1089 Value *False,
1091 const Twine &Name) {
1092 Value *Ret = CreateSelectFMF(C, True, False, {}, Name);
1093 if (auto *SI = dyn_cast<SelectInst>(Ret)) {
1095 }
1096 return Ret;
1097}
1098
1100 Value *False,
1103 const Twine &Name) {
1104 Value *Ret = CreateSelectFMF(C, True, False, FMFSource, Name);
1105 if (auto *SI = dyn_cast<SelectInst>(Ret))
1107 return Ret;
1108}
1109
1111 const Twine &Name, Instruction *MDFrom) {
1112 return CreateSelectFMF(C, True, False, {}, Name, MDFrom);
1113}
1114
1116 FMFSource FMFSource, const Twine &Name,
1117 Instruction *MDFrom) {
1118 if (auto *V = Folder.FoldSelect(C, True, False))
1119 return V;
1120
1121 SelectInst *Sel = SelectInst::Create(C, True, False);
1122 if (MDFrom) {
1123 MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
1124 MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
1125 Sel = addBranchMetadata(Sel, Prof, Unpred);
1126 }
1127 if (isa<FPMathOperator>(Sel))
1128 setFPAttrs(Sel, /*MDNode=*/nullptr, FMFSource.get(FMF));
1129 return Insert(Sel, Name);
1130}
1131
1133 assert(LHS->getType() == RHS->getType() &&
1134 "Pointer subtraction operand types must match!");
1135 Value *LHSAddr = CreatePtrToAddr(LHS);
1136 Value *RHSAddr = CreatePtrToAddr(RHS);
1137 return CreateSub(LHSAddr, RHSAddr, Name);
1138}
1140 const Twine &Name) {
1141 const DataLayout &DL = BB->getDataLayout();
1142 TypeSize ElemSize = DL.getTypeAllocSize(ElemTy);
1143 if (ElemSize == TypeSize::getFixed(1))
1144 return CreatePtrDiff(LHS, RHS, Name);
1145
1146 Value *Diff = CreatePtrDiff(LHS, RHS);
1147 return CreateExactSDiv(Diff, CreateTypeSize(Diff->getType(), ElemSize), Name);
1148}
1149
1152 "launder.invariant.group only applies to pointers.");
1153 auto *PtrType = Ptr->getType();
1154 Module *M = BB->getParent()->getParent();
1155 Function *FnLaunderInvariantGroup = Intrinsic::getOrInsertDeclaration(
1156 M, Intrinsic::launder_invariant_group, {PtrType});
1157
1158 assert(FnLaunderInvariantGroup->getReturnType() == PtrType &&
1159 FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
1160 PtrType &&
1161 "LaunderInvariantGroup should take and return the same type");
1162
1163 return CreateCall(FnLaunderInvariantGroup, {Ptr});
1164}
1165
1168 "strip.invariant.group only applies to pointers.");
1169
1170 auto *PtrType = Ptr->getType();
1171 Module *M = BB->getParent()->getParent();
1172 Function *FnStripInvariantGroup = Intrinsic::getOrInsertDeclaration(
1173 M, Intrinsic::strip_invariant_group, {PtrType});
1174
1175 assert(FnStripInvariantGroup->getReturnType() == PtrType &&
1176 FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
1177 PtrType &&
1178 "StripInvariantGroup should take and return the same type");
1179
1180 return CreateCall(FnStripInvariantGroup, {Ptr});
1181}
1182
1184 auto *Ty = cast<VectorType>(V->getType());
1185 if (isa<ScalableVectorType>(Ty)) {
1186 Module *M = BB->getParent()->getParent();
1187 Function *F =
1188 Intrinsic::getOrInsertDeclaration(M, Intrinsic::vector_reverse, Ty);
1189 return Insert(CallInst::Create(F, V), Name);
1190 }
1191 // Keep the original behaviour for fixed vector
1192 SmallVector<int, 8> ShuffleMask;
1193 int NumElts = Ty->getElementCount().getKnownMinValue();
1194 for (int i = 0; i < NumElts; ++i)
1195 ShuffleMask.push_back(NumElts - i - 1);
1196 return CreateShuffleVector(V, ShuffleMask, Name);
1197}
1198
1199static SmallVector<int, 8> getSpliceMask(int64_t Imm, unsigned NumElts) {
1200 unsigned Idx = (NumElts + Imm) % NumElts;
1202 for (unsigned I = 0; I < NumElts; ++I)
1203 Mask.push_back(Idx + I);
1204 return Mask;
1205}
1206
1208 Value *Offset, const Twine &Name) {
1209 assert(isa<VectorType>(V1->getType()) && "Unexpected type");
1210 assert(V1->getType() == V2->getType() &&
1211 "Splice expects matching operand types!");
1212
1213 // Emit a shufflevector for fixed vectors with a constant offset
1214 if (auto *COffset = dyn_cast<ConstantInt>(Offset))
1215 if (auto *FVTy = dyn_cast<FixedVectorType>(V1->getType()))
1216 return CreateShuffleVector(
1217 V1, V2,
1218 getSpliceMask(COffset->getZExtValue(), FVTy->getNumElements()));
1219
1220 return CreateIntrinsic(Intrinsic::vector_splice_left, V1->getType(),
1221 {V1, V2, Offset}, {}, Name);
1222}
1223
1225 Value *Offset,
1226 const Twine &Name) {
1227 assert(isa<VectorType>(V1->getType()) && "Unexpected type");
1228 assert(V1->getType() == V2->getType() &&
1229 "Splice expects matching operand types!");
1230
1231 // Emit a shufflevector for fixed vectors with a constant offset
1232 if (auto *COffset = dyn_cast<ConstantInt>(Offset))
1233 if (auto *FVTy = dyn_cast<FixedVectorType>(V1->getType()))
1234 return CreateShuffleVector(
1235 V1, V2,
1236 getSpliceMask(-COffset->getZExtValue(), FVTy->getNumElements()));
1237
1238 return CreateIntrinsic(Intrinsic::vector_splice_right, V1->getType(),
1239 {V1, V2, Offset}, {}, Name);
1240}
1241
1243 const Twine &Name) {
1244 auto EC = ElementCount::getFixed(NumElts);
1245 return CreateVectorSplat(EC, V, Name);
1246}
1247
1249 const Twine &Name) {
1250 assert(EC.isNonZero() && "Cannot splat to an empty vector!");
1251
1252 // First insert it into a poison vector so we can shuffle it.
1253 Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
1254 V = CreateInsertElement(Poison, V, getInt64(0), Name + ".splatinsert");
1255
1256 // Shuffle the value across the desired number of elements.
1258 Zeros.resize(EC.getKnownMinValue());
1259 return CreateShuffleVector(V, Zeros, Name + ".splat");
1260}
1261
1263 const Twine &Name) {
1264 assert(Ops.size() >= 2 && Ops.size() <= 8 &&
1265 "Unexpected number of operands to interleave");
1266
1267 // Make sure all operands are the same type.
1268 assert(isa<VectorType>(Ops[0]->getType()) && "Unexpected type");
1269
1270#ifndef NDEBUG
1271 for (unsigned I = 1; I < Ops.size(); I++) {
1272 assert(Ops[I]->getType() == Ops[0]->getType() &&
1273 "Vector interleave expects matching operand types!");
1274 }
1275#endif
1276
1277 unsigned IID = Intrinsic::getInterleaveIntrinsicID(Ops.size());
1278 auto *SubvecTy = cast<VectorType>(Ops[0]->getType());
1279 Type *DestTy = VectorType::get(SubvecTy->getElementType(),
1280 SubvecTy->getElementCount() * Ops.size());
1281 return CreateIntrinsic(IID, {DestTy}, Ops, {}, Name);
1282}
1283
1285 unsigned Dimension,
1286 unsigned LastIndex,
1287 MDNode *DbgInfo) {
1288 auto *BaseType = Base->getType();
1290 "Invalid Base ptr type for preserve.array.access.index.");
1291
1292 Value *LastIndexV = getInt32(LastIndex);
1293 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
1294 SmallVector<Value *, 4> IdxList(Dimension, Zero);
1295 IdxList.push_back(LastIndexV);
1296
1297 Type *ResultType = GetElementPtrInst::getGEPReturnType(Base, IdxList);
1298
1299 Value *DimV = getInt32(Dimension);
1300 CallInst *Fn =
1301 CreateIntrinsic(Intrinsic::preserve_array_access_index,
1302 {ResultType, BaseType}, {Base, DimV, LastIndexV});
1303 Fn->addParamAttr(
1304 0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
1305 if (DbgInfo)
1306 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1307
1308 return Fn;
1309}
1310
1312 Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
1313 assert(isa<PointerType>(Base->getType()) &&
1314 "Invalid Base ptr type for preserve.union.access.index.");
1315 auto *BaseType = Base->getType();
1316
1317 Value *DIIndex = getInt32(FieldIndex);
1318 CallInst *Fn = CreateIntrinsic(Intrinsic::preserve_union_access_index,
1319 {BaseType, BaseType}, {Base, DIIndex});
1320 if (DbgInfo)
1321 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1322
1323 return Fn;
1324}
1325
1327 Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
1328 MDNode *DbgInfo) {
1329 auto *BaseType = Base->getType();
1331 "Invalid Base ptr type for preserve.struct.access.index.");
1332
1333 Value *GEPIndex = getInt32(Index);
1334 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
1335 Type *ResultType =
1336 GetElementPtrInst::getGEPReturnType(Base, {Zero, GEPIndex});
1337
1338 Value *DIIndex = getInt32(FieldIndex);
1339 CallInst *Fn =
1340 CreateIntrinsic(Intrinsic::preserve_struct_access_index,
1341 {ResultType, BaseType}, {Base, GEPIndex, DIIndex});
1342 Fn->addParamAttr(
1343 0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
1344 if (DbgInfo)
1345 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1346
1347 return Fn;
1348}
1349
1351 ConstantInt *TestV = getInt32(Test);
1352 return CreateIntrinsic(Intrinsic::is_fpclass, {FPNum->getType()},
1353 {FPNum, TestV});
1354}
1355
1356CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
1357 Value *PtrValue,
1358 Value *AlignValue,
1359 Value *OffsetValue) {
1360 SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
1361 if (OffsetValue)
1362 Vals.push_back(OffsetValue);
1363 OperandBundleDefT<Value *> AlignOpB("align", Vals);
1364 return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
1365}
1366
1368 Value *PtrValue,
1369 unsigned Alignment,
1370 Value *OffsetValue) {
1371 assert(isa<PointerType>(PtrValue->getType()) &&
1372 "trying to create an alignment assumption on a non-pointer?");
1373 assert(Alignment != 0 && "Invalid Alignment");
1374 auto *PtrTy = cast<PointerType>(PtrValue->getType());
1375 Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
1376 Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
1377 return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
1378}
1379
1381 Value *PtrValue,
1382 Value *Alignment,
1383 Value *OffsetValue) {
1384 assert(isa<PointerType>(PtrValue->getType()) &&
1385 "trying to create an alignment assumption on a non-pointer?");
1386 return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
1387}
1388
1390 Value *SizeValue) {
1391 assert(isa<PointerType>(PtrValue->getType()) &&
1392 "trying to create an deferenceable assumption on a non-pointer?");
1393 SmallVector<Value *, 4> Vals({PtrValue, SizeValue});
1394 OperandBundleDefT<Value *> DereferenceableOpB("dereferenceable", Vals);
1396 {DereferenceableOpB});
1397}
1398
// Deliberately empty out-of-line definition: keeping one non-inline virtual
// member here anchors ConstantFolder's vtable to this translation unit
// (LLVM's standard vtable-anchor idiom, avoids weak vtables in every user).
void ConstantFolder::anchor() {}
// Deliberately empty out-of-line definition anchoring NoFolder's vtable to
// this translation unit (same vtable-anchor idiom as the folder above).
void NoFolder::anchor() {}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
static bool isConstantOne(const Value *Val)
isConstantOne - Return true only if val is constant int 1
static InvokeInst * CreateGCStatepointInvokeCommon(IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee, BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags, ArrayRef< T0 > InvokeArgs, std::optional< ArrayRef< T1 > > TransitionArgs, std::optional< ArrayRef< T2 > > DeoptArgs, ArrayRef< T3 > GCArgs, const Twine &Name)
static CallInst * CreateGCStatepointCallCommon(IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee, uint32_t Flags, ArrayRef< T0 > CallArgs, std::optional< ArrayRef< T1 > > TransitionArgs, std::optional< ArrayRef< T2 > > DeoptArgs, ArrayRef< T3 > GCArgs, const Twine &Name)
static Value * CreateVScaleMultiple(IRBuilderBase &B, Type *Ty, uint64_t Scale)
static std::vector< OperandBundleDef > getStatepointBundles(std::optional< ArrayRef< T1 > > TransitionArgs, std::optional< ArrayRef< T2 > > DeoptArgs, ArrayRef< T3 > GCArgs)
static std::vector< Value * > getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee, uint32_t Flags, ArrayRef< T0 > CallArgs)
static SmallVector< int, 8 > getSpliceMask(int64_t Imm, unsigned NumElts)
Module.h This file contains the declarations for the Module class.
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
#define F(x, y, z)
Definition MD5.cpp:54
#define I(x, y, z)
Definition MD5.cpp:57
uint64_t IntrinsicInst * II
#define P(N)
This file contains the declarations for profiling metadata utility functions.
const SmallVectorImpl< MachineOperand > & Cond
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
static const char PassName[]
Value * RHS
Value * LHS
an instruction to allocate memory on the stack
Type * getAllocatedType() const
Return the type that is being allocated by the instruction.
LLVM_ABI bool isArrayAllocation() const
Return true if there is an allocation size parameter to the allocation instruction that is not 1.
const Value * getArraySize() const
Get the number of elements allocated.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:40
size_t size() const
size - Get the array size.
Definition ArrayRef.h:142
static LLVM_ABI Attribute get(LLVMContext &Context, AttrKind Kind, uint64_t Val=0)
Return a uniquified Attribute object.
static LLVM_ABI Attribute getWithAlignment(LLVMContext &Context, Align Alignment)
Return a uniquified Attribute object that has the specific alignment set.
LLVM Basic Block Representation.
Definition BasicBlock.h:62
void setCallingConv(CallingConv::ID CC)
void addRetAttr(Attribute::AttrKind Kind)
Adds the attribute to the return value.
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind)
Adds the attribute to the indicated argument.
This class represents a function call, abstracting a target machine's calling convention.
static CallInst * Create(FunctionType *Ty, Value *F, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
void setTailCall(bool IsTc=true)
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
static LLVM_ABI Constant * getString(LLVMContext &Context, StringRef Initializer, bool AddNull=true)
This method constructs a CDS and initializes it with a text string.
This is the shared class of boolean and integer constants.
Definition Constants.h:87
bool isOne() const
This is just a convenience method to make client code smaller for a common case.
Definition Constants.h:225
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:64
A debug info location.
Definition DebugLoc.h:123
static constexpr ElementCount getFixed(ScalarTy MinVal)
Definition TypeSize.h:309
This instruction compares its operands according to the predicate given to the constructor.
This provides a helper for copying FMF from an instruction or setting specified flags.
Definition IRBuilder.h:93
FastMathFlags get(FastMathFlags Default) const
Definition IRBuilder.h:103
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
A handy container for a FunctionType+Callee-pointer pair, which can be passed around as a single enti...
FunctionType * getFunctionType()
Type * getParamType(unsigned i) const
Parameter type accessors.
FunctionType * getFunctionType() const
Returns the FunctionType for me.
Definition Function.h:211
Type * getReturnType() const
Returns the type of the ret val.
Definition Function.h:216
static Type * getGEPReturnType(Value *Ptr, ArrayRef< Value * > IdxList)
Returns the pointer type returned by the GEP instruction, which may be a vector of pointers.
@ PrivateLinkage
Like Internal, but omit from symbol table.
Definition GlobalValue.h:61
Common base class shared among various IRBuilders.
Definition IRBuilder.h:114
Value * CreateExactSDiv(Value *LHS, Value *RHS, const Twine &Name="")
Definition IRBuilder.h:1479
ConstantInt * getInt1(bool V)
Get a constant value representing either true or false.
Definition IRBuilder.h:497
BasicBlock * BB
Definition IRBuilder.h:146
LLVM_ABI CallInst * CreateMulReduce(Value *Src)
Create a vector int mul reduction intrinsic of the source vector.
LLVM_ABI CallInst * CreateFAddReduce(Value *Acc, Value *Src)
Create a sequential vector fadd reduction intrinsic of the source vector.
LLVM_ABI Value * CreateLaunderInvariantGroup(Value *Ptr)
Create a launder.invariant.group intrinsic call.
CallInst * CreateExtractVector(Type *DstType, Value *SrcVec, Value *Idx, const Twine &Name="")
Create a call to the vector.extract intrinsic.
Definition IRBuilder.h:1096
LLVM_ABI Value * CreateSelectFMFWithUnknownProfile(Value *C, Value *True, Value *False, FMFSource FMFSource, StringRef PassName, const Twine &Name="")
LLVM_ABI CallInst * CreateConstrainedFPUnroundedBinOp(Intrinsic::ID ID, Value *L, Value *R, FMFSource FMFSource={}, const Twine &Name="", MDNode *FPMathTag=nullptr, std::optional< fp::ExceptionBehavior > Except=std::nullopt)
Value * CreateInsertElement(Type *VecTy, Value *NewElt, Value *Idx, const Twine &Name="")
Definition IRBuilder.h:2561
LLVM_ABI CallInst * CreateThreadLocalAddress(Value *Ptr)
Create a call to llvm.threadlocal.address intrinsic.
IntegerType * getInt1Ty()
Fetch the type representing a single bit.
Definition IRBuilder.h:546
LLVM_ABI CallInst * CreateMaskedCompressStore(Value *Val, Value *Ptr, MaybeAlign Align, Value *Mask=nullptr)
Create a call to Masked Compress Store intrinsic.
Value * CreateInsertValue(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition IRBuilder.h:2615
LLVM_ABI Value * CreateAllocationSize(Type *DestTy, AllocaInst *AI)
Get allocation size of an alloca as a runtime Value* (handles both static and dynamic allocas and vsc...
LLVM_ABI Type * getCurrentFunctionReturnType() const
Get the return type of the current function that we're emitting into.
Definition IRBuilder.cpp:59
LLVM_ABI CallInst * CreateGCGetPointerBase(Value *DerivedPtr, const Twine &Name="")
Create a call to the experimental.gc.pointer.base intrinsic to get the base pointer for the specified...
LLVM_ABI CallInst * CreateLifetimeStart(Value *Ptr)
Create a lifetime.start intrinsic.
CallInst * CreateInsertVector(Type *DstType, Value *SrcVec, Value *SubVec, Value *Idx, const Twine &Name="")
Create a call to the vector.insert intrinsic.
Definition IRBuilder.h:1110
LLVM_ABI CallInst * CreateGCStatepointCall(uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee, ArrayRef< Value * > CallArgs, std::optional< ArrayRef< Value * > > DeoptArgs, ArrayRef< Value * > GCArgs, const Twine &Name="")
Create a call to the experimental.gc.statepoint intrinsic to start a new statepoint sequence.
LLVM_ABI Value * CreateVectorSpliceRight(Value *V1, Value *V2, Value *Offset, const Twine &Name="")
Create a vector.splice.right intrinsic call, or a shufflevector that produces the same result if the ...
LLVM_ABI CallInst * CreateLifetimeEnd(Value *Ptr)
Create a lifetime.end intrinsic.
Value * CreateZExtOrTrunc(Value *V, Type *DestTy, const Twine &Name="")
Create a ZExt or Trunc from the integer value V to DestTy.
Definition IRBuilder.h:2072
LLVM_ABI CallInst * CreateConstrainedFPCmp(Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R, const Twine &Name="", std::optional< fp::ExceptionBehavior > Except=std::nullopt)
LLVM_ABI Value * CreateSelectFMF(Value *C, Value *True, Value *False, FMFSource FMFSource, const Twine &Name="", Instruction *MDFrom=nullptr)
LLVM_ABI CallInst * CreateAndReduce(Value *Src)
Create a vector int AND reduction intrinsic of the source vector.
LLVM_ABI CallInst * CreateAssumption(Value *Cond, ArrayRef< OperandBundleDef > OpBundles={})
Create an assume intrinsic call that allows the optimizer to assume that the provided condition will ...
Value * CreatePtrToAddr(Value *V, const Twine &Name="")
Definition IRBuilder.h:2162
LLVM_ABI Value * CreateVectorSplat(unsigned NumElts, Value *V, const Twine &Name="")
Return a vector value that contains.
Value * CreateExtractValue(Value *Agg, ArrayRef< unsigned > Idxs, const Twine &Name="")
Definition IRBuilder.h:2608
LLVM_ABI Value * CreatePreserveStructAccessIndex(Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex, MDNode *DbgInfo)
LLVM_ABI CallInst * CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue, unsigned Alignment, Value *OffsetValue=nullptr)
Create an assume intrinsic call that represents an alignment assumption on the provided pointer.
LLVM_ABI CallInst * CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment, Value *Mask, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Load intrinsic.
LLVM_ABI CallInst * CreateConstrainedFPCall(Function *Callee, ArrayRef< Value * > Args, const Twine &Name="", std::optional< RoundingMode > Rounding=std::nullopt, std::optional< fp::ExceptionBehavior > Except=std::nullopt)
LLVMContext & Context
Definition IRBuilder.h:148
LLVM_ABI Value * CreateSelect(Value *C, Value *True, Value *False, const Twine &Name="", Instruction *MDFrom=nullptr)
LLVM_ABI CallInst * CreateGCGetPointerOffset(Value *DerivedPtr, const Twine &Name="")
Create a call to the experimental.gc.get.pointer.offset intrinsic to get the offset of the specified ...
Value * CreateIntToPtr(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2171
LLVM_ABI CallInst * CreateAddReduce(Value *Src)
Create a vector int add reduction intrinsic of the source vector.
LLVM_ABI CallInst * CreateConstrainedFPBinOp(Intrinsic::ID ID, Value *L, Value *R, FMFSource FMFSource={}, const Twine &Name="", MDNode *FPMathTag=nullptr, std::optional< RoundingMode > Rounding=std::nullopt, std::optional< fp::ExceptionBehavior > Except=std::nullopt)
IntegerType * getIntPtrTy(const DataLayout &DL, unsigned AddrSpace=0)
Fetch the type of an integer with size at least as big as that of a pointer in the given address spac...
Definition IRBuilder.h:610
LLVM_ABI Value * CreateAggregateCast(Value *V, Type *DestTy)
Cast between aggregate types that must have identical structure but may differ in their leaf types.
Definition IRBuilder.cpp:72
IntegerType * getInt64Ty()
Fetch the type representing a 64-bit integer.
Definition IRBuilder.h:566
LLVM_ABI CallInst * CreateElementUnorderedAtomicMemMove(Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, uint32_t ElementSize, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert an element unordered-atomic memmove between the specified pointers.
LLVM_ABI Value * CreateVectorReverse(Value *V, const Twine &Name="")
Return a vector value that contains the vector V reversed.
LLVM_ABI CallInst * CreateXorReduce(Value *Src)
Create a vector int XOR reduction intrinsic of the source vector.
FastMathFlags FMF
Definition IRBuilder.h:153
LLVM_ABI Value * CreateBitPreservingCastChain(const DataLayout &DL, Value *V, Type *NewTy)
Create a chain of casts to convert V to NewTy, preserving the bit pattern of V.
ConstantInt * getInt64(uint64_t C)
Get a constant 64-bit value.
Definition IRBuilder.h:527
LLVM_ABI Value * CreateVectorSpliceLeft(Value *V1, Value *V2, Value *Offset, const Twine &Name="")
Create a vector.splice.left intrinsic call, or a shufflevector that produces the same result if the r...
Value * getAllOnesMask(ElementCount NumElts)
Return an all true boolean vector (mask) with NumElts lanes.
Definition IRBuilder.h:861
Value * CreateUnOp(Instruction::UnaryOps Opc, Value *V, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:1816
LLVM_ABI CallInst * CreateOrReduce(Value *Src)
Create a vector int OR reduction intrinsic of the source vector.
LLVM_ABI CallInst * CreateMalloc(Type *IntPtrTy, Type *AllocTy, Value *AllocSize, Value *ArraySize, ArrayRef< OperandBundleDef > OpB, Function *MallocF=nullptr, const Twine &Name="")
LLVM_ABI CallInst * CreateFPMinReduce(Value *Src)
Create a vector float min reduction intrinsic of the source vector.
LLVM_ABI CallInst * CreateFPMaximumReduce(Value *Src)
Create a vector float maximum reduction intrinsic of the source vector.
LLVM_ABI Value * CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS, Value *RHS, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 2 operands which is mangled on the first type.
LLVM_ABI Value * createIsFPClass(Value *FPNum, unsigned Test)
LLVM_ABI CallInst * CreateIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with Args, mangled using Types.
LLVM_ABI CallInst * CreateFPMaxReduce(Value *Src)
Create a vector float max reduction intrinsic of the source vector.
ConstantInt * getInt32(uint32_t C)
Get a constant 32-bit value.
Definition IRBuilder.h:522
LLVM_ABI CallInst * CreateFree(Value *Source, ArrayRef< OperandBundleDef > Bundles={})
Generate the IR for a call to the builtin free function.
Value * CreateBitOrPointerCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2258
InstTy * Insert(InstTy *I, const Twine &Name="") const
Insert and return the specified instruction.
Definition IRBuilder.h:172
LLVM_ABI DebugLoc getCurrentDebugLocation() const
Get location information used by debugging information.
Definition IRBuilder.cpp:64
Value * CreateSub(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1423
Value * CreateBitCast(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2176
LLVM_ABI CallInst * CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V, FMFSource FMFSource={}, const Twine &Name="")
Create a call to intrinsic ID with 1 operand which is mangled on its type.
CallInst * CreateElementUnorderedAtomicMemSet(Value *Ptr, Value *Val, uint64_t Size, Align Alignment, uint32_t ElementSize, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert an element unordered-atomic memset of the region of memory starting at the given po...
Definition IRBuilder.h:650
CallInst * CreateMemSet(Value *Ptr, Value *Val, uint64_t Size, MaybeAlign Align, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert a memset to the specified pointer and the specified value.
Definition IRBuilder.h:629
LLVM_ABI Value * CreateNAryOp(unsigned Opc, ArrayRef< Value * > Ops, const Twine &Name="", MDNode *FPMathTag=nullptr)
Create either a UnaryOperator or BinaryOperator depending on Opc.
LLVM_ABI CallInst * CreateConstrainedFPIntrinsic(Intrinsic::ID ID, ArrayRef< Type * > Types, ArrayRef< Value * > Args, FMFSource FMFSource, const Twine &Name, MDNode *FPMathTag=nullptr, std::optional< RoundingMode > Rounding=std::nullopt, std::optional< fp::ExceptionBehavior > Except=std::nullopt)
This function is like CreateIntrinsic for constrained fp intrinsics.
Value * CreateShuffleVector(Value *V1, Value *V2, Value *Mask, const Twine &Name="")
Definition IRBuilder.h:2583
LLVMContext & getContext() const
Definition IRBuilder.h:203
LLVM_ABI Value * CreatePreserveUnionAccessIndex(Value *Base, unsigned FieldIndex, MDNode *DbgInfo)
LLVM_ABI CallInst * CreateIntMaxReduce(Value *Src, bool IsSigned=false)
Create a vector integer max reduction intrinsic of the source vector.
LLVM_ABI Value * CreateSelectWithUnknownProfile(Value *C, Value *True, Value *False, StringRef PassName, const Twine &Name="")
LLVM_ABI CallInst * CreateMaskedStore(Value *Val, Value *Ptr, Align Alignment, Value *Mask)
Create a call to Masked Store intrinsic.
Value * CreatePtrToInt(Value *V, Type *DestTy, const Twine &Name="")
Definition IRBuilder.h:2166
CallInst * CreateCall(FunctionType *FTy, Value *Callee, ArrayRef< Value * > Args={}, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:2487
LLVM_ABI CallInst * CreateGCResult(Instruction *Statepoint, Type *ResultType, const Twine &Name="")
Create a call to the experimental.gc.result intrinsic to extract the result from a call wrapped in a ...
Value * CreateTrunc(Value *V, Type *DestTy, const Twine &Name="", bool IsNUW=false, bool IsNSW=false)
Definition IRBuilder.h:2040
Value * CreateBinOp(Instruction::BinaryOps Opc, Value *LHS, Value *RHS, const Twine &Name="", MDNode *FPMathTag=nullptr)
Definition IRBuilder.h:1711
LLVM_ABI Value * CreatePtrDiff(Value *LHS, Value *RHS, const Twine &Name="")
Return the difference between two pointer values.
LLVM_ABI Value * CreateTypeSize(Type *Ty, TypeSize Size)
Create an expression which evaluates to the number of units in Size at runtime.
LLVM_ABI CallInst * CreateDereferenceableAssumption(Value *PtrValue, Value *SizeValue)
Create an assume intrinsic call that represents an dereferencable assumption on the provided pointer.
Value * CreateIntCast(Value *V, Type *DestTy, bool isSigned, const Twine &Name="")
Definition IRBuilder.h:2249
LLVM_ABI CallInst * CreateIntMinReduce(Value *Src, bool IsSigned=false)
Create a vector integer min reduction intrinsic of the source vector.
LLVM_ABI CallInst * CreateElementUnorderedAtomicMemCpy(Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size, uint32_t ElementSize, const AAMDNodes &AAInfo=AAMDNodes())
Create and insert an element unordered-atomic memcpy between the specified pointers.
void setConstrainedFPCallAttr(CallBase *I)
Definition IRBuilder.h:395
LLVM_ABI InvokeInst * CreateGCStatepointInvoke(uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee, BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef< Value * > InvokeArgs, std::optional< ArrayRef< Value * > > DeoptArgs, ArrayRef< Value * > GCArgs, const Twine &Name="")
Create an invoke to the experimental.gc.statepoint intrinsic to start a new statepoint sequence.
LLVM_ABI CallInst * CreateMaskedExpandLoad(Type *Ty, Value *Ptr, MaybeAlign Align, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Expand Load intrinsic.
const IRBuilderFolder & Folder
Definition IRBuilder.h:149
LLVM_ABI CallInst * CreateMemTransferInst(Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign, Value *Size, bool isVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI Value * CreateVectorInterleave(ArrayRef< Value * > Ops, const Twine &Name="")
LLVM_ABI CallInst * CreateFMulReduce(Value *Acc, Value *Src)
Create a sequential vector fmul reduction intrinsic of the source vector.
LLVM_ABI CallInst * CreateMemSetInline(Value *Dst, MaybeAlign DstAlign, Value *Val, Value *Size, bool IsVolatile=false, const AAMDNodes &AAInfo=AAMDNodes())
LLVM_ABI void SetInstDebugLocation(Instruction *I) const
If this builder has a current debug location, set it on the specified instruction.
Definition IRBuilder.cpp:65
IntegerType * getInt8Ty()
Fetch the type representing an 8-bit integer.
Definition IRBuilder.h:551
LLVM_ABI CallInst * CreateGCRelocate(Instruction *Statepoint, int BaseOffset, int DerivedOffset, Type *ResultType, const Twine &Name="")
Create a call to the experimental.gc.relocate intrinsics to project the relocated value of one pointe...
LLVM_ABI Value * CreateStepVector(Type *DstType, const Twine &Name="")
Creates a vector of type DstType with the linear sequence <0, 1, ...>
LLVM_ABI Value * CreatePreserveArrayAccessIndex(Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex, MDNode *DbgInfo)
LLVM_ABI CallInst * CreateInvariantStart(Value *Ptr, ConstantInt *Size=nullptr)
Create a call to invariant.start intrinsic.
Value * CreateMul(Value *LHS, Value *RHS, const Twine &Name="", bool HasNUW=false, bool HasNSW=false)
Definition IRBuilder.h:1440
LLVM_ABI Instruction * CreateNoAliasScopeDeclaration(Value *Scope)
Create a llvm.experimental.noalias.scope.decl intrinsic call.
LLVM_ABI CallInst * CreateMaskedScatter(Value *Val, Value *Ptrs, Align Alignment, Value *Mask=nullptr)
Create a call to Masked Scatter intrinsic.
LLVM_ABI GlobalVariable * CreateGlobalString(StringRef Str, const Twine &Name="", unsigned AddressSpace=0, Module *M=nullptr, bool AddNull=true)
Make a new global variable with initializer type i8*.
Definition IRBuilder.cpp:44
LLVM_ABI Value * CreateElementCount(Type *Ty, ElementCount EC)
Create an expression which evaluates to the number of elements in EC at runtime.
LLVM_ABI CallInst * CreateFPMinimumReduce(Value *Src)
Create a vector float minimum reduction intrinsic of the source vector.
LLVM_ABI CallInst * CreateConstrainedFPCast(Intrinsic::ID ID, Value *V, Type *DestTy, FMFSource FMFSource={}, const Twine &Name="", MDNode *FPMathTag=nullptr, std::optional< RoundingMode > Rounding=std::nullopt, std::optional< fp::ExceptionBehavior > Except=std::nullopt)
LLVM_ABI Value * CreateStripInvariantGroup(Value *Ptr)
Create a strip.invariant.group intrinsic call.
LLVM_ABI CallInst * CreateMaskedGather(Type *Ty, Value *Ptrs, Align Alignment, Value *Mask=nullptr, Value *PassThru=nullptr, const Twine &Name="")
Create a call to Masked Gather intrinsic.
virtual Value * FoldCmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const =0
virtual ~IRBuilderFolder()
LLVM_ABI void setAAMetadata(const AAMDNodes &N)
Sets the AA metadata on this instruction from the AAMDNodes structure.
bool isBinaryOp() const
LLVM_ABI void setFastMathFlags(FastMathFlags FMF)
Convenience function for setting multiple fast-math flags on this instruction, which must be an opera...
MDNode * getMetadata(unsigned KindID) const
Get the metadata of given kind attached to this Instruction.
LLVM_ABI void setMetadata(unsigned KindID, MDNode *Node)
Set the metadata of the specified kind to the specified node.
bool isUnaryOp() const
Invoke instruction.
Metadata node.
Definition Metadata.h:1080
A Module instance is used to store all the information related to an LLVM module.
Definition Module.h:67
A container for an operand bundle being viewed as a set of values rather than a set of uses.
static PointerType * getUnqual(Type *ElementType)
This constructs a pointer to an object of the specified type in the default address space (address sp...
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return an 'poison' object of the specified type.
This class represents the LLVM 'select' instruction.
static SelectInst * Create(Value *C, Value *S1, Value *S2, const Twine &NameStr="", InsertPosition InsertBefore=nullptr, const Instruction *MDFrom=nullptr)
void reserve(size_type N)
void resize(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
Definition Twine.h:82
static constexpr TypeSize getFixed(ScalarTy ExactSize)
Definition TypeSize.h:343
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
LLVM_ABI Type * getStructElementType(unsigned N) const
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
bool isArrayTy() const
True if this is an instance of ArrayType.
Definition Type.h:264
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:296
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
Type * getArrayElementType() const
Definition Type.h:408
LLVM_ABI unsigned getStructNumElements() const
LLVM_ABI unsigned getPointerAddressSpace() const
Get the address space of this pointer or pointer vector type.
LLVM_ABI uint64_t getArrayNumElements() const
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:280
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Definition Type.h:352
bool isStructTy() const
True if this is an instance of StructType.
Definition Type.h:261
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:230
bool isPtrOrPtrVectorTy() const
Return true if this is a pointer type or a vector of pointer types.
Definition Type.h:270
bool isVoidTy() const
Return true if this is 'void'.
Definition Type.h:139
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVMContext & getContext() const
All values hold a context through their type.
Definition Value.h:259
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
static VectorType * getWithSizeAndScalar(VectorType *SizeTy, Type *EltTy)
This static method attempts to construct a VectorType with the same size-in-bits as SizeTy but with a...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
LLVM_ABI Function * getOrInsertDeclaration(Module *M, ID id, ArrayRef< Type * > Tys={})
Look up the Function declaration of the intrinsic id in the Module M.
LLVM_ABI bool hasConstrainedFPRoundingModeOperand(ID QID)
Returns true if the intrinsic ID is for one of the "ConstrainedFloating-Point Intrinsics" that take r...
LLVM_ABI Intrinsic::ID getInterleaveIntrinsicID(unsigned Factor)
Returns the corresponding llvm.vector.interleaveN intrinsic for factor N.
This is an optimization pass for GlobalISel generic memory operations.
Definition Types.h:26
@ Offset
Definition DWP.cpp:532
MaybeAlign getAlign(const CallInst &I, unsigned Index)
LLVM_ABI void setExplicitlyUnknownBranchWeightsIfProfiled(Instruction &I, StringRef PassName, const Function *F=nullptr)
Like setExplicitlyUnknownBranchWeights(...), but only sets unknown branch weights in the new instruct...
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
Definition STLExtras.h:2208
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
Definition Casting.h:547
FunctionAddr VTableAddr uintptr_t uintptr_t Data
Definition InstrProf.h:189
ArrayRef(const T &OneElt) -> ArrayRef< T >
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
A collection of metadata nodes that might be associated with a memory access used by the alias-analys...
Definition Metadata.h:763
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
This struct is a compact representation of a valid (power of two) or undefined (0) alignment.
Definition Alignment.h:106