GV->setAlignment(Align(1));
auto *PT = cast<PointerType>(Ptr->getType());
if (PT->isOpaqueOrPointeeTypeMatches(getInt8Ty()))
for (auto &KV : MetadataToCopy)
  if (KV.first == LLVMContext::MD_dbg)
    return {cast<DILocation>(KV.second)};
for (const auto &KV : MetadataToCopy)
  if (KV.first == LLVMContext::MD_dbg) {
assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
if (cast<ConstantInt>(Scaling)->isZero())
return cast<ConstantInt>(Scaling)->isOne() ? CI : CreateMul(CI, Scaling);
if (isa<ScalableVectorType>(DstType)) {
  Type *StepVecType = DstType;
      {StepVecType}, {}, nullptr, Name);
  if (StepVecType != DstType)
unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();
for (unsigned i = 0; i < NumEls; ++i)
Ptr = getCastedInt8PtrValue(Ptr);
cast<MemSetInst>(CI)->setDestAlignment(*Align);
CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
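
// --- Illustrative usage sketch (not from IRBuilder.cpp) ---
// A minimal caller of the CreateMemSet helper excerpted above, assuming
// "llvm/IR/IRBuilder.h" is included and the builder is already positioned in a
// basic block; the value name "Buf" is hypothetical.
static void emitZeroFill(llvm::IRBuilder<> &Builder, llvm::Value *Buf) {
  // Equivalent of memset(Buf, 0, 64) with a known 16-byte destination alignment;
  // this becomes a call to the llvm.memset intrinsic.
  Builder.CreateMemSet(Buf, Builder.getInt8(0), /*Size=*/64,
                       llvm::MaybeAlign(16), /*isVolatile=*/false);
}
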
    bool IsVolatile, MDNode *TBAATag,
Dst = getCastedInt8PtrValue(Dst);
Type *Tys[] = {Dst->getType(), Size->getType()};
cast<MemSetInlineInst>(CI)->setDestAlignment(*DstAlign);
CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
Ptr = getCastedInt8PtrValue(Ptr);
    M, Intrinsic::memset_element_unordered_atomic, Tys);
cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);
CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
Dst = getCastedInt8PtrValue(Dst);
Src = getCastedInt8PtrValue(Src);
Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
auto *MCI = cast<MemTransferInst>(CI);
MCI->setDestAlignment(*DstAlign);
MCI->setSourceAlignment(*SrcAlign);
CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
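
// --- Illustrative usage sketch (not from IRBuilder.cpp) ---
// The memcpy creation path above is normally reached through wrappers such as
// CreateMemCpy. A hedged example, assuming Dst and Src are pointer Values valid
// at the insertion point:
static void emitCopy(llvm::IRBuilder<> &Builder, llvm::Value *Dst,
                     llvm::Value *Src) {
  // Copy 128 bytes with 8-byte alignment on both operands; lowers to the
  // llvm.memcpy intrinsic with align attributes on the pointer arguments.
  Builder.CreateMemCpy(Dst, llvm::MaybeAlign(8), Src, llvm::MaybeAlign(8),
                       /*Size=*/128);
}
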
Dst = getCastedInt8PtrValue(Dst);
Src = getCastedInt8PtrValue(Src);
Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
auto *MCI = cast<MemCpyInlineInst>(CI);
MCI->setDestAlignment(*DstAlign);
MCI->setSourceAlignment(*SrcAlign);
MCI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
MCI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
MCI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
MCI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
assert(DstAlign >= ElementSize &&
       "Pointer alignment must be at least element size");
assert(SrcAlign >= ElementSize &&
       "Pointer alignment must be at least element size");
Dst = getCastedInt8PtrValue(Dst);
Src = getCastedInt8PtrValue(Src);
Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
    M, Intrinsic::memcpy_element_unordered_atomic, Tys);
auto *AMCI = cast<AtomicMemCpyInst>(CI);
AMCI->setDestAlignment(DstAlign);
AMCI->setSourceAlignment(SrcAlign);
CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
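
// --- Illustrative usage sketch (not from IRBuilder.cpp) ---
// The element unordered-atomic memcpy above requires both pointer alignments to
// be at least the element size, as the asserts state. A hedged example with
// 4-byte elements; Dst and Src are hypothetical pointer Values:
static void emitAtomicCopy(llvm::IRBuilder<> &Builder, llvm::Value *Dst,
                           llvm::Value *Src) {
  Builder.CreateElementUnorderedAtomicMemCpy(
      Dst, llvm::Align(4), Src, llvm::Align(4),
      /*Size=*/Builder.getInt64(256), /*ElementSize=*/4);
}
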
Dst = getCastedInt8PtrValue(Dst);
Src = getCastedInt8PtrValue(Src);
Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
auto *MMI = cast<MemMoveInst>(CI);
MMI->setDestAlignment(*DstAlign);
MMI->setSourceAlignment(*SrcAlign);
CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
assert(DstAlign >= ElementSize &&
       "Pointer alignment must be at least element size");
assert(SrcAlign >= ElementSize &&
       "Pointer alignment must be at least element size");
Dst = getCastedInt8PtrValue(Dst);
Src = getCastedInt8PtrValue(Src);
Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
    M, Intrinsic::memmove_element_unordered_atomic, Tys);
CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
Value *Ops[] = {Src};
Type *Tys[] = {Src->getType()};
Value *Ops[] = {Acc, Src};
Value *Ops[] = {Acc, Src};
return getReductionIntrinsic(Intrinsic::vector_reduce_add, Src);
return getReductionIntrinsic(Intrinsic::vector_reduce_mul, Src);
return getReductionIntrinsic(Intrinsic::vector_reduce_and, Src);
return getReductionIntrinsic(Intrinsic::vector_reduce_or, Src);
return getReductionIntrinsic(Intrinsic::vector_reduce_xor, Src);
    IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
return getReductionIntrinsic(ID, Src);
    IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
return getReductionIntrinsic(ID, Src);
return getReductionIntrinsic(Intrinsic::vector_reduce_fmax, Src);
return getReductionIntrinsic(Intrinsic::vector_reduce_fmin, Src);
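
// --- Illustrative usage sketch (not from IRBuilder.cpp) ---
// The wrappers above map directly onto the llvm.vector.reduce.* intrinsics. A
// hedged example reducing hypothetical <4 x i32> and <4 x float> values:
static void emitReductions(llvm::IRBuilder<> &Builder, llvm::Value *IntVec,
                           llvm::Value *FloatVec) {
  llvm::Value *Sum = Builder.CreateAddReduce(IntVec);   // llvm.vector.reduce.add
  llvm::Value *Max = Builder.CreateIntMaxReduce(IntVec, /*IsSigned=*/true);
  // The sequential fadd reduction takes an explicit accumulator start value.
  llvm::Value *FSum = Builder.CreateFAddReduce(
      llvm::ConstantFP::get(Builder.getFloatTy(), 0.0), FloatVec);
  (void)Sum; (void)Max; (void)FSum;
}
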
assert(isa<PointerType>(Ptr->getType()) &&
       "lifetime.start only applies to pointers.");
Ptr = getCastedInt8PtrValue(Ptr);
       "lifetime.start requires the size to be an i64");
assert(isa<PointerType>(Ptr->getType()) &&
       "lifetime.end only applies to pointers.");
Ptr = getCastedInt8PtrValue(Ptr);
       "lifetime.end requires the size to be an i64");
assert(isa<PointerType>(Ptr->getType()) &&
       "invariant.start only applies to pointers.");
Ptr = getCastedInt8PtrValue(Ptr);
       "invariant.start requires the size to be an i64");
Type *ObjectPtr[1] = {Ptr->getType()};
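
// --- Illustrative usage sketch (not from IRBuilder.cpp) ---
// Lifetime markers as produced by the helpers above; the size operand, when
// given, must be an i64 ConstantInt. The alloca and the 16-byte size here are
// hypothetical:
static void emitLifetime(llvm::IRBuilder<> &Builder, llvm::AllocaInst *Slot) {
  Builder.CreateLifetimeStart(Slot, Builder.getInt64(16));
  // ... loads/stores that use Slot would be emitted here ...
  Builder.CreateLifetimeEnd(Slot, Builder.getInt64(16));
}
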
if (auto *O = dyn_cast<GlobalObject>(Ptr))
  return O->getAlign();
if (auto *A = dyn_cast<GlobalAlias>(Ptr))
  return A->getAliaseeObject()->getAlign();
if (auto *CE = dyn_cast<ConstantExpr>(V))
    V = CE->getOperand(0);
assert(isa<GlobalValue>(V) && cast<GlobalValue>(V)->isThreadLocal() &&
       "threadlocal_address only applies to thread local variables.");
       "an assumption condition must be of type i1");
    M, Intrinsic::experimental_noalias_scope_decl, {});
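
// --- Illustrative usage sketch (not from IRBuilder.cpp) ---
// The assertion fragment above belongs to CreateAssumption, which requires an i1
// condition. A hedged example that records a non-null fact about a pointer:
static void emitNonNullAssume(llvm::IRBuilder<> &Builder, llvm::Value *Ptr) {
  llvm::Value *NonNull = Builder.CreateIsNotNull(Ptr); // icmp ne Ptr, null
  Builder.CreateAssumption(NonNull);                   // call void @llvm.assume(i1 ...)
}
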
auto *PtrTy = cast<PointerType>(Ptr->getType());
assert(PtrTy->isOpaqueOrPointeeTypeMatches(Ty) && "Wrong element type");
assert(Mask && "Mask should not be all-ones (null)");
Type *OverloadedTypes[] = {Ty, PtrTy};
return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                             OverloadedTypes, Name);
auto *PtrTy = cast<PointerType>(Ptr->getType());
assert(PtrTy->isOpaqueOrPointeeTypeMatches(DataTy) && "Wrong element type");
assert(Mask && "Mask should not be all-ones (null)");
Type *OverloadedTypes[] = {DataTy, PtrTy};
return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
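
// --- Illustrative usage sketch (not from IRBuilder.cpp) ---
// Masked load/store as created by the helpers above; the <4 x float> type, the
// mask, and the pointers are hypothetical. The mask must be a <4 x i1> value.
static void emitMaskedCopy(llvm::IRBuilder<> &Builder, llvm::Value *SrcPtr,
                           llvm::Value *DstPtr, llvm::Value *Mask) {
  auto *VecTy = llvm::FixedVectorType::get(Builder.getFloatTy(), 4);
  llvm::Value *PassThru = llvm::Constant::getNullValue(VecTy);
  llvm::Value *Ld = Builder.CreateMaskedLoad(VecTy, SrcPtr, llvm::Align(16),
                                             Mask, PassThru);
  Builder.CreateMaskedStore(Ld, DstPtr, llvm::Align(16), Mask);
}
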
auto *VecTy = cast<VectorType>(Ty);
auto *PtrsTy = cast<VectorType>(Ptrs->getType());
assert(cast<PointerType>(PtrsTy->getElementType())
           ->isOpaqueOrPointeeTypeMatches(
               cast<VectorType>(Ty)->getElementType()) &&
       "Element type mismatch");
assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");
Type *OverloadedTypes[] = {Ty, PtrsTy};
return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
auto *PtrsTy = cast<VectorType>(Ptrs->getType());
auto *DataTy = cast<VectorType>(Data->getType());
auto *PtrTy = cast<PointerType>(PtrsTy->getElementType());
assert(NumElts == DataTy->getElementCount() &&
       PtrTy->isOpaqueOrPointeeTypeMatches(DataTy->getElementType()) &&
       "Incompatible pointer and data types");
Type *OverloadedTypes[] = {DataTy, PtrsTy};
return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
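
// --- Illustrative usage sketch (not from IRBuilder.cpp) ---
// Gather/scatter take a vector of pointers rather than a single base pointer. A
// hedged example; PtrVec is a hypothetical <4 x ptr> value and Mask a <4 x i1>:
static void emitGatherScatter(llvm::IRBuilder<> &Builder, llvm::Value *PtrVec,
                              llvm::Value *Mask) {
  auto *VecTy = llvm::FixedVectorType::get(Builder.getInt32Ty(), 4);
  llvm::Value *Gathered = Builder.CreateMaskedGather(
      VecTy, PtrVec, llvm::Align(4), Mask, llvm::Constant::getNullValue(VecTy));
  Builder.CreateMaskedScatter(Gathered, PtrVec, llvm::Align(4), Mask);
}
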
auto *PtrTy = cast<PointerType>(Ptr->getType());
assert(PtrTy->isOpaqueOrPointeeTypeMatches(
           cast<FixedVectorType>(Ty)->getElementType()) &&
       "Wrong element type");
assert(Mask && "Mask should not be all-ones (null)");
Type *OverloadedTypes[] = {Ty};
Value *Ops[] = {Ptr, Mask, PassThru};
return CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
                             OverloadedTypes, Name);
auto *PtrTy = cast<PointerType>(Ptr->getType());
assert(PtrTy->isOpaqueOrPointeeTypeMatches(
           cast<FixedVectorType>(DataTy)->getElementType()) &&
       "Wrong element type");
assert(Mask && "Mask should not be all-ones (null)");
Type *OverloadedTypes[] = {DataTy};
return CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
template <typename T0>
static std::vector<Value *>
std::vector<Value *> Args;
Args.push_back(B.getInt64(ID));
Args.push_back(B.getInt32(NumPatchBytes));
Args.push_back(ActualCallee);
Args.push_back(B.getInt32(CallArgs.size()));
Args.push_back(B.getInt32(Flags));
Args.push_back(B.getInt32(0));
Args.push_back(B.getInt32(0));
template <typename T1, typename T2, typename T3>
static std::vector<OperandBundleDef>
std::vector<OperandBundleDef> Rval;
Rval.emplace_back("deopt", DeoptValues);
if (TransitionArgs) {
  Rval.emplace_back("gc-transition", TransitionValues);
Rval.emplace_back("gc-live", LiveValues);
template <typename T0, typename T1, typename T2, typename T3>
Module *M = Builder->GetInsertBlock()->getParent()->getParent();
return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
    CallArgs, std::nullopt, DeoptArgs, GCArgs, Name);
return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
    this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
    DeoptArgs, GCArgs, Name);
return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
    CallArgs, std::nullopt, DeoptArgs, GCArgs, Name);
template <typename T0, typename T1, typename T2, typename T3>
Module *M = Builder->GetInsertBlock()->getParent()->getParent();
std::vector<Value *> Args =
    FnStatepoint, NormalDest, UnwindDest, Args,
return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
    this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
    std::nullopt, DeoptArgs, GCArgs, Name);
return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
    this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
    InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
    this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
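
// --- Illustrative usage sketch (not from IRBuilder.cpp) ---
// A hedged example of wrapping a call in a gc.statepoint and reading its result
// back through gc.result; it assumes the callee returns a value. The ID value
// 2882400000 is the conventional default statepoint ID; Callee, Arg and GCPtr
// are hypothetical:
static llvm::Value *emitStatepoint(llvm::IRBuilder<> &Builder,
                                   llvm::FunctionCallee Callee,
                                   llvm::Value *Arg, llvm::Value *GCPtr) {
  llvm::CallInst *SP = Builder.CreateGCStatepointCall(
      /*ID=*/2882400000, /*NumPatchBytes=*/0, Callee,
      /*CallArgs=*/{Arg}, /*DeoptArgs=*/std::nullopt, /*GCArgs=*/{GCPtr});
  return Builder.CreateGCResult(SP, Callee.getFunctionType()->getReturnType());
}
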
Type *Types[] = {ResultType};
Value *Args[] = {Statepoint};
    int BaseOffset, int DerivedOffset,
Type *Types[] = {ResultType};
    M, Intrinsic::experimental_gc_get_pointer_base, {PtrTy, PtrTy});
    M, Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
return createCallHelper(Fn, {V}, Name, FMFSource);
return createCallHelper(Fn, {LHS, RHS}, Name, FMFSource);
return createCallHelper(Fn, Args, Name, FMFSource);
    matchIntrinsicSignature(FTy, TableRef, OverloadTys);
       "Wrong types for intrinsic!");
return createCallHelper(Fn, Args, Name, FMFSource);
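
// --- Illustrative usage sketch (not from IRBuilder.cpp) ---
// The createCallHelper paths above are reached through CreateUnaryIntrinsic,
// CreateBinaryIntrinsic and CreateIntrinsic. A hedged example on hypothetical
// operands: F1/F2 are floating-point values and N is an integer value.
static void emitIntrinsics(llvm::IRBuilder<> &Builder, llvm::Value *F1,
                           llvm::Value *F2, llvm::Value *N) {
  // llvm.maxnum, mangled on the type of the first operand.
  llvm::Value *Max = Builder.CreateBinaryIntrinsic(llvm::Intrinsic::maxnum, F1, F2);
  // llvm.ctlz takes an extra i1 flag; the overload type is supplied explicitly.
  llvm::Value *Lz = Builder.CreateIntrinsic(llvm::Intrinsic::ctlz, {N->getType()},
                                            {N, Builder.getFalse()});
  (void)Max; (void)Lz;
}
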
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
Value *RoundingV = getConstrainedFPRounding(Rounding);
Value *ExceptV = getConstrainedFPExcept(Except);
    {L, R, RoundingV, ExceptV}, nullptr, Name);
setFPAttrs(C, FPMathTag, UseFMF);
assert(Ops.size() == 2 && "Invalid number of operands!");
    Ops[0], Ops[1], Name, FPMathTag);
assert(Ops.size() == 1 && "Invalid number of operands!");
    Ops[0], Name, FPMathTag);
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
Value *ExceptV = getConstrainedFPExcept(Except);
bool HasRoundingMD = false;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
  case Intrinsic::INTRINSIC: \
    HasRoundingMD = ROUND_MODE; \
#include "llvm/IR/ConstrainedOps.def"
if (HasRoundingMD) {
  Value *RoundingV = getConstrainedFPRounding(Rounding);
if (isa<FPMathOperator>(C))
  setFPAttrs(C, FPMathTag, UseFMF);
Value *IRBuilderBase::CreateFCmpHelper(
    MDNode *FPMathTag, bool IsSignaling) {
auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
                      : Intrinsic::experimental_constrained_fcmp;
if (auto *LC = dyn_cast<Constant>(LHS))
  if (auto *RC = dyn_cast<Constant>(RHS))
    const Twine &Name, std::optional<fp::ExceptionBehavior> Except) {
Value *PredicateV = getConstrainedFPPredicate(P);
Value *ExceptV = getConstrainedFPExcept(Except);
    {L, R, PredicateV, ExceptV}, nullptr, Name);
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
bool HasRoundingMD = false;
switch (Callee->getIntrinsicID()) {
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
  case Intrinsic::INTRINSIC: \
    HasRoundingMD = ROUND_MODE; \
#include "llvm/IR/ConstrainedOps.def"
UseArgs.push_back(getConstrainedFPRounding(Rounding));
UseArgs.push_back(getConstrainedFPExcept(Except));
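
// --- Illustrative usage sketch (not from IRBuilder.cpp) ---
// Constrained FP operations carry explicit rounding and exception-behavior
// arguments, as the code above shows. A hedged example of a strict fadd; A and B
// are hypothetical float values:
static llvm::Value *emitStrictFAdd(llvm::IRBuilder<> &Builder, llvm::Value *A,
                                   llvm::Value *B) {
  Builder.setIsFPConstrained(true);
  return Builder.CreateConstrainedFPBinOp(
      llvm::Intrinsic::experimental_constrained_fadd, A, B,
      /*FMFSource=*/nullptr, "", /*FPMathTag=*/nullptr,
      llvm::RoundingMode::NearestTiesToEven, llvm::fp::ebStrict);
}
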
Sel = addBranchMetadata(Sel, Prof, Unpred);
if (isa<FPMathOperator>(Sel))
  setFPAttrs(Sel, nullptr, FMF);
1153 "Pointer subtraction operand types must match!");
1155 ->isOpaqueOrPointeeTypeMatches(ElemTy) &&
1156 "Pointer type must match element type");
1165 assert(isa<PointerType>(
Ptr->getType()) &&
1166 "launder.invariant.group only applies to pointers.");
1168 auto *PtrType =
Ptr->getType();
1169 auto *Int8PtrTy =
getInt8PtrTy(PtrType->getPointerAddressSpace());
1170 if (PtrType != Int8PtrTy)
1174 M, Intrinsic::launder_invariant_group, {Int8PtrTy});
1179 "LaunderInvariantGroup should take and return the same type");
1183 if (PtrType != Int8PtrTy)
assert(isa<PointerType>(Ptr->getType()) &&
       "strip.invariant.group only applies to pointers.");
auto *PtrType = Ptr->getType();
auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
if (PtrType != Int8PtrTy)
    M, Intrinsic::strip_invariant_group, {Int8PtrTy});
       "StripInvariantGroup should take and return the same type");
if (PtrType != Int8PtrTy)
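
// --- Illustrative usage sketch (not from IRBuilder.cpp) ---
// launder/strip.invariant.group as emitted by the helpers above; both take and
// return a pointer in the same address space. Ptr is a hypothetical pointer:
static void emitInvariantGroupBarriers(llvm::IRBuilder<> &Builder,
                                       llvm::Value *Ptr) {
  llvm::Value *Laundered = Builder.CreateLaunderInvariantGroup(Ptr);
  llvm::Value *Stripped = Builder.CreateStripInvariantGroup(Laundered);
  (void)Stripped;
}
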
auto *Ty = cast<VectorType>(V->getType());
if (isa<ScalableVectorType>(Ty)) {
    M, Intrinsic::experimental_vector_reverse, Ty);
int NumElts = Ty->getElementCount().getKnownMinValue();
for (int i = 0; i < NumElts; ++i)
assert(isa<VectorType>(V1->getType()) && "Unexpected type");
       "Splice expects matching operand types!");
if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
    M, Intrinsic::experimental_vector_splice, VTy);
unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
       "Invalid immediate for vector splice!");
unsigned Idx = (NumElts + Imm) % NumElts;
for (unsigned I = 0; I < NumElts; ++I)
  Mask.push_back(Idx + I);
assert(EC.isNonZero() && "Cannot splat to an empty vector!");
Zeros.resize(EC.getKnownMinValue());
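
// --- Illustrative usage sketch (not from IRBuilder.cpp) ---
// The vector helpers above pick an intrinsic for scalable vectors and a
// shufflevector for fixed-width ones. A hedged example on a hypothetical
// fixed-width vector V and scalar S:
static void emitVectorOps(llvm::IRBuilder<> &Builder, llvm::Value *V,
                          llvm::Value *S) {
  llvm::Value *Rev = Builder.CreateVectorReverse(V);
  llvm::Value *Rot = Builder.CreateVectorSplice(V, V, /*Imm=*/1); // rotate by one lane
  llvm::Value *Splat = Builder.CreateVectorSplat(/*NumElts=*/4, S);
  (void)Rev; (void)Rot; (void)Splat;
}
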
auto *IntTy = cast<IntegerType>(From->getType());
       DL.getTypeStoreSize(IntTy) &&
       "Element extends past full value");
if (DL.isBigEndian())
  ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
               DL.getTypeStoreSize(ExtractedTy) - Offset);
       "Cannot extract to a larger integer!");
if (ExtractedTy != IntTy) {
    Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
       "Invalid Base ptr type for preserve.array.access.index.");
assert(cast<PointerType>(BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
       "Pointer element type mismatch");
    M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});
    CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
       "Invalid Base ptr type for preserve.union.access.index.");
Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
       "Invalid Base ptr type for preserve.struct.access.index.");
assert(cast<PointerType>(BaseType)->isOpaqueOrPointeeTypeMatches(ElTy) &&
       "Pointer element type mismatch");
    M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});
    {Base, GEPIndex, DIIndex});
Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
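
// --- Illustrative usage sketch (not from IRBuilder.cpp) ---
// The preserve.*.access.index intrinsics back relocatable (CO-RE style) field
// accesses, mainly for the BPF target. A hedged example; StructTy, BasePtr and
// DbgInfo are hypothetical, and DbgInfo would normally be the struct's
// DICompositeType node:
static llvm::Value *emitFieldAccess(llvm::IRBuilder<> &Builder,
                                    llvm::Type *StructTy, llvm::Value *BasePtr,
                                    llvm::MDNode *DbgInfo) {
  // Address of field #1 of *BasePtr, kept relocatable for the target.
  return Builder.CreatePreserveStructAccessIndex(StructTy, BasePtr,
                                                 /*Index=*/1, /*FieldIndex=*/1,
                                                 DbgInfo);
}
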
    Value *OffsetValue) {
    Value *OffsetValue) {
       "trying to create an alignment assumption on a non-pointer?");
assert(Alignment != 0 && "Invalid Alignment");
auto *PtrTy = cast<PointerType>(PtrValue->getType());
return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
    Value *OffsetValue) {
       "trying to create an alignment assumption on a non-pointer?");
return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
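
// --- Illustrative usage sketch (not from IRBuilder.cpp) ---
// An alignment assumption as built by the helpers above; it is emitted as a call
// to llvm.assume carrying an "align" operand bundle. Ptr is a hypothetical
// pointer value and DL the module's data layout:
static void emitAlignAssume(llvm::IRBuilder<> &Builder,
                            const llvm::DataLayout &DL, llvm::Value *Ptr) {
  Builder.CreateAlignmentAssumption(DL, Ptr, /*Alignment=*/32);
}
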
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}