24#include "llvm/IR/IntrinsicsSPIRV.h"
32#include <unordered_set>
56 cl::desc(
"Emit OpName for all instructions"),
60#define GET_BuiltinGroup_DECL
61#include "SPIRVGenTables.inc"
66static bool isaGEP(
const Value *V) {
70class SPIRVEmitIntrinsics
72 public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
73 SPIRVTargetMachine *TM =
nullptr;
74 SPIRVGlobalRegistry *GR =
nullptr;
76 bool TrackConstants =
true;
77 bool HaveFunPtrs =
false;
78 DenseMap<Instruction *, Constant *> AggrConsts;
79 DenseMap<Instruction *, Type *> AggrConstTypes;
80 DenseSet<Instruction *> AggrStores;
81 std::unordered_set<Value *> Named;
84 DenseMap<Function *, SmallVector<std::pair<unsigned, Type *>>> FDeclPtrTys;
87 bool CanTodoType =
true;
88 unsigned TodoTypeSz = 0;
89 DenseMap<Value *, bool> TodoType;
90 void insertTodoType(
Value *
Op) {
92 if (CanTodoType && !isaGEP(
Op)) {
93 auto It = TodoType.try_emplace(
Op,
true);
99 auto It = TodoType.find(
Op);
100 if (It != TodoType.end() && It->second) {
108 auto It = TodoType.find(
Op);
109 return It != TodoType.end() && It->second;
113 std::unordered_set<Instruction *> TypeValidated;
// Tags for well-known target types that get special handling during element
// type deduction (only Event is distinguished here).
116 enum WellKnownTypes { Event };
119 Type *deduceElementType(
Value *
I,
bool UnknownElemTypeI8);
120 Type *deduceElementTypeHelper(
Value *
I,
bool UnknownElemTypeI8);
121 Type *deduceElementTypeHelper(
Value *
I, std::unordered_set<Value *> &Visited,
122 bool UnknownElemTypeI8,
123 bool IgnoreKnownType =
false);
124 Type *deduceElementTypeByValueDeep(
Type *ValueTy,
Value *Operand,
125 bool UnknownElemTypeI8);
126 Type *deduceElementTypeByValueDeep(
Type *ValueTy,
Value *Operand,
127 std::unordered_set<Value *> &Visited,
128 bool UnknownElemTypeI8);
130 std::unordered_set<Value *> &Visited,
131 bool UnknownElemTypeI8);
133 bool UnknownElemTypeI8);
136 Type *deduceNestedTypeHelper(User *U,
bool UnknownElemTypeI8);
137 Type *deduceNestedTypeHelper(User *U,
Type *Ty,
138 std::unordered_set<Value *> &Visited,
139 bool UnknownElemTypeI8);
142 void deduceOperandElementType(Instruction *
I,
143 SmallPtrSet<Instruction *, 4> *IncompleteRets,
144 const SmallPtrSet<Value *, 4> *AskOps =
nullptr,
145 bool IsPostprocessing =
false);
150 Type *reconstructType(
Value *
Op,
bool UnknownElemTypeI8,
151 bool IsPostprocessing);
153 void replaceMemInstrUses(Instruction *Old, Instruction *New,
IRBuilder<> &
B);
155 bool insertAssignPtrTypeIntrs(Instruction *
I,
IRBuilder<> &
B,
156 bool UnknownElemTypeI8);
158 void insertAssignPtrTypeTargetExt(TargetExtType *AssignedType,
Value *V,
160 void replacePointerOperandWithPtrCast(Instruction *
I,
Value *Pointer,
161 Type *ExpectedElementType,
162 unsigned OperandToReplace,
164 void insertPtrCastOrAssignTypeInstr(Instruction *
I,
IRBuilder<> &
B);
165 bool shouldTryToAddMemAliasingDecoration(Instruction *Inst);
167 void insertConstantsForFPFastMathDefault(
Module &M);
168 void processGlobalValue(GlobalVariable &GV,
IRBuilder<> &
B);
170 void processParamTypesByFunHeader(Function *
F,
IRBuilder<> &
B);
171 Type *deduceFunParamElementType(Function *
F,
unsigned OpIdx);
172 Type *deduceFunParamElementType(Function *
F,
unsigned OpIdx,
173 std::unordered_set<Function *> &FVisited);
175 bool deduceOperandElementTypeCalledFunction(
177 Type *&KnownElemTy,
bool &Incomplete);
178 void deduceOperandElementTypeFunctionPointer(
180 Type *&KnownElemTy,
bool IsPostprocessing);
181 bool deduceOperandElementTypeFunctionRet(
182 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
183 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing,
186 CallInst *buildSpvPtrcast(Function *
F,
Value *
Op,
Type *ElemTy);
187 void replaceUsesOfWithSpvPtrcast(
Value *
Op,
Type *ElemTy, Instruction *
I,
188 DenseMap<Function *, CallInst *> Ptrcasts);
190 DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
193 DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
194 void propagateElemTypeRec(
Value *
Op,
Type *PtrElemTy,
Type *CastElemTy,
195 DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
196 std::unordered_set<Value *> &Visited,
197 DenseMap<Function *, CallInst *> Ptrcasts);
200 void replaceAllUsesWithAndErase(
IRBuilder<> &
B, Instruction *Src,
201 Instruction *Dest,
bool DeleteOld =
true);
205 GetElementPtrInst *simplifyZeroLengthArrayGepInst(GetElementPtrInst *
GEP);
208 bool postprocessTypes(
Module &M);
209 bool processFunctionPointers(
Module &M);
210 void parseFunDeclarations(
Module &M);
212 void useRoundingMode(ConstrainedFPIntrinsic *FPI,
IRBuilder<> &
B);
214 void emitUnstructuredLoopControls(Function &
F,
IRBuilder<> &
B);
230 bool walkLogicalAccessChain(
231 GetElementPtrInst &
GEP,
232 const std::function<
void(
Type *PointedType, uint64_t Index)>
241 Type *getGEPType(GetElementPtrInst *
GEP);
248 Type *getGEPTypeLogical(GetElementPtrInst *
GEP);
250 Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &
GEP);
// Constructs the pass. TM may be null when the pass is instantiated by the
// generic pass machinery; the TM member stays nullptr in that case.
254 SPIRVEmitIntrinsics(SPIRVTargetMachine *TM =
nullptr)
255 : ModulePass(ID), TM(TM) {}
258 Instruction *visitGetElementPtrInst(GetElementPtrInst &
I);
261 Instruction *visitInsertElementInst(InsertElementInst &
I);
262 Instruction *visitExtractElementInst(ExtractElementInst &
I);
264 Instruction *visitExtractValueInst(ExtractValueInst &
I);
268 Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &
I);
// Human-readable pass name reported to the pass manager / -debug-pass output.
272 StringRef getPassName()
const override {
return "SPIRV emit intrinsics"; }
274 bool runOnModule(
Module &M)
override;
276 void getAnalysisUsage(AnalysisUsage &AU)
const override {
277 ModulePass::getAnalysisUsage(AU);
286 return II->getIntrinsicID() == Intrinsic::experimental_convergence_entry ||
287 II->getIntrinsicID() == Intrinsic::experimental_convergence_loop ||
288 II->getIntrinsicID() == Intrinsic::experimental_convergence_anchor;
291bool expectIgnoredInIRTranslation(
const Instruction *
I) {
295 switch (
II->getIntrinsicID()) {
296 case Intrinsic::invariant_start:
297 case Intrinsic::spv_resource_handlefrombinding:
298 case Intrinsic::spv_resource_getpointer:
308 if (
II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
309 Value *V =
II->getArgOperand(0);
310 return getPointerRoot(V);
// Static pass identifier; its address is used by LLVM's pass registration
// machinery (passed to the ModulePass(ID) base constructor above).
318char SPIRVEmitIntrinsics::ID = 0;
341 B.SetInsertPoint(
I->getParent()->getFirstNonPHIOrDbgOrAlloca());
347 B.SetCurrentDebugLocation(
I->getDebugLoc());
348 if (
I->getType()->isVoidTy())
349 B.SetInsertPoint(
I->getNextNode());
351 B.SetInsertPoint(*
I->getInsertionPointAfterDef());
356 switch (Intr->getIntrinsicID()) {
357 case Intrinsic::invariant_start:
358 case Intrinsic::invariant_end:
366 if (
I->getType()->isTokenTy())
368 "does not support token type",
373 if (!
I->hasName() ||
I->getType()->isAggregateType() ||
374 expectIgnoredInIRTranslation(
I))
385 if (
F &&
F->getName().starts_with(
"llvm.spv.alloca"))
396 std::vector<Value *> Args = {
399 B.CreateIntrinsic(Intrinsic::spv_assign_name, {
I->getType()}, Args);
402void SPIRVEmitIntrinsics::replaceAllUsesWith(
Value *Src,
Value *Dest,
406 if (isTodoType(Src)) {
409 insertTodoType(Dest);
413void SPIRVEmitIntrinsics::replaceAllUsesWithAndErase(
IRBuilder<> &
B,
418 std::string
Name = Src->hasName() ? Src->getName().str() :
"";
419 Src->eraseFromParent();
422 if (Named.insert(Dest).second)
447Type *SPIRVEmitIntrinsics::reconstructType(
Value *
Op,
bool UnknownElemTypeI8,
448 bool IsPostprocessing) {
463 if (UnknownElemTypeI8) {
464 if (!IsPostprocessing)
472CallInst *SPIRVEmitIntrinsics::buildSpvPtrcast(Function *
F,
Value *
Op,
480 B.SetInsertPointPastAllocas(OpA->getParent());
483 B.SetInsertPoint(
F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
485 Type *OpTy =
Op->getType();
489 CallInst *PtrCasted =
490 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
495void SPIRVEmitIntrinsics::replaceUsesOfWithSpvPtrcast(
497 DenseMap<Function *, CallInst *> Ptrcasts) {
499 CallInst *PtrCastedI =
nullptr;
500 auto It = Ptrcasts.
find(
F);
501 if (It == Ptrcasts.
end()) {
502 PtrCastedI = buildSpvPtrcast(
F,
Op, ElemTy);
503 Ptrcasts[
F] = PtrCastedI;
505 PtrCastedI = It->second;
507 I->replaceUsesOfWith(
Op, PtrCastedI);
510void SPIRVEmitIntrinsics::propagateElemType(
512 DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
513 DenseMap<Function *, CallInst *> Ptrcasts;
515 for (
auto *U :
Users) {
518 if (!VisitedSubst.insert(std::make_pair(U,
Op)).second)
523 if (isaGEP(UI) || TypeValidated.find(UI) != TypeValidated.end())
524 replaceUsesOfWithSpvPtrcast(
Op, ElemTy, UI, Ptrcasts);
528void SPIRVEmitIntrinsics::propagateElemTypeRec(
530 DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
531 std::unordered_set<Value *> Visited;
532 DenseMap<Function *, CallInst *> Ptrcasts;
533 propagateElemTypeRec(
Op, PtrElemTy, CastElemTy, VisitedSubst, Visited,
534 std::move(Ptrcasts));
537void SPIRVEmitIntrinsics::propagateElemTypeRec(
539 DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
540 std::unordered_set<Value *> &Visited,
541 DenseMap<Function *, CallInst *> Ptrcasts) {
542 if (!Visited.insert(
Op).second)
545 for (
auto *U :
Users) {
548 if (!VisitedSubst.insert(std::make_pair(U,
Op)).second)
553 if (isaGEP(UI) || TypeValidated.find(UI) != TypeValidated.end())
554 replaceUsesOfWithSpvPtrcast(
Op, CastElemTy, UI, Ptrcasts);
562SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
Type *ValueTy,
Value *Operand,
563 bool UnknownElemTypeI8) {
564 std::unordered_set<Value *> Visited;
565 return deduceElementTypeByValueDeep(ValueTy, Operand, Visited,
569Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
570 Type *ValueTy,
Value *Operand, std::unordered_set<Value *> &Visited,
571 bool UnknownElemTypeI8) {
576 deduceElementTypeHelper(Operand, Visited, UnknownElemTypeI8))
587Type *SPIRVEmitIntrinsics::deduceElementTypeByUsersDeep(
588 Value *
Op, std::unordered_set<Value *> &Visited,
bool UnknownElemTypeI8) {
600 for (User *OpU :
Op->users()) {
602 if (
Type *Ty = deduceElementTypeHelper(Inst, Visited, UnknownElemTypeI8))
615 if ((DemangledName.
starts_with(
"__spirv_ocl_printf(") ||
624Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
Value *
I,
625 bool UnknownElemTypeI8) {
626 std::unordered_set<Value *> Visited;
627 return deduceElementTypeHelper(
I, Visited, UnknownElemTypeI8);
630void SPIRVEmitIntrinsics::maybeAssignPtrType(
Type *&Ty,
Value *
Op,
Type *RefTy,
631 bool UnknownElemTypeI8) {
633 if (!UnknownElemTypeI8)
640bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
641 GetElementPtrInst &
GEP,
642 const std::function<
void(
Type *, uint64_t)> &OnLiteralIndexing,
643 const std::function<
void(
Type *,
Value *)> &OnDynamicIndexing) {
651 Value *Src = getPointerRoot(
GEP.getPointerOperand());
652 Type *CurType = deduceElementType(Src,
true);
661 OnDynamicIndexing(AT->getElementType(), Operand);
662 return AT ==
nullptr;
670 uint32_t EltTypeSize =
DL.getTypeSizeInBits(AT->getElementType()) / 8;
674 CurType = AT->getElementType();
675 OnLiteralIndexing(CurType, Index);
677 uint32_t StructSize =
DL.getTypeSizeInBits(ST) / 8;
680 const auto &STL =
DL.getStructLayout(ST);
681 unsigned Element = STL->getElementContainingOffset(
Offset);
682 Offset -= STL->getElementOffset(Element);
683 CurType =
ST->getElementType(Element);
684 OnLiteralIndexing(CurType, Element);
686 Type *EltTy = VT->getElementType();
687 TypeSize EltSizeBits =
DL.getTypeSizeInBits(EltTy);
688 assert(EltSizeBits % 8 == 0 &&
689 "Element type size in bits must be a multiple of 8.");
690 uint32_t EltTypeSize = EltSizeBits / 8;
695 OnLiteralIndexing(CurType, Index);
707SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &
GEP) {
710 B.SetInsertPoint(&
GEP);
712 std::vector<Value *> Indices;
713 Indices.push_back(ConstantInt::get(
714 IntegerType::getInt32Ty(CurrF->
getContext()), 0,
false));
715 walkLogicalAccessChain(
717 [&Indices, &
B](
Type *EltType, uint64_t Index) {
719 ConstantInt::get(
B.getInt64Ty(), Index,
false));
722 uint32_t EltTypeSize =
DL.getTypeSizeInBits(EltType) / 8;
724 Offset, ConstantInt::get(
Offset->getType(), EltTypeSize,
726 Indices.push_back(Index);
730 SmallVector<Value *, 4>
Args;
731 Args.push_back(
B.getInt1(
GEP.isInBounds()));
732 Args.push_back(
GEP.getOperand(0));
734 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
735 replaceAllUsesWithAndErase(
B, &
GEP, NewI);
739Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *
GEP) {
741 Type *CurType =
GEP->getResultElementType();
743 bool Interrupted = walkLogicalAccessChain(
744 *
GEP, [&CurType](
Type *EltType, uint64_t Index) { CurType = EltType; },
747 return Interrupted ?
GEP->getResultElementType() : CurType;
750Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *
Ref) {
751 if (
Ref->getSourceElementType() ==
752 IntegerType::getInt8Ty(CurrF->
getContext()) &&
754 return getGEPTypeLogical(
Ref);
761 Ty =
Ref->getSourceElementType();
765 Ty =
Ref->getResultElementType();
770Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
771 Value *
I, std::unordered_set<Value *> &Visited,
bool UnknownElemTypeI8,
772 bool IgnoreKnownType) {
778 if (!IgnoreKnownType)
783 if (!Visited.insert(
I).second)
790 maybeAssignPtrType(Ty,
I,
Ref->getAllocatedType(), UnknownElemTypeI8);
792 Ty = getGEPType(
Ref);
794 Ty = SGEP->getResultElementType();
799 KnownTy =
Op->getType();
801 maybeAssignPtrType(Ty,
I, ElemTy, UnknownElemTypeI8);
804 Ty = SPIRV::getOriginalFunctionType(*Fn);
807 Ty = deduceElementTypeByValueDeep(
809 Ref->getNumOperands() > 0 ?
Ref->getOperand(0) :
nullptr, Visited,
813 Type *RefTy = deduceElementTypeHelper(
Ref->getPointerOperand(), Visited,
815 maybeAssignPtrType(Ty,
I, RefTy, UnknownElemTypeI8);
817 maybeAssignPtrType(Ty,
I,
Ref->getDestTy(), UnknownElemTypeI8);
819 if (
Type *Src =
Ref->getSrcTy(), *Dest =
Ref->getDestTy();
821 Ty = deduceElementTypeHelper(
Ref->getOperand(0), Visited,
826 Ty = deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8);
830 Ty = deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8);
832 Type *BestTy =
nullptr;
834 DenseMap<Type *, unsigned> PhiTys;
835 for (
int i =
Ref->getNumIncomingValues() - 1; i >= 0; --i) {
836 Ty = deduceElementTypeByUsersDeep(
Ref->getIncomingValue(i), Visited,
843 if (It.first->second > MaxN) {
844 MaxN = It.first->second;
852 for (
Value *
Op : {
Ref->getTrueValue(),
Ref->getFalseValue()}) {
853 Ty = deduceElementTypeByUsersDeep(
Op, Visited, UnknownElemTypeI8);
858 static StringMap<unsigned> ResTypeByArg = {
862 {
"__spirv_GenericCastToPtr_ToGlobal", 0},
863 {
"__spirv_GenericCastToPtr_ToLocal", 0},
864 {
"__spirv_GenericCastToPtr_ToPrivate", 0},
865 {
"__spirv_GenericCastToPtrExplicit_ToGlobal", 0},
866 {
"__spirv_GenericCastToPtrExplicit_ToLocal", 0},
867 {
"__spirv_GenericCastToPtrExplicit_ToPrivate", 0}};
871 if (
II &&
II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
873 if (HandleType->getTargetExtName() ==
"spirv.Image" ||
874 HandleType->getTargetExtName() ==
"spirv.SignedImage") {
875 for (User *U :
II->users()) {
880 }
else if (HandleType->getTargetExtName() ==
"spirv.VulkanBuffer") {
882 Ty = HandleType->getTypeParameter(0);
894 }
else if (
II &&
II->getIntrinsicID() ==
895 Intrinsic::spv_generic_cast_to_ptr_explicit) {
896 Ty = deduceElementTypeHelper(CI->getArgOperand(0), Visited,
898 }
else if (Function *CalledF = CI->getCalledFunction()) {
899 std::string DemangledName =
901 if (DemangledName.length() > 0)
902 DemangledName = SPIRV::lookupBuiltinNameHelper(DemangledName);
903 auto AsArgIt = ResTypeByArg.
find(DemangledName);
904 if (AsArgIt != ResTypeByArg.
end())
905 Ty = deduceElementTypeHelper(CI->getArgOperand(AsArgIt->second),
906 Visited, UnknownElemTypeI8);
913 if (Ty && !IgnoreKnownType) {
924Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U,
925 bool UnknownElemTypeI8) {
926 std::unordered_set<Value *> Visited;
927 return deduceNestedTypeHelper(U,
U->getType(), Visited, UnknownElemTypeI8);
930Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
931 User *U,
Type *OrigTy, std::unordered_set<Value *> &Visited,
932 bool UnknownElemTypeI8) {
941 if (!Visited.insert(U).second)
947 for (
unsigned i = 0; i <
U->getNumOperands(); ++i) {
949 assert(
Op &&
"Operands should not be null.");
950 Type *OpTy =
Op->getType();
954 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
961 Change |= Ty != OpTy;
969 if (
Value *
Op =
U->getNumOperands() > 0 ?
U->getOperand(0) :
nullptr) {
970 Type *OpTy = ArrTy->getElementType();
974 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
981 Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
987 if (
Value *
Op =
U->getNumOperands() > 0 ?
U->getOperand(0) :
nullptr) {
988 Type *OpTy = VecTy->getElementType();
992 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
999 Type *NewTy = VectorType::get(Ty, VecTy->getElementCount());
1009Type *SPIRVEmitIntrinsics::deduceElementType(
Value *
I,
bool UnknownElemTypeI8) {
1010 if (
Type *Ty = deduceElementTypeHelper(
I, UnknownElemTypeI8))
1012 if (!UnknownElemTypeI8)
1015 return IntegerType::getInt8Ty(
I->getContext());
1019 Value *PointerOperand) {
1025 return I->getType();
1033bool SPIRVEmitIntrinsics::deduceOperandElementTypeCalledFunction(
1035 Type *&KnownElemTy,
bool &Incomplete) {
1039 std::string DemangledName =
1041 if (DemangledName.length() > 0 &&
1043 const SPIRVSubtarget &
ST = TM->
getSubtarget<SPIRVSubtarget>(*CalledF);
1044 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
1045 DemangledName,
ST.getPreferredInstructionSet());
1046 if (Opcode == SPIRV::OpGroupAsyncCopy) {
1047 for (
unsigned i = 0, PtrCnt = 0; i < CI->
arg_size() && PtrCnt < 2; ++i) {
1053 KnownElemTy = ElemTy;
1054 Ops.push_back(std::make_pair(
Op, i));
1056 }
else if (Grp == SPIRV::Atomic || Grp == SPIRV::AtomicFloating) {
1063 case SPIRV::OpAtomicFAddEXT:
1064 case SPIRV::OpAtomicFMinEXT:
1065 case SPIRV::OpAtomicFMaxEXT:
1066 case SPIRV::OpAtomicLoad:
1067 case SPIRV::OpAtomicCompareExchangeWeak:
1068 case SPIRV::OpAtomicCompareExchange:
1069 case SPIRV::OpAtomicExchange:
1070 case SPIRV::OpAtomicIAdd:
1071 case SPIRV::OpAtomicISub:
1072 case SPIRV::OpAtomicOr:
1073 case SPIRV::OpAtomicXor:
1074 case SPIRV::OpAtomicAnd:
1075 case SPIRV::OpAtomicUMin:
1076 case SPIRV::OpAtomicUMax:
1077 case SPIRV::OpAtomicSMin:
1078 case SPIRV::OpAtomicSMax: {
1083 Incomplete = isTodoType(
Op);
1084 Ops.push_back(std::make_pair(
Op, 0));
1086 case SPIRV::OpAtomicStore: {
1095 Incomplete = isTodoType(
Op);
1096 Ops.push_back(std::make_pair(
Op, 0));
1105void SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionPointer(
1107 Type *&KnownElemTy,
bool IsPostprocessing) {
1111 Ops.push_back(std::make_pair(
Op, std::numeric_limits<unsigned>::max()));
1112 FunctionType *FTy = SPIRV::getOriginalFunctionType(*CI);
1113 bool IsNewFTy =
false, IsIncomplete =
false;
1116 Type *ArgTy = Arg->getType();
1121 if (isTodoType(Arg))
1122 IsIncomplete =
true;
1124 IsIncomplete =
true;
1127 ArgTy = FTy->getFunctionParamType(ParmIdx);
1131 Type *RetTy = FTy->getReturnType();
1138 IsIncomplete =
true;
1140 IsIncomplete =
true;
1143 if (!IsPostprocessing && IsIncomplete)
1146 IsNewFTy ? FunctionType::get(RetTy, ArgTys, FTy->isVarArg()) : FTy;
1149bool SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionRet(
1150 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
1151 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing,
1163 DenseSet<std::pair<Value *, Value *>> VisitedSubst{std::make_pair(
I,
Op)};
1164 for (User *U :
F->users()) {
1172 propagateElemType(CI, PrevElemTy, VisitedSubst);
1182 for (Instruction *IncompleteRetI : *IncompleteRets)
1183 deduceOperandElementType(IncompleteRetI,
nullptr, AskOps,
1185 }
else if (IncompleteRets) {
1188 TypeValidated.insert(
I);
1196void SPIRVEmitIntrinsics::deduceOperandElementType(
1197 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
1198 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing) {
1200 Type *KnownElemTy =
nullptr;
1201 bool Incomplete =
false;
1207 Incomplete = isTodoType(
I);
1208 for (
unsigned i = 0; i <
Ref->getNumIncomingValues(); i++) {
1211 Ops.push_back(std::make_pair(
Op, i));
1217 Incomplete = isTodoType(
I);
1218 Ops.push_back(std::make_pair(
Ref->getPointerOperand(), 0));
1225 Incomplete = isTodoType(
I);
1226 Ops.push_back(std::make_pair(
Ref->getOperand(0), 0));
1230 KnownElemTy =
Ref->getSourceElementType();
1231 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1236 KnownElemTy =
Ref->getBaseType();
1237 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1240 KnownElemTy =
I->getType();
1246 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1250 reconstructType(
Ref->getValueOperand(),
false, IsPostprocessing)))
1255 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1263 Incomplete = isTodoType(
Ref->getPointerOperand());
1264 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1272 Incomplete = isTodoType(
Ref->getPointerOperand());
1273 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1279 Incomplete = isTodoType(
I);
1280 for (
unsigned i = 0; i <
Ref->getNumOperands(); i++) {
1283 Ops.push_back(std::make_pair(
Op, i));
1291 if (deduceOperandElementTypeFunctionRet(
I, IncompleteRets, AskOps,
1292 IsPostprocessing, KnownElemTy,
Op,
1295 Incomplete = isTodoType(CurrF);
1296 Ops.push_back(std::make_pair(
Op, 0));
1302 bool Incomplete0 = isTodoType(Op0);
1303 bool Incomplete1 = isTodoType(Op1);
1305 Type *ElemTy0 = (Incomplete0 && !Incomplete1 && ElemTy1)
1307 : GR->findDeducedElementType(Op0);
1309 KnownElemTy = ElemTy0;
1310 Incomplete = Incomplete0;
1311 Ops.push_back(std::make_pair(Op1, 1));
1312 }
else if (ElemTy1) {
1313 KnownElemTy = ElemTy1;
1314 Incomplete = Incomplete1;
1315 Ops.push_back(std::make_pair(Op0, 0));
1319 deduceOperandElementTypeCalledFunction(CI,
Ops, KnownElemTy, Incomplete);
1320 else if (HaveFunPtrs)
1321 deduceOperandElementTypeFunctionPointer(CI,
Ops, KnownElemTy,
1326 if (!KnownElemTy ||
Ops.size() == 0)
1331 for (
auto &OpIt :
Ops) {
1335 Type *AskTy =
nullptr;
1336 CallInst *AskCI =
nullptr;
1337 if (IsPostprocessing && AskOps) {
1343 if (Ty == KnownElemTy)
1346 Type *OpTy =
Op->getType();
1347 if (
Op->hasUseList() &&
1354 else if (!IsPostprocessing)
1358 if (AssignCI ==
nullptr) {
1367 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1368 std::make_pair(
I,
Op)};
1369 propagateElemTypeRec(
Op, KnownElemTy, PrevElemTy, VisitedSubst);
1373 CallInst *PtrCastI =
1374 buildSpvPtrcast(
I->getParent()->getParent(),
Op, KnownElemTy);
1375 if (OpIt.second == std::numeric_limits<unsigned>::max())
1378 I->setOperand(OpIt.second, PtrCastI);
1381 TypeValidated.insert(
I);
1384void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
1389 if (isAssignTypeInstr(U)) {
1390 B.SetInsertPoint(U);
1391 SmallVector<Value *, 2>
Args = {
New,
U->getOperand(1)};
1392 CallInst *AssignCI =
1393 B.CreateIntrinsic(Intrinsic::spv_assign_type, {
New->getType()},
Args);
1395 U->eraseFromParent();
1398 U->replaceUsesOfWith(Old, New);
1403 New->copyMetadata(*Old);
1407void SPIRVEmitIntrinsics::preprocessUndefs(
IRBuilder<> &
B) {
1408 std::queue<Instruction *> Worklist;
1412 while (!Worklist.empty()) {
1414 bool BPrepared =
false;
1417 for (
auto &
Op :
I->operands()) {
1419 if (!AggrUndef || !
Op->getType()->isAggregateType())
1426 auto *IntrUndef =
B.CreateIntrinsic(Intrinsic::spv_undef, {});
1427 Worklist.push(IntrUndef);
1428 I->replaceUsesOfWith(
Op, IntrUndef);
1429 AggrConsts[IntrUndef] = AggrUndef;
1430 AggrConstTypes[IntrUndef] = AggrUndef->getType();
1435void SPIRVEmitIntrinsics::preprocessCompositeConstants(
IRBuilder<> &
B) {
1436 std::queue<Instruction *> Worklist;
1440 while (!Worklist.empty()) {
1441 auto *
I = Worklist.front();
1444 bool KeepInst =
false;
1445 for (
const auto &
Op :
I->operands()) {
1447 Type *ResTy =
nullptr;
1450 ResTy = COp->getType();
1462 ResTy =
Op->getType()->isVectorTy() ? COp->getType() :
B.getInt32Ty();
1467 for (
unsigned i = 0; i < COp->getNumElements(); ++i)
1468 Args.push_back(COp->getElementAsConstant(i));
1472 IsPhi ?
B.SetInsertPointPastAllocas(
I->getParent()->getParent())
1473 :
B.SetInsertPoint(
I);
1477 B.CreateIntrinsic(Intrinsic::spv_const_composite, {ResTy}, {
Args});
1481 AggrConsts[CI] = AggrConst;
1482 AggrConstTypes[CI] = deduceNestedTypeHelper(AggrConst,
false);
1494 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {
I->getType()},
1499 unsigned RoundingModeDeco,
1506 ConstantInt::get(
Int32Ty, SPIRV::Decoration::FPRoundingMode)),
1515 MDNode *SaturatedConversionNode =
1517 Int32Ty, SPIRV::Decoration::SaturatedConversion))});
1524 if (Fu->isIntrinsic()) {
1525 unsigned const int IntrinsicId = Fu->getIntrinsicID();
1526 switch (IntrinsicId) {
1527 case Intrinsic::fptosi_sat:
1528 case Intrinsic::fptoui_sat:
1547 MDString *ConstraintString =
MDString::get(Ctx,
IA->getConstraintString());
1555 B.SetInsertPoint(&
Call);
1556 B.CreateIntrinsic(Intrinsic::spv_inline_asm, {
Args});
1561void SPIRVEmitIntrinsics::useRoundingMode(ConstrainedFPIntrinsic *FPI,
1564 if (!
RM.has_value())
1566 unsigned RoundingModeDeco = std::numeric_limits<unsigned>::max();
1567 switch (
RM.value()) {
1571 case RoundingMode::NearestTiesToEven:
1572 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTE;
1574 case RoundingMode::TowardNegative:
1575 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTN;
1577 case RoundingMode::TowardPositive:
1578 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTP;
1580 case RoundingMode::TowardZero:
1581 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTZ;
1583 case RoundingMode::Dynamic:
1584 case RoundingMode::NearestTiesToAway:
1588 if (RoundingModeDeco == std::numeric_limits<unsigned>::max())
1594Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &
I) {
1598 B.SetInsertPoint(&
I);
1599 SmallVector<Value *, 4>
Args;
1601 Args.push_back(
I.getCondition());
1604 for (
auto &Case :
I.cases()) {
1605 Args.push_back(Case.getCaseValue());
1606 BBCases.
push_back(Case.getCaseSuccessor());
1609 CallInst *NewI =
B.CreateIntrinsic(Intrinsic::spv_switch,
1610 {
I.getOperand(0)->getType()}, {
Args});
1614 I.eraseFromParent();
1617 B.SetInsertPoint(ParentBB);
1618 IndirectBrInst *BrI =
B.CreateIndirectBr(
1621 for (BasicBlock *BBCase : BBCases)
1627 if (
GEP->getNumIndices() == 0)
1630 return CI->getZExtValue() == 0;
1635Instruction *SPIRVEmitIntrinsics::visitIntrinsicInst(IntrinsicInst &
I) {
1641 B.SetInsertPoint(&
I);
1643 SmallVector<Value *, 4>
Args;
1644 Args.push_back(
B.getInt1(
true));
1645 Args.push_back(
I.getOperand(0));
1646 Args.push_back(
B.getInt32(0));
1647 for (
unsigned J = 0; J < SGEP->getNumIndices(); ++J)
1648 Args.push_back(SGEP->getIndexOperand(J));
1650 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, Types, Args);
1651 replaceAllUsesWithAndErase(
B, &
I, NewI);
1655Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &
I) {
1657 B.SetInsertPoint(&
I);
1665 if (
I.getSourceElementType() ==
1666 IntegerType::getInt8Ty(CurrF->
getContext())) {
1667 return buildLogicalAccessChainFromGEP(
I);
1672 Value *PtrOp =
I.getPointerOperand();
1673 Type *SrcElemTy =
I.getSourceElementType();
1674 Type *DeducedPointeeTy = deduceElementType(PtrOp,
true);
1677 if (ArrTy->getElementType() == SrcElemTy) {
1679 Type *FirstIdxType =
I.getOperand(1)->getType();
1680 NewIndices.
push_back(ConstantInt::get(FirstIdxType, 0));
1681 for (
Value *Idx :
I.indices())
1685 SmallVector<Value *, 4>
Args;
1686 Args.push_back(
B.getInt1(
I.isInBounds()));
1687 Args.push_back(
I.getPointerOperand());
1690 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
1691 replaceAllUsesWithAndErase(
B, &
I, NewI);
1698 SmallVector<Value *, 4>
Args;
1699 Args.push_back(
B.getInt1(
I.isInBounds()));
1701 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
1702 replaceAllUsesWithAndErase(
B, &
I, NewI);
1706Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &
I) {
1708 B.SetInsertPoint(&
I);
1717 I.eraseFromParent();
1723 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_bitcast, {
Types}, {
Args});
1724 replaceAllUsesWithAndErase(
B, &
I, NewI);
1728void SPIRVEmitIntrinsics::insertAssignPtrTypeTargetExt(
1730 Type *VTy =
V->getType();
1735 if (ElemTy != AssignedType)
1748 if (CurrentType == AssignedType)
1755 " for value " +
V->getName(),
1763void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
1764 Instruction *
I,
Value *Pointer,
Type *ExpectedElementType,
1766 TypeValidated.insert(
I);
1769 Type *PointerElemTy = deduceElementTypeHelper(Pointer,
false);
1770 if (PointerElemTy == ExpectedElementType ||
1776 MetadataAsValue *VMD =
buildMD(ExpectedElementVal);
1778 bool FirstPtrCastOrAssignPtrType =
true;
1784 for (
auto User :
Pointer->users()) {
1787 (
II->getIntrinsicID() != Intrinsic::spv_assign_ptr_type &&
1788 II->getIntrinsicID() != Intrinsic::spv_ptrcast) ||
1789 II->getOperand(0) != Pointer)
1794 FirstPtrCastOrAssignPtrType =
false;
1795 if (
II->getOperand(1) != VMD ||
1802 if (
II->getIntrinsicID() != Intrinsic::spv_ptrcast)
1807 if (
II->getParent() !=
I->getParent())
1810 I->setOperand(OperandToReplace,
II);
1816 if (FirstPtrCastOrAssignPtrType) {
1821 }
else if (isTodoType(Pointer)) {
1822 eraseTodoType(Pointer);
1829 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1830 std::make_pair(
I, Pointer)};
1832 propagateElemType(Pointer, PrevElemTy, VisitedSubst);
1844 auto *PtrCastI =
B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
1850void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *
I,
1855 replacePointerOperandWithPtrCast(
1856 I,
SI->getValueOperand(), IntegerType::getInt8Ty(CurrF->
getContext()),
1862 Type *OpTy =
Op->getType();
1865 if (OpTy ==
Op->getType())
1866 OpTy = deduceElementTypeByValueDeep(OpTy,
Op,
false);
1867 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 1,
B);
1872 Type *OpTy = LI->getType();
1877 Type *NewOpTy = OpTy;
1878 OpTy = deduceElementTypeByValueDeep(OpTy, LI,
false);
1879 if (OpTy == NewOpTy)
1880 insertTodoType(Pointer);
1883 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 0,
B);
1888 Type *OpTy =
nullptr;
1900 OpTy = GEPI->getSourceElementType();
1902 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 0,
B);
1904 insertTodoType(Pointer);
1916 std::string DemangledName =
1920 bool HaveTypes =
false;
1938 for (User *U : CalledArg->
users()) {
1940 if ((ElemTy = deduceElementTypeHelper(Inst,
false)) !=
nullptr)
1946 HaveTypes |= ElemTy !=
nullptr;
1951 if (DemangledName.empty() && !HaveTypes)
1969 Type *ExpectedType =
1971 if (!ExpectedType && !DemangledName.empty())
1972 ExpectedType = SPIRV::parseBuiltinCallArgumentBaseType(
1973 DemangledName,
OpIdx,
I->getContext());
1974 if (!ExpectedType || ExpectedType->
isVoidTy())
1982 replacePointerOperandWithPtrCast(CI, ArgOperand, ExpectedType,
OpIdx,
B);
1986Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &
I) {
1993 I.getOperand(1)->getType(),
1994 I.getOperand(2)->getType()};
1996 B.SetInsertPoint(&
I);
1998 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_insertelt, {
Types}, {
Args});
1999 replaceAllUsesWithAndErase(
B, &
I, NewI);
2004SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &
I) {
2011 B.SetInsertPoint(&
I);
2013 I.getIndexOperand()->getType()};
2014 SmallVector<Value *, 2>
Args = {
I.getVectorOperand(),
I.getIndexOperand()};
2015 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_extractelt, {
Types}, {
Args});
2016 replaceAllUsesWithAndErase(
B, &
I, NewI);
2020Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &
I) {
2022 B.SetInsertPoint(&
I);
2025 Value *AggregateOp =
I.getAggregateOperand();
2029 Args.push_back(AggregateOp);
2030 Args.push_back(
I.getInsertedValueOperand());
2031 for (
auto &
Op :
I.indices())
2032 Args.push_back(
B.getInt32(
Op));
2034 B.CreateIntrinsic(Intrinsic::spv_insertv, {
Types}, {
Args});
2035 replaceMemInstrUses(&
I, NewI,
B);
2039Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &
I) {
2040 if (
I.getAggregateOperand()->getType()->isAggregateType())
2043 B.SetInsertPoint(&
I);
2045 for (
auto &
Op :
I.indices())
2046 Args.push_back(
B.getInt32(
Op));
2048 B.CreateIntrinsic(Intrinsic::spv_extractv, {
I.getType()}, {
Args});
2049 replaceAllUsesWithAndErase(
B, &
I, NewI);
2053Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &
I) {
2054 if (!
I.getType()->isAggregateType())
2057 B.SetInsertPoint(&
I);
2058 TrackConstants =
false;
2063 B.CreateIntrinsic(Intrinsic::spv_load, {
I.getOperand(0)->getType()},
2064 {
I.getPointerOperand(),
B.getInt16(Flags),
2065 B.getInt32(
I.getAlign().value())});
2066 replaceMemInstrUses(&
I, NewI,
B);
2070Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &
I) {
2074 B.SetInsertPoint(&
I);
2075 TrackConstants =
false;
2079 auto *PtrOp =
I.getPointerOperand();
2081 if (
I.getValueOperand()->getType()->isAggregateType()) {
2089 "Unexpected argument of aggregate type, should be spv_extractv!");
2093 auto *NewI =
B.CreateIntrinsic(
2094 Intrinsic::spv_store, {
I.getValueOperand()->getType(), PtrOp->
getType()},
2095 {
I.getValueOperand(), PtrOp,
B.getInt16(Flags),
2096 B.getInt32(
I.getAlign().value())});
2098 I.eraseFromParent();
2102Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &
I) {
2103 Value *ArraySize =
nullptr;
2104 if (
I.isArrayAllocation()) {
2107 SPIRV::Extension::SPV_INTEL_variable_length_array))
2109 "array allocation: this instruction requires the following "
2110 "SPIR-V extension: SPV_INTEL_variable_length_array",
2112 ArraySize =
I.getArraySize();
2115 B.SetInsertPoint(&
I);
2116 TrackConstants =
false;
2117 Type *PtrTy =
I.getType();
2120 ?
B.CreateIntrinsic(Intrinsic::spv_alloca_array,
2121 {PtrTy, ArraySize->
getType()},
2122 {ArraySize,
B.getInt32(
I.getAlign().value())})
2123 :
B.CreateIntrinsic(
Intrinsic::spv_alloca, {PtrTy},
2124 {
B.getInt32(
I.getAlign().value())});
2125 replaceAllUsesWithAndErase(
B, &
I, NewI);
2129Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &
I) {
2130 assert(
I.getType()->isAggregateType() &&
"Aggregate result is expected");
2132 B.SetInsertPoint(&
I);
2134 Args.push_back(
B.getInt32(
2135 static_cast<uint32_t
>(
getMemScope(
I.getContext(),
I.getSyncScopeID()))));
2136 Args.push_back(
B.getInt32(
2138 Args.push_back(
B.getInt32(
2140 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_cmpxchg,
2141 {
I.getPointerOperand()->getType()}, {
Args});
2142 replaceMemInstrUses(&
I, NewI,
B);
2146Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &
I) {
2148 B.SetInsertPoint(&
I);
2149 B.CreateIntrinsic(Intrinsic::spv_unreachable, {});
2153void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
2156 static const StringSet<> ArtificialGlobals{
"llvm.global.annotations",
2157 "llvm.compiler.used",
"llvm.used"};
2167 deduceElementTypeHelper(&GV,
false);
2171 auto *InitInst =
B.CreateIntrinsic(Intrinsic::spv_init_global,
2173 InitInst->setArgOperand(1, Init);
2176 B.CreateIntrinsic(Intrinsic::spv_unref_global, GV.
getType(), &GV);
2182bool SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *
I,
2184 bool UnknownElemTypeI8) {
2190 if (
Type *ElemTy = deduceElementType(
I, UnknownElemTypeI8)) {
2197void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *
I,
2200 static StringMap<unsigned> ResTypeWellKnown = {
2201 {
"async_work_group_copy", WellKnownTypes::Event},
2202 {
"async_work_group_strided_copy", WellKnownTypes::Event},
2203 {
"__spirv_GroupAsyncCopy", WellKnownTypes::Event}};
2207 bool IsKnown =
false;
2212 std::string DemangledName =
2215 if (DemangledName.length() > 0)
2217 SPIRV::lookupBuiltinNameHelper(DemangledName, &DecorationId);
2218 auto ResIt = ResTypeWellKnown.
find(DemangledName);
2219 if (ResIt != ResTypeWellKnown.
end()) {
2222 switch (ResIt->second) {
2223 case WellKnownTypes::Event:
2230 switch (DecorationId) {
2233 case FPDecorationId::SAT:
2236 case FPDecorationId::RTE:
2238 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTE,
B);
2240 case FPDecorationId::RTZ:
2242 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTZ,
B);
2244 case FPDecorationId::RTP:
2246 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTP,
B);
2248 case FPDecorationId::RTN:
2250 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTN,
B);
2256 Type *Ty =
I->getType();
2259 Type *TypeToAssign = Ty;
2261 if (
II->getIntrinsicID() == Intrinsic::spv_const_composite ||
2262 II->getIntrinsicID() == Intrinsic::spv_undef) {
2263 auto It = AggrConstTypes.
find(
II);
2264 if (It == AggrConstTypes.
end())
2266 TypeToAssign = It->second;
2272 for (
const auto &
Op :
I->operands()) {
2279 Type *OpTy =
Op->getType();
2281 CallInst *AssignCI =
2286 Type *OpTy =
Op->getType();
2301 CallInst *AssignCI =
2311bool SPIRVEmitIntrinsics::shouldTryToAddMemAliasingDecoration(
2312 Instruction *Inst) {
2314 if (!STI->
canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing))
2325 case Intrinsic::spv_load:
2326 case Intrinsic::spv_store:
2333 const std::string
Prefix =
"__spirv_Atomic";
2334 const bool IsAtomic =
Name.find(Prefix) == 0;
2342void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *
I,
2344 if (MDNode *MD =
I->getMetadata(
"spirv.Decorations")) {
2346 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {
I->getType()},
2351 auto processMemAliasingDecoration = [&](
unsigned Kind) {
2352 if (MDNode *AliasListMD =
I->getMetadata(Kind)) {
2353 if (shouldTryToAddMemAliasingDecoration(
I)) {
2354 uint32_t Dec =
Kind == LLVMContext::MD_alias_scope
2355 ? SPIRV::Decoration::AliasScopeINTEL
2356 : SPIRV::Decoration::NoAliasINTEL;
2358 I, ConstantInt::get(
B.getInt32Ty(), Dec),
2361 B.CreateIntrinsic(Intrinsic::spv_assign_aliasing_decoration,
2362 {
I->getType()}, {
Args});
2366 processMemAliasingDecoration(LLVMContext::MD_alias_scope);
2367 processMemAliasingDecoration(LLVMContext::MD_noalias);
2370 if (MDNode *MD =
I->getMetadata(LLVMContext::MD_fpmath)) {
2372 bool AllowFPMaxError =
2374 if (!AllowFPMaxError)
2378 B.CreateIntrinsic(Intrinsic::spv_assign_fpmaxerror_decoration,
2387 &FPFastMathDefaultInfoMap,
2389 auto it = FPFastMathDefaultInfoMap.
find(
F);
2390 if (it != FPFastMathDefaultInfoMap.
end())
2398 SPIRV::FPFastMathMode::None);
2400 SPIRV::FPFastMathMode::None);
2402 SPIRV::FPFastMathMode::None);
2403 return FPFastMathDefaultInfoMap[
F] = std::move(FPFastMathDefaultInfoVec);
2409 size_t BitWidth = Ty->getScalarSizeInBits();
2413 assert(Index >= 0 && Index < 3 &&
2414 "Expected FPFastMathDefaultInfo for half, float, or double");
2415 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2416 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2417 return FPFastMathDefaultInfoVec[Index];
2420void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(
Module &M) {
2422 if (!
ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2431 auto Node =
M.getNamedMetadata(
"spirv.ExecutionMode");
2433 if (!
M.getNamedMetadata(
"opencl.enable.FP_CONTRACT")) {
2441 ConstantInt::get(Type::getInt32Ty(
M.getContext()), 0);
2444 [[maybe_unused]] GlobalVariable *GV =
2445 new GlobalVariable(M,
2446 Type::getInt32Ty(
M.getContext()),
2460 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2461 FPFastMathDefaultInfoMap;
2463 for (
unsigned i = 0; i <
Node->getNumOperands(); i++) {
2472 if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2474 "Expected 4 operands for FPFastMathDefault");
2480 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2482 SPIRV::FPFastMathDefaultInfo &
Info =
2485 Info.FPFastMathDefault =
true;
2486 }
else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2488 "Expected no operands for ContractionOff");
2492 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2494 for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2495 Info.ContractionOff =
true;
2497 }
else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2499 "Expected 1 operand for SignedZeroInfNanPreserve");
2500 unsigned TargetWidth =
2505 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2509 assert(Index >= 0 && Index < 3 &&
2510 "Expected FPFastMathDefaultInfo for half, float, or double");
2511 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2512 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2513 FPFastMathDefaultInfoVec[
Index].SignedZeroInfNanPreserve =
true;
2517 std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
2518 for (
auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
2519 if (FPFastMathDefaultInfoVec.
empty())
2522 for (
const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2523 assert(
Info.Ty &&
"Expected target type for FPFastMathDefaultInfo");
2526 if (Flags == SPIRV::FPFastMathMode::None && !
Info.ContractionOff &&
2527 !
Info.SignedZeroInfNanPreserve && !
Info.FPFastMathDefault)
2531 if (
Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
2533 "and AllowContract");
2535 if (
Info.SignedZeroInfNanPreserve &&
2537 (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2538 SPIRV::FPFastMathMode::NSZ))) {
2539 if (
Info.FPFastMathDefault)
2541 "SignedZeroInfNanPreserve but at least one of "
2542 "NotNaN/NotInf/NSZ is enabled.");
2545 if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
2546 !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
2547 (Flags & SPIRV::FPFastMathMode::AllowContract))) {
2549 "AllowTransform requires AllowReassoc and "
2550 "AllowContract to be set.");
2553 auto it = GlobalVars.find(Flags);
2554 GlobalVariable *GV =
nullptr;
2555 if (it != GlobalVars.end()) {
2561 ConstantInt::get(Type::getInt32Ty(
M.getContext()), Flags);
2564 GV =
new GlobalVariable(M,
2565 Type::getInt32Ty(
M.getContext()),
2570 GlobalVars[
Flags] = GV;
2576void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *
I,
2579 bool IsConstComposite =
2580 II &&
II->getIntrinsicID() == Intrinsic::spv_const_composite;
2581 if (IsConstComposite && TrackConstants) {
2583 auto t = AggrConsts.
find(
I);
2587 {
II->getType(),
II->getType()}, t->second,
I, {},
B);
2589 NewOp->setArgOperand(0,
I);
2592 for (
const auto &
Op :
I->operands()) {
2596 unsigned OpNo =
Op.getOperandNo();
2597 if (
II && ((
II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
2598 (
II->paramHasAttr(OpNo, Attribute::ImmArg))))
2602 IsPhi ?
B.SetInsertPointPastAllocas(
I->getParent()->getParent())
2603 :
B.SetInsertPoint(
I);
2606 Type *OpTy =
Op->getType();
2614 {OpTy, OpTyVal->
getType()},
Op, OpTyVal, {},
B);
2616 if (!IsConstComposite &&
isPointerTy(OpTy) && OpElemTy !=
nullptr &&
2617 OpElemTy != IntegerType::getInt8Ty(
I->getContext())) {
2619 SmallVector<Value *, 2>
Args = {
2622 CallInst *PtrCasted =
2623 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
2628 I->setOperand(OpNo, NewOp);
2630 if (Named.insert(
I).second)
2634Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *
F,
2636 std::unordered_set<Function *> FVisited;
2637 return deduceFunParamElementType(
F,
OpIdx, FVisited);
2640Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
2641 Function *
F,
unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
2643 if (!FVisited.insert(
F).second)
2646 std::unordered_set<Value *> Visited;
2649 for (User *U :
F->users()) {
2661 if (
Type *Ty = deduceElementTypeHelper(OpArg, Visited,
false))
2664 for (User *OpU : OpArg->
users()) {
2666 if (!Inst || Inst == CI)
2669 if (
Type *Ty = deduceElementTypeHelper(Inst, Visited,
false))
2676 if (FVisited.find(OuterF) != FVisited.end())
2678 for (
unsigned i = 0; i < OuterF->
arg_size(); ++i) {
2679 if (OuterF->
getArg(i) == OpArg) {
2680 Lookup.push_back(std::make_pair(OuterF, i));
2687 for (
auto &Pair :
Lookup) {
2688 if (
Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
2695void SPIRVEmitIntrinsics::processParamTypesByFunHeader(Function *
F,
2697 B.SetInsertPointPastAllocas(
F);
2711 for (User *U :
F->users()) {
2727 for (User *U : Arg->
users()) {
2731 CI->
getParent()->getParent() == CurrF) {
2733 deduceOperandElementTypeFunctionPointer(CI,
Ops, ElemTy,
false);
2744void SPIRVEmitIntrinsics::processParamTypes(Function *
F,
IRBuilder<> &
B) {
2745 B.SetInsertPointPastAllocas(
F);
2751 if (!ElemTy && (ElemTy = deduceFunParamElementType(
F,
OpIdx)) !=
nullptr) {
2753 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
2755 propagateElemType(Arg, IntegerType::getInt8Ty(
F->getContext()),
2767 bool IsNewFTy =
false;
2783bool SPIRVEmitIntrinsics::processFunctionPointers(
Module &M) {
2786 if (
F.isIntrinsic())
2788 if (
F.isDeclaration()) {
2789 for (User *U :
F.users()) {
2802 for (User *U :
F.users()) {
2804 if (!
II ||
II->arg_size() != 3 ||
II->getOperand(0) != &
F)
2806 if (
II->getIntrinsicID() == Intrinsic::spv_assign_ptr_type ||
2807 II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
2814 if (Worklist.
empty())
2820 "cannot allocate a name for the internal service function");
2821 LLVMContext &Ctx =
M.getContext();
2829 for (Function *
F : Worklist) {
2831 for (
const auto &Arg :
F->args())
2833 IRB.CreateCall(
F, Args);
2835 IRB.CreateRetVoid();
2841void SPIRVEmitIntrinsics::applyDemangledPtrArgTypes(
IRBuilder<> &
B) {
2842 DenseMap<Function *, CallInst *> Ptrcasts;
2843 for (
auto It : FDeclPtrTys) {
2845 for (
auto *U :
F->users()) {
2850 for (
auto [Idx, ElemTy] : It.second) {
2858 B.SetInsertPointPastAllocas(Arg->
getParent());
2862 }
else if (isaGEP(Param)) {
2863 replaceUsesOfWithSpvPtrcast(Param,
normalizeType(ElemTy), CI,
2872 .getFirstNonPHIOrDbgOrAlloca());
2893SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *
GEP) {
2900 Type *SrcTy =
GEP->getSourceElementType();
2901 SmallVector<Value *, 8> Indices(
GEP->indices());
2903 if (ArrTy && ArrTy->getNumElements() == 0 &&
2905 Indices.erase(Indices.begin());
2906 SrcTy = ArrTy->getElementType();
2908 GEP->getNoWrapFlags(),
"",
2909 GEP->getIterator());
2914void SPIRVEmitIntrinsics::emitUnstructuredLoopControls(Function &
F,
2920 if (!
ST->canUseExtension(
2921 SPIRV::Extension::SPV_INTEL_unstructured_loop_controls))
2924 for (BasicBlock &BB :
F) {
2926 MDNode *LoopMD =
Term->getMetadata(LLVMContext::MD_loop);
2932 unsigned LC =
Ops[0];
2933 if (LC == SPIRV::LoopControl::None)
2937 B.SetInsertPoint(Term);
2938 SmallVector<Value *, 4> IntrArgs;
2940 for (
unsigned I = 1;
I <
Ops.size(); ++
I)
2942 B.CreateIntrinsic(Intrinsic::spv_loop_control_intel, IntrArgs);
2946bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
2947 if (
Func.isDeclaration())
2951 GR =
ST.getSPIRVGlobalRegistry();
2955 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
2960 AggrConstTypes.
clear();
2965 SmallPtrSet<Instruction *, 4> DeadInsts;
2970 if ((!
GEP && !SGEP) || GR->findDeducedElementType(&
I))
2974 GR->addDeducedElementType(SGEP,
2979 GetElementPtrInst *NewGEP = simplifyZeroLengthArrayGepInst(
GEP);
2981 GEP->replaceAllUsesWith(NewGEP);
2985 if (
Type *GepTy = getGEPType(
GEP))
2989 for (
auto *
I : DeadInsts) {
2990 assert(
I->use_empty() &&
"Dead instruction should not have any uses left");
2991 I->eraseFromParent();
2994 processParamTypesByFunHeader(CurrF,
B);
3003 Type *ElTy =
SI->getValueOperand()->getType();
3008 B.SetInsertPoint(&
Func.getEntryBlock(),
Func.getEntryBlock().begin());
3009 for (
auto &GV :
Func.getParent()->globals())
3010 processGlobalValue(GV,
B);
3012 preprocessUndefs(
B);
3013 preprocessCompositeConstants(
B);
3017 applyDemangledPtrArgTypes(
B);
3020 for (
auto &
I : Worklist) {
3022 if (isConvergenceIntrinsic(
I))
3025 bool Postpone = insertAssignPtrTypeIntrs(
I,
B,
false);
3027 insertAssignTypeIntrs(
I,
B);
3028 insertPtrCastOrAssignTypeInstr(
I,
B);
3032 if (Postpone && !GR->findAssignPtrTypeInstr(
I))
3033 insertAssignPtrTypeIntrs(
I,
B,
true);
3036 useRoundingMode(FPI,
B);
3041 SmallPtrSet<Instruction *, 4> IncompleteRets;
3043 deduceOperandElementType(&
I, &IncompleteRets);
3047 for (BasicBlock &BB : Func)
3048 for (PHINode &Phi : BB.
phis())
3050 deduceOperandElementType(&Phi,
nullptr);
3052 for (
auto *
I : Worklist) {
3053 TrackConstants =
true;
3063 if (isConvergenceIntrinsic(
I))
3067 processInstrAfterVisit(
I,
B);
3070 emitUnstructuredLoopControls(Func,
B);
3076bool SPIRVEmitIntrinsics::postprocessTypes(
Module &M) {
3077 if (!GR || TodoTypeSz == 0)
3080 unsigned SzTodo = TodoTypeSz;
3081 DenseMap<Value *, SmallPtrSet<Value *, 4>> ToProcess;
3086 CallInst *AssignCI = GR->findAssignPtrTypeInstr(
Op);
3087 Type *KnownTy = GR->findDeducedElementType(
Op);
3088 if (!KnownTy || !AssignCI)
3094 std::unordered_set<Value *> Visited;
3095 if (
Type *ElemTy = deduceElementTypeHelper(
Op, Visited,
false,
true)) {
3096 if (ElemTy != KnownTy) {
3097 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
3098 propagateElemType(CI, ElemTy, VisitedSubst);
3105 if (
Op->hasUseList()) {
3106 for (User *U :
Op->users()) {
3113 if (TodoTypeSz == 0)
3118 SmallPtrSet<Instruction *, 4> IncompleteRets;
3120 auto It = ToProcess.
find(&
I);
3121 if (It == ToProcess.
end())
3123 It->second.remove_if([
this](
Value *V) {
return !isTodoType(V); });
3124 if (It->second.size() == 0)
3126 deduceOperandElementType(&
I, &IncompleteRets, &It->second,
true);
3127 if (TodoTypeSz == 0)
3132 return SzTodo > TodoTypeSz;
3136void SPIRVEmitIntrinsics::parseFunDeclarations(
Module &M) {
3138 if (!
F.isDeclaration() ||
F.isIntrinsic())
3142 if (DemangledName.empty())
3146 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
3147 DemangledName,
ST.getPreferredInstructionSet());
3148 if (Opcode != SPIRV::OpGroupAsyncCopy)
3151 SmallVector<unsigned> Idxs;
3160 LLVMContext &Ctx =
F.getContext();
3162 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledName, Ctx);
3163 if (!TypeStrs.
size())
3166 for (
unsigned Idx : Idxs) {
3167 if (Idx >= TypeStrs.
size())
3170 SPIRV::parseBuiltinCallArgumentType(TypeStrs[Idx].trim(), Ctx))
3173 FDeclPtrTys[&
F].push_back(std::make_pair(Idx, ElemTy));
3178bool SPIRVEmitIntrinsics::runOnModule(
Module &M) {
3181 parseFunDeclarations(M);
3182 insertConstantsForFPFastMathDefault(M);
3192 if (!
F.isDeclaration() && !
F.isIntrinsic()) {
3194 processParamTypes(&
F,
B);
3198 CanTodoType =
false;
3199 Changed |= postprocessTypes(M);
3202 Changed |= processFunctionPointers(M);
3208 return new SPIRVEmitIntrinsics(TM);
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Expand Atomic instructions
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static void replaceAllUsesWith(Value *Old, Value *New, SmallPtrSet< BasicBlock *, 32 > &FreshBBs, bool IsHuge)
Replace all old uses with new ones, and push the updated BBs into FreshBBs.
static Type * getPointeeType(Value *Ptr, const DataLayout &DL)
This file defines the DenseSet and SmallDenseSet classes.
static bool runOnFunction(Function &F, bool PostInlining)
iv Induction Variable Users
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Machine Check Debug Module
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
static unsigned getNumElements(Type *Ty)
static bool isMemInstrToReplace(Instruction *I)
static bool isAggrConstForceInt32(const Value *V)
static SPIRV::FPFastMathDefaultInfoVector & getOrCreateFPFastMathDefaultInfoVec(const Module &M, DenseMap< Function *, SPIRV::FPFastMathDefaultInfoVector > &FPFastMathDefaultInfoMap, Function *F)
static Type * getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I, Value *PointerOperand)
static void reportFatalOnTokenType(const Instruction *I)
static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I)
static void emitAssignName(Instruction *I, IRBuilder<> &B)
static Type * getPointeeTypeByCallInst(StringRef DemangledName, Function *CalledF, unsigned OpIdx)
static void createRoundingModeDecoration(Instruction *I, unsigned RoundingModeDeco, IRBuilder<> &B)
static void createDecorationIntrinsic(Instruction *I, MDNode *Node, IRBuilder<> &B)
static SPIRV::FPFastMathDefaultInfo & getFPFastMathDefaultInfo(SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec, const Type *Ty)
static cl::opt< bool > SpirvEmitOpNames("spirv-emit-op-names", cl::desc("Emit OpName for all instructions"), cl::init(false))
static bool IsKernelArgInt8(Function *F, StoreInst *SI)
static void addSaturatedDecorationToIntrinsic(Instruction *I, IRBuilder<> &B)
static bool isFirstIndexZero(const GetElementPtrInst *GEP)
static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I)
static FunctionType * getFunctionPointerElemType(Function *F, SPIRVGlobalRegistry *GR)
static void createSaturatedConversionDecoration(Instruction *I, IRBuilder<> &B)
static Type * restoreMutatedType(SPIRVGlobalRegistry *GR, Instruction *I, Type *Ty)
static bool requireAssignType(Instruction *I)
static void insertSpirvDecorations(MachineFunction &MF, SPIRVGlobalRegistry *GR, MachineIRBuilder MIB)
static void visit(BasicBlock &Start, std::function< bool(BasicBlock *)> op)
#define SPIRV_BACKEND_SERVICE_FUN_NAME
StringSet - A set-like wrapper for the StringMap.
static SymbolRef::Type getType(const Symbol *Sym)
static std::optional< unsigned > getOpcode(ArrayRef< VPValue * > Values)
Returns the opcode of Values or ~0 if they do not all agree.
static int Lookup(ArrayRef< TableEntry > Table, unsigned Opcode)
This class represents an incoming formal argument to a Function.
const Function * getParent() const
static unsigned getPointerOperandIndex()
static unsigned getPointerOperandIndex()
iterator_range< const_phi_iterator > phis() const
Returns a range that iterates over the phis in the basic block.
const Function * getParent() const
Return the enclosing method, or null if none.
static BasicBlock * Create(LLVMContext &Context, const Twine &Name="", Function *Parent=nullptr, BasicBlock *InsertBefore=nullptr)
Creates a new BasicBlock.
LLVM_ABI LLVMContext & getContext() const
Get the context in which this basic block lives.
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
static LLVM_ABI BlockAddress * get(Function *F, BasicBlock *BB)
Return a BlockAddress for the specified function and basic block.
bool isInlineAsm() const
Check if this call is an inline asm statement.
Function * getCalledFunction() const
Returns the function called, or null if this is an indirect function invocation or the function signa...
LLVM_ABI bool isIndirectCall() const
Return true if the callsite is an indirect call.
Value * getCalledOperand() const
Value * getArgOperand(unsigned i) const
LLVM_ABI Intrinsic::ID getIntrinsicID() const
Returns the intrinsic ID of the intrinsic called or Intrinsic::not_intrinsic if the called function i...
iterator_range< User::op_iterator > args()
Iteration adapter for range-for loops.
unsigned arg_size() const
This class represents a function call, abstracting a target machine's calling convention.
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI std::optional< RoundingMode > getRoundingMode() const
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > try_emplace(KeyT &&Key, Ts &&...Args)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
static LLVM_ABI FunctionType * get(Type *Result, ArrayRef< Type * > Params, bool isVarArg)
This static method is the primary way of constructing a FunctionType.
void addFnAttr(Attribute::AttrKind Kind)
Add function attributes to this function.
static Function * Create(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace, const Twine &N="", Module *M=nullptr)
const DataLayout & getDataLayout() const
Get the data layout of the module this function belongs to.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY
getIntrinsicID - This method returns the ID number of the specified function, or Intrinsic::not_intri...
bool isIntrinsic() const
isIntrinsic - Returns true if the function's name starts with "llvm.".
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Type * getReturnType() const
Returns the type of the ret val.
Argument * getArg(unsigned i) const
an instruction for type-safe pointer arithmetic to access elements of arrays and structs
static LLVM_ABI Type * getTypeAtIndex(Type *Ty, Value *Idx)
Return the type of the element at the given index of an indexable type.
static GetElementPtrInst * Create(Type *PointeeType, Value *Ptr, ArrayRef< Value * > IdxList, const Twine &NameStr="", InsertPosition InsertBefore=nullptr)
static unsigned getPointerOperandIndex()
PointerType * getType() const
Global values are always pointers.
@ PrivateLinkage
Like Internal, but omit from symbol table.
@ InternalLinkage
Rename collisions when linking (static functions).
const Constant * getInitializer() const
getInitializer - Return the initializer for this global variable.
This provides a uniform API for creating instructions and inserting them into a basic block: either a...
LLVM_ABI void addDestination(BasicBlock *Dest)
Add a destination.
Base class for instruction visitors.
LLVM_ABI InstListType::iterator eraseFromParent()
This method unlinks 'this' from the containing basic block and deletes it.
Instruction * user_back()
Specialize the methods defined in Value, as we know that an instruction can only be used by other ins...
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
LLVM_ABI void copyMetadata(const Instruction &SrcInst, ArrayRef< unsigned > WL=ArrayRef< unsigned >())
Copy metadata from SrcInst to this instruction.
This is an important class for using LLVM in a threaded context.
static unsigned getPointerOperandIndex()
const MDOperand & getOperand(unsigned I) const
static MDTuple * get(LLVMContext &Context, ArrayRef< Metadata * > MDs)
unsigned getNumOperands() const
Return number of MDNode operands.
static LLVM_ABI MDString * get(LLVMContext &Context, StringRef Str)
Flags
Flags values. These may be or'd together.
ModulePass class - This class is used to implement unstructured interprocedural optimizations and ana...
A Module instance is used to store all the information related to an LLVM module.
void addAssignPtrTypeInstr(Value *Val, CallInst *AssignPtrTyCI)
void buildAssignPtr(IRBuilder<> &B, Type *ElemTy, Value *Arg)
Type * findDeducedCompositeType(const Value *Val)
void replaceAllUsesWith(Value *Old, Value *New, bool DeleteOld=true)
void addDeducedElementType(Value *Val, Type *Ty)
void addReturnType(const Function *ArgF, TypedPointerType *DerivedTy)
Type * findMutated(const Value *Val)
void addDeducedCompositeType(Value *Val, Type *Ty)
void buildAssignType(IRBuilder<> &B, Type *Ty, Value *Arg)
Type * findDeducedElementType(const Value *Val)
void updateAssignType(CallInst *AssignCI, Value *Arg, Value *OfType)
CallInst * findAssignPtrTypeInstr(const Value *Val)
const SPIRVTargetLowering * getTargetLowering() const override
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
const SPIRVSubtarget * getSubtargetImpl() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
An instruction for storing to memory.
static unsigned getPointerOperandIndex()
iterator find(StringRef Key)
StringRef - Represent a constant reference to a string, i.e.
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
bool contains(StringRef key) const
Check if the set contains the given key.
static LLVM_ABI StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
static unsigned getPointerOperandIndex()
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
const STC & getSubtarget(const Function &F) const
This method returns a pointer to the specified type of TargetSubtargetInfo.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
bool isArrayTy() const
True if this is an instance of ArrayType.
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
bool isPointerTy() const
True if this is an instance of PointerType.
Type * getArrayElementType() const
LLVM_ABI StringRef getTargetExtName() const
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
bool isStructTy() const
True if this is an instance of StructType.
bool isTargetExtTy() const
Return true if this is a target extension type.
bool isAggregateType() const
Return true if the type is an aggregate type.
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
static LLVM_ABI bool isValidElementType(Type *ElemTy)
Return true if the specified type is valid as a element type.
static LLVM_ABI TypedPointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
void setOperand(unsigned i, Value *Val)
LLVM_ABI bool replaceUsesOfWith(Value *From, Value *To)
Replace uses of one Value with another.
Value * getOperand(unsigned i) const
LLVM Value Representation.
Type * getType() const
All values are typed, get the type of this value.
LLVM_ABI void setName(const Twine &Name)
Change the name of the value.
iterator_range< user_iterator > users()
void mutateType(Type *Ty)
Mutate the type of this Value to be of the specified type.
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
std::pair< iterator, bool > insert(const ValueT &V)
bool contains(const_arg_type_t< ValueT > V) const
Check if the set contains the given element.
const ParentTy * getParent() const
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Args[]
Key for Kernel::Metadata::mArgs.
@ SPIR_KERNEL
Used for SPIR kernel functions.
@ BasicBlock
Various leaf nodes.
bool match(Val *V, const Pattern &P)
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
initializer< Ty > init(const Ty &Val)
@ User
could "use" a pointer
NodeAddr< PhiNode * > Phi
NodeAddr< NodeBase * > Node
NodeAddr< FuncNode * > Func
friend class Instruction
Iterator for Instructions in a `BasicBlock.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
bool getVacantFunctionName(Module &M, std::string &Name)
FunctionAddr VTableAddr Value
bool isTypedPointerWrapper(const TargetExtType *ExtTy)
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
ModulePass * createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM)
unsigned getPointerAddressSpace(const Type *T)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
CallInst * buildIntrWithMD(Intrinsic::ID IntrID, ArrayRef< Type * > Types, Value *Arg, Value *Arg2, ArrayRef< Constant * > Imms, IRBuilder<> &B)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool isNestedPointer(const Type *Ty)
MetadataAsValue * buildMD(Value *Arg)
std::string getOclOrSpirvBuiltinDemangledName(StringRef Name)
SmallVector< unsigned, 1 > getSpirvLoopControlOperandsFromLoopMetadata(MDNode *LoopMD)
auto reverse(ContainerTy &&C)
Type * getTypedPointerWrapper(Type *ElemTy, unsigned AS)
bool isPointerTy(const Type *T)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
@ Ref
The access may reference the value stored in memory.
IRBuilder(LLVMContext &, FolderTy, InserterTy, MDNode *, ArrayRef< OperandBundleDef >) -> IRBuilder< FolderTy, InserterTy >
DWARFExpression::Operation Op
Type * getPointeeTypeByAttr(Argument *Arg)
bool hasPointeeTypeAttr(Argument *Arg)
constexpr unsigned BitWidth
bool isEquivalentTypes(Type *Ty1, Type *Ty2)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
iterator_range< pointer_iterator< WrappedIteratorT > > make_pointer_range(RangeT &&Range)
bool hasInitializer(const GlobalVariable *GV)
Type * normalizeType(Type *Ty)
@ Enabled
Convert any .debug_str_offsets tables to DWARF64 if needed.
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
PoisonValue * getNormalizedPoisonValue(Type *Ty)
bool isUntypedPointerTy(const Type *T)
Type * reconstitutePeeledArrayType(Type *Ty)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth)