#include "llvm/IR/IntrinsicsSPIRV.h"
#include <unordered_set>
static cl::opt<bool> SpirvEmitOpNames(
    "spirv-emit-op-names", cl::desc("Emit OpName for all instructions"),
    cl::init(false));
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"
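// This pass prepares LLVM IR for SPIR-V instruction selection: it rewrites or
// annotates instructions with llvm.spv.* intrinsics that carry the pointee
// ("element") types erased by opaque pointers, assigns value names and
// decorations, and interns aggregate constants. Lines elided from this
// excerpt are marked with "// ..." or "/* ... */".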
class SPIRVEmitIntrinsics
    : public ModulePass,
      public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
  SPIRVTargetMachine *TM = nullptr;
  SPIRVGlobalRegistry *GR = nullptr;
  bool TrackConstants = true;
  bool HaveFunPtrs = false;
  DenseMap<Instruction *, Constant *> AggrConsts;
  DenseMap<Instruction *, Type *> AggrConstTypes;
  DenseSet<Instruction *> AggrStores;
  std::unordered_set<Value *> Named;

  // Pointer-element types promised by the demangled names of declared
  // functions (filled by parseFunDeclarations()).
  DenseMap<Function *, SmallVector<std::pair<unsigned, Type *>>> FDeclPtrTys;

  // Deduction of an element type may be postponed until more of the module
  // has been seen; such values are recorded here and revisited later.
  bool CanTodoType = true;
  unsigned TodoTypeSz = 0;
  DenseMap<Value *, bool> TodoType;
  void insertTodoType(Value *Op) {
    if (CanTodoType) {
      auto It = TodoType.try_emplace(Op, true);
      if (It.second)
        ++TodoTypeSz;
    }
  }
  void eraseTodoType(Value *Op) {
    auto It = TodoType.find(Op);
    if (It != TodoType.end() && It->second) {
      It->second = false;
      --TodoTypeSz;
    }
  }
  bool isTodoType(Value *Op) {
    auto It = TodoType.find(Op);
    return It != TodoType.end() && It->second;
  }
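  // The three helpers above implement a deferred-work set: a value whose
  // pointee type cannot be deduced on first sight is marked "todo"
  // (TodoTypeSz counts live entries), receives a provisional i8 element type,
  // and is revisited in postprocessTypes(). Note that eraseTodoType() only
  // clears the mapped flag; entries are never removed from the map itself.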
  std::unordered_set<Instruction *> TypeValidated;

  enum WellKnownTypes { Event };
  // Deduction of the pointee ("element") type of a pointer value.
  Type *deduceElementType(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, std::unordered_set<Value *> &Visited,
                                bool UnknownElemTypeI8,
                                bool IgnoreKnownType = false);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByUsersDeep(Value *Op,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  void maybeAssignPtrType(Type *&Ty, Value *Op, Type *RefTy,
                          bool UnknownElemTypeI8);
  Type *deduceNestedTypeHelper(User *U, bool UnknownElemTypeI8);
  Type *deduceNestedTypeHelper(User *U, Type *OrigTy,
                               std::unordered_set<Value *> &Visited,
                               bool UnknownElemTypeI8);
  void deduceOperandElementType(Instruction *I,
                                SmallPtrSet<Instruction *, 4> *IncompleteRets,
                                const SmallPtrSet<Value *, 4> *AskOps = nullptr,
                                bool IsPostprocessing = false);
  Type *reconstructType(Value *Op, bool UnknownElemTypeI8,
                        bool IsPostprocessing);
  bool insertAssignPtrTypeIntrs(Instruction *I, IRBuilder<> &B,
                                bool UnknownElemTypeI8);
  void insertAssignPtrTypeTargetExt(TargetExtType *AssignedType, Value *V,
                                    IRBuilder<> &B);
  void replacePointerOperandWithPtrCast(Instruction *I, Value *Pointer,
                                        Type *ExpectedElementType,
                                        unsigned OperandToReplace,
                                        IRBuilder<> &B);
  bool shouldTryToAddMemAliasingDecoration(Instruction *Inst);
  void insertConstantsForFPFastMathDefault(Module &M);
  void processGlobalValue(GlobalVariable &GV, IRBuilder<> &B);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx,
                                  std::unordered_set<Function *> &FVisited);
  bool deduceOperandElementTypeCalledFunction(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool &Incomplete);
  void deduceOperandElementTypeFunctionPointer(
      CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
      Type *&KnownElemTy, bool IsPostprocessing);
  bool deduceOperandElementTypeFunctionRet(
      Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
      const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
      Type *&KnownElemTy, Value *Op, Function *F);
  void replaceUsesOfWithSpvPtrcast(Value *Op, Type *ElemTy, Instruction *I,
                                   DenseMap<Function *, CallInst *> Ptrcasts);
  void propagateElemType(Value *Op, Type *ElemTy,
                         DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void
  propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                       DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                            DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
                            std::unordered_set<Value *> &Visited,
                            DenseMap<Function *, CallInst *> Ptrcasts);
  GetElementPtrInst *simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP);
  bool postprocessTypes(Module &M);
  bool processFunctionPointers(Module &M);
  void parseFunDeclarations(Module &M);
  void useRoundingMode(ConstrainedFPIntrinsic *FPI, IRBuilder<> &B);

  // Walk a "logical" access chain (a byte-offset GEP over i8) and report each
  // step either as a literal index into the current type or, for arrays, as a
  // dynamic index. Returns true if the walk was interrupted.
  bool walkLogicalAccessChain(
      GetElementPtrInst &GEP,
      const std::function<void(Type *PointedType, uint64_t Index)>
          &OnLiteralIndexing,
      const std::function<void(Type *ElemType, Value *Offset)>
          &OnDynamicIndexing);
  Type *getGEPType(GetElementPtrInst *GEP);
  Type *getGEPTypeLogical(GetElementPtrInst *GEP);
  Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP);
public:
  static char ID;
  SPIRVEmitIntrinsics(SPIRVTargetMachine *TM = nullptr)
      : ModulePass(ID), TM(TM) {}

  Instruction *visitGetElementPtrInst(GetElementPtrInst &I);
  Instruction *visitInsertElementInst(InsertElementInst &I);
  Instruction *visitExtractElementInst(ExtractElementInst &I);
  Instruction *visitExtractValueInst(ExtractValueInst &I);
  Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);
  // ...

  StringRef getPassName() const override { return "SPIRV emit intrinsics"; }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    ModulePass::getAnalysisUsage(AU);
  }
};
  return II->getIntrinsicID() == Intrinsic::experimental_convergence_entry ||
         II->getIntrinsicID() == Intrinsic::experimental_convergence_loop ||
         II->getIntrinsicID() == Intrinsic::experimental_convergence_anchor;
bool expectIgnoredInIRTranslation(const Instruction *I) {
  const auto *II = dyn_cast<IntrinsicInst>(I);
  if (!II)
    return false;
  switch (II->getIntrinsicID()) {
  case Intrinsic::invariant_start:
  case Intrinsic::spv_resource_handlefrombinding:
  case Intrinsic::spv_resource_getpointer:
    return true;
  // ...
  if (II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
    Value *V = II->getArgOperand(0);
    return getPointerRoot(V);
  }
char SPIRVEmitIntrinsics::ID = 0;
static void setInsertPointSkippingPhis(IRBuilder<> &B, Instruction *I) {
  // ...
  B.SetInsertPoint(I->getParent()->getFirstNonPHIOrDbgOrAlloca());
  // ...
}

static void setInsertPointAfterDef(IRBuilder<> &B, Instruction *I) {
  B.SetCurrentDebugLocation(I->getDebugLoc());
  if (I->getType()->isVoidTy())
    B.SetInsertPoint(I->getNextNode());
  else
    B.SetInsertPoint(*I->getInsertionPointAfterDef());
}
static bool requireAssignType(Instruction *I) {
  // ...
  switch (Intr->getIntrinsicID()) {
  case Intrinsic::invariant_start:
  case Intrinsic::invariant_end:
    return false;
  // ...
static void reportFatalOnTokenType(const Instruction *I) {
  if (I->getType()->isTokenTy())
    report_fatal_error("A token is encountered but SPIR-V without extensions "
                       "does not support token type",
                       false);
}

static void emitAssignName(Instruction *I, IRBuilder<> &B) {
  if (!I->hasName() || I->getType()->isAggregateType() ||
      expectIgnoredInIRTranslation(I))
    return;
  // ...
  if (F && F->getName().starts_with("llvm.spv.alloca"))
    // ...
  std::vector<Value *> Args = {
      // ...
  };
  B.CreateIntrinsic(Intrinsic::spv_assign_name, {I->getType()}, Args);
}
void SPIRVEmitIntrinsics::replaceAllUsesWith(Value *Src, Value *Dest,
                                             bool DeleteOld) {
  GR->replaceAllUsesWith(Src, Dest, DeleteOld);
  // Keep the deferred-type records in sync as well.
  if (isTodoType(Src)) {
    // ...
    insertTodoType(Dest);
  }
}
void SPIRVEmitIntrinsics::replaceAllUsesWithAndErase(IRBuilder<> &B,
                                                     Instruction *Src,
                                                     Instruction *Dest) {
  // ...
  std::string Name = Src->hasName() ? Src->getName().str() : "";
  Src->eraseFromParent();
  // Re-attach the name (if any) to the replacement once the original
  // instruction is gone.
  if (!Name.empty()) {
    Dest->setName(Name);
    if (Named.insert(Dest).second)
      emitAssignName(Dest, B);
  }
}
Type *SPIRVEmitIntrinsics::reconstructType(Value *Op, bool UnknownElemTypeI8,
                                           bool IsPostprocessing) {
  // ...
  if (UnknownElemTypeI8) {
    if (!IsPostprocessing)
      insertTodoType(Op);
    // ...
CallInst *SPIRVEmitIntrinsics::buildSpvPtrcast(Function *F, Value *Op,
                                               Type *ElemTy) {
  // ...
  if (auto *OpA = dyn_cast<Argument>(Op)) {
    B.SetInsertPointPastAllocas(OpA->getParent());
    // ...
  } else {
    B.SetInsertPoint(F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
  }
  Type *OpTy = Op->getType();
  // ...
  CallInst *PtrCasted =
      B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
  // ...
  return PtrCasted;
}
void SPIRVEmitIntrinsics::replaceUsesOfWithSpvPtrcast(
    Value *Op, Type *ElemTy, Instruction *I,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  Function *F = I->getParent()->getParent();
  CallInst *PtrCastedI = nullptr;
  auto It = Ptrcasts.find(F);
  if (It == Ptrcasts.end()) {
    PtrCastedI = buildSpvPtrcast(F, Op, ElemTy);
    Ptrcasts[F] = PtrCastedI;
  } else {
    PtrCastedI = It->second;
  }
  I->replaceUsesOfWith(Op, PtrCastedI);
}
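// The Ptrcasts map acts as a per-function cache: the first use of Op inside a
// function builds one spv_ptrcast call, and every later use in that function
// is redirected to the same call instead of minting a new cast.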
void SPIRVEmitIntrinsics::propagateElemType(
    Value *Op, Type *ElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  DenseMap<Function *, CallInst *> Ptrcasts;
  // ...
  for (auto *U : Users) {
    // ...
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    // ...
    if (/* ... || */ TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, ElemTy, UI, Ptrcasts);
  }
}
void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  std::unordered_set<Value *> Visited;
  DenseMap<Function *, CallInst *> Ptrcasts;
  propagateElemTypeRec(Op, PtrElemTy, CastElemTy, VisitedSubst, Visited,
                       std::move(Ptrcasts));
}
void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
    std::unordered_set<Value *> &Visited,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  if (!Visited.insert(Op).second)
    return;
  // ...
  for (auto *U : Users) {
    // ...
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
      continue;
    // ...
    if (/* ... || */ TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, CastElemTy, UI, Ptrcasts);
  }
}
Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
    Type *ValueTy, Value *Operand, bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeByValueDeep(ValueTy, Operand, Visited,
                                      UnknownElemTypeI8);
}

Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
    Type *ValueTy, Value *Operand, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  // ...
  if (Type *NestedTy =
          deduceElementTypeHelper(Operand, Visited, UnknownElemTypeI8))
    // ...
Type *SPIRVEmitIntrinsics::deduceElementTypeByUsersDeep(
    Value *Op, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8) {
  // ...
  for (User *OpU : Op->users()) {
    // ...
    if (Type *Ty = deduceElementTypeHelper(Inst, Visited, UnknownElemTypeI8))
      return Ty;
  }
  return nullptr;
}

// ...
  if ((DemangledName.starts_with("__spirv_ocl_printf(") ||
      // ...
Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(Value *I,
                                                   bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeHelper(I, Visited, UnknownElemTypeI8);
}
void SPIRVEmitIntrinsics::maybeAssignPtrType(Type *&Ty, Value *Op, Type *RefTy,
                                             bool UnknownElemTypeI8) {
  if (isUntypedPointerTy(RefTy)) {
    if (!UnknownElemTypeI8)
      return;
    insertTodoType(Op);
  }
  Ty = RefTy;
}
bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
    GetElementPtrInst &GEP,
    const std::function<void(Type *, uint64_t)> &OnLiteralIndexing,
    const std::function<void(Type *, Value *)> &OnDynamicIndexing) {
  // ...
  Value *Src = getPointerRoot(GEP.getPointerOperand());
  Type *CurType = deduceElementType(Src, true);
  // ...
      // A non-constant offset can only be handled as a dynamic index into an
      // array; anything else interrupts the walk.
      auto *AT = dyn_cast<ArrayType>(CurType);
      if (AT)
        OnDynamicIndexing(AT->getElementType(), Operand);
      return AT == nullptr;
  // ...
      // Literal array indexing: the byte offset divided by the element size.
      uint32_t EltTypeSize = DL.getTypeSizeInBits(AT->getElementType()) / 8;
      // ...
      CurType = AT->getElementType();
      OnLiteralIndexing(CurType, Index);
  // ...
      // Struct indexing: find the field containing the current byte offset.
      uint32_t StructSize = DL.getTypeSizeInBits(ST) / 8;
      // ...
      const auto &STL = DL.getStructLayout(ST);
      unsigned Element = STL->getElementContainingOffset(Offset);
      Offset -= STL->getElementOffset(Element);
      CurType = ST->getElementType(Element);
      OnLiteralIndexing(CurType, Element);
  // ...
      // Vector indexing behaves like fixed-size array indexing.
      Type *EltTy = VT->getElementType();
      TypeSize EltSizeBits = DL.getTypeSizeInBits(EltTy);
      assert(EltSizeBits % 8 == 0 &&
             "Element type size in bits must be a multiple of 8.");
      uint32_t EltTypeSize = EltSizeBits / 8;
      // ...
      OnLiteralIndexing(CurType, Index);
  // ...
}
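// Worked example for the struct case: given
//   struct S { int a; float b; double c; };  // byte offsets 0, 4, 8
// and a running byte offset of 4, getElementContainingOffset(4) returns
// element 1 (the float at offset 4), the offset is reduced by
// getElementOffset(1) == 4 down to 0, and OnLiteralIndexing fires with
// (float, 1). This is how byte-based i8 GEPs are mapped back onto typed
// member accesses for logical SPIR-V.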
Instruction *
SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP) {
  // ...
  B.SetInsertPoint(&GEP);
  // ...
  std::vector<Value *> Indices;
  Indices.push_back(ConstantInt::get(
      IntegerType::getInt32Ty(CurrF->getContext()), 0, false));
  walkLogicalAccessChain(
      GEP,
      [&Indices, &B](Type *EltType, uint64_t Index) {
        Indices.push_back(
            ConstantInt::get(B.getInt64Ty(), Index, false));
      },
      [&](Type *EltType, Value *Offset) {
        // A dynamic index is reconstructed as Offset / sizeof(EltType).
        uint32_t EltTypeSize = DL.getTypeSizeInBits(EltType) / 8;
        Value *Index = B.CreateUDiv(
            Offset, ConstantInt::get(Offset->getType(), EltTypeSize,
                                     false));
        Indices.push_back(Index);
      });
  // ...
  Args.push_back(B.getInt1(GEP.isInBounds()));
  Args.push_back(GEP.getOperand(0));
  // ...
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &GEP, NewI);
  return NewI;
}
Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *GEP) {
  // ...
  Type *CurType = GEP->getResultElementType();
  // ...
  bool Interrupted = walkLogicalAccessChain(
      *GEP, [&CurType](Type *EltType, uint64_t Index) { CurType = EltType; },
      [&CurType](Type *EltType, Value *Offset) { CurType = EltType; });
  // If the walk was interrupted, fall back to the declared result type.
  return Interrupted ? GEP->getResultElementType() : CurType;
}
Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *Ref) {
  if (Ref->getSourceElementType() ==
          IntegerType::getInt8Ty(CurrF->getContext()) &&
      TM->getSubtargetImpl()->isLogicalSPIRV())
    return getGEPTypeLogical(Ref);

  Type *Ty = nullptr;
  // ...
    Ty = Ref->getSourceElementType();
  // ...
    Ty = Ref->getResultElementType();
  // ...
  return Ty;
}
Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
    Value *I, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8,
    bool IgnoreKnownType) {
  // ...
  // Return the cached type, if any, unless asked to re-deduce it.
  if (!IgnoreKnownType)
    if (Type *KnownTy = GR->findDeducedElementType(I))
      return KnownTy;

  // Cycle protection.
  if (!Visited.insert(I).second)
    return nullptr;

  Type *Ty = nullptr;
  if (auto *Ref = dyn_cast<AllocaInst>(I)) {
    maybeAssignPtrType(Ty, I, Ref->getAllocatedType(), UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    Ty = getGEPType(Ref);
  } else if (/* ... */) {
    // ...
    KnownTy = Op->getType();
    // ...
    maybeAssignPtrType(Ty, I, ElemTy, UnknownElemTypeI8);
  } else if (auto *Fn = dyn_cast<Function>(I)) {
    Ty = SPIRV::getOriginalFunctionType(*Fn);
  } else if (/* global values: deduce from the initializer */) {
    Ty = deduceElementTypeByValueDeep(
        /* ... */,
        Ref->getNumOperands() > 0 ? Ref->getOperand(0) : nullptr, Visited,
        UnknownElemTypeI8);
  } else if (/* ... */) {
    Type *RefTy = deduceElementTypeHelper(Ref->getPointerOperand(), Visited,
                                          UnknownElemTypeI8);
    maybeAssignPtrType(Ty, I, RefTy, UnknownElemTypeI8);
  } else if (/* ... */) {
    maybeAssignPtrType(Ty, I, Ref->getDestTy(), UnknownElemTypeI8);
  } else if (/* ... */) {
    if (Type *Src = Ref->getSrcTy(), *Dest = Ref->getDestTy();
        /* ... */)
      Ty = deduceElementTypeHelper(Ref->getOperand(0), Visited,
                                   UnknownElemTypeI8);
  } else if (/* ... */) {
    Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
  } else if (/* ... */) {
    Ty = deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8);
  } else if (auto *Ref = dyn_cast<PHINode>(I)) {
    // Take the most frequent type deduced across the incoming values.
    Type *BestTy = nullptr;
    // ...
    DenseMap<Type *, unsigned> PhiTys;
    for (int i = Ref->getNumIncomingValues() - 1; i >= 0; --i) {
      Ty = deduceElementTypeByUsersDeep(Ref->getIncomingValue(i), Visited,
                                        UnknownElemTypeI8);
      // ...
      if (It.first->second > MaxN) {
        MaxN = It.first->second;
        BestTy = It.first->first;
      }
    }
    // ...
  } else if (auto *Ref = dyn_cast<SelectInst>(I)) {
    for (Value *Op : {Ref->getTrueValue(), Ref->getFalseValue()}) {
      Ty = deduceElementTypeByUsersDeep(Op, Visited, UnknownElemTypeI8);
      // ...
    }
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    static StringMap<unsigned> ResTypeByArg = {
        // ...
        {"__spirv_GenericCastToPtr_ToGlobal", 0},
        {"__spirv_GenericCastToPtr_ToLocal", 0},
        {"__spirv_GenericCastToPtr_ToPrivate", 0},
        {"__spirv_GenericCastToPtrExplicit_ToGlobal", 0},
        {"__spirv_GenericCastToPtrExplicit_ToLocal", 0},
        {"__spirv_GenericCastToPtrExplicit_ToPrivate", 0}};
    // ...
    auto *II = dyn_cast<IntrinsicInst>(I);
    if (II && II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
      // ...
      if (HandleType->getTargetExtName() == "spirv.Image" ||
          HandleType->getTargetExtName() == "spirv.SignedImage") {
        for (User *U : II->users()) {
          // ...
        }
      } else if (HandleType->getTargetExtName() == "spirv.VulkanBuffer") {
        Ty = HandleType->getTypeParameter(0);
        // ...
      }
      // ...
    } else if (II && II->getIntrinsicID() ==
                         Intrinsic::spv_generic_cast_to_ptr_explicit) {
      Ty = deduceElementTypeHelper(CI->getArgOperand(0), Visited,
                                   UnknownElemTypeI8);
    } else if (Function *CalledF = CI->getCalledFunction()) {
      std::string DemangledName =
          getOclOrSpirvBuiltinDemangledName(CalledF->getName());
      if (DemangledName.length() > 0)
        DemangledName = SPIRV::lookupBuiltinNameHelper(DemangledName);
      auto AsArgIt = ResTypeByArg.find(DemangledName);
      if (AsArgIt != ResTypeByArg.end())
        Ty = deduceElementTypeHelper(CI->getArgOperand(AsArgIt->second),
                                     Visited, UnknownElemTypeI8);
      // ...
    }
  }

  // Remember the deduced element type.
  if (Ty && !IgnoreKnownType) {
    // ...
Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U,
                                                  bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceNestedTypeHelper(U, U->getType(), Visited, UnknownElemTypeI8);
}
Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
    User *U, Type *OrigTy, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
  // ...
  // Cycle protection.
  if (!Visited.insert(U).second)
    return OrigTy;

  if (/* struct types */) {
    // ...
    for (unsigned i = 0; i < U->getNumOperands(); ++i) {
      Value *Op = U->getOperand(i);
      assert(Op && "Operands should not be null.");
      Type *OpTy = Op->getType();
      // ...
      if (Type *Ty =
              deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
        // ...
      Change |= Ty != OpTy;
    }
    // ...
  } else if (auto *ArrTy = dyn_cast<ArrayType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = ArrTy->getElementType();
      // ...
      if (Type *Ty =
              deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
        // ...
      Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
      // ...
    }
  } else if (auto *VecTy = dyn_cast<VectorType>(OrigTy)) {
    if (Value *Op = U->getNumOperands() > 0 ? U->getOperand(0) : nullptr) {
      Type *OpTy = VecTy->getElementType();
      // ...
      if (Type *Ty =
              deduceElementTypeHelper(Op, Visited, UnknownElemTypeI8))
        // ...
      Type *NewTy = VectorType::get(Ty, VecTy->getElementCount());
      // ...
    }
  }
  return OrigTy;
}
Type *SPIRVEmitIntrinsics::deduceElementType(Value *I, bool UnknownElemTypeI8) {
  if (Type *Ty = deduceElementTypeHelper(I, UnknownElemTypeI8))
    return Ty;
  if (!UnknownElemTypeI8)
    return nullptr;
  insertTodoType(I);
  return IntegerType::getInt8Ty(I->getContext());
}
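// With UnknownElemTypeI8 set, a failed deduction is not fatal: the value is
// recorded in the todo set and provisionally typed as i8, to be refined by
// postprocessTypes() once more of the module has been analyzed.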
static Type *getAtomicElemTy(SPIRVGlobalRegistry *GR, Instruction *I,
                             Value *PointerOperand) {
  // ...
  return I->getType();
}
bool SPIRVEmitIntrinsics::deduceOperandElementTypeCalledFunction(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool &Incomplete) {
  Function *CalledF = CI->getCalledFunction();
  // ...
  std::string DemangledName =
      getOclOrSpirvBuiltinDemangledName(CalledF->getName());
  if (DemangledName.length() > 0 &&
      /* ... */) {
    const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(*CalledF);
    auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
        DemangledName, ST.getPreferredInstructionSet());
    if (Opcode == SPIRV::OpGroupAsyncCopy) {
      // Deduce from the first two pointer arguments of the async copy.
      for (unsigned i = 0, PtrCnt = 0; i < CI->arg_size() && PtrCnt < 2; ++i) {
        // ...
        KnownElemTy = ElemTy;
        Ops.push_back(std::make_pair(Op, i));
      }
    } else if (Grp == SPIRV::Atomic || Grp == SPIRV::AtomicFloating) {
      // ...
      switch (Opcode) {
      case SPIRV::OpAtomicFAddEXT:
      case SPIRV::OpAtomicFMinEXT:
      case SPIRV::OpAtomicFMaxEXT:
      case SPIRV::OpAtomicLoad:
      case SPIRV::OpAtomicCompareExchangeWeak:
      case SPIRV::OpAtomicCompareExchange:
      case SPIRV::OpAtomicExchange:
      case SPIRV::OpAtomicIAdd:
      case SPIRV::OpAtomicISub:
      case SPIRV::OpAtomicOr:
      case SPIRV::OpAtomicXor:
      case SPIRV::OpAtomicAnd:
      case SPIRV::OpAtomicUMin:
      case SPIRV::OpAtomicUMax:
      case SPIRV::OpAtomicSMin:
      case SPIRV::OpAtomicSMax: {
        // ...
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
        break;
      }
      case SPIRV::OpAtomicStore: {
        // ...
        Incomplete = isTodoType(Op);
        Ops.push_back(std::make_pair(Op, 0));
        break;
      }
      // ...
      }
    }
  }
  // ...
}
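// For the atomic builtins handled above, the pointee type expected behind the
// pointer argument is the type of the value being stored, added or exchanged,
// so pushing (pointer, 0) into Ops lets the caller wrap that argument in an
// spv_ptrcast whenever its deduced element type disagrees.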
void SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionPointer(
    CallInst *CI, SmallVector<std::pair<Value *, unsigned>> &Ops,
    Type *&KnownElemTy, bool IsPostprocessing) {
  // The called operand has no regular operand index; use ~0u as a sentinel.
  Ops.push_back(std::make_pair(Op, std::numeric_limits<unsigned>::max()));
  FunctionType *FTy = SPIRV::getOriginalFunctionType(*CI);
  bool IsNewFTy = false, IsIncomplete = false;
  // ...
    Type *ArgTy = Arg->getType();
    // ...
      if (isTodoType(Arg))
        IsIncomplete = true;
    // ...
      IsIncomplete = true;
    // ...
      ArgTy = FTy->getFunctionParamType(ParmIdx);
    // ...
  Type *RetTy = FTy->getReturnType();
  // ...
    IsIncomplete = true;
  // ...
    IsIncomplete = true;
  // ...
  if (!IsPostprocessing && IsIncomplete)
    insertTodoType(CI);
  KnownElemTy =
      IsNewFTy ? FunctionType::get(RetTy, ArgTys, FTy->isVarArg()) : FTy;
}
bool SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionRet(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
    Type *&KnownElemTy, Value *Op, Function *F) {
  // ...
  DenseSet<std::pair<Value *, Value *>> VisitedSubst{std::make_pair(I, Op)};
  for (User *U : F->users()) {
    // ...
    propagateElemType(CI, PrevElemTy, VisitedSubst);
    // ...
  }
  // Once the function's return element type is known, revisit the incomplete
  // returns collected so far; otherwise remember this one for later.
  if (/* ... */) {
    for (Instruction *IncompleteRetI : *IncompleteRets)
      deduceOperandElementType(IncompleteRetI, nullptr, AskOps,
                               IsPostprocessing);
  } else if (IncompleteRets) {
    IncompleteRets->insert(I);
  }
  TypeValidated.insert(I);
  // ...
}
void SPIRVEmitIntrinsics::deduceOperandElementType(
    Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
    const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing) {
  SmallVector<std::pair<Value *, unsigned>> Ops;
  Type *KnownElemTy = nullptr;
  bool Incomplete = false;
  // Look for known patterns.
  if (auto *Ref = dyn_cast<PHINode>(I)) {
    // ...
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumIncomingValues(); i++) {
      // ...
      Ops.push_back(std::make_pair(Op, i));
    }
  } else if (/* ... */) {
    // ...
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getPointerOperand(), 0));
  } else if (/* ... */) {
    // ...
    Incomplete = isTodoType(I);
    Ops.push_back(std::make_pair(Ref->getOperand(0), 0));
  } else if (auto *Ref = dyn_cast<GetElementPtrInst>(I)) {
    // ...
    KnownElemTy = Ref->getSourceElementType();
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 GetElementPtrInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<LoadInst>(I)) {
    KnownElemTy = I->getType();
    // ...
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 LoadInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<StoreInst>(I)) {
    if (!(KnownElemTy =
              reconstructType(Ref->getValueOperand(), false, IsPostprocessing)))
      return;
    // ...
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 StoreInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<AtomicRMWInst>(I)) {
    // ...
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 AtomicRMWInst::getPointerOperandIndex()));
  } else if (auto *Ref = dyn_cast<AtomicCmpXchgInst>(I)) {
    // ...
    Incomplete = isTodoType(Ref->getPointerOperand());
    Ops.push_back(std::make_pair(Ref->getPointerOperand(),
                                 AtomicCmpXchgInst::getPointerOperandIndex()));
  } else if (/* ... */) {
    // ...
    Incomplete = isTodoType(I);
    for (unsigned i = 0; i < Ref->getNumOperands(); i++) {
      // ...
      Ops.push_back(std::make_pair(Op, i));
    }
  } else if (auto *Ref = dyn_cast<ReturnInst>(I)) {
    // ...
    if (deduceOperandElementTypeFunctionRet(I, IncompleteRets, AskOps,
                                            IsPostprocessing, KnownElemTy, Op,
                                            CurrF))
      return;
    Incomplete = isTodoType(CurrF);
    Ops.push_back(std::make_pair(Op, 0));
  } else if (auto *Ref = dyn_cast<ICmpInst>(I)) {
    // Both pointer operands of a comparison must agree on the pointee type:
    // prefer a known type from either side and mark the other for a cast.
    Value *Op0 = Ref->getOperand(0), *Op1 = Ref->getOperand(1);
    // ...
    bool Incomplete0 = isTodoType(Op0);
    bool Incomplete1 = isTodoType(Op1);
    Type *ElemTy1 = GR->findDeducedElementType(Op1);
    Type *ElemTy0 = (Incomplete0 && !Incomplete1 && ElemTy1)
                        ? nullptr
                        : GR->findDeducedElementType(Op0);
    if (ElemTy0) {
      KnownElemTy = ElemTy0;
      Incomplete = Incomplete0;
      Ops.push_back(std::make_pair(Op1, 1));
    } else if (ElemTy1) {
      KnownElemTy = ElemTy1;
      Incomplete = Incomplete1;
      Ops.push_back(std::make_pair(Op0, 0));
    }
  } else if (auto *CI = dyn_cast<CallInst>(I)) {
    if (!CI->isIndirectCall())
      deduceOperandElementTypeCalledFunction(CI, Ops, KnownElemTy, Incomplete);
    else if (HaveFunPtrs)
      deduceOperandElementTypeFunctionPointer(CI, Ops, KnownElemTy,
                                              IsPostprocessing);
  }

  // Nothing to do if no type was deduced or there are no operands to fix.
  if (!KnownElemTy || Ops.size() == 0)
    return;

  // ...
  for (auto &OpIt : Ops) {
    Value *Op = OpIt.first;
    // ...
    Type *AskTy = nullptr;
    CallInst *AskCI = nullptr;
    if (IsPostprocessing && AskOps) {
      // ...
    }
    // ...
    if (Ty == KnownElemTy)
      continue;
    // ...
    Type *OpTy = Op->getType();
    if (Op->hasUseList() &&
        /* ... */) {
      // ...
    } else if (!IsPostprocessing) {
      // ...
    }
    // ...
    if (AssignCI == nullptr) {
      // ...
    } else {
      // ...
      DenseSet<std::pair<Value *, Value *>> VisitedSubst{
          std::make_pair(I, Op)};
      propagateElemTypeRec(Op, KnownElemTy, PrevElemTy, VisitedSubst);
    }
    // ...
    CallInst *PtrCastI =
        buildSpvPtrcast(I->getParent()->getParent(), Op, KnownElemTy);
    if (OpIt.second == std::numeric_limits<unsigned>::max())
      // ... (the called operand is replaced, not a numbered operand)
    else
      I->setOperand(OpIt.second, PtrCastI);
  }
  TypeValidated.insert(I);
}
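// Throughout deduceOperandElementType(), Ops accumulates (operand,
// operand-index) pairs whose pointee type must equal KnownElemTy; any operand
// that still disagrees is rerouted through a freshly built spv_ptrcast at the
// end of the function. The sentinel index ~0u marks the called operand of an
// indirect call, which has no regular operand slot.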
void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
                                              Instruction *New,
                                              IRBuilder<> &B) {
  while (!Old->user_empty()) {
    auto *U = Old->user_back();
    if (isAssignTypeInstr(U)) {
      B.SetInsertPoint(U);
      SmallVector<Value *, 2> Args = {New, U->getOperand(1)};
      CallInst *AssignCI =
          B.CreateIntrinsic(Intrinsic::spv_assign_type, {New->getType()}, Args);
      // ...
      U->eraseFromParent();
    } else {
      U->replaceUsesOfWith(Old, New);
    }
  }
  // ...
  New->copyMetadata(*Old);
  Old->eraseFromParent();
}
void SPIRVEmitIntrinsics::preprocessUndefs(IRBuilder<> &B) {
  std::queue<Instruction *> Worklist;
  // ...
  while (!Worklist.empty()) {
    Instruction *I = Worklist.front();
    Worklist.pop();
    bool BPrepared = false;
    for (auto &Op : I->operands()) {
      auto *AggrUndef = dyn_cast<UndefValue>(Op);
      if (!AggrUndef || !Op->getType()->isAggregateType())
        continue;
      // ...
      auto *IntrUndef = B.CreateIntrinsic(Intrinsic::spv_undef, {});
      Worklist.push(IntrUndef);
      I->replaceUsesOfWith(Op, IntrUndef);
      AggrConsts[IntrUndef] = AggrUndef;
      AggrConstTypes[IntrUndef] = AggrUndef->getType();
    }
  }
}
void SPIRVEmitIntrinsics::preprocessCompositeConstants(IRBuilder<> &B) {
  std::queue<Instruction *> Worklist;
  // ...
  while (!Worklist.empty()) {
    auto *I = Worklist.front();
    // ...
    bool KeepInst = false;
    for (const auto &Op : I->operands()) {
      // ...
      Type *ResTy = nullptr;
      // ...
        ResTy = COp->getType();
      // ...
        // Non-vector constant-data operands are forced to i32
        // (cf. isAggrConstForceInt32).
        ResTy = Op->getType()->isVectorTy() ? COp->getType() : B.getInt32Ty();
      // ...
      for (unsigned i = 0; i < COp->getNumElements(); ++i)
        Args.push_back(COp->getElementAsConstant(i));
      // ...
      IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
            : B.SetInsertPoint(I);
      // ...
      auto *CI =
          B.CreateIntrinsic(Intrinsic::spv_const_composite, {ResTy}, {Args});
      // ...
      AggrConsts[CI] = AggrConst;
      AggrConstTypes[CI] = deduceNestedTypeHelper(AggrConst, false);
      // ...
    }
  }
}
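// Aggregate constants cannot flow into SPIR-V selection as plain IR operands,
// so each one is interned as an llvm.spv.const_composite call, and the
// original Constant plus its fully deduced nested type are remembered in
// AggrConsts / AggrConstTypes for later type assignment.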
static void createDecorationIntrinsic(Instruction *I, MDNode *Node,
                                      IRBuilder<> &B) {
  // ...
  B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
                    /* ... */);
}

static void createRoundingModeDecoration(Instruction *I,
                                         unsigned RoundingModeDeco,
                                         IRBuilder<> &B) {
  // ...
          ConstantInt::get(Int32Ty, SPIRV::Decoration::FPRoundingMode)),
  // ...
}

static void createSaturatedConversionDecoration(Instruction *I,
                                                IRBuilder<> &B) {
  // ...
  MDNode *SaturatedConversionNode =
      MDNode::get(Ctx, {ConstantAsMetadata::get(ConstantInt::get(
                       Int32Ty, SPIRV::Decoration::SaturatedConversion))});
  // ...
}

static void addSaturatedDecorationToIntrinsic(Instruction *I, IRBuilder<> &B) {
  // ...
  if (Fu->isIntrinsic()) {
    const unsigned IntrinsicId = Fu->getIntrinsicID();
    switch (IntrinsicId) {
    case Intrinsic::fptosi_sat:
    case Intrinsic::fptoui_sat:
      // ...
  MDString *ConstraintString = MDString::get(Ctx, IA->getConstraintString());
  // ...
  B.SetInsertPoint(&Call);
  B.CreateIntrinsic(Intrinsic::spv_inline_asm, {Args});
void SPIRVEmitIntrinsics::useRoundingMode(ConstrainedFPIntrinsic *FPI,
                                          IRBuilder<> &B) {
  const std::optional<RoundingMode> RM = FPI->getRoundingMode();
  if (!RM.has_value())
    return;
  unsigned RoundingModeDeco = std::numeric_limits<unsigned>::max();
  switch (RM.value()) {
  // ...
  case RoundingMode::NearestTiesToEven:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTE;
    break;
  case RoundingMode::TowardNegative:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTN;
    break;
  case RoundingMode::TowardPositive:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTP;
    break;
  case RoundingMode::TowardZero:
    RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTZ;
    break;
  case RoundingMode::Dynamic:
  case RoundingMode::NearestTiesToAway:
    // No SPIR-V equivalent: leave the sentinel value in place.
    break;
  }
  if (RoundingModeDeco == std::numeric_limits<unsigned>::max())
    return;
  createRoundingModeDecoration(FPI, RoundingModeDeco, B);
}
Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &I) {
  // ...
  B.SetInsertPoint(&I);
  SmallVector<Value *, 4> Args;
  SmallVector<BasicBlock *> BBCases;
  Args.push_back(I.getCondition());
  for (auto &Case : I.cases()) {
    Args.push_back(Case.getCaseValue());
    BBCases.push_back(Case.getCaseSuccessor());
  }
  CallInst *NewI = B.CreateIntrinsic(Intrinsic::spv_switch,
                                     {I.getOperand(0)->getType()}, {Args});
  // ...
  I.eraseFromParent();
  // ...
  B.SetInsertPoint(ParentBB);
  IndirectBrInst *BrI = B.CreateIndirectBr(
      /* ... */);
  for (BasicBlock *BBCase : BBCases)
    BrI->addDestination(BBCase);
  return BrI;
}
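// The SwitchInst itself is replaced by an llvm.spv.switch call that carries
// the condition and the case values, while the indirectbr built above keeps
// every case successor reachable in the CFG for later lowering.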
static bool isFirstIndexZero(const GetElementPtrInst *GEP) {
  if (GEP->getNumIndices() == 0)
    return false;
  if (const auto *CI = dyn_cast<ConstantInt>(GEP->getOperand(1)))
    return CI->getZExtValue() == 0;
  return false;
}
Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &I) {
  // ...
  B.SetInsertPoint(&I);
  // A byte-offset (i8-typed) GEP must be rebuilt as a typed access chain.
  if (I.getSourceElementType() ==
      IntegerType::getInt8Ty(CurrF->getContext())) {
    // ...
    return buildLogicalAccessChainFromGEP(I);
  }
  // ...
  Value *PtrOp = I.getPointerOperand();
  Type *SrcElemTy = I.getSourceElementType();
  Type *DeducedPointeeTy = deduceElementType(PtrOp, true);
  // If the pointee was deduced as an array of the source element type,
  // prepend a zero index to step into the array first.
  if (auto *ArrTy = dyn_cast<ArrayType>(DeducedPointeeTy)) {
    if (ArrTy->getElementType() == SrcElemTy) {
      SmallVector<Value *> NewIndices;
      Type *FirstIdxType = I.getOperand(1)->getType();
      NewIndices.push_back(ConstantInt::get(FirstIdxType, 0));
      for (Value *Idx : I.indices())
        NewIndices.push_back(Idx);
      // ...
      Args.push_back(B.getInt1(I.isInBounds()));
      Args.push_back(I.getPointerOperand());
      // ...
      auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
      replaceAllUsesWithAndErase(B, &I, NewI);
      return NewI;
    }
  }
  // ...
  Args.push_back(B.getInt1(I.isInBounds()));
  // ...
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &I, NewI);
  return NewI;
}
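// Schematically, a GEP such as
//   %q = getelementptr inbounds %S, ptr %p, i32 0, i32 1
// becomes a call of the shape
//   %q = call ptr @llvm.spv.gep(i1 true, ptr %p, i32 0, i32 1)
// i.e. the in-bounds flag, the base pointer and the index list are packed
// into intrinsic operands (intrinsic name shown without its type mangling).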
Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &I) {
  // ...
  B.SetInsertPoint(&I);
  // One path drops a redundant cast entirely:
  // ...
  I.eraseFromParent();
  // ...otherwise the cast is re-emitted as an intrinsic call:
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_bitcast, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &I, NewI);
  return NewI;
}
void SPIRVEmitIntrinsics::insertAssignPtrTypeTargetExt(
    TargetExtType *AssignedType, Value *V, IRBuilder<> &B) {
  Type *VTy = V->getType();
  // ...
  if (ElemTy != AssignedType)
    // ...

  // If the type is already assigned and matches, nothing to do.
  if (CurrentType == AssignedType)
    return;

  // A conflicting assignment is a fatal type mismatch:
  report_fatal_error(/* ... */ " for value " + V->getName(),
                     false);
void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
    Instruction *I, Value *Pointer, Type *ExpectedElementType,
    unsigned OperandToReplace, IRBuilder<> &B) {
  TypeValidated.insert(I);
  // ...
  Type *PointerElemTy = deduceElementTypeHelper(Pointer, false);
  if (PointerElemTy == ExpectedElementType ||
      /* ... */)
    return;
  // ...
  MetadataAsValue *VMD = buildMD(ExpectedElementVal);
  // ...
  bool FirstPtrCastOrAssignPtrType = true;
  // Look for an equivalent existing ptrcast or assign-ptr-type intrinsic.
  for (auto User : Pointer->users()) {
    auto *II = dyn_cast<IntrinsicInst>(User);
    if (!II ||
        (II->getIntrinsicID() != Intrinsic::spv_assign_ptr_type &&
         II->getIntrinsicID() != Intrinsic::spv_ptrcast) ||
        II->getOperand(0) != Pointer)
      continue;
    // The pointer already carries a cast or type assignment somewhere.
    FirstPtrCastOrAssignPtrType = false;
    if (II->getOperand(1) != VMD ||
        /* ... */)
      continue;
    // Reuse an identical existing ptrcast only...
    if (II->getIntrinsicID() != Intrinsic::spv_ptrcast)
      continue;
    // ...and only when it lives in the same basic block.
    if (II->getParent() != I->getParent())
      continue;
    I->setOperand(OperandToReplace, II);
    return;
  }
  // ...
  if (FirstPtrCastOrAssignPtrType) {
    // The first hint about the pointee type becomes an assign-ptr-type
    // annotation instead of a cast.
    // ...
  } else if (isTodoType(Pointer)) {
    eraseTodoType(Pointer);
    // ...
  }
  // ...
  DenseSet<std::pair<Value *, Value *>> VisitedSubst{
      std::make_pair(I, Pointer)};
  propagateElemType(Pointer, PrevElemTy, VisitedSubst);
  // ...
  auto *PtrCastI = B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
  // ...
  I->setOperand(OperandToReplace, PtrCastI);
}
void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *I,
                                                         IRBuilder<> &B) {
  // Handle basic instructions first:
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    // ...
      return replacePointerOperandWithPtrCast(
          I, SI->getValueOperand(), IntegerType::getInt8Ty(CurrF->getContext()),
          0, B);
    // ...
    Value *Op = SI->getValueOperand();
    Type *OpTy = Op->getType();
    // ...
    if (OpTy == Op->getType())
      OpTy = deduceElementTypeByValueDeep(OpTy, Op, false);
    return replacePointerOperandWithPtrCast(I, Pointer, OpTy, 1, B);
  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
    Type *OpTy = LI->getType();
    // ...
    Type *NewOpTy = OpTy;
    OpTy = deduceElementTypeByValueDeep(OpTy, LI, false);
    if (OpTy == NewOpTy)
      insertTodoType(Pointer);
    // ...
    return replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
  } else if (auto *GEPI = dyn_cast<GetElementPtrInst>(I)) {
    Type *OpTy = nullptr;
    // ...
      OpTy = GEPI->getSourceElementType();
    // ...
    replacePointerOperandWithPtrCast(I, Pointer, OpTy, 0, B);
    // ...
      insertTodoType(Pointer);
    return;
  }
  // Handle calls to declared builtins (non-intrinsic calls):
  // ...
  std::string DemangledName =
      getOclOrSpirvBuiltinDemangledName(CI->getCalledFunction()->getName());
  // ...
  bool HaveTypes = false;
  // ...
    for (User *U : CalledArg->users()) {
      // ...
      if ((ElemTy = deduceElementTypeHelper(Inst, false)) != nullptr)
        break;
    }
    // ...
    HaveTypes |= ElemTy != nullptr;
  // ...
  if (DemangledName.empty() && !HaveTypes)
    return;

  for (unsigned OpIdx = 0; OpIdx < CI->arg_size(); OpIdx++) {
    // ...
    Type *ExpectedType = GR->findDeducedElementType(CalledArg);
    if (!ExpectedType && !DemangledName.empty())
      ExpectedType = SPIRV::parseBuiltinCallArgumentBaseType(
          DemangledName, OpIdx, I->getContext());
    if (!ExpectedType || ExpectedType->isVoidTy())
      continue;
    // ...
    replacePointerOperandWithPtrCast(CI, ArgOperand, ExpectedType, OpIdx, B);
  }
}
Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &I) {
  // ...
  SmallVector<Type *, 4> Types = {I.getType(), I.getOperand(0)->getType(),
                                  I.getOperand(1)->getType(),
                                  I.getOperand(2)->getType()};
  // ...
  B.SetInsertPoint(&I);
  // ...
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_insertelt, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &I, NewI);
  return NewI;
}
Instruction *
SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &I) {
  // ...
  B.SetInsertPoint(&I);
  SmallVector<Type *, 3> Types = {I.getType(), I.getVectorOperand()->getType(),
                                  I.getIndexOperand()->getType()};
  SmallVector<Value *, 2> Args = {I.getVectorOperand(), I.getIndexOperand()};
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_extractelt, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &I, NewI);
  return NewI;
}
Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &I) {
  // ...
  B.SetInsertPoint(&I);
  // ...
  Value *AggregateOp = I.getAggregateOperand();
  // ...
  Args.push_back(AggregateOp);
  Args.push_back(I.getInsertedValueOperand());
  for (auto &Op : I.indices())
    Args.push_back(B.getInt32(Op));
  auto *NewI =
      B.CreateIntrinsic(Intrinsic::spv_insertv, {Types}, {Args});
  replaceMemInstrUses(&I, NewI, B);
  return NewI;
}
Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &I) {
  if (I.getAggregateOperand()->getType()->isAggregateType())
    return &I;
  // ...
  B.SetInsertPoint(&I);
  SmallVector<Value *> Args;
  // ...
  for (auto &Op : I.indices())
    Args.push_back(B.getInt32(Op));
  auto *NewI =
      B.CreateIntrinsic(Intrinsic::spv_extractv, {I.getType()}, {Args});
  replaceAllUsesWithAndErase(B, &I, NewI);
  return NewI;
}
Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &I) {
  if (!I.getType()->isAggregateType())
    return &I;
  // ...
  B.SetInsertPoint(&I);
  TrackConstants = false;
  // ...
  auto *NewI =
      B.CreateIntrinsic(Intrinsic::spv_load, {I.getOperand(0)->getType()},
                        {I.getPointerOperand(), B.getInt16(Flags),
                         B.getInt8(I.getAlign().value())});
  replaceMemInstrUses(&I, NewI, B);
  return NewI;
}
Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &I) {
  // ...
  B.SetInsertPoint(&I);
  TrackConstants = false;
  // ...
  auto *PtrOp = I.getPointerOperand();
  auto *NewI = B.CreateIntrinsic(
      Intrinsic::spv_store, {I.getValueOperand()->getType(), PtrOp->getType()},
      {I.getValueOperand(), PtrOp, B.getInt16(Flags),
       B.getInt8(I.getAlign().value())});
  // ...
  I.eraseFromParent();
  return NewI;
}
Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &I) {
  Value *ArraySize = nullptr;
  if (I.isArrayAllocation()) {
    if (!TM->getSubtargetImpl()->canUseExtension(
            SPIRV::Extension::SPV_INTEL_variable_length_array))
      report_fatal_error(
          "array allocation: this instruction requires the following "
          "SPIR-V extension: SPV_INTEL_variable_length_array",
          false);
    ArraySize = I.getArraySize();
  }
  // ...
  B.SetInsertPoint(&I);
  TrackConstants = false;
  Type *PtrTy = I.getType();
  auto *NewI =
      ArraySize
          ? B.CreateIntrinsic(Intrinsic::spv_alloca_array,
                              {PtrTy, ArraySize->getType()},
                              {ArraySize, B.getInt8(I.getAlign().value())})
          : B.CreateIntrinsic(Intrinsic::spv_alloca, {PtrTy},
                              {B.getInt8(I.getAlign().value())});
  replaceAllUsesWithAndErase(B, &I, NewI);
  return NewI;
}
Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
  assert(I.getType()->isAggregateType() && "Aggregate result is expected");
  // ...
  B.SetInsertPoint(&I);
  // ...
  Args.push_back(B.getInt32(
      static_cast<uint32_t>(getMemScope(I.getContext(), I.getSyncScopeID()))));
  Args.push_back(B.getInt32(
      static_cast<uint32_t>(getMemSemantics(I.getSuccessOrdering()))));
  Args.push_back(B.getInt32(
      static_cast<uint32_t>(getMemSemantics(I.getFailureOrdering()))));
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_cmpxchg,
                                 {I.getPointerOperand()->getType()}, {Args});
  replaceMemInstrUses(&I, NewI, B);
  return NewI;
}
Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &I) {
  // ...
  B.SetInsertPoint(&I);
  B.CreateIntrinsic(Intrinsic::spv_unreachable, {});
  return &I;
}
void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
                                             IRBuilder<> &B) {
  // Skip special artificial variables.
  static const StringSet<> ArtificialGlobals{"llvm.global.annotations",
                                             "llvm.compiler.used"};
  // ...
  deduceElementTypeHelper(&GV, false);
  // ...
  auto *InitInst = B.CreateIntrinsic(Intrinsic::spv_init_global,
                                     /* ... */);
  InitInst->setArgOperand(1, Init);
  // ...
  B.CreateIntrinsic(Intrinsic::spv_unref_global, GV.getType(), &GV);
bool SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *I,
                                                   IRBuilder<> &B,
                                                   bool UnknownElemTypeI8) {
  // ...
  if (Type *ElemTy = deduceElementType(I, UnknownElemTypeI8)) {
    GR->buildAssignPtr(B, ElemTy, I);
    return false;
  }
  return true;
}
void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *I,
                                                IRBuilder<> &B) {
  // Builtins whose result type is known in advance.
  static StringMap<unsigned> ResTypeWellKnown = {
      {"async_work_group_copy", WellKnownTypes::Event},
      {"async_work_group_strided_copy", WellKnownTypes::Event},
      {"__spirv_GroupAsyncCopy", WellKnownTypes::Event}};
  // ...
  bool IsKnown = false;
  if (auto *CI = dyn_cast<CallInst>(I)) {
    // ...
    std::string DemangledName =
        /* ... */;
    if (DemangledName.length() > 0)
      DemangledName =
          SPIRV::lookupBuiltinNameHelper(DemangledName, &DecorationId);
    auto ResIt = ResTypeWellKnown.find(DemangledName);
    if (ResIt != ResTypeWellKnown.end()) {
      // ...
      switch (ResIt->second) {
      case WellKnownTypes::Event:
        // ...
        break;
      }
    }
    // Handle FP-related decorations implied by the builtin's name suffix.
    switch (DecorationId) {
    // ...
    case FPDecorationId::SAT:
      // ...
      break;
    case FPDecorationId::RTE:
      createRoundingModeDecoration(
          CI, SPIRV::FPRoundingMode::FPRoundingMode::RTE, B);
      break;
    case FPDecorationId::RTZ:
      createRoundingModeDecoration(
          CI, SPIRV::FPRoundingMode::FPRoundingMode::RTZ, B);
      break;
    case FPDecorationId::RTP:
      createRoundingModeDecoration(
          CI, SPIRV::FPRoundingMode::FPRoundingMode::RTP, B);
      break;
    case FPDecorationId::RTN:
      createRoundingModeDecoration(
          CI, SPIRV::FPRoundingMode::FPRoundingMode::RTN, B);
      break;
    }
  }

  Type *Ty = I->getType();
  if (/* ... */) {
    // ...
    Type *TypeToAssign = Ty;
    if (auto *II = dyn_cast<IntrinsicInst>(I)) {
      if (II->getIntrinsicID() == Intrinsic::spv_const_composite ||
          II->getIntrinsicID() == Intrinsic::spv_undef) {
        auto It = AggrConstTypes.find(II);
        if (It == AggrConstTypes.end()) {
          // ...
        }
        TypeToAssign = It->second;
      }
    }
    // ...
  }
  for (const auto &Op : I->operands()) {
    // ...
      Type *OpTy = Op->getType();
      // ...
      CallInst *AssignCI =
          /* ... */;
    // ...
      Type *OpTy = Op->getType();
      // ...
      CallInst *AssignCI =
          /* ... */;
    // ...
  }
}
bool SPIRVEmitIntrinsics::shouldTryToAddMemAliasingDecoration(
    Instruction *Inst) {
  const SPIRVSubtarget *STI = TM->getSubtargetImpl();
  if (!STI->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing))
    return false;
  // ...
  switch (/* ... */) {
  case Intrinsic::spv_load:
  case Intrinsic::spv_store:
    return true;
  // ...
  }
  // ...
  const std::string Prefix = "__spirv_Atomic";
  const bool IsAtomic = Name.find(Prefix) == 0;
  // ...
}
void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *I,
                                                 IRBuilder<> &B) {
  if (MDNode *MD = I->getMetadata("spirv.Decorations")) {
    // ...
    B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {I->getType()},
                      /* ... */);
  }
  // Lower alias.scope/noalias metadata to INTEL aliasing decorations.
  auto processMemAliasingDecoration = [&](unsigned Kind) {
    if (MDNode *AliasListMD = I->getMetadata(Kind)) {
      if (shouldTryToAddMemAliasingDecoration(I)) {
        uint32_t Dec = Kind == LLVMContext::MD_alias_scope
                           ? SPIRV::Decoration::AliasScopeINTEL
                           : SPIRV::Decoration::NoAliasINTEL;
        // ...
            I, ConstantInt::get(B.getInt32Ty(), Dec),
        // ...
        B.CreateIntrinsic(Intrinsic::spv_assign_aliasing_decoration,
                          {I->getType()}, {Args});
      }
    }
  };
  processMemAliasingDecoration(LLVMContext::MD_alias_scope);
  processMemAliasingDecoration(LLVMContext::MD_noalias);
  // ...
  if (MDNode *MD = I->getMetadata(LLVMContext::MD_fpmath)) {
    // ...
    bool AllowFPMaxError =
        /* ... */;
    if (!AllowFPMaxError)
      return;
    // ...
    B.CreateIntrinsic(Intrinsic::spv_assign_fpmaxerror_decoration,
                      /* ... */);
  }
}
static SPIRV::FPFastMathDefaultInfoVector &getOrCreateFPFastMathDefaultInfoVec(
    const Module &M,
    DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
        &FPFastMathDefaultInfoMap,
    Function *F) {
  auto it = FPFastMathDefaultInfoMap.find(F);
  if (it != FPFastMathDefaultInfoMap.end())
    return it->second;

  // Otherwise create the default vector for half, float and double, each
  // initialized with FPFastMathMode::None.
  SPIRV::FPFastMathDefaultInfoVector FPFastMathDefaultInfoVec;
  FPFastMathDefaultInfoVec.emplace_back(Type::getHalfTy(M.getContext()),
                                        SPIRV::FPFastMathMode::None);
  FPFastMathDefaultInfoVec.emplace_back(Type::getFloatTy(M.getContext()),
                                        SPIRV::FPFastMathMode::None);
  FPFastMathDefaultInfoVec.emplace_back(Type::getDoubleTy(M.getContext()),
                                        SPIRV::FPFastMathMode::None);
  return FPFastMathDefaultInfoMap[F] = std::move(FPFastMathDefaultInfoVec);
}

static SPIRV::FPFastMathDefaultInfo &
getFPFastMathDefaultInfo(SPIRV::FPFastMathDefaultInfoVector
                             &FPFastMathDefaultInfoVec,
                         const Type *Ty) {
  size_t BitWidth = Ty->getScalarSizeInBits();
  int Index = SPIRV::FPFastMathDefaultInfoVector::
      computeFPFastMathDefaultInfoVecIndex(BitWidth);
  assert(Index >= 0 && Index < 3 &&
         "Expected FPFastMathDefaultInfo for half, float, or double");
  assert(FPFastMathDefaultInfoVec.size() == 3 &&
         "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
  return FPFastMathDefaultInfoVec[Index];
}
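// The info vector always holds exactly three entries indexed by scalar FP
// width: 16-bit half at index 0, 32-bit float at index 1 and 64-bit double at
// index 2, which is what the Index < 3 assertions above enforce.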
void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(Module &M) {
  const SPIRVSubtarget *ST = TM->getSubtargetImpl();
  if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
    return;
  // ...
  auto Node = M.getNamedMetadata("spirv.ExecutionMode");
  // ...
  if (!M.getNamedMetadata("opencl.enable.FP_CONTRACT")) {
    // ...
        ConstantInt::get(Type::getInt32Ty(M.getContext()), 0);
    // ...
    [[maybe_unused]] GlobalVariable *GV =
        new GlobalVariable(M,
                           Type::getInt32Ty(M.getContext()),
                           /* ... */);
    // ...
  }
  // ...
  DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
      FPFastMathDefaultInfoMap;

  for (unsigned i = 0; i < Node->getNumOperands(); i++) {
    // ...
    if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
      assert(MDN->getNumOperands() == 4 &&
             "Expected 4 operands for FPFastMathDefault");
      // ...
      SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
          getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
      SPIRV::FPFastMathDefaultInfo &Info =
          getFPFastMathDefaultInfo(FPFastMathDefaultInfoVec, T);
      // ...
      Info.FPFastMathDefault = true;
    } else if (EM == SPIRV::ExecutionMode::ContractionOff) {
      assert(MDN->getNumOperands() == 2 &&
             "Expected no operands for ContractionOff");
      // ...
      SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
          getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
      for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
        Info.ContractionOff = true;
      }
    } else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
      assert(MDN->getNumOperands() == 3 &&
             "Expected 1 operand for SignedZeroInfNanPreserve");
      unsigned TargetWidth =
          /* ... */;
      // ...
      SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
          getOrCreateFPFastMathDefaultInfoVec(M, FPFastMathDefaultInfoMap, F);
      // ...
      assert(Index >= 0 && Index < 3 &&
             "Expected FPFastMathDefaultInfo for half, float, or double");
      assert(FPFastMathDefaultInfoVec.size() == 3 &&
             "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
      FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve = true;
    }
  }

  std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
  for (auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
    if (FPFastMathDefaultInfoVec.empty())
      continue;
    // ...
    for (const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
      assert(Info.Ty && "Expected target type for FPFastMathDefaultInfo");
      // ...
      if (Flags == SPIRV::FPFastMathMode::None && !Info.ContractionOff &&
          !Info.SignedZeroInfNanPreserve && !Info.FPFastMathDefault)
        continue;
      // Reject mutually inconsistent mode combinations.
      if (Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
        report_fatal_error("Conflicting FPFastMathFlags: ContractionOff "
                           "and AllowContract");
      if (Info.SignedZeroInfNanPreserve &&
          (Flags &
           (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
            SPIRV::FPFastMathMode::NSZ))) {
        if (Info.FPFastMathDefault)
          report_fatal_error("Conflicting FPFastMathFlags: "
                             "SignedZeroInfNanPreserve but at least one of "
                             "NotNaN/NotInf/NSZ is enabled.");
      }
      if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
          !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
            (Flags & SPIRV::FPFastMathMode::AllowContract))) {
        report_fatal_error("Conflicting FPFastMathFlags: "
                           "AllowTransform requires AllowReassoc and "
                           "AllowContract to be set.");
      }
      // ...
      auto it = GlobalVars.find(Flags);
      GlobalVariable *GV = nullptr;
      if (it != GlobalVars.end()) {
        GV = it->second;
      } else {
        // ...
            ConstantInt::get(Type::getInt32Ty(M.getContext()), Flags);
        // ...
        GV = new GlobalVariable(M,
                                Type::getInt32Ty(M.getContext()),
                                /* ... */);
        GlobalVars[Flags] = GV;
      }
      // ...
    }
  }
}
void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *I,
                                                 IRBuilder<> &B) {
  auto *II = dyn_cast<IntrinsicInst>(I);
  bool IsConstComposite =
      II && II->getIntrinsicID() == Intrinsic::spv_const_composite;
  if (IsConstComposite && TrackConstants) {
    // ...
    auto t = AggrConsts.find(I);
    assert(t != AggrConsts.end());
    // ...
    auto *NewOp = buildIntrWithMD(Intrinsic::spv_track_constant,
                                  {II->getType(), II->getType()}, t->second, I,
                                  {}, B);
    // ...
    NewOp->setArgOperand(0, I);
  }
  // ...
  for (const auto &Op : I->operands()) {
    // ...
    unsigned OpNo = Op.getOperandNo();
    if (II && ((II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
               (II->paramHasAttr(OpNo, Attribute::ImmArg))))
      continue;
    // ...
    IsPhi ? B.SetInsertPointPastAllocas(I->getParent()->getParent())
          : B.SetInsertPoint(I);
    // ...
    Type *OpTy = Op->getType();
    // ...
    auto *NewOp = buildIntrWithMD(Intrinsic::spv_track_constant,
                                  {OpTy, OpTyVal->getType()}, Op, OpTyVal, {},
                                  B);
    // ...
    if (!IsConstComposite && isPointerTy(OpTy) && OpElemTy != nullptr &&
        OpElemTy != IntegerType::getInt8Ty(I->getContext())) {
      // ...
      SmallVector<Value *, 2> Args = {
          // ...
      };
      CallInst *PtrCasted =
          B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
      // ...
    }
    I->setOperand(OpNo, NewOp);
  }
  // ...
  if (Named.insert(I).second)
    emitAssignName(I, B);
}
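// The spv_track_constant calls emitted above act as use-site wrappers: an
// interesting constant operand is funneled through an intrinsic that records
// its type as metadata, so the constant stays identifiable when the IR
// reaches instruction selection.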
Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *F,
                                                     unsigned OpIdx) {
  std::unordered_set<Function *> FVisited;
  return deduceFunParamElementType(F, OpIdx, FVisited);
}

Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
    Function *F, unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
  // Cycle protection across mutually recursive functions.
  if (!FVisited.insert(F).second)
    return nullptr;

  std::unordered_set<Value *> Visited;
  SmallVector<std::pair<Function *, unsigned>> Lookup;
  // Search in the function's call sites.
  for (User *U : F->users()) {
    // ...
    if (Type *Ty = deduceElementTypeHelper(OpArg, Visited, false))
      return Ty;
    // ...
    for (User *OpU : OpArg->users()) {
      // ...
      if (!Inst || Inst == CI)
        continue;
      // ...
      if (Type *Ty = deduceElementTypeHelper(Inst, Visited, false))
        return Ty;
    }
    // If the argument is itself a parameter of the enclosing function,
    // remember it for a recursive lookup.
    if (FVisited.find(OuterF) != FVisited.end())
      continue;
    for (unsigned i = 0; i < OuterF->arg_size(); ++i) {
      if (OuterF->getArg(i) == OpArg) {
        Lookup.push_back(std::make_pair(OuterF, i));
        break;
      }
    }
  }
  // ...
  for (auto &Pair : Lookup) {
    if (Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
      return Ty;
  }
  return nullptr;
}
void SPIRVEmitIntrinsics::processParamTypesByFunHeader(Function *F,
                                                       IRBuilder<> &B) {
  B.SetInsertPointPastAllocas(F);
  // ...
  for (User *U : F->users()) {
    // ...
  }
  // ...
  for (User *U : Arg->users()) {
    // ...
    if (CI->getParent()->getParent() == CurrF) {
      // ...
      deduceOperandElementTypeFunctionPointer(CI, Ops, ElemTy, false);
      // ...
void SPIRVEmitIntrinsics::processParamTypes(Function *F, IRBuilder<> &B) {
  B.SetInsertPointPastAllocas(F);
  // ...
  if (!ElemTy && (ElemTy = deduceFunParamElementType(F, OpIdx)) != nullptr) {
    // ...
    DenseSet<std::pair<Value *, Value *>> VisitedSubst;
    // ...
    propagateElemType(Arg, IntegerType::getInt8Ty(F->getContext()),
                      VisitedSubst);
    // ...
  }
  // ...
  bool IsNewFTy = false;
  // ...
bool SPIRVEmitIntrinsics::processFunctionPointers(Module &M) {
  SmallVector<Function *> Worklist;
  for (auto &F : M) {
    if (F.isIntrinsic())
      continue;
    if (F.isDeclaration()) {
      for (User *U : F.users()) {
        // ...
      }
    } else {
      // ...
      for (User *U : F.users()) {
        auto *II = dyn_cast<IntrinsicInst>(U);
        if (!II || II->arg_size() != 3 || II->getOperand(0) != &F)
          continue;
        if (II->getIntrinsicID() == Intrinsic::spv_assign_ptr_type ||
            II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
          // ...
        }
      }
    }
  }
  if (Worklist.empty())
    return false;

  // Build an internal "service" function that calls every collected function
  // with poison arguments.
  std::string ServiceFunName = SPIRV_BACKEND_SERVICE_FUN_NAME;
  if (!getVacantFunctionName(M, ServiceFunName))
    report_fatal_error(
        "cannot allocate a name for the internal service function");
  LLVMContext &Ctx = M.getContext();
  // ...
  for (Function *F : Worklist) {
    SmallVector<Value *> Args;
    for (const auto &Arg : F->args())
      Args.push_back(getNormalizedPoisonValue(Arg.getType()));
    IRB.CreateCall(F, Args);
  }
  IRB.CreateRetVoid();
  // ...
  return true;
}
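// The "service" function built above gives every collected function a direct
// call site with poison arguments; its name is reserved through
// getVacantFunctionName() so it cannot clash with existing symbols.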
void SPIRVEmitIntrinsics::applyDemangledPtrArgTypes(IRBuilder<> &B) {
  DenseMap<Function *, CallInst *> Ptrcasts;
  for (auto It : FDeclPtrTys) {
    Function *F = It.first;
    for (auto *U : F->users()) {
      // ...
      for (auto [Idx, ElemTy] : It.second) {
        // ...
          B.SetInsertPointPastAllocas(Arg->getParent());
        // ...
          replaceUsesOfWithSpvPtrcast(Param, normalizeType(ElemTy), CI,
                                      Ptrcasts);
        // ...
              .getFirstNonPHIOrDbgOrAlloca());
        // ...
GetElementPtrInst *
SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP) {
  // ...
  Type *SrcTy = GEP->getSourceElementType();
  SmallVector<Value *, 8> Indices(GEP->indices());
  auto *ArrTy = dyn_cast<ArrayType>(SrcTy);
  if (ArrTy && ArrTy->getNumElements() == 0 &&
      isFirstIndexZero(GEP)) {
    // Peel the zero-length outer array and drop the leading zero index.
    Indices.erase(Indices.begin());
    SrcTy = ArrTy->getElementType();
    return GetElementPtrInst::Create(SrcTy, GEP->getPointerOperand(), Indices,
                                     GEP->getNoWrapFlags(), "",
                                     GEP->getIterator());
  }
  return nullptr;
}
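// Example: `getelementptr [0 x i32], ptr %p, i64 0, i64 %i` is rebuilt as
// `getelementptr i32, ptr %p, i64 %i`; the zero-length outer array level
// carries no layout information, so peeling it leaves the computed address
// unchanged.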
bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
  if (Func.isDeclaration())
    return false;

  const SPIRVSubtarget &ST = TM->getSubtarget<SPIRVSubtarget>(Func);
  GR = ST.getSPIRVGlobalRegistry();
  // ...
  HaveFunPtrs =
      ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
  // ...
  AggrConstTypes.clear();
  // ...

  // Fold GEPs over zero-length arrays and record deducible GEP types up
  // front, erasing instructions that become dead in the process.
  SmallPtrSet<Instruction *, 4> DeadInsts;
  // ...
    if (!Ref || GR->findDeducedElementType(Ref))
      continue;
    // ...
    GetElementPtrInst *NewGEP = simplifyZeroLengthArrayGepInst(Ref);
    if (NewGEP) {
      Ref->replaceAllUsesWith(NewGEP);
      // ...
    }
    if (Type *GepTy = getGEPType(Ref))
      // ...
  for (auto *I : DeadInsts) {
    assert(I->use_empty() && "Dead instruction should not have any uses left");
    I->eraseFromParent();
  }

  processParamTypesByFunHeader(CurrF, B);
  // ...
  Type *ElTy = SI->getValueOperand()->getType();
  // ...
  B.SetInsertPoint(&Func.getEntryBlock(), Func.getEntryBlock().begin());
  for (auto &GV : Func.getParent()->globals())
    processGlobalValue(GV, B);

  preprocessUndefs(B);
  preprocessCompositeConstants(B);
  // ...
  applyDemangledPtrArgTypes(B);

  // Pass 1: assign types and insert the required pointer casts.
  for (auto &I : Worklist) {
    // Convergence intrinsics are deliberately skipped here.
    if (isConvergenceIntrinsic(I))
      continue;

    bool Postpone = insertAssignPtrTypeIntrs(I, B, false);
    insertAssignTypeIntrs(I, B);
    insertPtrCastOrAssignTypeInstr(I, B);
    // ...
    // If the type could not be deduced and nothing else produced an
    // assignment, retry with the i8 fallback enabled.
    if (Postpone && !GR->findAssignPtrTypeInstr(I))
      insertAssignPtrTypeIntrs(I, B, true);

    if (auto *FPI = dyn_cast<ConstrainedFPIntrinsic>(I))
      useRoundingMode(FPI, B);
  }

  // Pass 2: deduce element types of pointer operands.
  SmallPtrSet<Instruction *, 4> IncompleteRets;
  // ...
    deduceOperandElementType(&I, &IncompleteRets);

  // PHI nodes are skipped above and processed separately.
  for (BasicBlock &BB : Func)
    for (PHINode &Phi : BB.phis())
      // ...
        deduceOperandElementType(&Phi, nullptr);

  // Pass 3: visitor-driven rewriting and per-instruction post-processing.
  for (auto *I : Worklist) {
    TrackConstants = true;
    // ...
    if (isConvergenceIntrinsic(I))
      continue;
    // ...
    processInstrAfterVisit(I, B);
  }
  // ...
}
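// Roughly, after runOnFunction() a body such as
//   %x = alloca i32
//   store i32 0, ptr %x
// contains no plain allocas or stores: they are rewritten into
// llvm.spv.alloca / llvm.spv.store calls, and %x additionally carries an
// llvm.spv.assign.ptr.type annotation recording i32 as its pointee type
// (shapes shown schematically; exact operands come from the visitors above).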
bool SPIRVEmitIntrinsics::postprocessTypes(Module &M) {
  if (!GR || TodoTypeSz == 0)
    return false;

  unsigned SzTodo = TodoTypeSz;
  DenseMap<Value *, SmallPtrSet<Value *, 4>> ToProcess;
  // First round: try to deduce the remaining "todo" values directly.
  // ...
    CallInst *AssignCI = GR->findAssignPtrTypeInstr(Op);
    Type *KnownTy = GR->findDeducedElementType(Op);
    if (!KnownTy || !AssignCI)
      continue;
    // ...
    std::unordered_set<Value *> Visited;
    if (Type *ElemTy = deduceElementTypeHelper(Op, Visited, false, true)) {
      if (ElemTy != KnownTy) {
        DenseSet<std::pair<Value *, Value *>> VisitedSubst;
        propagateElemType(CI, ElemTy, VisitedSubst);
        // ...
      }
    }
    // ...
    if (Op->hasUseList()) {
      for (User *U : Op->users()) {
        // ...
      }
    }
  if (TodoTypeSz == 0)
    return true;

  // Second round: re-run operand-type deduction restricted to the operands
  // that are still marked "todo".
  SmallPtrSet<Instruction *, 4> IncompleteRets;
  // ...
      auto It = ToProcess.find(&I);
      if (It == ToProcess.end())
        continue;
      It->second.remove_if([this](Value *V) { return !isTodoType(V); });
      if (It->second.size() == 0)
        continue;
      deduceOperandElementType(&I, &IncompleteRets, &It->second, true);
      if (TodoTypeSz == 0)
        return true;
  // ...
  return SzTodo > TodoTypeSz;
}
void SPIRVEmitIntrinsics::parseFunDeclarations(Module &M) {
  for (auto &F : M) {
    if (!F.isDeclaration() || F.isIntrinsic())
      continue;
    // ...
    if (DemangledName.empty())
      continue;
    // ...
    auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
        DemangledName, ST.getPreferredInstructionSet());
    if (Opcode != SPIRV::OpGroupAsyncCopy)
      continue;
    // ...
    SmallVector<unsigned> Idxs;
    // ...
    LLVMContext &Ctx = F.getContext();
    // ...
    SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledName, Ctx);
    if (!TypeStrs.size())
      continue;
    // ...
    for (unsigned Idx : Idxs) {
      if (Idx >= TypeStrs.size())
        continue;
      if (Type *ElemTy =
              SPIRV::parseBuiltinCallArgumentType(TypeStrs[Idx].trim(), Ctx))
        FDeclPtrTys[&F].push_back(std::make_pair(Idx, ElemTy));
    }
  }
}
bool SPIRVEmitIntrinsics::runOnModule(Module &M) {
  bool Changed = false;

  parseFunDeclarations(M);
  insertConstantsForFPFastMathDefault(M);
  // ...
  // Deduce element types of function parameters once all bodies are known.
  for (auto &F : M) {
    if (!F.isDeclaration() && !F.isIntrinsic()) {
      CurrF = &F;
      // ...
      processParamTypes(&F, B);
    }
  }

  CanTodoType = false;
  Changed |= postprocessTypes(M);

  if (HaveFunPtrs)
    Changed |= processFunctionPointers(M);
  // ...
  return Changed;
}

ModulePass *llvm::createSPIRVEmitIntrinsicsPass(SPIRVTargetMachine *TM) {
  return new SPIRVEmitIntrinsics(TM);
}