#include "llvm/IR/IntrinsicsSPIRV.h"
#include <unordered_set>

    cl::desc("Emit OpName for all instructions"),

#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"
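// Maps each global variable to the set of functions that use it, either
// directly or transitively through other globals' initializers; built per
// module and queried through getTransitiveUserFunctions().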
class GlobalVariableUsers {
  template <typename T1, typename T2>
  using OneToManyMapTy = DenseMap<T1, SmallPtrSet<T2, 4>>;

  OneToManyMapTy<const GlobalVariable *, const Function *> GlobalIsUsedByFun;

  void collectGlobalUsers(
      const GlobalVariable *GV,
      OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
          &GlobalIsUsedByGlobal) {
    while (!Stack.empty()) {
        GlobalIsUsedByFun[GV].insert(I->getFunction());
        GlobalIsUsedByGlobal[GV].insert(UserGV);
      Stack.append(C->user_begin(), C->user_end());
  bool propagateGlobalToGlobalUsers(
      OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
          &GlobalIsUsedByGlobal) {
    for (auto &[GV, UserGlobals] : GlobalIsUsedByGlobal) {
      OldUsersGlobals.assign(UserGlobals.begin(), UserGlobals.end());
      for (const GlobalVariable *UserGV : OldUsersGlobals) {
        auto It = GlobalIsUsedByGlobal.find(UserGV);
        if (It == GlobalIsUsedByGlobal.end())
  void propagateGlobalToFunctionReferences(
      OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
          &GlobalIsUsedByGlobal) {
    for (auto &[GV, UserGlobals] : GlobalIsUsedByGlobal) {
      auto &UserFunctions = GlobalIsUsedByFun[GV];
      for (const GlobalVariable *UserGV : UserGlobals) {
        auto It = GlobalIsUsedByFun.find(UserGV);
        if (It == GlobalIsUsedByFun.end())
    OneToManyMapTy<const GlobalVariable *, const GlobalVariable *>
        GlobalIsUsedByGlobal;
    GlobalIsUsedByFun.clear();
    for (GlobalVariable &GV : M.globals())
      collectGlobalUsers(&GV, GlobalIsUsedByGlobal);
    while (propagateGlobalToGlobalUsers(GlobalIsUsedByGlobal))
    propagateGlobalToFunctionReferences(GlobalIsUsedByGlobal);
  using FunctionSetType = typename decltype(GlobalIsUsedByFun)::mapped_type;

  const FunctionSetType &
  getTransitiveUserFunctions(const GlobalVariable &GV) const {
    auto It = GlobalIsUsedByFun.find(&GV);
    if (It != GlobalIsUsedByFun.end())
    static const FunctionSetType Empty{};
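// Free-standing helpers. isaGEP tests whether a value is a GEP instruction;
// getByteAddressingMultiplier returns, when defined, the factor used to turn
// byte-style GEP offsets into typed indices (only the array case, returning
// the element count, is visible in this fragment).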
static bool isaGEP(const Value *V) {

static std::optional<uint64_t> getByteAddressingMultiplier(Type *Ty) {
    return AT->getNumElements();
class SPIRVEmitIntrinsics
    : public ModulePass,
      public InstVisitor<SPIRVEmitIntrinsics, Instruction *> {
  const SPIRVTargetMachine &TM;
  SPIRVGlobalRegistry *GR = nullptr;
  bool TrackConstants = true;
  bool HaveFunPtrs = false;
  DenseMap<Instruction *, Constant *> AggrConsts;
  DenseMap<Instruction *, Type *> AggrConstTypes;
  DenseSet<Instruction *> AggrStores;
  SmallPtrSet<Instruction *, 8> DeletedInstrs;
  GlobalVariableUsers GVUsers;
  std::unordered_set<Value *> Named;

  DenseMap<Function *, SmallVector<std::pair<unsigned, Type *>>> FDeclPtrTys;

  bool CanTodoType = true;
  unsigned TodoTypeSz = 0;
  DenseMap<Value *, bool> TodoType;
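  // TodoType tracks values whose pointee type is still unknown; TodoTypeSz
  // counts the live entries so postprocessTypes() can tell when deduction has
  // converged. GEPs are skipped here (see insertTodoType below).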
  void insertTodoType(Value *Op) {
    if (CanTodoType && !isaGEP(Op)) {
      auto It = TodoType.try_emplace(Op, true);

  void eraseTodoType(Value *Op) {
    auto It = TodoType.find(Op);
    if (It != TodoType.end() && It->second) {

    auto It = TodoType.find(Op);
    return It != TodoType.end() && It->second;
  std::unordered_set<Instruction *> TypeValidated;

  enum WellKnownTypes { Event };
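  // Pointee-type deduction entry points. UnknownElemTypeI8 allows falling back
  // to i8 when nothing better can be deduced; such values are also queued in
  // TodoType for another attempt during postprocessing.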
  Type *deduceElementType(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, bool UnknownElemTypeI8);
  Type *deduceElementTypeHelper(Value *I, std::unordered_set<Value *> &Visited,
                                bool UnknownElemTypeI8,
                                bool IgnoreKnownType = false);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  Type *deduceElementTypeByUsersDeep(Value *Op,
                                     std::unordered_set<Value *> &Visited,
                                     bool UnknownElemTypeI8);
  void maybeAssignPtrType(Type *&Ty, Value *Op, Type *RefTy,
                          bool UnknownElemTypeI8);
  Type *deduceNestedTypeHelper(User *U, bool UnknownElemTypeI8);
  Type *deduceNestedTypeHelper(User *U, Type *Ty,
                               std::unordered_set<Value *> &Visited,
                               bool UnknownElemTypeI8);
  void deduceOperandElementType(Instruction *I,
                                SmallPtrSet<Instruction *, 4> *IncompleteRets,
                                const SmallPtrSet<Value *, 4> *AskOps = nullptr,
                                bool IsPostprocessing = false);
  void simplifyNullAddrSpaceCasts();
  Type *reconstructType(Value *Op, bool UnknownElemTypeI8,
                        bool IsPostprocessing);
  void replaceMemInstrUses(Instruction *Old, Instruction *New, IRBuilder<> &B);
  bool insertAssignPtrTypeIntrs(Instruction *I, IRBuilder<> &B,
                                bool UnknownElemTypeI8);
  void insertAssignPtrTypeTargetExt(TargetExtType *AssignedType, Value *V,
                                    IRBuilder<> &B);
  void replacePointerOperandWithPtrCast(Instruction *I, Value *Pointer,
                                        Type *ExpectedElementType,
                                        unsigned OperandToReplace,
                                        IRBuilder<> &B);
  void insertPtrCastOrAssignTypeInstr(Instruction *I, IRBuilder<> &B);
  bool shouldTryToAddMemAliasingDecoration(Instruction *Inst);
  void insertConstantsForFPFastMathDefault(Module &M);
  void processGlobalValue(GlobalVariable &GV, IRBuilder<> &B);
  void processParamTypesByFunHeader(Function *F, IRBuilder<> &B);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx);
  Type *deduceFunParamElementType(Function *F, unsigned OpIdx,
                                  std::unordered_set<Function *> &FVisited);
  bool deduceOperandElementTypeCalledFunction(
      Type *&KnownElemTy, bool &Incomplete);
  void deduceOperandElementTypeFunctionPointer(
      Type *&KnownElemTy, bool IsPostprocessing);
  bool deduceOperandElementTypeFunctionRet(
      Instruction *I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
      const SmallPtrSet<Value *, 4> *AskOps, bool IsPostprocessing,
  CallInst *buildSpvPtrcast(Function *F, Value *Op, Type *ElemTy);
  void replaceUsesOfWithSpvPtrcast(Value *Op, Type *ElemTy, Instruction *I,
                                   DenseMap<Function *, CallInst *> Ptrcasts);
  void propagateElemType(Value *Op, Type *ElemTy,
                         DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                            DenseSet<std::pair<Value *, Value *>> &VisitedSubst);
  void propagateElemTypeRec(Value *Op, Type *PtrElemTy, Type *CastElemTy,
                            DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
                            std::unordered_set<Value *> &Visited,
                            DenseMap<Function *, CallInst *> Ptrcasts);
  void replaceAllUsesWithAndErase(IRBuilder<> &B, Instruction *Src,
                                  Instruction *Dest, bool DeleteOld = true);
  GetElementPtrInst *simplifyZeroLengthArrayGepInst(GetElementPtrInst *GEP);
  bool postprocessTypes(Module &M);
  bool processFunctionPointers(Module &M);
  void parseFunDeclarations(Module &M);
  void useRoundingMode(ConstrainedFPIntrinsic *FPI, IRBuilder<> &B);
  bool processMaskedMemIntrinsic(IntrinsicInst &I);
  bool convertMaskedMemIntrinsics(Module &M);
  void preprocessBoolVectorBitcasts(Function &F);
  void emitUnstructuredLoopControls(Function &F, IRBuilder<> &B);
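  // Walkers over "logical" access chains: they translate the flat byte offset
  // of an i8-style GEP into a sequence of per-level indices, invoking
  // OnLiteralIndexing for constant steps and OnDynamicIndexing when the offset
  // is only known at run time.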
  bool walkLogicalAccessChain(
      GetElementPtrInst &GEP,
      const std::function<void(Type *PointedType, uint64_t Index)>
          &OnLiteralIndexing,
      const std::function<void(Type *, Value *,
                               uint64_t Multiplier)> &OnDynamicIndexing);
  bool walkLogicalAccessChainDynamic(
      Type *CurType, Value *Operand, uint64_t Multiplier,
      const std::function<void(Type *, uint64_t)> &OnLiteralIndexing,
      const std::function<void(Type *, Value *, uint64_t)> &OnDynamicIndexing);
  bool walkLogicalAccessChainConstant(
      const std::function<void(Type *, uint64_t)> &OnLiteralIndexing);
  Type *getGEPType(GetElementPtrInst *GEP);
  Type *getGEPTypeLogical(GetElementPtrInst *GEP);
  Instruction *buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP);
  SPIRVEmitIntrinsics(const SPIRVTargetMachine &TM) : ModulePass(ID), TM(TM) {}

  Instruction *visitGetElementPtrInst(GetElementPtrInst &I);
  Instruction *visitInsertElementInst(InsertElementInst &I);
  Instruction *visitExtractElementInst(ExtractElementInst &I);
  Instruction *visitExtractValueInst(ExtractValueInst &I);
  Instruction *visitAtomicCmpXchgInst(AtomicCmpXchgInst &I);

  StringRef getPassName() const override { return "SPIRV emit intrinsics"; }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    ModulePass::getAnalysisUsage(AU);

                   Intrinsic::experimental_convergence_loop,
                   Intrinsic::experimental_convergence_anchor>());
bool expectIgnoredInIRTranslation(const Instruction *I) {
      Intrinsic::spv_resource_handlefrombinding,
      Intrinsic::spv_resource_getpointer>());

  return getPointerRoot(V);

char SPIRVEmitIntrinsics::ID = 0;

                "SPIRV emit intrinsics", false, false)
  bool IsUndefAggregate = isa<UndefValue>(V) && V->getType()->isAggregateType();

    B.SetInsertPoint(I->getParent()->getFirstNonPHIOrDbgOrAlloca());
  B.SetCurrentDebugLocation(I->getDebugLoc());
  if (I->getType()->isVoidTy())
    B.SetInsertPoint(I->getNextNode());
    B.SetInsertPoint(*I->getInsertionPointAfterDef());

  if (I->getType()->isTokenTy())
                       "does not support token type",
  if (!I->hasName() || I->getType()->isAggregateType() ||
      expectIgnoredInIRTranslation(I))
    if (F && F->getName().starts_with("llvm.spv.alloca"))

  std::vector<Value *> Args = {
  B.CreateIntrinsic(Intrinsic::spv_assign_name, {I->getType()}, Args);
void SPIRVEmitIntrinsics::replaceAllUsesWith(Value *Src, Value *Dest,
  if (isTodoType(Src)) {
    insertTodoType(Dest);

void SPIRVEmitIntrinsics::replaceAllUsesWithAndErase(IRBuilder<> &B,
  std::string Name = Src->hasName() ? Src->getName().str() : "";
  Src->eraseFromParent();
  if (Named.insert(Dest).second)
Type *SPIRVEmitIntrinsics::reconstructType(Value *Op, bool UnknownElemTypeI8,
                                           bool IsPostprocessing) {
    if (auto It = AggrConstTypes.find(OpI); It != AggrConstTypes.end())
  if (UnknownElemTypeI8) {
    if (!IsPostprocessing)
CallInst *SPIRVEmitIntrinsics::buildSpvPtrcast(Function *F, Value *Op,
    B.SetInsertPointPastAllocas(OpA->getParent());
    B.SetInsertPoint(F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
  Type *OpTy = Op->getType();
  CallInst *PtrCasted =
      B.CreateIntrinsic(Intrinsic::spv_ptrcast, {Types}, Args);
void SPIRVEmitIntrinsics::replaceUsesOfWithSpvPtrcast(
    Value *Op, Type *ElemTy, Instruction *I,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  CallInst *PtrCastedI = nullptr;
  auto It = Ptrcasts.find(F);
  if (It == Ptrcasts.end()) {
    PtrCastedI = buildSpvPtrcast(F, Op, ElemTy);
    Ptrcasts[F] = PtrCastedI;
    PtrCastedI = It->second;
  I->replaceUsesOfWith(Op, PtrCastedI);
void SPIRVEmitIntrinsics::propagateElemType(
    Value *Op, Type *ElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  DenseMap<Function *, CallInst *> Ptrcasts;
  for (auto *U : Users) {
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
    if (isaGEP(UI) || TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, ElemTy, UI, Ptrcasts);
void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst) {
  std::unordered_set<Value *> Visited;
  DenseMap<Function *, CallInst *> Ptrcasts;
  propagateElemTypeRec(Op, PtrElemTy, CastElemTy, VisitedSubst, Visited,
                       std::move(Ptrcasts));

void SPIRVEmitIntrinsics::propagateElemTypeRec(
    Value *Op, Type *PtrElemTy, Type *CastElemTy,
    DenseSet<std::pair<Value *, Value *>> &VisitedSubst,
    std::unordered_set<Value *> &Visited,
    DenseMap<Function *, CallInst *> Ptrcasts) {
  if (!Visited.insert(Op).second)
  for (auto *U : Users) {
    if (!VisitedSubst.insert(std::make_pair(U, Op)).second)
    if (isaGEP(UI) || TypeValidated.find(UI) != TypeValidated.end())
      replaceUsesOfWithSpvPtrcast(Op, CastElemTy, UI, Ptrcasts);
Type *
SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(Type *ValueTy, Value *Operand,
                                                  bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeByValueDeep(ValueTy, Operand, Visited,

Type *SPIRVEmitIntrinsics::deduceElementTypeByValueDeep(
    Type *ValueTy, Value *Operand, std::unordered_set<Value *> &Visited,
    bool UnknownElemTypeI8) {
          deduceElementTypeHelper(Operand, Visited, UnknownElemTypeI8))
Type *SPIRVEmitIntrinsics::deduceElementTypeByUsersDeep(
    Value *Op, std::unordered_set<Value *> &Visited, bool UnknownElemTypeI8) {
  for (User *OpU : Op->users()) {
      if (Type *Ty = deduceElementTypeHelper(Inst, Visited, UnknownElemTypeI8))

  if ((DemangledName.starts_with("__spirv_ocl_printf(") ||
Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(Value *I,
                                                   bool UnknownElemTypeI8) {
  std::unordered_set<Value *> Visited;
  return deduceElementTypeHelper(I, Visited, UnknownElemTypeI8);

void SPIRVEmitIntrinsics::maybeAssignPtrType(Type *&Ty, Value *Op, Type *RefTy,
                                             bool UnknownElemTypeI8) {
    if (!UnknownElemTypeI8)
bool SPIRVEmitIntrinsics::walkLogicalAccessChainDynamic(
    Type *CurType, Value *Operand, uint64_t Multiplier,
    const std::function<void(Type *, uint64_t)> &OnLiteralIndexing,
    const std::function<void(Type *, Value *, uint64_t)> &OnDynamicIndexing) {
    if (ST->getNumElements() > 0) {
      CurType = ST->getElementType(0);
      OnLiteralIndexing(CurType, 0);
  OnDynamicIndexing(AT->getElementType(), Operand, Multiplier);
  return AT == nullptr;
bool SPIRVEmitIntrinsics::walkLogicalAccessChainConstant(
    const std::function<void(Type *, uint64_t)> &OnLiteralIndexing) {
      uint32_t EltTypeSize = DL.getTypeSizeInBits(AT->getElementType()) / 8;
      CurType = AT->getElementType();
      OnLiteralIndexing(CurType, Index);
      uint32_t StructSize = DL.getTypeSizeInBits(ST) / 8;
      const auto &STL = DL.getStructLayout(ST);
      unsigned Element = STL->getElementContainingOffset(Offset);
      Offset -= STL->getElementOffset(Element);
      CurType = ST->getElementType(Element);
      OnLiteralIndexing(CurType, Element);
      Type *EltTy = VT->getElementType();
      TypeSize EltSizeBits = DL.getTypeSizeInBits(EltTy);
      assert(EltSizeBits % 8 == 0 &&
             "Element type size in bits must be a multiple of 8.");
      uint32_t EltTypeSize = EltSizeBits / 8;
      OnLiteralIndexing(CurType, Index);
bool SPIRVEmitIntrinsics::walkLogicalAccessChain(
    GetElementPtrInst &GEP,
    const std::function<void(Type *, uint64_t)> &OnLiteralIndexing,
    const std::function<void(Type *, Value *, uint64_t)> &OnDynamicIndexing) {
  std::optional<uint64_t> MultiplierOpt =
      getByteAddressingMultiplier(GEP.getSourceElementType());
  assert(MultiplierOpt && "We only rewrite byte-addressing GEP");
  uint64_t Multiplier = *MultiplierOpt;

  Value *Src = getPointerRoot(GEP.getPointerOperand());
  Type *CurType = deduceElementType(Src, true);
    return walkLogicalAccessChainConstant(
        CurType, CI->getZExtValue() * Multiplier, OnLiteralIndexing);
  return walkLogicalAccessChainDynamic(CurType, Operand, Multiplier,
                                       OnLiteralIndexing, OnDynamicIndexing);
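// Rewrites a byte-addressing GEP into an spv_gep intrinsic with typed indices,
// using walkLogicalAccessChain to split the offset level by level; a dynamic
// offset is divided by the element size when the sizes line up.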
Instruction *
SPIRVEmitIntrinsics::buildLogicalAccessChainFromGEP(GetElementPtrInst &GEP) {
  B.SetInsertPoint(&GEP);
  std::vector<Value *> Indices;
  Indices.push_back(ConstantInt::get(
      IntegerType::getInt32Ty(CurrF->getContext()), 0, false));
  walkLogicalAccessChain(
      [&Indices, &B](Type *EltType, uint64_t Index) {
            ConstantInt::get(B.getInt64Ty(), Index, false));
                     uint64_t Multiplier) {
        uint32_t EltTypeSize = DL.getTypeSizeInBits(EltType) / 8;
        if (Multiplier == EltTypeSize) {
        } else if (EltTypeSize % Multiplier == 0) {
              EltTypeSize / Multiplier,
              ConstantInt::get(Offset->getType(), Multiplier,
          Index = B.CreateUDiv(Index,
                               ConstantInt::get(Offset->getType(), EltTypeSize,
        Indices.push_back(Index);

  SmallVector<Value *, 4> Args;
  Args.push_back(B.getInt1(GEP.isInBounds()));
  Args.push_back(GEP.getOperand(0));
  auto *NewI = B.CreateIntrinsic(Intrinsic::spv_gep, {Types}, {Args});
  replaceAllUsesWithAndErase(B, &GEP, NewI);
Type *SPIRVEmitIntrinsics::getGEPTypeLogical(GetElementPtrInst *GEP) {
  Type *CurType = GEP->getResultElementType();
  bool Interrupted = walkLogicalAccessChain(
      *GEP, [&CurType](Type *EltType, uint64_t Index) { CurType = EltType; },
      [&CurType](Type *EltType, Value *Index, uint64_t) { CurType = EltType; });
  return Interrupted ? GEP->getResultElementType() : CurType;
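// getGEPType prefers the logical walk above when the GEP uses byte addressing;
// otherwise it falls back to the GEP's own source/result element types.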
Type *SPIRVEmitIntrinsics::getGEPType(GetElementPtrInst *Ref) {
  if (getByteAddressingMultiplier(Ref->getSourceElementType()) &&
    return getGEPTypeLogical(Ref);
    Ty = Ref->getSourceElementType();
    Ty = Ref->getResultElementType();
924Type *SPIRVEmitIntrinsics::deduceElementTypeHelper(
925 Value *
I, std::unordered_set<Value *> &Visited,
bool UnknownElemTypeI8,
926 bool IgnoreKnownType) {
932 if (!IgnoreKnownType)
937 if (!Visited.insert(
I).second)
944 maybeAssignPtrType(Ty,
I,
Ref->getAllocatedType(), UnknownElemTypeI8);
946 Ty = getGEPType(
Ref);
948 Ty = SGEP->getResultElementType();
953 KnownTy =
Op->getType();
955 maybeAssignPtrType(Ty,
I, ElemTy, UnknownElemTypeI8);
958 Ty = SPIRV::getOriginalFunctionType(*Fn);
961 Ty = deduceElementTypeByValueDeep(
963 Ref->getNumOperands() > 0 ?
Ref->getOperand(0) :
nullptr, Visited,
967 Type *RefTy = deduceElementTypeHelper(
Ref->getPointerOperand(), Visited,
969 maybeAssignPtrType(Ty,
I, RefTy, UnknownElemTypeI8);
971 maybeAssignPtrType(Ty,
I,
Ref->getDestTy(), UnknownElemTypeI8);
973 if (
Type *Src =
Ref->getSrcTy(), *Dest =
Ref->getDestTy();
975 Ty = deduceElementTypeHelper(
Ref->getOperand(0), Visited,
980 Ty = deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8);
984 Ty = deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8);
986 Type *BestTy =
nullptr;
988 DenseMap<Type *, unsigned> PhiTys;
989 for (
int i =
Ref->getNumIncomingValues() - 1; i >= 0; --i) {
990 Ty = deduceElementTypeByUsersDeep(
Ref->getIncomingValue(i), Visited,
997 if (It.first->second > MaxN) {
998 MaxN = It.first->second;
1006 for (
Value *
Op : {
Ref->getTrueValue(),
Ref->getFalseValue()}) {
1007 Ty = deduceElementTypeByUsersDeep(
Op, Visited, UnknownElemTypeI8);
1012 static StringMap<unsigned> ResTypeByArg = {
1016 {
"__spirv_GenericCastToPtr_ToGlobal", 0},
1017 {
"__spirv_GenericCastToPtr_ToLocal", 0},
1018 {
"__spirv_GenericCastToPtr_ToPrivate", 0},
1019 {
"__spirv_GenericCastToPtrExplicit_ToGlobal", 0},
1020 {
"__spirv_GenericCastToPtrExplicit_ToLocal", 0},
1021 {
"__spirv_GenericCastToPtrExplicit_ToPrivate", 0}};
1025 if (
II &&
II->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1027 if (HandleType->getTargetExtName() ==
"spirv.Image" ||
1028 HandleType->getTargetExtName() ==
"spirv.SignedImage") {
1029 for (User *U :
II->users()) {
1034 }
else if (HandleType->getTargetExtName() ==
"spirv.VulkanBuffer") {
1036 Ty = HandleType->getTypeParameter(0);
1048 }
else if (
II &&
II->getIntrinsicID() ==
1049 Intrinsic::spv_generic_cast_to_ptr_explicit) {
1053 std::string DemangledName =
1055 if (DemangledName.length() > 0)
1056 DemangledName = SPIRV::lookupBuiltinNameHelper(DemangledName);
1057 auto AsArgIt = ResTypeByArg.
find(DemangledName);
1058 if (AsArgIt != ResTypeByArg.
end())
1059 Ty = deduceElementTypeHelper(CI->
getArgOperand(AsArgIt->second),
1060 Visited, UnknownElemTypeI8);
1067 if (Ty && !IgnoreKnownType) {
1078Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(User *U,
1079 bool UnknownElemTypeI8) {
1080 std::unordered_set<Value *> Visited;
1081 return deduceNestedTypeHelper(U,
U->getType(), Visited, UnknownElemTypeI8);
1084Type *SPIRVEmitIntrinsics::deduceNestedTypeHelper(
1085 User *U,
Type *OrigTy, std::unordered_set<Value *> &Visited,
1086 bool UnknownElemTypeI8) {
1095 if (!Visited.insert(U).second)
1100 bool Change =
false;
1101 for (
unsigned i = 0; i <
U->getNumOperands(); ++i) {
1103 assert(
Op &&
"Operands should not be null.");
1104 Type *OpTy =
Op->getType();
1107 if (
Type *NestedTy =
1108 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
1115 Change |= Ty != OpTy;
1123 if (
Value *
Op =
U->getNumOperands() > 0 ?
U->getOperand(0) :
nullptr) {
1124 Type *OpTy = ArrTy->getElementType();
1127 if (
Type *NestedTy =
1128 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
1135 Type *NewTy = ArrayType::get(Ty, ArrTy->getNumElements());
1141 if (
Value *
Op =
U->getNumOperands() > 0 ?
U->getOperand(0) :
nullptr) {
1142 Type *OpTy = VecTy->getElementType();
1145 if (
Type *NestedTy =
1146 deduceElementTypeHelper(
Op, Visited, UnknownElemTypeI8))
1153 Type *NewTy = VectorType::get(Ty, VecTy->getElementCount());
Type *SPIRVEmitIntrinsics::deduceElementType(Value *I, bool UnknownElemTypeI8) {
  if (Type *Ty = deduceElementTypeHelper(I, UnknownElemTypeI8))
  if (!UnknownElemTypeI8)
  return IntegerType::getInt8Ty(I->getContext());

    Value *PointerOperand) {
  return I->getType();
1187bool SPIRVEmitIntrinsics::deduceOperandElementTypeCalledFunction(
1189 Type *&KnownElemTy,
bool &Incomplete) {
1193 std::string DemangledName =
1195 if (DemangledName.length() > 0 &&
1197 const SPIRVSubtarget &
ST = TM.
getSubtarget<SPIRVSubtarget>(*CalledF);
1198 auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
1199 DemangledName,
ST.getPreferredInstructionSet());
1200 if (Opcode == SPIRV::OpGroupAsyncCopy) {
1201 for (
unsigned i = 0, PtrCnt = 0; i < CI->
arg_size() && PtrCnt < 2; ++i) {
1207 KnownElemTy = ElemTy;
1208 Ops.push_back(std::make_pair(
Op, i));
1210 }
else if (Grp == SPIRV::Atomic || Grp == SPIRV::AtomicFloating) {
1217 case SPIRV::OpAtomicFAddEXT:
1218 case SPIRV::OpAtomicFMinEXT:
1219 case SPIRV::OpAtomicFMaxEXT:
1220 case SPIRV::OpAtomicLoad:
1221 case SPIRV::OpAtomicCompareExchangeWeak:
1222 case SPIRV::OpAtomicCompareExchange:
1223 case SPIRV::OpAtomicExchange:
1224 case SPIRV::OpAtomicIAdd:
1225 case SPIRV::OpAtomicISub:
1226 case SPIRV::OpAtomicOr:
1227 case SPIRV::OpAtomicXor:
1228 case SPIRV::OpAtomicAnd:
1229 case SPIRV::OpAtomicUMin:
1230 case SPIRV::OpAtomicUMax:
1231 case SPIRV::OpAtomicSMin:
1232 case SPIRV::OpAtomicSMax: {
1237 Incomplete = isTodoType(
Op);
1238 Ops.push_back(std::make_pair(
Op, 0));
1240 case SPIRV::OpAtomicStore: {
1249 Incomplete = isTodoType(
Op);
1250 Ops.push_back(std::make_pair(
Op, 0));
1259void SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionPointer(
1261 Type *&KnownElemTy,
bool IsPostprocessing) {
1265 Ops.push_back(std::make_pair(
Op, std::numeric_limits<unsigned>::max()));
1266 FunctionType *FTy = SPIRV::getOriginalFunctionType(*CI);
1267 bool IsNewFTy =
false, IsIncomplete =
false;
1270 Type *ArgTy = Arg->getType();
1275 if (isTodoType(Arg))
1276 IsIncomplete =
true;
1278 IsIncomplete =
true;
1281 ArgTy = FTy->getFunctionParamType(ParmIdx);
1285 Type *RetTy = FTy->getReturnType();
1292 IsIncomplete =
true;
1294 IsIncomplete =
true;
1297 if (!IsPostprocessing && IsIncomplete)
1300 IsNewFTy ? FunctionType::get(RetTy, ArgTys, FTy->isVarArg()) : FTy;
1303bool SPIRVEmitIntrinsics::deduceOperandElementTypeFunctionRet(
1304 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
1305 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing,
1317 DenseSet<std::pair<Value *, Value *>> VisitedSubst{std::make_pair(
I,
Op)};
1318 for (User *U :
F->users()) {
1326 propagateElemType(CI, PrevElemTy, VisitedSubst);
1336 for (Instruction *IncompleteRetI : *IncompleteRets)
1337 deduceOperandElementType(IncompleteRetI,
nullptr, AskOps,
1339 }
else if (IncompleteRets) {
1342 TypeValidated.insert(
I);
1350void SPIRVEmitIntrinsics::deduceOperandElementType(
1351 Instruction *
I, SmallPtrSet<Instruction *, 4> *IncompleteRets,
1352 const SmallPtrSet<Value *, 4> *AskOps,
bool IsPostprocessing) {
1354 Type *KnownElemTy =
nullptr;
1355 bool Incomplete =
false;
1361 Incomplete = isTodoType(
I);
1362 for (
unsigned i = 0; i <
Ref->getNumIncomingValues(); i++) {
1365 Ops.push_back(std::make_pair(
Op, i));
1371 Incomplete = isTodoType(
I);
1372 Ops.push_back(std::make_pair(
Ref->getPointerOperand(), 0));
1379 Incomplete = isTodoType(
I);
1380 Ops.push_back(std::make_pair(
Ref->getOperand(0), 0));
1384 KnownElemTy =
Ref->getSourceElementType();
1385 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1390 KnownElemTy =
Ref->getBaseType();
1391 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1394 KnownElemTy =
I->getType();
1400 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1404 reconstructType(
Ref->getValueOperand(),
false, IsPostprocessing)))
1409 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1417 Incomplete = isTodoType(
Ref->getPointerOperand());
1418 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1426 Incomplete = isTodoType(
Ref->getPointerOperand());
1427 Ops.push_back(std::make_pair(
Ref->getPointerOperand(),
1433 Incomplete = isTodoType(
I);
1434 for (
unsigned i = 0; i <
Ref->getNumOperands(); i++) {
1437 Ops.push_back(std::make_pair(
Op, i));
1445 if (deduceOperandElementTypeFunctionRet(
I, IncompleteRets, AskOps,
1446 IsPostprocessing, KnownElemTy,
Op,
1449 Incomplete = isTodoType(CurrF);
1450 Ops.push_back(std::make_pair(
Op, 0));
1456 bool Incomplete0 = isTodoType(Op0);
1457 bool Incomplete1 = isTodoType(Op1);
1459 Type *ElemTy0 = (Incomplete0 && !Incomplete1 && ElemTy1)
1461 : GR->findDeducedElementType(Op0);
1463 KnownElemTy = ElemTy0;
1464 Incomplete = Incomplete0;
1465 Ops.push_back(std::make_pair(Op1, 1));
1466 }
else if (ElemTy1) {
1467 KnownElemTy = ElemTy1;
1468 Incomplete = Incomplete1;
1469 Ops.push_back(std::make_pair(Op0, 0));
1473 deduceOperandElementTypeCalledFunction(CI,
Ops, KnownElemTy, Incomplete);
1474 else if (HaveFunPtrs)
1475 deduceOperandElementTypeFunctionPointer(CI,
Ops, KnownElemTy,
1480 if (!KnownElemTy ||
Ops.size() == 0)
1485 for (
auto &OpIt :
Ops) {
1489 Type *AskTy =
nullptr;
1490 CallInst *AskCI =
nullptr;
1491 if (IsPostprocessing && AskOps) {
1497 if (Ty == KnownElemTy)
1500 Type *OpTy =
Op->getType();
1501 if (
Op->hasUseList() &&
1508 else if (!IsPostprocessing)
1512 if (AssignCI ==
nullptr) {
1521 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
1522 std::make_pair(
I,
Op)};
1523 propagateElemTypeRec(
Op, KnownElemTy, PrevElemTy, VisitedSubst);
1527 CallInst *PtrCastI =
1528 buildSpvPtrcast(
I->getParent()->getParent(),
Op, KnownElemTy);
1529 if (OpIt.second == std::numeric_limits<unsigned>::max())
1532 I->setOperand(OpIt.second, PtrCastI);
1535 TypeValidated.insert(
I);
1538void SPIRVEmitIntrinsics::replaceMemInstrUses(Instruction *Old,
1543 if (isAssignTypeInstr(U)) {
1544 B.SetInsertPoint(U);
1545 SmallVector<Value *, 2>
Args = {
New,
U->getOperand(1)};
1546 CallInst *AssignCI =
1547 B.CreateIntrinsic(Intrinsic::spv_assign_type, {
New->getType()},
Args);
1549 U->eraseFromParent();
1552 U->replaceUsesOfWith(Old, New);
1554 if (
Phi->getType() !=
New->getType()) {
1555 Phi->mutateType(
New->getType());
1556 Phi->replaceUsesOfWith(Old, New);
1559 for (User *PhiUser :
Phi->users())
1562 for (ExtractValueInst *EV : EVUsers) {
1563 B.SetInsertPoint(EV);
1565 for (
unsigned Idx : EV->indices())
1566 Args.push_back(
B.getInt32(Idx));
1568 B.CreateIntrinsic(Intrinsic::spv_extractv, {EV->getType()},
Args);
1569 EV->replaceAllUsesWith(NewEV);
1570 DeletedInstrs.
insert(EV);
1571 EV->eraseFromParent();
1574 Phi->replaceUsesOfWith(Old, New);
1580 New->copyMetadata(*Old);
1584void SPIRVEmitIntrinsics::preprocessUndefs(
IRBuilder<> &
B) {
1585 std::queue<Instruction *> Worklist;
1589 while (!Worklist.empty()) {
1591 bool BPrepared =
false;
1594 for (
auto &
Op :
I->operands()) {
1596 if (!AggrUndef || !
Op->getType()->isAggregateType())
1603 auto *IntrUndef =
B.CreateIntrinsic(Intrinsic::spv_undef, {});
1604 Worklist.push(IntrUndef);
1605 I->replaceUsesOfWith(
Op, IntrUndef);
1606 AggrConsts[IntrUndef] = AggrUndef;
1607 AggrConstTypes[IntrUndef] = AggrUndef->getType();
1616void SPIRVEmitIntrinsics::simplifyNullAddrSpaceCasts() {
1620 ASC->replaceAllUsesWith(
1622 ASC->eraseFromParent();
1626void SPIRVEmitIntrinsics::preprocessCompositeConstants(
IRBuilder<> &
B) {
1627 std::queue<Instruction *> Worklist;
1631 while (!Worklist.empty()) {
1632 auto *
I = Worklist.front();
1635 bool KeepInst =
false;
1636 for (
const auto &
Op :
I->operands()) {
1638 Type *ResTy =
nullptr;
1641 ResTy = COp->getType();
1653 ResTy =
Op->getType()->isVectorTy() ? COp->getType() :
B.getInt32Ty();
1658 for (
unsigned i = 0; i < COp->getNumElements(); ++i)
1659 Args.push_back(COp->getElementAsConstant(i));
1665 CE &&
CE->getOpcode() == Instruction::AddrSpaceCast &&
1671 IsPhi ?
B.SetInsertPointPastAllocas(
I->getParent()->getParent())
1672 :
B.SetInsertPoint(
I);
1676 B.CreateIntrinsic(Intrinsic::spv_const_composite, {ResTy}, {
Args});
1680 AggrConsts[CI] = AggrConst;
1681 AggrConstTypes[CI] = deduceNestedTypeHelper(AggrConst,
false);
1693 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {
I->getType()},
1698 unsigned RoundingModeDeco,
1705 ConstantInt::get(
Int32Ty, SPIRV::Decoration::FPRoundingMode)),
1714 MDNode *SaturatedConversionNode =
1716 Int32Ty, SPIRV::Decoration::SaturatedConversion))});
1736 MDString *ConstraintString =
1745 B.SetInsertPoint(&
Call);
1746 B.CreateIntrinsic(Intrinsic::spv_inline_asm, {
Args});
1751void SPIRVEmitIntrinsics::useRoundingMode(ConstrainedFPIntrinsic *FPI,
1754 if (!
RM.has_value())
1756 unsigned RoundingModeDeco = std::numeric_limits<unsigned>::max();
1757 switch (
RM.value()) {
1761 case RoundingMode::NearestTiesToEven:
1762 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTE;
1764 case RoundingMode::TowardNegative:
1765 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTN;
1767 case RoundingMode::TowardPositive:
1768 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTP;
1770 case RoundingMode::TowardZero:
1771 RoundingModeDeco = SPIRV::FPRoundingMode::FPRoundingMode::RTZ;
1773 case RoundingMode::Dynamic:
1774 case RoundingMode::NearestTiesToAway:
1778 if (RoundingModeDeco == std::numeric_limits<unsigned>::max())
1784Instruction *SPIRVEmitIntrinsics::visitSwitchInst(SwitchInst &
I) {
1788 B.SetInsertPoint(&
I);
1789 SmallVector<Value *, 4>
Args;
1791 Args.push_back(
I.getCondition());
1794 for (
auto &Case :
I.cases()) {
1795 Args.push_back(Case.getCaseValue());
1796 BBCases.
push_back(Case.getCaseSuccessor());
1799 CallInst *NewI =
B.CreateIntrinsic(Intrinsic::spv_switch,
1800 {
I.getOperand(0)->getType()}, {
Args});
1804 I.eraseFromParent();
1807 B.SetInsertPoint(ParentBB);
1808 IndirectBrInst *BrI =
B.CreateIndirectBr(
1811 for (BasicBlock *BBCase : BBCases)
1820Instruction *SPIRVEmitIntrinsics::visitIntrinsicInst(IntrinsicInst &
I) {
1826 B.SetInsertPoint(&
I);
1828 SmallVector<Value *, 4>
Args;
1829 Args.push_back(
B.getInt1(
true));
1830 Args.push_back(
I.getOperand(0));
1831 Args.push_back(
B.getInt32(0));
1832 for (
unsigned J = 0; J < SGEP->getNumIndices(); ++J)
1833 Args.push_back(SGEP->getIndexOperand(J));
1835 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, Types, Args);
1836 replaceAllUsesWithAndErase(
B, &
I, NewI);
1840Instruction *SPIRVEmitIntrinsics::visitGetElementPtrInst(GetElementPtrInst &
I) {
1842 B.SetInsertPoint(&
I);
1850 if (getByteAddressingMultiplier(
I.getSourceElementType())) {
1851 return buildLogicalAccessChainFromGEP(
I);
1856 Value *PtrOp =
I.getPointerOperand();
1857 Type *SrcElemTy =
I.getSourceElementType();
1858 Type *DeducedPointeeTy = deduceElementType(PtrOp,
true);
1861 if (ArrTy->getElementType() == SrcElemTy) {
1863 Type *FirstIdxType =
I.getOperand(1)->getType();
1864 NewIndices.
push_back(ConstantInt::get(FirstIdxType, 0));
1865 for (
Value *Idx :
I.indices())
1869 SmallVector<Value *, 4>
Args;
1870 Args.push_back(
B.getInt1(
I.isInBounds()));
1871 Args.push_back(
I.getPointerOperand());
1874 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
1875 replaceAllUsesWithAndErase(
B, &
I, NewI);
1882 SmallVector<Value *, 4>
Args;
1883 Args.push_back(
B.getInt1(
I.isInBounds()));
1885 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_gep, {
Types}, {
Args});
1886 replaceAllUsesWithAndErase(
B, &
I, NewI);
1890Instruction *SPIRVEmitIntrinsics::visitBitCastInst(BitCastInst &
I) {
1892 B.SetInsertPoint(&
I);
1901 I.eraseFromParent();
1907 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_bitcast, {
Types}, {
Args});
1908 replaceAllUsesWithAndErase(
B, &
I, NewI);
1912void SPIRVEmitIntrinsics::insertAssignPtrTypeTargetExt(
1914 Type *VTy =
V->getType();
1919 if (ElemTy != AssignedType)
1932 if (CurrentType == AssignedType)
1939 " for value " +
V->getName(),
1947void SPIRVEmitIntrinsics::replacePointerOperandWithPtrCast(
1948 Instruction *
I,
Value *Pointer,
Type *ExpectedElementType,
1950 TypeValidated.insert(
I);
1953 Type *PointerElemTy = deduceElementTypeHelper(Pointer,
false);
1954 if (PointerElemTy == ExpectedElementType ||
1960 MetadataAsValue *VMD =
buildMD(ExpectedElementVal);
1962 bool FirstPtrCastOrAssignPtrType =
true;
1968 for (
auto User :
Pointer->users()) {
1971 (
II->getIntrinsicID() != Intrinsic::spv_assign_ptr_type &&
1972 II->getIntrinsicID() != Intrinsic::spv_ptrcast) ||
1973 II->getOperand(0) != Pointer)
1978 FirstPtrCastOrAssignPtrType =
false;
1979 if (
II->getOperand(1) != VMD ||
1986 if (
II->getIntrinsicID() != Intrinsic::spv_ptrcast)
1991 if (
II->getParent() !=
I->getParent())
1994 I->setOperand(OperandToReplace,
II);
2000 if (FirstPtrCastOrAssignPtrType) {
2005 }
else if (isTodoType(Pointer)) {
2006 eraseTodoType(Pointer);
2014 DenseSet<std::pair<Value *, Value *>> VisitedSubst{
2015 std::make_pair(
I, Pointer)};
2017 propagateElemType(Pointer, PrevElemTy, VisitedSubst);
2029 auto *PtrCastI =
B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
2035void SPIRVEmitIntrinsics::insertPtrCastOrAssignTypeInstr(Instruction *
I,
2040 replacePointerOperandWithPtrCast(
2041 I,
SI->getValueOperand(), IntegerType::getInt8Ty(CurrF->
getContext()),
2047 Type *OpTy =
Op->getType();
2057 It != AggrConstTypes.
end())
2059 if (OpTy ==
Op->getType())
2060 OpTy = deduceElementTypeByValueDeep(OpTy,
Op,
false);
2061 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 1,
B);
2066 Type *OpTy = LI->getType();
2071 Type *NewOpTy = OpTy;
2072 OpTy = deduceElementTypeByValueDeep(OpTy, LI,
false);
2073 if (OpTy == NewOpTy)
2074 insertTodoType(Pointer);
2077 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 0,
B);
2082 Type *OpTy =
nullptr;
2094 OpTy = GEPI->getSourceElementType();
2096 replacePointerOperandWithPtrCast(
I, Pointer, OpTy, 0,
B);
2098 insertTodoType(Pointer);
2110 std::string DemangledName =
2114 bool HaveTypes =
false;
2132 for (User *U : CalledArg->
users()) {
2134 if ((ElemTy = deduceElementTypeHelper(Inst,
false)) !=
nullptr)
2140 HaveTypes |= ElemTy !=
nullptr;
2145 if (DemangledName.empty() && !HaveTypes)
2163 Type *ExpectedType =
2165 if (!ExpectedType && !DemangledName.empty())
2166 ExpectedType = SPIRV::parseBuiltinCallArgumentBaseType(
2167 DemangledName,
OpIdx,
I->getContext());
2168 if (!ExpectedType || ExpectedType->
isVoidTy())
2176 replacePointerOperandWithPtrCast(CI, ArgOperand, ExpectedType,
OpIdx,
B);
2180Instruction *SPIRVEmitIntrinsics::visitInsertElementInst(InsertElementInst &
I) {
2187 I.getOperand(1)->getType(),
2188 I.getOperand(2)->getType()};
2190 B.SetInsertPoint(&
I);
2192 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_insertelt, {
Types}, {
Args});
2193 replaceAllUsesWithAndErase(
B, &
I, NewI);
2198SPIRVEmitIntrinsics::visitExtractElementInst(ExtractElementInst &
I) {
2205 B.SetInsertPoint(&
I);
2207 I.getIndexOperand()->getType()};
2208 SmallVector<Value *, 2>
Args = {
I.getVectorOperand(),
I.getIndexOperand()};
2209 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_extractelt, {
Types}, {
Args});
2210 replaceAllUsesWithAndErase(
B, &
I, NewI);
2214Instruction *SPIRVEmitIntrinsics::visitInsertValueInst(InsertValueInst &
I) {
2216 B.SetInsertPoint(&
I);
2219 Value *AggregateOp =
I.getAggregateOperand();
2223 Args.push_back(AggregateOp);
2224 Args.push_back(
I.getInsertedValueOperand());
2225 for (
auto &
Op :
I.indices())
2226 Args.push_back(
B.getInt32(
Op));
2228 B.CreateIntrinsic(Intrinsic::spv_insertv, {
Types}, {
Args});
2229 replaceMemInstrUses(&
I, NewI,
B);
2233Instruction *SPIRVEmitIntrinsics::visitExtractValueInst(ExtractValueInst &
I) {
2234 if (
I.getAggregateOperand()->getType()->isAggregateType())
2237 B.SetInsertPoint(&
I);
2239 for (
auto &
Op :
I.indices())
2240 Args.push_back(
B.getInt32(
Op));
2242 B.CreateIntrinsic(Intrinsic::spv_extractv, {
I.getType()}, {
Args});
2243 replaceAllUsesWithAndErase(
B, &
I, NewI);
2247Instruction *SPIRVEmitIntrinsics::visitLoadInst(LoadInst &
I) {
2248 if (!
I.getType()->isAggregateType())
2251 B.SetInsertPoint(&
I);
2252 TrackConstants =
false;
2257 unsigned IntrinsicId;
2258 SmallVector<Value *, 4>
Args = {
I.getPointerOperand(),
B.getInt16(Flags)};
2259 if (!
I.isAtomic()) {
2260 IntrinsicId = Intrinsic::spv_load;
2261 Args.push_back(
B.getInt32(
I.getAlign().value()));
2263 IntrinsicId = Intrinsic::spv_atomic_load;
2264 Args.push_back(
B.getInt8(
static_cast<uint8_t
>(
I.getOrdering())));
2267 B.CreateIntrinsic(IntrinsicId, {
I.getOperand(0)->getType()},
Args);
2269 replaceMemInstrUses(&
I, NewI,
B);
2273Instruction *SPIRVEmitIntrinsics::visitStoreInst(StoreInst &
I) {
2277 B.SetInsertPoint(&
I);
2278 TrackConstants =
false;
2282 auto *PtrOp =
I.getPointerOperand();
2284 if (
I.getValueOperand()->getType()->isAggregateType()) {
2292 "Unexpected argument of aggregate type, should be spv_extractv!");
2296 unsigned IntrinsicId;
2297 SmallVector<Value *, 4>
Args = {
I.getValueOperand(), PtrOp,
2299 if (!
I.isAtomic()) {
2300 IntrinsicId = Intrinsic::spv_store;
2301 Args.push_back(
B.getInt32(
I.getAlign().value()));
2303 IntrinsicId = Intrinsic::spv_atomic_store;
2304 Args.push_back(
B.getInt8(
static_cast<uint8_t
>(
I.getOrdering())));
2306 auto *NewI =
B.CreateIntrinsic(
2307 IntrinsicId, {
I.getValueOperand()->getType(), PtrOp->
getType()},
Args);
2309 I.eraseFromParent();
2313Instruction *SPIRVEmitIntrinsics::visitAllocaInst(AllocaInst &
I) {
2314 Value *ArraySize =
nullptr;
2315 if (
I.isArrayAllocation()) {
2318 SPIRV::Extension::SPV_INTEL_variable_length_array))
2320 "array allocation: this instruction requires the following "
2321 "SPIR-V extension: SPV_INTEL_variable_length_array",
2323 ArraySize =
I.getArraySize();
2326 B.SetInsertPoint(&
I);
2327 TrackConstants =
false;
2328 Type *PtrTy =
I.getType();
2331 ?
B.CreateIntrinsic(Intrinsic::spv_alloca_array,
2332 {PtrTy, ArraySize->
getType()},
2333 {ArraySize,
B.getInt32(
I.getAlign().value())})
2334 :
B.CreateIntrinsic(
Intrinsic::spv_alloca, {PtrTy},
2335 {
B.getInt32(
I.getAlign().value())});
2336 replaceAllUsesWithAndErase(
B, &
I, NewI);
2340Instruction *SPIRVEmitIntrinsics::visitAtomicCmpXchgInst(AtomicCmpXchgInst &
I) {
2341 assert(
I.getType()->isAggregateType() &&
"Aggregate result is expected");
2343 B.SetInsertPoint(&
I);
2345 Args.push_back(
B.getInt32(
2346 static_cast<uint32_t
>(
getMemScope(
I.getContext(),
I.getSyncScopeID()))));
2349 const SPIRVSubtarget &
ST = TM.
getSubtarget<SPIRVSubtarget>(*
I.getFunction());
2350 unsigned AS =
I.getPointerOperand()->getType()->getPointerAddressSpace();
2351 uint32_t ScSem =
static_cast<uint32_t
>(
2353 Args.push_back(
B.getInt32(
2355 Args.push_back(
B.getInt32(
2357 auto *NewI =
B.CreateIntrinsic(Intrinsic::spv_cmpxchg,
2358 {
I.getPointerOperand()->getType()}, {
Args});
2359 replaceMemInstrUses(&
I, NewI,
B);
2363Instruction *SPIRVEmitIntrinsics::visitUnreachableInst(UnreachableInst &
I) {
2365 B.SetInsertPoint(&
I);
2366 B.CreateIntrinsic(Intrinsic::spv_unreachable, {});
2375 static const StringSet<> ArtificialGlobals{
"llvm.global.annotations",
2376 "llvm.compiler.used",
"llvm.used"};
2381 auto &UserFunctions = GVUsers.getTransitiveUserFunctions(GV);
2382 if (UserFunctions.contains(
F))
2387 if (!UserFunctions.empty())
2392 const Module &M = *
F->getParent();
2393 const Function &FirstDefinition = *M.getFunctionDefs().
begin();
2394 return F == &FirstDefinition;
2397Value *SPIRVEmitIntrinsics::buildSpvUndefComposite(
Type *AggrTy,
2399 SmallVector<Value *, 4> Elems;
2401 Type *ElemTy = ArrTy->getElementType();
2402 auto *UI =
B.CreateIntrinsic(Intrinsic::spv_undef, {});
2404 AggrConstTypes[UI] = ElemTy;
2405 Elems.
assign(ArrTy->getNumElements(), UI);
2408 DenseMap<Type *, Instruction *> UndefByType;
2409 for (
unsigned I = 0;
I < StructTy->getNumElements(); ++
I) {
2411 auto &
Entry = UndefByType[ElemTy];
2413 Entry =
B.CreateIntrinsic(Intrinsic::spv_undef, {});
2415 AggrConstTypes[
Entry] = ElemTy;
2420 auto *Composite =
B.CreateIntrinsic(Intrinsic::spv_const_composite,
2421 {
B.getInt32Ty()}, Elems);
2423 AggrConstTypes[Composite] = AggrTy;
2427void SPIRVEmitIntrinsics::processGlobalValue(GlobalVariable &GV,
2438 deduceElementTypeHelper(&GV,
false);
2440 Value *InitOp = Init;
2442 InitOp = buildSpvUndefComposite(Init->
getType(),
B);
2445 auto *InitInst =
B.CreateIntrinsic(Intrinsic::spv_init_global,
2447 InitInst->setArgOperand(1, InitOp);
2450 B.CreateIntrinsic(Intrinsic::spv_unref_global, GV.
getType(), &GV);
2456bool SPIRVEmitIntrinsics::insertAssignPtrTypeIntrs(Instruction *
I,
2458 bool UnknownElemTypeI8) {
2464 if (
Type *ElemTy = deduceElementType(
I, UnknownElemTypeI8)) {
2471void SPIRVEmitIntrinsics::insertAssignTypeIntrs(Instruction *
I,
2474 static StringMap<unsigned> ResTypeWellKnown = {
2475 {
"async_work_group_copy", WellKnownTypes::Event},
2476 {
"async_work_group_strided_copy", WellKnownTypes::Event},
2477 {
"__spirv_GroupAsyncCopy", WellKnownTypes::Event}};
2481 bool IsKnown =
false;
2486 std::string DemangledName =
2489 if (DemangledName.length() > 0)
2491 SPIRV::lookupBuiltinNameHelper(DemangledName, &DecorationId);
2492 auto ResIt = ResTypeWellKnown.
find(DemangledName);
2493 if (ResIt != ResTypeWellKnown.
end()) {
2496 switch (ResIt->second) {
2497 case WellKnownTypes::Event:
2504 switch (DecorationId) {
2507 case FPDecorationId::SAT:
2510 case FPDecorationId::RTE:
2512 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTE,
B);
2514 case FPDecorationId::RTZ:
2516 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTZ,
B);
2518 case FPDecorationId::RTP:
2520 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTP,
B);
2522 case FPDecorationId::RTN:
2524 CI, SPIRV::FPRoundingMode::FPRoundingMode::RTN,
B);
2530 Type *Ty =
I->getType();
2533 Type *TypeToAssign = Ty;
2535 if (
II->getIntrinsicID() == Intrinsic::spv_const_composite ||
2536 II->getIntrinsicID() == Intrinsic::spv_undef) {
2537 auto It = AggrConstTypes.
find(
II);
2538 if (It == AggrConstTypes.
end())
2540 TypeToAssign = It->second;
2542 }
else if (
auto It = AggrConstTypes.
find(
I); It != AggrConstTypes.
end())
2543 TypeToAssign = It->second;
2547 for (
const auto &
Op :
I->operands()) {
2554 Type *OpTy =
Op->getType();
2556 CallInst *AssignCI =
2561 Type *OpTy =
Op->getType();
2576 CallInst *AssignCI =
2586bool SPIRVEmitIntrinsics::shouldTryToAddMemAliasingDecoration(
2587 Instruction *Inst) {
2589 if (!STI->
canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing))
2599void SPIRVEmitIntrinsics::insertSpirvDecorations(Instruction *
I,
2601 if (MDNode *MD =
I->getMetadata(
"spirv.Decorations")) {
2603 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {
I->getType()},
2608 auto processMemAliasingDecoration = [&](
unsigned Kind) {
2609 if (MDNode *AliasListMD =
I->getMetadata(Kind)) {
2610 if (shouldTryToAddMemAliasingDecoration(
I)) {
2611 uint32_t Dec =
Kind == LLVMContext::MD_alias_scope
2612 ? SPIRV::Decoration::AliasScopeINTEL
2613 : SPIRV::Decoration::NoAliasINTEL;
2615 I, ConstantInt::get(
B.getInt32Ty(), Dec),
2618 B.CreateIntrinsic(Intrinsic::spv_assign_aliasing_decoration,
2619 {
I->getType()}, {
Args});
2623 processMemAliasingDecoration(LLVMContext::MD_alias_scope);
2624 processMemAliasingDecoration(LLVMContext::MD_noalias);
2627 if (MDNode *MD =
I->getMetadata(LLVMContext::MD_fpmath)) {
2629 bool AllowFPMaxError =
2631 if (!AllowFPMaxError)
2635 B.CreateIntrinsic(Intrinsic::spv_assign_fpmaxerror_decoration,
2639 if (
I->getModule()->getTargetTriple().getVendor() ==
Triple::AMD &&
2643 auto &Ctx =
B.getContext();
2645 ConstantInt::get(
B.getInt32Ty(), SPIRV::Decoration::UserSemantic));
2648 if (
I->hasMetadata(
"amdgpu.no.fine.grained.memory"))
2650 Ctx, {US,
MDString::get(Ctx,
"amdgpu.no.fine.grained.memory")}));
2651 if (
I->hasMetadata(
"amdgpu.no.remote.memory"))
2654 if (
I->hasMetadata(
"amdgpu.ignore.denormal.mode"))
2656 Ctx, {US,
MDString::get(Ctx,
"amdgpu.ignore.denormal.mode")}));
2658 B.CreateIntrinsic(Intrinsic::spv_assign_decoration, {
I->getType()},
2666 &FPFastMathDefaultInfoMap,
2668 auto it = FPFastMathDefaultInfoMap.
find(
F);
2669 if (it != FPFastMathDefaultInfoMap.
end())
2677 SPIRV::FPFastMathMode::None);
2679 SPIRV::FPFastMathMode::None);
2681 SPIRV::FPFastMathMode::None);
2682 return FPFastMathDefaultInfoMap[
F] = std::move(FPFastMathDefaultInfoVec);
2688 size_t BitWidth = Ty->getScalarSizeInBits();
2692 assert(Index >= 0 && Index < 3 &&
2693 "Expected FPFastMathDefaultInfo for half, float, or double");
2694 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2695 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2696 return FPFastMathDefaultInfoVec[Index];
2699void SPIRVEmitIntrinsics::insertConstantsForFPFastMathDefault(
Module &M) {
2701 if (!
ST->canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2710 auto Node =
M.getNamedMetadata(
"spirv.ExecutionMode");
2712 if (!
M.getNamedMetadata(
"opencl.enable.FP_CONTRACT")) {
2720 ConstantInt::get(Type::getInt32Ty(
M.getContext()), 0);
2723 [[maybe_unused]] GlobalVariable *GV =
2724 new GlobalVariable(M,
2725 Type::getInt32Ty(
M.getContext()),
2739 DenseMap<Function *, SPIRV::FPFastMathDefaultInfoVector>
2740 FPFastMathDefaultInfoMap;
2742 for (
unsigned i = 0; i <
Node->getNumOperands(); i++) {
2751 if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2753 "Expected 4 operands for FPFastMathDefault");
2759 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2761 SPIRV::FPFastMathDefaultInfo &
Info =
2764 Info.FPFastMathDefault =
true;
2765 }
else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2767 "Expected no operands for ContractionOff");
2771 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2773 for (SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2774 Info.ContractionOff =
true;
2776 }
else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2778 "Expected 1 operand for SignedZeroInfNanPreserve");
2779 unsigned TargetWidth =
2784 SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec =
2788 assert(Index >= 0 && Index < 3 &&
2789 "Expected FPFastMathDefaultInfo for half, float, or double");
2790 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2791 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2792 FPFastMathDefaultInfoVec[
Index].SignedZeroInfNanPreserve =
true;
2796 std::unordered_map<unsigned, GlobalVariable *> GlobalVars;
2797 for (
auto &[Func, FPFastMathDefaultInfoVec] : FPFastMathDefaultInfoMap) {
2798 if (FPFastMathDefaultInfoVec.
empty())
2801 for (
const SPIRV::FPFastMathDefaultInfo &Info : FPFastMathDefaultInfoVec) {
2802 assert(
Info.Ty &&
"Expected target type for FPFastMathDefaultInfo");
2805 if (Flags == SPIRV::FPFastMathMode::None && !
Info.ContractionOff &&
2806 !
Info.SignedZeroInfNanPreserve && !
Info.FPFastMathDefault)
2810 if (
Info.ContractionOff && (Flags & SPIRV::FPFastMathMode::AllowContract))
2812 "and AllowContract");
2814 if (
Info.SignedZeroInfNanPreserve &&
2816 (SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2817 SPIRV::FPFastMathMode::NSZ))) {
2818 if (
Info.FPFastMathDefault)
2820 "SignedZeroInfNanPreserve but at least one of "
2821 "NotNaN/NotInf/NSZ is enabled.");
2824 if ((Flags & SPIRV::FPFastMathMode::AllowTransform) &&
2825 !((Flags & SPIRV::FPFastMathMode::AllowReassoc) &&
2826 (Flags & SPIRV::FPFastMathMode::AllowContract))) {
2828 "AllowTransform requires AllowReassoc and "
2829 "AllowContract to be set.");
2832 auto it = GlobalVars.find(Flags);
2833 GlobalVariable *GV =
nullptr;
2834 if (it != GlobalVars.end()) {
2840 ConstantInt::get(Type::getInt32Ty(
M.getContext()), Flags);
2843 GV =
new GlobalVariable(M,
2844 Type::getInt32Ty(
M.getContext()),
2849 GlobalVars[
Flags] = GV;
2855void SPIRVEmitIntrinsics::processInstrAfterVisit(Instruction *
I,
2858 bool IsConstComposite =
2859 II &&
II->getIntrinsicID() == Intrinsic::spv_const_composite;
2860 if (IsConstComposite && TrackConstants) {
2862 auto t = AggrConsts.
find(
I);
2866 {
II->getType(),
II->getType()}, t->second,
I, {},
B);
2868 NewOp->setArgOperand(0,
I);
2871 for (
const auto &
Op :
I->operands()) {
2875 unsigned OpNo =
Op.getOperandNo();
2876 if (
II && ((
II->getIntrinsicID() == Intrinsic::spv_gep && OpNo == 0) ||
2877 (!
II->isBundleOperand(OpNo) &&
2878 II->paramHasAttr(OpNo, Attribute::ImmArg))))
2882 IsPhi ?
B.SetInsertPointPastAllocas(
I->getParent()->getParent())
2883 :
B.SetInsertPoint(
I);
2886 Type *OpTy =
Op->getType();
2894 {OpTy, OpTyVal->
getType()},
Op, OpTyVal, {},
B);
2896 if (!IsConstComposite &&
isPointerTy(OpTy) && OpElemTy !=
nullptr &&
2897 OpElemTy != IntegerType::getInt8Ty(
I->getContext())) {
2899 SmallVector<Value *, 2>
Args = {
2902 CallInst *PtrCasted =
2903 B.CreateIntrinsic(Intrinsic::spv_ptrcast, {
Types},
Args);
2908 I->setOperand(OpNo, NewOp);
2910 if (Named.insert(
I).second)
2914Type *SPIRVEmitIntrinsics::deduceFunParamElementType(Function *
F,
2916 std::unordered_set<Function *> FVisited;
2917 return deduceFunParamElementType(
F,
OpIdx, FVisited);
2920Type *SPIRVEmitIntrinsics::deduceFunParamElementType(
2921 Function *
F,
unsigned OpIdx, std::unordered_set<Function *> &FVisited) {
2923 if (!FVisited.insert(
F).second)
2926 std::unordered_set<Value *> Visited;
2929 for (User *U :
F->users()) {
2941 if (
Type *Ty = deduceElementTypeHelper(OpArg, Visited,
false))
2944 for (User *OpU : OpArg->
users()) {
2946 if (!Inst || Inst == CI)
2949 if (
Type *Ty = deduceElementTypeHelper(Inst, Visited,
false))
2956 if (FVisited.find(OuterF) != FVisited.end())
2958 for (
unsigned i = 0; i < OuterF->
arg_size(); ++i) {
2959 if (OuterF->
getArg(i) == OpArg) {
2960 Lookup.push_back(std::make_pair(OuterF, i));
2967 for (
auto &Pair :
Lookup) {
2968 if (
Type *Ty = deduceFunParamElementType(Pair.first, Pair.second, FVisited))
2975void SPIRVEmitIntrinsics::processParamTypesByFunHeader(Function *
F,
2977 B.SetInsertPointPastAllocas(
F);
2991 for (User *U :
F->users()) {
3007 for (User *U : Arg->
users()) {
3011 CI->
getParent()->getParent() == CurrF) {
3013 deduceOperandElementTypeFunctionPointer(CI,
Ops, ElemTy,
false);
3024void SPIRVEmitIntrinsics::processParamTypes(Function *
F,
IRBuilder<> &
B) {
3025 B.SetInsertPointPastAllocas(
F);
3031 if (!ElemTy && (ElemTy = deduceFunParamElementType(
F,
OpIdx)) !=
nullptr) {
3033 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
3035 propagateElemType(Arg, IntegerType::getInt8Ty(
F->getContext()),
3047 bool IsNewFTy =
false;
3063bool SPIRVEmitIntrinsics::processFunctionPointers(
Module &M) {
3066 if (
F.isIntrinsic())
3068 if (
F.isDeclaration()) {
3069 for (User *U :
F.users()) {
3082 for (User *U :
F.users()) {
3084 if (!
II ||
II->arg_size() != 3 ||
II->getOperand(0) != &
F)
3086 if (
II->getIntrinsicID() == Intrinsic::spv_assign_ptr_type ||
3087 II->getIntrinsicID() == Intrinsic::spv_ptrcast) {
3094 if (Worklist.
empty())
3097 LLVMContext &Ctx =
M.getContext();
3102 for (Function *
F : Worklist) {
3104 for (
const auto &Arg :
F->args())
3106 IRB.CreateCall(
F, Args);
3108 IRB.CreateRetVoid();
3114void SPIRVEmitIntrinsics::applyDemangledPtrArgTypes(
IRBuilder<> &
B) {
3115 DenseMap<Function *, CallInst *> Ptrcasts;
3116 for (
auto It : FDeclPtrTys) {
3118 for (
auto *U :
F->users()) {
3123 for (
auto [Idx, ElemTy] : It.second) {
3131 B.SetInsertPointPastAllocas(Arg->
getParent());
3135 }
else if (isaGEP(Param)) {
3136 replaceUsesOfWithSpvPtrcast(Param,
normalizeType(ElemTy), CI,
3145 .getFirstNonPHIOrDbgOrAlloca());
3166SPIRVEmitIntrinsics::simplifyZeroLengthArrayGepInst(GetElementPtrInst *
GEP) {
3173 Type *SrcTy =
GEP->getSourceElementType();
3174 SmallVector<Value *, 8> Indices(
GEP->indices());
3176 if (ArrTy && ArrTy->getNumElements() == 0 &&
match(Indices[0],
m_Zero())) {
3177 Indices.erase(Indices.begin());
3178 SrcTy = ArrTy->getElementType();
3180 GEP->getNoWrapFlags(),
"",
3181 GEP->getIterator());
3186void SPIRVEmitIntrinsics::emitUnstructuredLoopControls(Function &
F,
3193 if (
ST->canUseExtension(
3194 SPIRV::Extension::SPV_INTEL_unstructured_loop_controls)) {
3195 for (BasicBlock &BB :
F) {
3197 MDNode *LoopMD =
Term->getMetadata(LLVMContext::MD_loop);
3203 unsigned LC =
Ops[0];
3204 if (LC == SPIRV::LoopControl::None)
3208 B.SetInsertPoint(Term);
3209 SmallVector<Value *, 4> IntrArgs;
3210 for (
unsigned Op :
Ops)
3212 B.CreateIntrinsic(Intrinsic::spv_loop_control_intel, IntrArgs);
3219 DominatorTree DT(
F);
3224 for (Loop *L : LI.getLoopsInPreorder()) {
3235 if (LoopControlOps[0] == SPIRV::LoopControl::None)
3239 B.SetInsertPoint(Header->getTerminator());
3242 SmallVector<Value *, 4>
Args = {MergeAddress, ContinueAddress};
3243 for (
unsigned Imm : LoopControlOps)
3244 Args.emplace_back(
B.getInt32(Imm));
3245 B.CreateIntrinsic(Intrinsic::spv_loop_merge, {
Args});
3249bool SPIRVEmitIntrinsics::runOnFunction(Function &Func) {
3250 if (
Func.isDeclaration())
3254 GR =
ST.getSPIRVGlobalRegistry();
3258 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
3263 AggrConstTypes.
clear();
3265 DeletedInstrs.
clear();
3267 processParamTypesByFunHeader(CurrF,
B);
3271 SmallPtrSet<Instruction *, 4> DeadInsts;
3276 if ((!
GEP && !SGEP) || GR->findDeducedElementType(&
I))
3280 GR->addDeducedElementType(SGEP,
3285 GetElementPtrInst *NewGEP = simplifyZeroLengthArrayGepInst(
GEP);
3287 GEP->replaceAllUsesWith(NewGEP);
3291 if (
Type *GepTy = getGEPType(
GEP))
3295 for (
auto *
I : DeadInsts) {
3296 assert(
I->use_empty() &&
"Dead instruction should not have any uses left");
3297 I->eraseFromParent();
3307 Type *ElTy =
SI->getValueOperand()->getType();
3312 B.SetInsertPoint(&
Func.getEntryBlock(),
Func.getEntryBlock().begin());
3313 for (
auto &GV :
Func.getParent()->globals())
3314 processGlobalValue(GV,
B);
3316 preprocessUndefs(
B);
3317 simplifyNullAddrSpaceCasts();
3318 preprocessCompositeConstants(
B);
3320 for (BasicBlock &BB : Func)
3321 for (PHINode &Phi : BB.
phis())
3322 if (
Phi.getType()->isAggregateType()) {
3323 AggrConstTypes[&
Phi] =
Phi.getType();
3324 Phi.mutateType(
B.getInt32Ty());
3327 preprocessBoolVectorBitcasts(Func);
3331 applyDemangledPtrArgTypes(
B);
3334 for (
auto &
I : Worklist) {
3336 if (isConvergenceIntrinsic(
I))
3339 bool Postpone = insertAssignPtrTypeIntrs(
I,
B,
false);
3341 insertAssignTypeIntrs(
I,
B);
3342 insertPtrCastOrAssignTypeInstr(
I,
B);
3346 if (Postpone && !GR->findAssignPtrTypeInstr(
I))
3347 insertAssignPtrTypeIntrs(
I,
B,
true);
3350 useRoundingMode(FPI,
B);
3355 SmallPtrSet<Instruction *, 4> IncompleteRets;
3357 deduceOperandElementType(&
I, &IncompleteRets);
3361 for (BasicBlock &BB : Func)
3362 for (PHINode &Phi : BB.
phis())
3364 deduceOperandElementType(&Phi,
nullptr);
3366 for (
auto *
I : Worklist) {
3367 if (DeletedInstrs.
count(
I))
3369 TrackConstants =
true;
3379 if (isConvergenceIntrinsic(
I))
3383 processInstrAfterVisit(
I,
B);
3386 emitUnstructuredLoopControls(Func,
B);
3392bool SPIRVEmitIntrinsics::postprocessTypes(
Module &M) {
3393 if (!GR || TodoTypeSz == 0)
3396 unsigned SzTodo = TodoTypeSz;
3397 DenseMap<Value *, SmallPtrSet<Value *, 4>> ToProcess;
3402 CallInst *AssignCI = GR->findAssignPtrTypeInstr(
Op);
3403 Type *KnownTy = GR->findDeducedElementType(
Op);
3404 if (!KnownTy || !AssignCI)
3410 std::unordered_set<Value *> Visited;
3411 if (
Type *ElemTy = deduceElementTypeHelper(
Op, Visited,
false,
true)) {
3412 if (ElemTy != KnownTy) {
3413 DenseSet<std::pair<Value *, Value *>> VisitedSubst;
3414 propagateElemType(CI, ElemTy, VisitedSubst);
3421 if (
Op->hasUseList()) {
3422 for (User *U :
Op->users()) {
3429 if (TodoTypeSz == 0)
3434 SmallPtrSet<Instruction *, 4> IncompleteRets;
3436 auto It = ToProcess.
find(&
I);
3437 if (It == ToProcess.
end())
3439 It->second.remove_if([
this](
Value *V) {
return !isTodoType(V); });
3440 if (It->second.size() == 0)
3442 deduceOperandElementType(&
I, &IncompleteRets, &It->second,
true);
3443 if (TodoTypeSz == 0)
3448 return SzTodo > TodoTypeSz;
3452 void SPIRVEmitIntrinsics::parseFunDeclarations(Module &M) {
3454     if (!F.isDeclaration() || F.isIntrinsic())
3458     if (DemangledName.empty())
3462     auto [Grp, Opcode, ExtNo] = SPIRV::mapBuiltinToOpcode(
3463         DemangledName, ST.getPreferredInstructionSet());
3464     if (Opcode != SPIRV::OpGroupAsyncCopy)
3467     SmallVector<unsigned> Idxs;
3476     LLVMContext &Ctx = F.getContext();
3478     SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledName, Ctx);
3479     if (!TypeStrs.size())
3482     for (unsigned Idx : Idxs) {
3483       if (Idx >= TypeStrs.size())
3486           SPIRV::parseBuiltinCallArgumentType(TypeStrs[Idx].trim(), Ctx))
3489       FDeclPtrTys[&F].push_back(std::make_pair(Idx, ElemTy));
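// Lower llvm.masked.gather / llvm.masked.scatter calls to the
// spv_masked_gather / spv_masked_scatter intrinsics; without the
// SPV_INTEL_masked_gather_scatter extension the call is rejected with an
// error and removed.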
3494 bool SPIRVEmitIntrinsics::processMaskedMemIntrinsic(IntrinsicInst &I) {
3495   const SPIRVSubtarget &ST = TM.getSubtarget<SPIRVSubtarget>(*I.getFunction());
3497   if (I.getIntrinsicID() == Intrinsic::masked_gather) {
3498     if (!ST.canUseExtension(
3499             SPIRV::Extension::SPV_INTEL_masked_gather_scatter)) {
3500       I.getContext().emitError(
3501           &I, "llvm.masked.gather requires SPV_INTEL_masked_gather_scatter "
3505       I.eraseFromParent();
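     // Rebuild the gather as spv_masked_gather: the pointer vector, alignment
     // (as an i32 constant) and mask are passed as explicit operands (the
     // final initializer element is elided in this listing).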
3511     Value *Ptrs = I.getArgOperand(0);
3513     Value *Passthru = I.getArgOperand(2);
3516     uint32_t Alignment = I.getParamAlign(0).valueOrOne().value();
3518     SmallVector<Value *, 4> Args = {Ptrs, B.getInt32(Alignment), Mask,
3523     auto *NewI = B.CreateIntrinsic(Intrinsic::spv_masked_gather, Types, Args);
3525     I.eraseFromParent();
3529   if (I.getIntrinsicID() == Intrinsic::masked_scatter) {
3530     if (!ST.canUseExtension(
3531             SPIRV::Extension::SPV_INTEL_masked_gather_scatter)) {
3532       I.getContext().emitError(
3533           &I, "llvm.masked.scatter requires SPV_INTEL_masked_gather_scatter "
3536       I.eraseFromParent();
3542     Value *Values = I.getArgOperand(0);
3543     Value *Ptrs = I.getArgOperand(1);
3548     uint32_t Alignment = I.getParamAlign(1).valueOrOne().value();
3550     SmallVector<Value *, 4> Args = {Values, Ptrs, B.getInt32(Alignment), Mask};
3554     B.CreateIntrinsic(Intrinsic::spv_masked_scatter, Types, Args);
3555     I.eraseFromParent();
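// Rewrite bitcasts between <N x i1> vectors and iN integers into explicit
// per-element packing (zext + shl + or) or unpacking (and + icmp ne +
// insertelement) sequences, so no direct bool-vector bitcast is needed.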
3566 void SPIRVEmitIntrinsics::preprocessBoolVectorBitcasts(Function &F) {
3567   struct BoolVecBitcast {
3569     FixedVectorType *BoolVecTy;
3573   auto getAsBoolVec = [](Type *Ty) -> FixedVectorType * {
3575     return (VTy && VTy->getElementType()->isIntegerTy(1)) ? VTy : nullptr;
3583     if (auto *BVTy = getAsBoolVec(BC->getSrcTy()))
3585     else if (auto *BVTy = getAsBoolVec(BC->getDestTy()))
3589   for (auto &[BC, BoolVecTy, SrcIsBoolVec] : ToReplace) {
3591     Value *Src = BC->getOperand(0);
3592     unsigned BoolVecN = BoolVecTy->getNumElements();
3594     Type *IntTy = B.getIntNTy(BoolVecN);
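     // Pack the bool vector into an integer: each i1 lane is zero-extended,
     // shifted to its bit position, and OR-ed into the accumulator.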
3600       IntVal = ConstantInt::get(IntTy, 0);
3601       for (unsigned I = 0; I < BoolVecN; ++I) {
3602         Value *Elem = B.CreateExtractElement(Src, B.getInt32(I));
3603         Value *Ext = B.CreateZExt(Elem, IntTy);
3605         Ext = B.CreateShl(Ext, ConstantInt::get(IntTy, I));
3606         IntVal = B.CreateOr(IntVal, Ext);
3612       if (!Src->getType()->isIntegerTy())
3613         IntVal = B.CreateBitCast(Src, IntTy);
3618     if (!SrcIsBoolVec) {
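       // Unpack in the opposite direction: test each bit (and + icmp ne 0)
       // and insert the resulting i1 into the destination vector, lane by lane.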
3621       for (unsigned I = 0; I < BoolVecN; ++I) {
3624         Value *Cmp = B.CreateICmpNE(And, ConstantInt::get(IntTy, 0));
3625         Result = B.CreateInsertElement(Result, Cmp, B.getInt32(I));
3631     if (!BC->getDestTy()->isIntegerTy())
3632       Result = B.CreateBitCast(IntVal, BC->getDestTy());
3635     BC->replaceAllUsesWith(Result);
3636     BC->eraseFromParent();
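// Walk the module's intrinsic declarations, rewrite every call site of
// llvm.masked.gather / llvm.masked.scatter via processMaskedMemIntrinsic,
// then erase declarations that are no longer used.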
3640 bool SPIRVEmitIntrinsics::convertMaskedMemIntrinsics(Module &M) {
3644     if (!F.isIntrinsic())
3647     if (IID != Intrinsic::masked_gather && IID != Intrinsic::masked_scatter)
3652       Changed |= processMaskedMemIntrinsic(*II);
3656     F.eraseFromParent();
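// Module entry point: lower masked memory intrinsics, collect pointer element
// types from builtin declarations, process parameter types of every defined
// function, and finally run the post-pass type fixups and function-pointer
// handling.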
3662 bool SPIRVEmitIntrinsics::runOnModule(Module &M) {
3665   Changed |= convertMaskedMemIntrinsics(M);
3667   parseFunDeclarations(M);
3668   insertConstantsForFPFastMathDefault(M);
3679     if (!F.isDeclaration() && !F.isIntrinsic()) {
3681       processParamTypes(&F, B);
3685   CanTodoType = false;
3686   Changed |= postprocessTypes(M);
3689   Changed |= processFunctionPointers(M);
3696   SPIRVEmitIntrinsics Legacy(TM);
3697   if (Legacy.runOnModule(M))
3703   return new SPIRVEmitIntrinsics(TM);