// Debug category for LLVM_DEBUG output in this pass.
// NOTE(review): this file is a corrupted extraction — the original source's
// line numbers (34, 38, 43, ...) are fused into the text and are
// non-contiguous, i.e. interior lines are missing. Code bytes are preserved
// verbatim below; only comments were added.
34#define DEBUG_TYPE "spirv-module-analysis"
// Fragment of two cl::opt declarations: one appears to dump MIR annotated
// with SPIR-V dependency info, one lists capabilities to avoid when an
// alternative capability can enable the same feature (the option names and
// types are on lines missing from this extraction — TODO confirm against the
// full file).
38 cl::desc(
"Dump MIR with SPIR-V dependencies info"),
43 cl::desc(
"SPIR-V capabilities to avoid if there are "
44 "other options enabling a feature"),
47 "SPIR-V Shader capability")));
// Fragment of a helper that reads an unsigned integer from an operand of an
// MDNode, with a caller-supplied default (DefaultVal) presumably returned
// when the operand is absent or not an integer — TODO confirm; the function
// head and return path fall on lines missing from this extraction
// (embedded numbering jumps 61 -> 63).
61 unsigned DefaultVal = 0) {
63 const auto &
Op = MdNode->getOperand(
OpIndex);
70getSymbolicOperandRequirements(SPIRV::OperandCategory::OperandCategory Category,
76 AvoidCaps.
S.
insert(SPIRV::Capability::Shader);
78 AvoidCaps.
S.
insert(SPIRV::Capability::Kernel);
83 bool MinVerOK = SPIRVVersion.
empty() || SPIRVVersion >= ReqMinVer;
85 ReqMaxVer.
empty() || SPIRVVersion.
empty() || SPIRVVersion <= ReqMaxVer;
88 if (ReqCaps.
empty()) {
89 if (ReqExts.
empty()) {
90 if (MinVerOK && MaxVerOK)
91 return {
true, {}, {}, ReqMinVer, ReqMaxVer};
94 }
else if (MinVerOK && MaxVerOK) {
95 if (ReqCaps.
size() == 1) {
96 auto Cap = ReqCaps[0];
99 SPIRV::OperandCategory::CapabilityOperand, Cap));
100 return {
true, {Cap}, std::move(ReqExts), ReqMinVer, ReqMaxVer};
110 for (
auto Cap : ReqCaps)
113 for (
size_t i = 0, Sz = UseCaps.
size(); i < Sz; ++i) {
114 auto Cap = UseCaps[i];
115 if (i == Sz - 1 || !AvoidCaps.
S.
contains(Cap)) {
117 SPIRV::OperandCategory::CapabilityOperand, Cap));
118 return {
true, {Cap}, std::move(ReqExts), ReqMinVer, ReqMaxVer};
126 if (
llvm::all_of(ReqExts, [&ST](
const SPIRV::Extension::Extension &Ext) {
127 return ST.canUseExtension(Ext);
138void SPIRVModuleAnalysis::setBaseInfo(
const Module &M) {
142 MAI.RegisterAliasTable.clear();
143 MAI.InstrsToDelete.clear();
144 MAI.GlobalObjMap.clear();
145 MAI.GlobalVarList.clear();
146 MAI.ExtInstSetMap.clear();
148 MAI.Reqs.initAvailableCapabilities(*ST);
151 if (
auto MemModel =
M.getNamedMetadata(
"spirv.MemoryModel")) {
152 auto MemMD = MemModel->getOperand(0);
153 MAI.Addr =
static_cast<SPIRV::AddressingModel::AddressingModel
>(
154 getMetadataUInt(MemMD, 0));
156 static_cast<SPIRV::MemoryModel::MemoryModel
>(getMetadataUInt(MemMD, 1));
159 MAI.Mem = ST->isShader() ? SPIRV::MemoryModel::GLSL450
160 : SPIRV::MemoryModel::OpenCL;
161 if (
MAI.Mem == SPIRV::MemoryModel::OpenCL) {
162 unsigned PtrSize = ST->getPointerSize();
163 MAI.Addr = PtrSize == 32 ? SPIRV::AddressingModel::Physical32
164 : PtrSize == 64 ? SPIRV::AddressingModel::Physical64
165 : SPIRV::AddressingModel::Logical;
168 MAI.Addr = SPIRV::AddressingModel::Logical;
173 if (
auto VerNode =
M.getNamedMetadata(
"opencl.ocl.version")) {
174 MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_C;
177 assert(VerNode->getNumOperands() > 0 &&
"Invalid SPIR");
178 auto VersionMD = VerNode->getOperand(0);
179 unsigned MajorNum = getMetadataUInt(VersionMD, 0, 2);
180 unsigned MinorNum = getMetadataUInt(VersionMD, 1);
181 unsigned RevNum = getMetadataUInt(VersionMD, 2);
184 (std::max(1U, MajorNum) * 100 + MinorNum) * 1000 + RevNum;
190 if (!ST->isShader()) {
191 MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_CPP;
192 MAI.SrcLangVersion = 100000;
194 MAI.SrcLang = SPIRV::SourceLanguage::Unknown;
195 MAI.SrcLangVersion = 0;
199 if (
auto ExtNode =
M.getNamedMetadata(
"opencl.used.extensions")) {
200 for (
unsigned I = 0,
E = ExtNode->getNumOperands();
I !=
E; ++
I) {
201 MDNode *MD = ExtNode->getOperand(
I);
211 MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand,
213 MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::SourceLanguageOperand,
215 MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::AddressingModelOperand,
218 if (
MAI.Mem == SPIRV::MemoryModel::VulkanKHR)
219 MAI.Reqs.addExtension(SPIRV::Extension::SPV_KHR_vulkan_memory_model);
221 if (!ST->isShader()) {
223 MAI.ExtInstSetMap[
static_cast<unsigned>(
224 SPIRV::InstructionSet::OpenCL_std)] =
MAI.getNextIDRegister();
235 if (
UseMI.getOpcode() != SPIRV::OpDecorate &&
236 UseMI.getOpcode() != SPIRV::OpMemberDecorate)
239 for (
unsigned I = 0;
I <
UseMI.getNumOperands(); ++
I) {
257 for (
unsigned i = 0; i <
MI.getNumOperands(); ++i) {
266 unsigned Opcode =
MI.getOpcode();
267 if ((Opcode == SPIRV::OpDecorate) && i >= 2) {
268 unsigned DecorationID =
MI.getOperand(1).getImm();
269 if (DecorationID != SPIRV::Decoration::FuncParamAttr &&
270 DecorationID != SPIRV::Decoration::UserSemantic &&
271 DecorationID != SPIRV::Decoration::CacheControlLoadINTEL &&
272 DecorationID != SPIRV::Decoration::CacheControlStoreINTEL)
278 if (!UseDefReg && MO.
isDef()) {
286 dbgs() <<
"Unexpectedly, no global id found for the operand ";
288 dbgs() <<
"\nInstruction: ";
307 appendDecorationsForReg(
MI.getMF()->getRegInfo(), DefReg, Signature);
314 unsigned Opcode =
MI.getOpcode();
316 case SPIRV::OpTypeForwardPointer:
319 case SPIRV::OpVariable:
320 return static_cast<SPIRV::StorageClass::StorageClass
>(
321 MI.getOperand(2).
getImm()) != SPIRV::StorageClass::Function;
322 case SPIRV::OpFunction:
323 case SPIRV::OpFunctionParameter:
326 if (GR->hasConstFunPtr() && Opcode == SPIRV::OpUndef) {
329 if (
UseMI.getOpcode() != SPIRV::OpConstantFunctionPointerINTEL)
335 MAI.setSkipEmission(&
MI);
339 return TII->isTypeDeclInstr(
MI) || TII->isConstantInstr(
MI) ||
340 TII->isInlineAsmDefInstr(
MI);
// Resolves a use of a function pointer (operand 2 of MI) to the global
// register assigned to the function's definition, and records that alias for
// OpReg in the current MF. Visible steps: look up the defining
// MachineOperand via GR->getFunctionDefinitionByUse, walk/visit the
// definition (visitDecl) in the defining function's MF, then copy the
// definition's register alias onto OpReg.
// NOTE(review): interior lines are missing from this extraction (numbering
// jumps 346 -> 348 -> 350, 351 -> 354 -> 357, ...); the do/while loop header
// and several declarations (OpReg, FunDefMF) are not visible here.
346void SPIRVModuleAnalysis::visitFunPtrUse(
348 std::map<const Value *, unsigned> &GlobalToGReg,
const MachineFunction *MF,
350 const MachineOperand *OpFunDef =
351 GR->getFunctionDefinitionByUse(&
MI.getOperand(2));
354 const MachineInstr *OpDefMI = OpFunDef->
getParent();
357 const MachineRegisterInfo &FunDefMRI = FunDefMF->
getRegInfo();
// Recursively number the declaration section of the defining function so the
// definition's register gets a global alias before we read it below.
359 visitDecl(FunDefMRI, SignatureToGReg, GlobalToGReg, FunDefMF, *OpDefMI);
361 }
// Loop continues while the definition instruction is an OpFunction or
// OpFunctionParameter (the loop's opening `do {` is on a missing line).
while (OpDefMI && (OpDefMI->
getOpcode() == SPIRV::OpFunction ||
362 OpDefMI->
getOpcode() == SPIRV::OpFunctionParameter));
364 MCRegister GlobalFunDefReg =
365 MAI.getRegisterAlias(FunDefMF, OpFunDef->
getReg());
367 "Function definition must refer to a global register");
// Make the local use register an alias of the function definition's global
// register.
368 MAI.setRegisterAlias(MF, OpReg, GlobalFunDefReg);
373void SPIRVModuleAnalysis::visitDecl(
375 std::map<const Value *, unsigned> &GlobalToGReg,
const MachineFunction *MF,
377 unsigned Opcode =
MI.getOpcode();
380 for (
const MachineOperand &MO :
MI.operands()) {
385 if (Opcode == SPIRV::OpConstantFunctionPointerINTEL &&
387 visitFunPtrUse(OpReg, SignatureToGReg, GlobalToGReg, MF,
MI);
391 if (
MAI.hasRegisterAlias(MF, MO.
getReg()))
395 if (isDeclSection(MRI, *OpDefMI))
396 visitDecl(MRI, SignatureToGReg, GlobalToGReg, MF, *OpDefMI);
402 dbgs() <<
"Unexpectedly, no unique definition for the operand ";
404 dbgs() <<
"\nInstruction: ";
409 "No unique definition is found for the virtual register");
413 bool IsFunDef =
false;
414 if (TII->isSpecConstantInstr(
MI)) {
415 GReg =
MAI.getNextIDRegister();
417 }
else if (Opcode == SPIRV::OpFunction ||
418 Opcode == SPIRV::OpFunctionParameter) {
419 GReg = handleFunctionOrParameter(MF,
MI, GlobalToGReg, IsFunDef);
420 }
else if (Opcode == SPIRV::OpTypeStruct ||
421 Opcode == SPIRV::OpConstantComposite) {
422 GReg = handleTypeDeclOrConstant(
MI, SignatureToGReg);
423 const MachineInstr *NextInstr =
MI.getNextNode();
425 ((Opcode == SPIRV::OpTypeStruct &&
426 NextInstr->
getOpcode() == SPIRV::OpTypeStructContinuedINTEL) ||
427 (Opcode == SPIRV::OpConstantComposite &&
429 SPIRV::OpConstantCompositeContinuedINTEL))) {
430 MCRegister Tmp = handleTypeDeclOrConstant(*NextInstr, SignatureToGReg);
432 MAI.setSkipEmission(NextInstr);
435 }
else if (TII->isTypeDeclInstr(
MI) || TII->isConstantInstr(
MI) ||
436 TII->isInlineAsmDefInstr(
MI)) {
437 GReg = handleTypeDeclOrConstant(
MI, SignatureToGReg);
438 }
else if (Opcode == SPIRV::OpVariable) {
439 GReg = handleVariable(MF,
MI, GlobalToGReg);
442 dbgs() <<
"\nInstruction: ";
448 MAI.setRegisterAlias(MF,
MI.getOperand(0).getReg(), GReg);
450 MAI.setSkipEmission(&
MI);
// Assigns (or reuses) a global MCRegister for an OpFunction /
// OpFunctionParameter instruction. Visible behavior: map the defining vreg
// to its IR-level global object (GR->getGlobalObject), set IsFunDef from
// whether the referenced Function has a body, and use
// GlobalToGReg.try_emplace to allocate a fresh ID register only on first
// sight of the object.
// NOTE(review): lines are missing from this extraction (453 -> 455,
// 457 -> 461, 463 -> 466); the derivation of `F` from GObj and the return
// statement are not visible.
453MCRegister SPIRVModuleAnalysis::handleFunctionOrParameter(
455 std::map<const Value *, unsigned> &GlobalToGReg,
bool &IsFunDef) {
456 const Value *GObj = GR->getGlobalObject(MF,
MI.getOperand(0).getReg());
457 assert(GObj &&
"Unregistered global definition");
461 assert(
F &&
"Expected a reference to a function or an argument");
// A declaration (no body) is not a function definition.
462 IsFunDef = !
F->isDeclaration();
463 auto [It,
Inserted] = GlobalToGReg.try_emplace(GObj);
466 MCRegister GReg =
MAI.getNextIDRegister();
// Assigns (or reuses) a global MCRegister for a type-declaration or constant
// instruction, keyed by the instruction's signature (MISign): try_emplace on
// SignatureToGReg deduplicates identical declarations across functions, and
// a new ID register is taken only when the signature is first inserted.
// NOTE(review): the return type line, MISign computation, and return
// statement fall on lines missing from this extraction (474 -> 477 -> 480).
474SPIRVModuleAnalysis::handleTypeDeclOrConstant(
const MachineInstr &
MI,
477 auto [It,
Inserted] = SignatureToGReg.try_emplace(MISign);
480 MCRegister GReg =
MAI.getNextIDRegister();
// Assigns (or reuses) a global MCRegister for an OpVariable. Visible
// behavior: the instruction is appended to MAI.GlobalVarList, the defining
// vreg is mapped to its IR global object, GlobalToGReg deduplicates repeat
// sightings, and the global object is recorded in MAI.GlobalObjMap under GV
// (GV's derivation from GObj is on a missing line — TODO confirm it is a
// cast of GObj).
// NOTE(review): lines missing in this extraction (486 -> 488, 492 -> 495,
// 495 -> 499); the return statement is not visible.
486MCRegister SPIRVModuleAnalysis::handleVariable(
488 std::map<const Value *, unsigned> &GlobalToGReg) {
489 MAI.GlobalVarList.push_back(&
MI);
490 const Value *GObj = GR->getGlobalObject(MF,
MI.getOperand(0).getReg());
491 assert(GObj &&
"Unregistered global definition");
492 auto [It,
Inserted] = GlobalToGReg.try_emplace(GObj);
495 MCRegister GReg =
MAI.getNextIDRegister();
499 MAI.GlobalObjMap[GV] = GReg;
503void SPIRVModuleAnalysis::collectDeclarations(
const Module &M) {
505 std::map<const Value *, unsigned> GlobalToGReg;
506 for (
const Function &
F : M) {
507 MachineFunction *MF = MMI->getMachineFunction(
F);
510 const MachineRegisterInfo &MRI = MF->
getRegInfo();
511 unsigned PastHeader = 0;
512 for (MachineBasicBlock &
MBB : *MF) {
513 for (MachineInstr &
MI :
MBB) {
514 if (
MI.getNumOperands() == 0)
516 unsigned Opcode =
MI.getOpcode();
517 if (Opcode == SPIRV::OpFunction) {
518 if (PastHeader == 0) {
522 }
else if (Opcode == SPIRV::OpFunctionParameter) {
525 }
else if (PastHeader > 0) {
529 const MachineOperand &DefMO =
MI.getOperand(0);
531 case SPIRV::OpExtension:
532 MAI.Reqs.addExtension(SPIRV::Extension::Extension(DefMO.
getImm()));
533 MAI.setSkipEmission(&
MI);
535 case SPIRV::OpCapability:
536 MAI.Reqs.addCapability(SPIRV::Capability::Capability(DefMO.
getImm()));
537 MAI.setSkipEmission(&
MI);
542 if (DefMO.
isReg() && isDeclSection(MRI,
MI) &&
543 !
MAI.hasRegisterAlias(MF, DefMO.
getReg()))
544 visitDecl(MRI, SignatureToGReg, GlobalToGReg, MF,
MI);
557 if (
MI.getOpcode() == SPIRV::OpDecorate) {
559 auto Dec =
MI.getOperand(1).getImm();
560 if (Dec == SPIRV::Decoration::LinkageAttributes) {
561 auto Lnk =
MI.getOperand(
MI.getNumOperands() - 1).getImm();
562 if (Lnk == SPIRV::LinkageType::Import) {
567 MAI.GlobalObjMap[ImportedFunc] =
568 MAI.getRegisterAlias(
MI.getMF(), Target);
571 }
else if (
MI.getOpcode() == SPIRV::OpFunction) {
574 MCRegister GlobalReg =
MAI.getRegisterAlias(
MI.getMF(),
Reg);
576 MAI.GlobalObjMap[
F] = GlobalReg;
588 auto FoundMI = IS.insert(std::move(MISign));
589 if (!FoundMI.second) {
590 if (
MI.getOpcode() == SPIRV::OpDecorate) {
592 "Decoration instructions must have at least 2 operands");
594 "Only OpDecorate instructions can be duplicates");
599 if (
MI.getOperand(1).getImm() != SPIRV::Decoration::FPFastMathMode)
604 if (instrToSignature(*OrigMI, MAI,
true) == MISign) {
605 assert(OrigMI->getNumOperands() ==
MI.getNumOperands() &&
606 "Original instruction must have the same number of operands");
608 OrigMI->getNumOperands() == 3 &&
609 "FPFastMathMode decoration must have 3 operands for OpDecorate");
610 unsigned OrigFlags = OrigMI->getOperand(2).getImm();
611 unsigned NewFlags =
MI.getOperand(2).getImm();
612 if (OrigFlags == NewFlags)
616 unsigned FinalFlags = OrigFlags | NewFlags;
618 <<
"Warning: Conflicting FPFastMathMode decoration flags "
620 << *OrigMI <<
"Original flags: " << OrigFlags
621 <<
", new flags: " << NewFlags
622 <<
". They will be merged on a best effort basis, but not "
623 "validated. Final flags: "
624 << FinalFlags <<
"\n";
631 assert(
false &&
"No original instruction found for the duplicate "
632 "OpDecorate, but we found one in IS.");
645void SPIRVModuleAnalysis::processOtherInstrs(
const Module &M) {
647 for (
const Function &
F : M) {
648 if (
F.isDeclaration())
650 MachineFunction *MF = MMI->getMachineFunction(
F);
653 for (MachineBasicBlock &
MBB : *MF)
654 for (MachineInstr &
MI :
MBB) {
655 if (
MAI.getSkipEmission(&
MI))
657 const unsigned OpCode =
MI.getOpcode();
658 if (OpCode == SPIRV::OpString) {
660 }
else if (OpCode == SPIRV::OpExtInst &&
MI.getOperand(2).isImm() &&
661 MI.getOperand(2).getImm() ==
662 SPIRV::InstructionSet::
663 NonSemantic_Shader_DebugInfo_100) {
664 MachineOperand Ins =
MI.getOperand(3);
665 namespace NS = SPIRV::NonSemanticExtInst;
666 static constexpr int64_t GlobalNonSemanticDITy[] = {
667 NS::DebugSource, NS::DebugCompilationUnit, NS::DebugInfoNone,
668 NS::DebugTypeBasic, NS::DebugTypePointer};
669 bool IsGlobalDI =
false;
670 for (
unsigned Idx = 0; Idx < std::size(GlobalNonSemanticDITy); ++Idx)
671 IsGlobalDI |= Ins.
getImm() == GlobalNonSemanticDITy[Idx];
674 }
else if (OpCode == SPIRV::OpName || OpCode == SPIRV::OpMemberName) {
676 }
else if (OpCode == SPIRV::OpEntryPoint) {
678 }
else if (TII->isAliasingInstr(
MI)) {
680 }
else if (TII->isDecorationInstr(
MI)) {
682 collectFuncNames(
MI, &
F);
683 }
else if (TII->isConstantInstr(
MI)) {
687 }
else if (OpCode == SPIRV::OpFunction) {
688 collectFuncNames(
MI, &
F);
689 }
else if (OpCode == SPIRV::OpTypeForwardPointer) {
// Walks every instruction of every defined function in the module and gives
// each not-yet-aliased register a fresh module-global ID register
// (MAI.getNextIDRegister), recording the mapping per-MF via
// MAI.setRegisterAlias. For OpExtInst instructions it additionally ensures
// the extended-instruction-set ID (operand 2 immediate) has an entry in
// MAI.ExtInstSetMap, allocating a new ID register on first insertion.
// NOTE(review): this extraction is missing interior lines (numbering jumps
// 701 -> 703, 707 -> 711, 714 -> 716, 719 -> 721); `Reg`'s derivation from
// `Op` and several `continue`s are not visible.
699void SPIRVModuleAnalysis::numberRegistersGlobally(
const Module &M) {
700 for (
const Function &
F : M) {
// Declarations have no MachineFunction body to number.
701 if (
F.isDeclaration())
703 MachineFunction *MF = MMI->getMachineFunction(
F);
705 for (MachineBasicBlock &
MBB : *MF) {
706 for (MachineInstr &
MI :
MBB) {
707 for (MachineOperand &
Op :
MI.operands()) {
// Skip registers that already received a global alias earlier.
711 if (
MAI.hasRegisterAlias(MF,
Reg))
713 MCRegister NewReg =
MAI.getNextIDRegister();
714 MAI.setRegisterAlias(MF,
Reg, NewReg);
716 if (
MI.getOpcode() != SPIRV::OpExtInst)
718 auto Set =
MI.getOperand(2).getImm();
// try_emplace: allocate an ID for this ext-inst set only on first use.
719 auto [It,
Inserted] =
MAI.ExtInstSetMap.try_emplace(Set);
721 It->second =
MAI.getNextIDRegister();
729 SPIRV::OperandCategory::OperandCategory Category, uint32_t i,
731 addRequirements(getSymbolicOperandRequirements(Category, i, ST, *
this));
734void SPIRV::RequirementHandler::recursiveAddCapabilities(
736 for (
const auto &Cap : ToPrune) {
740 recursiveAddCapabilities(ImplicitDecls);
745 for (
const auto &Cap : ToAdd) {
746 bool IsNewlyInserted = AllCaps.insert(Cap).second;
747 if (!IsNewlyInserted)
751 recursiveAddCapabilities(ImplicitDecls);
752 MinimalCaps.push_back(Cap);
757 const SPIRV::Requirements &Req) {
761 if (Req.
Cap.has_value())
762 addCapabilities({Req.
Cap.value()});
764 addExtensions(Req.
Exts);
767 if (!MaxVersion.empty() && Req.
MinVer > MaxVersion) {
769 <<
" and <= " << MaxVersion <<
"\n");
773 if (MinVersion.empty() || Req.
MinVer > MinVersion)
778 if (!MinVersion.empty() && Req.
MaxVer < MinVersion) {
780 <<
" and >= " << MinVersion <<
"\n");
784 if (MaxVersion.empty() || Req.
MaxVer < MaxVersion)
790 const SPIRVSubtarget &ST)
const {
792 bool IsSatisfiable =
true;
793 auto TargetVer =
ST.getSPIRVVersion();
795 if (!MaxVersion.empty() && !TargetVer.empty() && MaxVersion < TargetVer) {
797 dbgs() <<
"Target SPIR-V version too high for required features\n"
798 <<
"Required max version: " << MaxVersion <<
" target version "
799 << TargetVer <<
"\n");
800 IsSatisfiable =
false;
803 if (!MinVersion.empty() && !TargetVer.empty() && MinVersion > TargetVer) {
804 LLVM_DEBUG(
dbgs() <<
"Target SPIR-V version too low for required features\n"
805 <<
"Required min version: " << MinVersion
806 <<
" target version " << TargetVer <<
"\n");
807 IsSatisfiable =
false;
810 if (!MinVersion.empty() && !MaxVersion.empty() && MinVersion > MaxVersion) {
813 <<
"Version is too low for some features and too high for others.\n"
814 <<
"Required SPIR-V min version: " << MinVersion
815 <<
" required SPIR-V max version " << MaxVersion <<
"\n");
816 IsSatisfiable =
false;
819 AvoidCapabilitiesSet AvoidCaps;
821 AvoidCaps.
S.
insert(SPIRV::Capability::Shader);
823 AvoidCaps.
S.
insert(SPIRV::Capability::Kernel);
825 for (
auto Cap : MinimalCaps) {
826 if (AvailableCaps.contains(Cap) && !AvoidCaps.
S.
contains(Cap))
830 OperandCategory::CapabilityOperand, Cap)
832 IsSatisfiable =
false;
835 for (
auto Ext : AllExtensions) {
836 if (
ST.canUseExtension(Ext))
840 OperandCategory::ExtensionOperand, Ext)
842 IsSatisfiable =
false;
851 for (
const auto Cap : ToAdd)
852 if (AvailableCaps.insert(Cap).second)
854 SPIRV::OperandCategory::CapabilityOperand, Cap));
858 const Capability::Capability
ToRemove,
859 const Capability::Capability IfPresent) {
860 if (AllCaps.contains(IfPresent))
868 addAvailableCaps({Capability::Shader, Capability::Linkage, Capability::Int8,
871 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 3)))
873 Capability::GroupNonUniformVote,
874 Capability::GroupNonUniformArithmetic,
875 Capability::GroupNonUniformBallot,
876 Capability::GroupNonUniformClustered,
877 Capability::GroupNonUniformShuffle,
878 Capability::GroupNonUniformShuffleRelative,
879 Capability::GroupNonUniformQuad});
881 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
883 Capability::DotProductInput4x8Bit,
884 Capability::DotProductInput4x8BitPacked,
885 Capability::DemoteToHelperInvocation});
888 for (
auto Extension :
ST.getAllAvailableExtensions()) {
894 if (!
ST.isShader()) {
895 initAvailableCapabilitiesForOpenCL(ST);
900 initAvailableCapabilitiesForVulkan(ST);
// Seeds the set of capabilities available when targeting OpenCL-flavoured
// SPIR-V, gated on subtarget properties: full profile, image support,
// OpenCL version, and SPIR-V version.
// NOTE(review): interior lines are missing from this extraction (numbering
// jumps 908 -> 911, 915 -> 917, 921 -> 924, 925 -> 927); the opening
// addAvailableCaps call and the bodies of several `if`s are not fully
// visible.
907void RequirementHandler::initAvailableCapabilitiesForOpenCL(
908 const SPIRVSubtarget &ST) {
911 Capability::Kernel, Capability::Vector16,
912 Capability::Groups, Capability::GenericPointer,
913 Capability::StorageImageWriteWithoutFormat,
914 Capability::StorageImageReadWithoutFormat});
915 if (
ST.hasOpenCLFullProfile())
917 if (
ST.hasOpenCLImageSupport()) {
919 Capability::Image1D, Capability::SampledBuffer,
920 Capability::ImageBuffer});
// Extra image capability gated on OpenCL >= 2.0 (body on a missing line).
921 if (
ST.isAtLeastOpenCLVer(VersionTuple(2, 0)))
924 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 1)) &&
925 ST.isAtLeastOpenCLVer(VersionTuple(2, 2)))
// SPIR-V 1.4 adds the float-controls denorm/rounding-mode capabilities.
927 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 4)))
928 addAvailableCaps({Capability::DenormPreserve, Capability::DenormFlushToZero,
929 Capability::SignedZeroInfNanPreserve,
930 Capability::RoundingModeRTE,
931 Capability::RoundingModeRTZ});
// Seeds the set of capabilities available when targeting Vulkan-flavoured
// SPIR-V: a baseline set unconditionally, the descriptor-indexing (EXT)
// capabilities for SPIR-V >= 1.5, and storage-image-without-format
// capabilities for SPIR-V >= 1.6.
// NOTE(review): interior lines are missing from this extraction (numbering
// jumps 939 -> 942, 951 -> 954, 966 -> 970); the addAvailableCaps call
// heads for the 1.5 and 1.6 groups are not fully visible.
938void RequirementHandler::initAvailableCapabilitiesForVulkan(
939 const SPIRVSubtarget &ST) {
942 addAvailableCaps({Capability::Int64, Capability::Float16, Capability::Float64,
943 Capability::GroupNonUniform, Capability::Image1D,
944 Capability::SampledBuffer, Capability::ImageBuffer,
945 Capability::UniformBufferArrayDynamicIndexing,
946 Capability::SampledImageArrayDynamicIndexing,
947 Capability::StorageBufferArrayDynamicIndexing,
948 Capability::StorageImageArrayDynamicIndexing,
949 Capability::DerivativeControl, Capability::MinLod,
950 Capability::ImageGatherExtended, Capability::Addresses,
951 Capability::VulkanMemoryModelKHR});
// Descriptor-indexing capabilities were promoted to core in SPIR-V 1.5.
954 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 5))) {
956 {Capability::ShaderNonUniformEXT, Capability::RuntimeDescriptorArrayEXT,
957 Capability::InputAttachmentArrayDynamicIndexingEXT,
958 Capability::UniformTexelBufferArrayDynamicIndexingEXT,
959 Capability::StorageTexelBufferArrayDynamicIndexingEXT,
960 Capability::UniformBufferArrayNonUniformIndexingEXT,
961 Capability::SampledImageArrayNonUniformIndexingEXT,
962 Capability::StorageBufferArrayNonUniformIndexingEXT,
963 Capability::StorageImageArrayNonUniformIndexingEXT,
964 Capability::InputAttachmentArrayNonUniformIndexingEXT,
965 Capability::UniformTexelBufferArrayNonUniformIndexingEXT,
966 Capability::StorageTexelBufferArrayNonUniformIndexingEXT});
970 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
972 Capability::StorageImageReadWithoutFormat});
980static void addOpDecorateReqs(
const MachineInstr &
MI,
unsigned DecIndex,
983 int64_t DecOp =
MI.getOperand(DecIndex).getImm();
984 auto Dec =
static_cast<SPIRV::Decoration::Decoration
>(DecOp);
986 SPIRV::OperandCategory::DecorationOperand, Dec, ST, Reqs));
988 if (Dec == SPIRV::Decoration::BuiltIn) {
989 int64_t BuiltInOp =
MI.getOperand(DecIndex + 1).getImm();
990 auto BuiltIn =
static_cast<SPIRV::BuiltIn::BuiltIn
>(BuiltInOp);
992 SPIRV::OperandCategory::BuiltInOperand, BuiltIn, ST, Reqs));
993 }
else if (Dec == SPIRV::Decoration::LinkageAttributes) {
994 int64_t LinkageOp =
MI.getOperand(
MI.getNumOperands() - 1).getImm();
995 SPIRV::LinkageType::LinkageType LnkType =
996 static_cast<SPIRV::LinkageType::LinkageType
>(LinkageOp);
997 if (LnkType == SPIRV::LinkageType::LinkOnceODR)
998 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_linkonce_odr);
999 }
else if (Dec == SPIRV::Decoration::CacheControlLoadINTEL ||
1000 Dec == SPIRV::Decoration::CacheControlStoreINTEL) {
1001 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_cache_controls);
1002 }
else if (Dec == SPIRV::Decoration::HostAccessINTEL) {
1003 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_global_variable_host_access);
1004 }
else if (Dec == SPIRV::Decoration::InitModeINTEL ||
1005 Dec == SPIRV::Decoration::ImplementInRegisterMapINTEL) {
1007 SPIRV::Extension::SPV_INTEL_global_variable_fpga_decorations);
1008 }
else if (Dec == SPIRV::Decoration::NonUniformEXT) {
1010 }
else if (Dec == SPIRV::Decoration::FPMaxErrorDecorationINTEL) {
1012 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_fp_max_error);
1013 }
else if (Dec == SPIRV::Decoration::FPFastMathMode) {
1014 if (
ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)) {
1016 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_float_controls2);
1025 assert(
MI.getNumOperands() >= 8 &&
"Insufficient operands for OpTypeImage");
1028 int64_t ImgFormatOp =
MI.getOperand(7).getImm();
1029 auto ImgFormat =
static_cast<SPIRV::ImageFormat::ImageFormat
>(ImgFormatOp);
1033 bool IsArrayed =
MI.getOperand(4).getImm() == 1;
1034 bool IsMultisampled =
MI.getOperand(5).getImm() == 1;
1035 bool NoSampler =
MI.getOperand(6).getImm() == 2;
1038 switch (
MI.getOperand(2).getImm()) {
1039 case SPIRV::Dim::DIM_1D:
1041 : SPIRV::Capability::Sampled1D);
1043 case SPIRV::Dim::DIM_2D:
1044 if (IsMultisampled && NoSampler)
1047 case SPIRV::Dim::DIM_Cube:
1051 : SPIRV::Capability::SampledCubeArray);
1053 case SPIRV::Dim::DIM_Rect:
1055 : SPIRV::Capability::SampledRect);
1057 case SPIRV::Dim::DIM_Buffer:
1059 : SPIRV::Capability::SampledBuffer);
1061 case SPIRV::Dim::DIM_SubpassData:
1067 if (!
ST.isShader()) {
1068 if (
MI.getNumOperands() > 8 &&
1069 MI.getOperand(8).getImm() == SPIRV::AccessQualifier::ReadWrite)
1078 TypeDef->
getOpcode() == SPIRV::OpTypeFloat &&
1084#define ATOM_FLT_REQ_EXT_MSG(ExtName) \
1085 "The atomic float instruction requires the following SPIR-V " \
1086 "extension: SPV_EXT_shader_atomic_float" ExtName
1087static void AddAtomicVectorFloatRequirements(
const MachineInstr &
MI,
1091 MI.getMF()->getRegInfo().getVRegDef(
MI.getOperand(1).getReg());
1094 if (Rank != 2 && Rank != 4)
1096 "must be a 2-component or 4 component vector");
1101 if (EltTypeDef->
getOpcode() != SPIRV::OpTypeFloat ||
1104 "The element type for the result type of an atomic vector float "
1105 "instruction must be a 16-bit floating-point scalar");
1107 if (isBFloat16Type(EltTypeDef))
1109 "The element type for the result type of an atomic vector float "
1110 "instruction cannot be a bfloat16 scalar");
1111 if (!
ST.canUseExtension(SPIRV::Extension::SPV_NV_shader_atomic_fp16_vector))
1113 "The atomic float16 vector instruction requires the following SPIR-V "
1114 "extension: SPV_NV_shader_atomic_fp16_vector");
1116 Reqs.
addExtension(SPIRV::Extension::SPV_NV_shader_atomic_fp16_vector);
1117 Reqs.
addCapability(SPIRV::Capability::AtomicFloat16VectorNV);
1124 "Expect register operand in atomic float instruction");
1125 Register TypeReg =
MI.getOperand(1).getReg();
1128 if (TypeDef->
getOpcode() == SPIRV::OpTypeVector)
1129 return AddAtomicVectorFloatRequirements(
MI, Reqs, ST);
1131 if (TypeDef->
getOpcode() != SPIRV::OpTypeFloat)
1133 "floating-point type scalar");
1136 unsigned Op =
MI.getOpcode();
1137 if (
Op == SPIRV::OpAtomicFAddEXT) {
1138 if (!
ST.canUseExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_add))
1140 Reqs.
addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_add);
1143 if (isBFloat16Type(TypeDef)) {
1144 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics))
1146 "The atomic bfloat16 instruction requires the following SPIR-V "
1147 "extension: SPV_INTEL_16bit_atomics",
1149 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics);
1150 Reqs.
addCapability(SPIRV::Capability::AtomicBFloat16AddINTEL);
1152 if (!
ST.canUseExtension(
1153 SPIRV::Extension::SPV_EXT_shader_atomic_float16_add))
1155 Reqs.
addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float16_add);
1167 "Unexpected floating-point type width in atomic float instruction");
1170 if (!
ST.canUseExtension(
1171 SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max))
1173 Reqs.
addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max);
1176 if (isBFloat16Type(TypeDef)) {
1177 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics))
1179 "The atomic bfloat16 instruction requires the following SPIR-V "
1180 "extension: SPV_INTEL_16bit_atomics",
1182 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics);
1183 Reqs.
addCapability(SPIRV::Capability::AtomicBFloat16MinMaxINTEL);
1185 Reqs.
addCapability(SPIRV::Capability::AtomicFloat16MinMaxEXT);
1189 Reqs.
addCapability(SPIRV::Capability::AtomicFloat32MinMaxEXT);
1192 Reqs.
addCapability(SPIRV::Capability::AtomicFloat64MinMaxEXT);
1196 "Unexpected floating-point type width in atomic float instruction");
1202 if (ImageInst->
getOpcode() != SPIRV::OpTypeImage)
1206 return Dim == SPIRV::Dim::DIM_Buffer && Sampled == 1;
1210 if (ImageInst->
getOpcode() != SPIRV::OpTypeImage)
1214 return Dim == SPIRV::Dim::DIM_Buffer && Sampled == 2;
1218 if (ImageInst->
getOpcode() != SPIRV::OpTypeImage)
1222 return Dim != SPIRV::Dim::DIM_Buffer && Sampled == 1;
1226 if (ImageInst->
getOpcode() != SPIRV::OpTypeImage)
1230 return Dim == SPIRV::Dim::DIM_SubpassData && Sampled == 2;
1234 if (ImageInst->
getOpcode() != SPIRV::OpTypeImage)
1238 return Dim != SPIRV::Dim::DIM_Buffer && Sampled == 2;
// Returns whether SampledImageInst is an OpTypeSampledImage whose underlying
// image type satisfies isSampledImage. The derivation of ImageInst from the
// sampled-image operand is on lines missing from this extraction (numbering
// jumps 1242 -> 1248) — TODO confirm against the full file.
1241bool isCombinedImageSampler(
MachineInstr *SampledImageInst) {
1242 if (SampledImageInst->
getOpcode() != SPIRV::OpTypeSampledImage)
1248 return isSampledImage(ImageInst);
1253 if (
MI.getOpcode() != SPIRV::OpDecorate)
1257 if (Dec == SPIRV::Decoration::NonUniformEXT)
1275 if (
StorageClass != SPIRV::StorageClass::StorageClass::UniformConstant &&
1276 StorageClass != SPIRV::StorageClass::StorageClass::Uniform &&
1277 StorageClass != SPIRV::StorageClass::StorageClass::StorageBuffer) {
1282 hasNonUniformDecoration(
Instr.getOperand(0).getReg(), MRI);
1284 auto FirstIndexReg =
Instr.getOperand(3).getReg();
1285 bool FirstIndexIsConstant =
1288 if (
StorageClass == SPIRV::StorageClass::StorageClass::StorageBuffer) {
1291 SPIRV::Capability::StorageBufferArrayNonUniformIndexingEXT);
1292 else if (!FirstIndexIsConstant)
1294 SPIRV::Capability::StorageBufferArrayDynamicIndexing);
1300 if (PointeeType->
getOpcode() != SPIRV::OpTypeImage &&
1301 PointeeType->
getOpcode() != SPIRV::OpTypeSampledImage &&
1302 PointeeType->
getOpcode() != SPIRV::OpTypeSampler) {
1306 if (isUniformTexelBuffer(PointeeType)) {
1309 SPIRV::Capability::UniformTexelBufferArrayNonUniformIndexingEXT);
1310 else if (!FirstIndexIsConstant)
1312 SPIRV::Capability::UniformTexelBufferArrayDynamicIndexingEXT);
1313 }
else if (isInputAttachment(PointeeType)) {
1316 SPIRV::Capability::InputAttachmentArrayNonUniformIndexingEXT);
1317 else if (!FirstIndexIsConstant)
1319 SPIRV::Capability::InputAttachmentArrayDynamicIndexingEXT);
1320 }
else if (isStorageTexelBuffer(PointeeType)) {
1323 SPIRV::Capability::StorageTexelBufferArrayNonUniformIndexingEXT);
1324 else if (!FirstIndexIsConstant)
1326 SPIRV::Capability::StorageTexelBufferArrayDynamicIndexingEXT);
1327 }
else if (isSampledImage(PointeeType) ||
1328 isCombinedImageSampler(PointeeType) ||
1329 PointeeType->
getOpcode() == SPIRV::OpTypeSampler) {
1332 SPIRV::Capability::SampledImageArrayNonUniformIndexingEXT);
1333 else if (!FirstIndexIsConstant)
1335 SPIRV::Capability::SampledImageArrayDynamicIndexing);
1336 }
else if (isStorageImage(PointeeType)) {
1339 SPIRV::Capability::StorageImageArrayNonUniformIndexingEXT);
1340 else if (!FirstIndexIsConstant)
1342 SPIRV::Capability::StorageImageArrayDynamicIndexing);
1346static bool isImageTypeWithUnknownFormat(
SPIRVTypeInst TypeInst) {
1347 if (TypeInst->
getOpcode() != SPIRV::OpTypeImage)
1356 if (
ST.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product))
1357 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_integer_dot_product);
1361 assert(
MI.getOperand(2).isReg() &&
"Unexpected operand in dot");
1365 assert(
Input->getOperand(1).isReg() &&
"Unexpected operand in dot input");
1369 if (TypeDef->
getOpcode() == SPIRV::OpTypeInt) {
1371 Reqs.
addCapability(SPIRV::Capability::DotProductInput4x8BitPacked);
1372 }
else if (TypeDef->
getOpcode() == SPIRV::OpTypeVector) {
1378 "Dot operand of 8-bit integer type requires 4 components");
1379 Reqs.
addCapability(SPIRV::Capability::DotProductInput4x8Bit);
1394 unsigned AddrSpace = ASOp.
getImm();
1395 if (AddrSpace != SPIRV::StorageClass::UniformConstant) {
1396 if (!
ST.canUseExtension(
1398 SPV_EXT_relaxed_printf_string_address_space)) {
1400 "required because printf uses a format string not "
1401 "in constant address space.",
1405 SPIRV::Extension::SPV_EXT_relaxed_printf_string_address_space);
1414 if (
MI.getNumOperands() <=
OpIdx)
1418 if (Mask & (1U <<
I))
1427 unsigned Op =
MI.getOpcode();
1429 case SPIRV::OpMemoryModel: {
1430 int64_t Addr =
MI.getOperand(0).getImm();
1433 int64_t Mem =
MI.getOperand(1).getImm();
1438 case SPIRV::OpEntryPoint: {
1439 int64_t
Exe =
MI.getOperand(0).getImm();
1444 case SPIRV::OpExecutionMode:
1445 case SPIRV::OpExecutionModeId: {
1446 int64_t
Exe =
MI.getOperand(1).getImm();
1451 case SPIRV::OpTypeMatrix:
1454 case SPIRV::OpTypeInt: {
1455 unsigned BitWidth =
MI.getOperand(1).getImm();
1463 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_int4)) {
1467 if (!
ST.canUseExtension(
1468 SPIRV::Extension::SPV_ALTERA_arbitrary_precision_integers))
1470 "OpTypeInt type with a width other than 8, 16, 32 or 64 bits "
1471 "requires the following SPIR-V extension: "
1472 "SPV_ALTERA_arbitrary_precision_integers");
1474 SPIRV::Extension::SPV_ALTERA_arbitrary_precision_integers);
1475 Reqs.
addCapability(SPIRV::Capability::ArbitraryPrecisionIntegersALTERA);
1479 case SPIRV::OpDot: {
1482 if (isBFloat16Type(TypeDef))
1483 Reqs.
addCapability(SPIRV::Capability::BFloat16DotProductKHR);
1486 case SPIRV::OpTypeFloat: {
1487 unsigned BitWidth =
MI.getOperand(1).getImm();
1491 if (isBFloat16Type(&
MI)) {
1492 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_bfloat16))
1494 "following SPIR-V extension: SPV_KHR_bfloat16",
1504 case SPIRV::OpTypeVector: {
1505 unsigned NumComponents =
MI.getOperand(2).getImm();
1506 if (NumComponents == 8 || NumComponents == 16)
1512 if (ElemTypeDef->
getOpcode() == SPIRV::OpTypePointer &&
1513 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_masked_gather_scatter)) {
1514 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_masked_gather_scatter);
1515 Reqs.
addCapability(SPIRV::Capability::MaskedGatherScatterINTEL);
1519 case SPIRV::OpTypePointer: {
1520 auto SC =
MI.getOperand(1).getImm();
1531 (TypeDef->
getOpcode() == SPIRV::OpTypeFloat) &&
1536 case SPIRV::OpExtInst: {
1537 if (
MI.getOperand(2).getImm() ==
1538 static_cast<int64_t
>(
1539 SPIRV::InstructionSet::NonSemantic_Shader_DebugInfo_100)) {
1540 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_non_semantic_info);
1543 if (
MI.getOperand(3).getImm() ==
1544 static_cast<int64_t
>(SPIRV::OpenCLExtInst::printf)) {
1545 addPrintfRequirements(
MI, Reqs, ST);
1552 case SPIRV::OpAliasDomainDeclINTEL:
1553 case SPIRV::OpAliasScopeDeclINTEL:
1554 case SPIRV::OpAliasScopeListDeclINTEL: {
1555 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing);
1556 Reqs.
addCapability(SPIRV::Capability::MemoryAccessAliasingINTEL);
1559 case SPIRV::OpBitReverse:
1560 case SPIRV::OpBitFieldInsert:
1561 case SPIRV::OpBitFieldSExtract:
1562 case SPIRV::OpBitFieldUExtract:
1563 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
1567 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_bit_instructions);
1570 case SPIRV::OpTypeRuntimeArray:
1573 case SPIRV::OpTypeOpaque:
1574 case SPIRV::OpTypeEvent:
1577 case SPIRV::OpTypePipe:
1578 case SPIRV::OpTypeReserveId:
1581 case SPIRV::OpTypeDeviceEvent:
1582 case SPIRV::OpTypeQueue:
1583 case SPIRV::OpBuildNDRange:
1586 case SPIRV::OpDecorate:
1587 case SPIRV::OpDecorateId:
1588 case SPIRV::OpDecorateString:
1589 addOpDecorateReqs(
MI, 1, Reqs, ST);
1591 case SPIRV::OpMemberDecorate:
1592 case SPIRV::OpMemberDecorateString:
1593 addOpDecorateReqs(
MI, 2, Reqs, ST);
1595 case SPIRV::OpInBoundsPtrAccessChain:
1598 case SPIRV::OpConstantSampler:
1601 case SPIRV::OpInBoundsAccessChain:
1602 case SPIRV::OpAccessChain:
1603 addOpAccessChainReqs(
MI, Reqs, ST);
1605 case SPIRV::OpTypeImage:
1606 addOpTypeImageReqs(
MI, Reqs, ST);
1608 case SPIRV::OpTypeSampler:
1609 if (!
ST.isShader()) {
1613 case SPIRV::OpTypeForwardPointer:
1617 case SPIRV::OpAtomicFlagTestAndSet:
1618 case SPIRV::OpAtomicLoad:
1619 case SPIRV::OpAtomicStore:
1620 case SPIRV::OpAtomicExchange:
1621 case SPIRV::OpAtomicCompareExchange:
1622 case SPIRV::OpAtomicIIncrement:
1623 case SPIRV::OpAtomicIDecrement:
1624 case SPIRV::OpAtomicIAdd:
1625 case SPIRV::OpAtomicISub:
1626 case SPIRV::OpAtomicUMin:
1627 case SPIRV::OpAtomicUMax:
1628 case SPIRV::OpAtomicSMin:
1629 case SPIRV::OpAtomicSMax:
1630 case SPIRV::OpAtomicAnd:
1631 case SPIRV::OpAtomicOr:
1632 case SPIRV::OpAtomicXor: {
1635 if (
Op == SPIRV::OpAtomicStore) {
1638 assert(InstrPtr &&
"Unexpected type instruction for OpAtomicStore");
1644 if (TypeDef->
getOpcode() == SPIRV::OpTypeInt) {
1649 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics))
1651 "16-bit integer atomic operations require the following SPIR-V "
1652 "extension: SPV_INTEL_16bit_atomics",
1654 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics);
1656 case SPIRV::OpAtomicLoad:
1657 case SPIRV::OpAtomicStore:
1658 case SPIRV::OpAtomicExchange:
1659 case SPIRV::OpAtomicCompareExchange:
1660 case SPIRV::OpAtomicCompareExchangeWeak:
1662 SPIRV::Capability::AtomicInt16CompareExchangeINTEL);
1669 }
else if (isBFloat16Type(TypeDef)) {
1670 if (
is_contained({SPIRV::OpAtomicLoad, SPIRV::OpAtomicStore,
1671 SPIRV::OpAtomicExchange},
1673 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics))
1675 "The atomic bfloat16 instruction requires the following SPIR-V "
1676 "extension: SPV_INTEL_16bit_atomics",
1678 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics);
1679 Reqs.
addCapability(SPIRV::Capability::AtomicBFloat16LoadStoreINTEL);
1684 case SPIRV::OpGroupNonUniformIAdd:
1685 case SPIRV::OpGroupNonUniformFAdd:
1686 case SPIRV::OpGroupNonUniformIMul:
1687 case SPIRV::OpGroupNonUniformFMul:
1688 case SPIRV::OpGroupNonUniformSMin:
1689 case SPIRV::OpGroupNonUniformUMin:
1690 case SPIRV::OpGroupNonUniformFMin:
1691 case SPIRV::OpGroupNonUniformSMax:
1692 case SPIRV::OpGroupNonUniformUMax:
1693 case SPIRV::OpGroupNonUniformFMax:
1694 case SPIRV::OpGroupNonUniformBitwiseAnd:
1695 case SPIRV::OpGroupNonUniformBitwiseOr:
1696 case SPIRV::OpGroupNonUniformBitwiseXor:
1697 case SPIRV::OpGroupNonUniformLogicalAnd:
1698 case SPIRV::OpGroupNonUniformLogicalOr:
1699 case SPIRV::OpGroupNonUniformLogicalXor: {
1701 int64_t GroupOp =
MI.getOperand(3).getImm();
1703 case SPIRV::GroupOperation::Reduce:
1704 case SPIRV::GroupOperation::InclusiveScan:
1705 case SPIRV::GroupOperation::ExclusiveScan:
1706 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformArithmetic);
1708 case SPIRV::GroupOperation::ClusteredReduce:
1709 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformClustered);
1711 case SPIRV::GroupOperation::PartitionedReduceNV:
1712 case SPIRV::GroupOperation::PartitionedInclusiveScanNV:
1713 case SPIRV::GroupOperation::PartitionedExclusiveScanNV:
1714 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformPartitionedNV);
1719 case SPIRV::OpGroupNonUniformQuadSwap:
1722 case SPIRV::OpImageQueryFormat: {
1723 Register ResultReg =
MI.getOperand(0).getReg();
1725 static const unsigned CompareOps[] = {
1726 SPIRV::OpIEqual, SPIRV::OpINotEqual,
1727 SPIRV::OpUGreaterThan, SPIRV::OpUGreaterThanEqual,
1728 SPIRV::OpULessThan, SPIRV::OpULessThanEqual,
1729 SPIRV::OpSGreaterThan, SPIRV::OpSGreaterThanEqual,
1730 SPIRV::OpSLessThan, SPIRV::OpSLessThanEqual};
1732 auto CheckAndAddExtension = [&](int64_t ImmVal) {
1733 if (ImmVal == 4323 || ImmVal == 4324) {
1734 if (
ST.canUseExtension(SPIRV::Extension::SPV_EXT_image_raw10_raw12))
1735 Reqs.
addExtension(SPIRV::Extension::SPV_EXT_image_raw10_raw12);
1738 "SPV_EXT_image_raw10_raw12 extension");
1743 unsigned Opc = UseInst.getOpcode();
1745 if (
Opc == SPIRV::OpSwitch) {
1748 CheckAndAddExtension(
Op.getImm());
1750 for (
unsigned i = 1; i < UseInst.getNumOperands(); ++i) {
1753 if (ConstInst && ConstInst->
getOpcode() == SPIRV::OpConstantI) {
1756 CheckAndAddExtension(ImmVal);
1764 case SPIRV::OpGroupNonUniformShuffle:
1765 case SPIRV::OpGroupNonUniformShuffleXor:
1766 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformShuffle);
1768 case SPIRV::OpGroupNonUniformShuffleUp:
1769 case SPIRV::OpGroupNonUniformShuffleDown:
1770 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformShuffleRelative);
1772 case SPIRV::OpGroupAll:
1773 case SPIRV::OpGroupAny:
1774 case SPIRV::OpGroupBroadcast:
1775 case SPIRV::OpGroupIAdd:
1776 case SPIRV::OpGroupFAdd:
1777 case SPIRV::OpGroupFMin:
1778 case SPIRV::OpGroupUMin:
1779 case SPIRV::OpGroupSMin:
1780 case SPIRV::OpGroupFMax:
1781 case SPIRV::OpGroupUMax:
1782 case SPIRV::OpGroupSMax:
1785 case SPIRV::OpGroupNonUniformElect:
1788 case SPIRV::OpGroupNonUniformAll:
1789 case SPIRV::OpGroupNonUniformAny:
1790 case SPIRV::OpGroupNonUniformAllEqual:
1793 case SPIRV::OpGroupNonUniformBroadcast:
1794 case SPIRV::OpGroupNonUniformBroadcastFirst:
1795 case SPIRV::OpGroupNonUniformBallot:
1796 case SPIRV::OpGroupNonUniformInverseBallot:
1797 case SPIRV::OpGroupNonUniformBallotBitExtract:
1798 case SPIRV::OpGroupNonUniformBallotBitCount:
1799 case SPIRV::OpGroupNonUniformBallotFindLSB:
1800 case SPIRV::OpGroupNonUniformBallotFindMSB:
1801 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformBallot);
1803 case SPIRV::OpSubgroupShuffleINTEL:
1804 case SPIRV::OpSubgroupShuffleDownINTEL:
1805 case SPIRV::OpSubgroupShuffleUpINTEL:
1806 case SPIRV::OpSubgroupShuffleXorINTEL:
1807 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1808 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1809 Reqs.
addCapability(SPIRV::Capability::SubgroupShuffleINTEL);
1812 case SPIRV::OpSubgroupBlockReadINTEL:
1813 case SPIRV::OpSubgroupBlockWriteINTEL:
1814 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1815 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1816 Reqs.
addCapability(SPIRV::Capability::SubgroupBufferBlockIOINTEL);
1819 case SPIRV::OpSubgroupImageBlockReadINTEL:
1820 case SPIRV::OpSubgroupImageBlockWriteINTEL:
1821 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1822 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1823 Reqs.
addCapability(SPIRV::Capability::SubgroupImageBlockIOINTEL);
1826 case SPIRV::OpSubgroupImageMediaBlockReadINTEL:
1827 case SPIRV::OpSubgroupImageMediaBlockWriteINTEL:
1828 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
1829 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_media_block_io);
1830 Reqs.
addCapability(SPIRV::Capability::SubgroupImageMediaBlockIOINTEL);
1833 case SPIRV::OpAssumeTrueKHR:
1834 case SPIRV::OpExpectKHR:
1835 if (
ST.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume)) {
1836 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_expect_assume);
1840 case SPIRV::OpFmaKHR:
1841 if (
ST.canUseExtension(SPIRV::Extension::SPV_KHR_fma)) {
1846 case SPIRV::OpPtrCastToCrossWorkgroupINTEL:
1847 case SPIRV::OpCrossWorkgroupCastToPtrINTEL:
1848 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)) {
1849 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes);
1850 Reqs.
addCapability(SPIRV::Capability::USMStorageClassesINTEL);
1853 case SPIRV::OpConstantFunctionPointerINTEL:
1854 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
1855 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
1856 Reqs.
addCapability(SPIRV::Capability::FunctionPointersINTEL);
1859 case SPIRV::OpGroupNonUniformRotateKHR:
1860 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate))
1862 "following SPIR-V extension: SPV_KHR_subgroup_rotate",
1864 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate);
1865 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformRotateKHR);
1868 case SPIRV::OpFixedCosALTERA:
1869 case SPIRV::OpFixedSinALTERA:
1870 case SPIRV::OpFixedCosPiALTERA:
1871 case SPIRV::OpFixedSinPiALTERA:
1872 case SPIRV::OpFixedExpALTERA:
1873 case SPIRV::OpFixedLogALTERA:
1874 case SPIRV::OpFixedRecipALTERA:
1875 case SPIRV::OpFixedSqrtALTERA:
1876 case SPIRV::OpFixedSinCosALTERA:
1877 case SPIRV::OpFixedSinCosPiALTERA:
1878 case SPIRV::OpFixedRsqrtALTERA:
1879 if (!
ST.canUseExtension(
1880 SPIRV::Extension::SPV_ALTERA_arbitrary_precision_fixed_point))
1882 "following SPIR-V extension: "
1883 "SPV_ALTERA_arbitrary_precision_fixed_point",
1886 SPIRV::Extension::SPV_ALTERA_arbitrary_precision_fixed_point);
1887 Reqs.
addCapability(SPIRV::Capability::ArbitraryPrecisionFixedPointALTERA);
1889 case SPIRV::OpGroupIMulKHR:
1890 case SPIRV::OpGroupFMulKHR:
1891 case SPIRV::OpGroupBitwiseAndKHR:
1892 case SPIRV::OpGroupBitwiseOrKHR:
1893 case SPIRV::OpGroupBitwiseXorKHR:
1894 case SPIRV::OpGroupLogicalAndKHR:
1895 case SPIRV::OpGroupLogicalOrKHR:
1896 case SPIRV::OpGroupLogicalXorKHR:
1897 if (
ST.canUseExtension(
1898 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1899 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_uniform_group_instructions);
1900 Reqs.
addCapability(SPIRV::Capability::GroupUniformArithmeticKHR);
1903 case SPIRV::OpReadClockKHR:
1904 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock))
1906 "following SPIR-V extension: SPV_KHR_shader_clock",
1908 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_shader_clock);
1911 case SPIRV::OpFunctionPointerCallINTEL:
1912 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
1913 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
1914 Reqs.
addCapability(SPIRV::Capability::FunctionPointersINTEL);
1917 case SPIRV::OpAtomicFAddEXT:
1918 case SPIRV::OpAtomicFMinEXT:
1919 case SPIRV::OpAtomicFMaxEXT:
1920 AddAtomicFloatRequirements(
MI, Reqs, ST);
1922 case SPIRV::OpConvertBF16ToFINTEL:
1923 case SPIRV::OpConvertFToBF16INTEL:
1924 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bfloat16_conversion)) {
1925 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_bfloat16_conversion);
1926 Reqs.
addCapability(SPIRV::Capability::BFloat16ConversionINTEL);
1929 case SPIRV::OpRoundFToTF32INTEL:
1930 if (
ST.canUseExtension(
1931 SPIRV::Extension::SPV_INTEL_tensor_float32_conversion)) {
1932 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_tensor_float32_conversion);
1933 Reqs.
addCapability(SPIRV::Capability::TensorFloat32RoundingINTEL);
1936 case SPIRV::OpVariableLengthArrayINTEL:
1937 case SPIRV::OpSaveMemoryINTEL:
1938 case SPIRV::OpRestoreMemoryINTEL:
1939 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array)) {
1940 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_variable_length_array);
1941 Reqs.
addCapability(SPIRV::Capability::VariableLengthArrayINTEL);
1944 case SPIRV::OpAsmTargetINTEL:
1945 case SPIRV::OpAsmINTEL:
1946 case SPIRV::OpAsmCallINTEL:
1947 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_inline_assembly)) {
1948 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_inline_assembly);
1952 case SPIRV::OpTypeCooperativeMatrixKHR: {
1953 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
1955 "OpTypeCooperativeMatrixKHR type requires the "
1956 "following SPIR-V extension: SPV_KHR_cooperative_matrix",
1958 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1959 Reqs.
addCapability(SPIRV::Capability::CooperativeMatrixKHR);
1962 if (isBFloat16Type(TypeDef))
1963 Reqs.
addCapability(SPIRV::Capability::BFloat16CooperativeMatrixKHR);
1966 case SPIRV::OpArithmeticFenceEXT:
1967 if (!
ST.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
1969 "following SPIR-V extension: SPV_EXT_arithmetic_fence",
1971 Reqs.
addExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence);
1974 case SPIRV::OpControlBarrierArriveINTEL:
1975 case SPIRV::OpControlBarrierWaitINTEL:
1976 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
1977 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_split_barrier);
1981 case SPIRV::OpCooperativeMatrixMulAddKHR: {
1982 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
1984 "following SPIR-V extension: "
1985 "SPV_KHR_cooperative_matrix",
1987 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1988 Reqs.
addCapability(SPIRV::Capability::CooperativeMatrixKHR);
1989 constexpr unsigned MulAddMaxSize = 6;
1990 if (
MI.getNumOperands() != MulAddMaxSize)
1992 const int64_t CoopOperands =
MI.getOperand(MulAddMaxSize - 1).getImm();
1994 SPIRV::CooperativeMatrixOperands::MatrixAAndBTF32ComponentsINTEL) {
1995 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1997 "require the following SPIR-V extension: "
1998 "SPV_INTEL_joint_matrix",
2000 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
2002 SPIRV::Capability::CooperativeMatrixTF32ComponentTypeINTEL);
2005 MatrixAAndBBFloat16ComponentsINTEL ||
2007 SPIRV::CooperativeMatrixOperands::MatrixCBFloat16ComponentsINTEL ||
2009 MatrixResultBFloat16ComponentsINTEL) {
2010 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
2012 "require the following SPIR-V extension: "
2013 "SPV_INTEL_joint_matrix",
2015 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
2017 SPIRV::Capability::CooperativeMatrixBFloat16ComponentTypeINTEL);
2021 case SPIRV::OpCooperativeMatrixLoadKHR:
2022 case SPIRV::OpCooperativeMatrixStoreKHR:
2023 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2024 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2025 case SPIRV::OpCooperativeMatrixPrefetchINTEL: {
2026 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
2028 "following SPIR-V extension: "
2029 "SPV_KHR_cooperative_matrix",
2031 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
2032 Reqs.
addCapability(SPIRV::Capability::CooperativeMatrixKHR);
2036 std::unordered_map<unsigned, unsigned> LayoutToInstMap = {
2037 {SPIRV::OpCooperativeMatrixLoadKHR, 3},
2038 {SPIRV::OpCooperativeMatrixStoreKHR, 2},
2039 {SPIRV::OpCooperativeMatrixLoadCheckedINTEL, 5},
2040 {SPIRV::OpCooperativeMatrixStoreCheckedINTEL, 4},
2041 {SPIRV::OpCooperativeMatrixPrefetchINTEL, 4}};
2043 const unsigned LayoutNum = LayoutToInstMap[
Op];
2044 Register RegLayout =
MI.getOperand(LayoutNum).getReg();
2047 if (MILayout->
getOpcode() == SPIRV::OpConstantI) {
2050 static_cast<unsigned>(SPIRV::CooperativeMatrixLayout::PackedINTEL)) {
2051 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
2053 "extension: SPV_INTEL_joint_matrix",
2055 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
2056 Reqs.
addCapability(SPIRV::Capability::PackedCooperativeMatrixINTEL);
2061 if (
Op == SPIRV::OpCooperativeMatrixLoadKHR ||
2062 Op == SPIRV::OpCooperativeMatrixStoreKHR)
2065 std::string InstName;
2067 case SPIRV::OpCooperativeMatrixPrefetchINTEL:
2068 InstName =
"OpCooperativeMatrixPrefetchINTEL";
2070 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2071 InstName =
"OpCooperativeMatrixLoadCheckedINTEL";
2073 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2074 InstName =
"OpCooperativeMatrixStoreCheckedINTEL";
2078 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix)) {
2079 const std::string ErrorMsg =
2080 InstName +
" instruction requires the "
2081 "following SPIR-V extension: SPV_INTEL_joint_matrix";
2084 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
2085 if (
Op == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2086 Reqs.
addCapability(SPIRV::Capability::CooperativeMatrixPrefetchINTEL);
2090 SPIRV::Capability::CooperativeMatrixCheckedInstructionsINTEL);
2093 case SPIRV::OpCooperativeMatrixConstructCheckedINTEL:
2094 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
2096 "instructions require the following SPIR-V extension: "
2097 "SPV_INTEL_joint_matrix",
2099 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
2101 SPIRV::Capability::CooperativeMatrixCheckedInstructionsINTEL);
2103 case SPIRV::OpReadPipeBlockingALTERA:
2104 case SPIRV::OpWritePipeBlockingALTERA:
2105 if (
ST.canUseExtension(SPIRV::Extension::SPV_ALTERA_blocking_pipes)) {
2106 Reqs.
addExtension(SPIRV::Extension::SPV_ALTERA_blocking_pipes);
2110 case SPIRV::OpCooperativeMatrixGetElementCoordINTEL:
2111 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
2113 "following SPIR-V extension: SPV_INTEL_joint_matrix",
2115 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
2117 SPIRV::Capability::CooperativeMatrixInvocationInstructionsINTEL);
2119 case SPIRV::OpConvertHandleToImageINTEL:
2120 case SPIRV::OpConvertHandleToSamplerINTEL:
2121 case SPIRV::OpConvertHandleToSampledImageINTEL: {
2122 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bindless_images))
2124 "instructions require the following SPIR-V extension: "
2125 "SPV_INTEL_bindless_images",
2128 SPIRV::AddressingModel::AddressingModel AddrModel = MAI.
Addr;
2130 if (
Op == SPIRV::OpConvertHandleToImageINTEL &&
2131 TyDef->
getOpcode() != SPIRV::OpTypeImage) {
2133 "OpConvertHandleToImageINTEL",
2135 }
else if (
Op == SPIRV::OpConvertHandleToSamplerINTEL &&
2136 TyDef->
getOpcode() != SPIRV::OpTypeSampler) {
2138 "OpConvertHandleToSamplerINTEL",
2140 }
else if (
Op == SPIRV::OpConvertHandleToSampledImageINTEL &&
2141 TyDef->
getOpcode() != SPIRV::OpTypeSampledImage) {
2143 "OpConvertHandleToSampledImageINTEL",
2148 if (!(Bitwidth == 32 && AddrModel == SPIRV::AddressingModel::Physical32) &&
2149 !(Bitwidth == 64 && AddrModel == SPIRV::AddressingModel::Physical64)) {
2151 "Parameter value must be a 32-bit scalar in case of "
2152 "Physical32 addressing model or a 64-bit scalar in case of "
2153 "Physical64 addressing model",
2156 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_bindless_images);
2160 case SPIRV::OpSubgroup2DBlockLoadINTEL:
2161 case SPIRV::OpSubgroup2DBlockLoadTransposeINTEL:
2162 case SPIRV::OpSubgroup2DBlockLoadTransformINTEL:
2163 case SPIRV::OpSubgroup2DBlockPrefetchINTEL:
2164 case SPIRV::OpSubgroup2DBlockStoreINTEL: {
2165 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_2d_block_io))
2167 "Prefetch/Store]INTEL instructions require the "
2168 "following SPIR-V extension: SPV_INTEL_2d_block_io",
2170 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_2d_block_io);
2171 Reqs.
addCapability(SPIRV::Capability::Subgroup2DBlockIOINTEL);
2173 if (
Op == SPIRV::OpSubgroup2DBlockLoadTransposeINTEL) {
2174 Reqs.
addCapability(SPIRV::Capability::Subgroup2DBlockTransposeINTEL);
2177 if (
Op == SPIRV::OpSubgroup2DBlockLoadTransformINTEL) {
2178 Reqs.
addCapability(SPIRV::Capability::Subgroup2DBlockTransformINTEL);
2183 case SPIRV::OpKill: {
2186 case SPIRV::OpDemoteToHelperInvocation:
2187 Reqs.
addCapability(SPIRV::Capability::DemoteToHelperInvocation);
2189 if (
ST.canUseExtension(
2190 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation)) {
2193 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation);
2198 case SPIRV::OpSUDot:
2199 case SPIRV::OpSDotAccSat:
2200 case SPIRV::OpUDotAccSat:
2201 case SPIRV::OpSUDotAccSat:
2202 AddDotProductRequirements(
MI, Reqs, ST);
2204 case SPIRV::OpImageSampleImplicitLod:
2206 addImageOperandReqs(
MI, Reqs, ST, 4);
2208 case SPIRV::OpImageSampleExplicitLod:
2209 addImageOperandReqs(
MI, Reqs, ST, 4);
2211 case SPIRV::OpImageSampleDrefImplicitLod:
2213 addImageOperandReqs(
MI, Reqs, ST, 5);
2215 case SPIRV::OpImageSampleDrefExplicitLod:
2217 addImageOperandReqs(
MI, Reqs, ST, 5);
2219 case SPIRV::OpImageFetch:
2221 addImageOperandReqs(
MI, Reqs, ST, 4);
2223 case SPIRV::OpImageDrefGather:
2224 case SPIRV::OpImageGather:
2226 addImageOperandReqs(
MI, Reqs, ST, 5);
2228 case SPIRV::OpImageRead: {
2229 Register ImageReg =
MI.getOperand(2).getReg();
2238 if (isImageTypeWithUnknownFormat(TypeDef) &&
ST.isShader())
2239 Reqs.
addCapability(SPIRV::Capability::StorageImageReadWithoutFormat);
2242 case SPIRV::OpImageWrite: {
2243 Register ImageReg =
MI.getOperand(0).getReg();
2252 if (isImageTypeWithUnknownFormat(TypeDef) &&
ST.isShader())
2253 Reqs.
addCapability(SPIRV::Capability::StorageImageWriteWithoutFormat);
2256 case SPIRV::OpTypeStructContinuedINTEL:
2257 case SPIRV::OpConstantCompositeContinuedINTEL:
2258 case SPIRV::OpSpecConstantCompositeContinuedINTEL:
2259 case SPIRV::OpCompositeConstructContinuedINTEL: {
2260 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_long_composites))
2262 "Continued instructions require the "
2263 "following SPIR-V extension: SPV_INTEL_long_composites",
2265 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_long_composites);
2269 case SPIRV::OpArbitraryFloatEQALTERA:
2270 case SPIRV::OpArbitraryFloatGEALTERA:
2271 case SPIRV::OpArbitraryFloatGTALTERA:
2272 case SPIRV::OpArbitraryFloatLEALTERA:
2273 case SPIRV::OpArbitraryFloatLTALTERA:
2274 case SPIRV::OpArbitraryFloatCbrtALTERA:
2275 case SPIRV::OpArbitraryFloatCosALTERA:
2276 case SPIRV::OpArbitraryFloatCosPiALTERA:
2277 case SPIRV::OpArbitraryFloatExp10ALTERA:
2278 case SPIRV::OpArbitraryFloatExp2ALTERA:
2279 case SPIRV::OpArbitraryFloatExpALTERA:
2280 case SPIRV::OpArbitraryFloatExpm1ALTERA:
2281 case SPIRV::OpArbitraryFloatHypotALTERA:
2282 case SPIRV::OpArbitraryFloatLog10ALTERA:
2283 case SPIRV::OpArbitraryFloatLog1pALTERA:
2284 case SPIRV::OpArbitraryFloatLog2ALTERA:
2285 case SPIRV::OpArbitraryFloatLogALTERA:
2286 case SPIRV::OpArbitraryFloatRecipALTERA:
2287 case SPIRV::OpArbitraryFloatSinCosALTERA:
2288 case SPIRV::OpArbitraryFloatSinCosPiALTERA:
2289 case SPIRV::OpArbitraryFloatSinALTERA:
2290 case SPIRV::OpArbitraryFloatSinPiALTERA:
2291 case SPIRV::OpArbitraryFloatSqrtALTERA:
2292 case SPIRV::OpArbitraryFloatACosALTERA:
2293 case SPIRV::OpArbitraryFloatACosPiALTERA:
2294 case SPIRV::OpArbitraryFloatAddALTERA:
2295 case SPIRV::OpArbitraryFloatASinALTERA:
2296 case SPIRV::OpArbitraryFloatASinPiALTERA:
2297 case SPIRV::OpArbitraryFloatATan2ALTERA:
2298 case SPIRV::OpArbitraryFloatATanALTERA:
2299 case SPIRV::OpArbitraryFloatATanPiALTERA:
2300 case SPIRV::OpArbitraryFloatCastFromIntALTERA:
2301 case SPIRV::OpArbitraryFloatCastALTERA:
2302 case SPIRV::OpArbitraryFloatCastToIntALTERA:
2303 case SPIRV::OpArbitraryFloatDivALTERA:
2304 case SPIRV::OpArbitraryFloatMulALTERA:
2305 case SPIRV::OpArbitraryFloatPowALTERA:
2306 case SPIRV::OpArbitraryFloatPowNALTERA:
2307 case SPIRV::OpArbitraryFloatPowRALTERA:
2308 case SPIRV::OpArbitraryFloatRSqrtALTERA:
2309 case SPIRV::OpArbitraryFloatSubALTERA: {
2310 if (!
ST.canUseExtension(
2311 SPIRV::Extension::SPV_ALTERA_arbitrary_precision_floating_point))
2313 "Floating point instructions can't be translated correctly without "
2314 "enabled SPV_ALTERA_arbitrary_precision_floating_point extension!",
2317 SPIRV::Extension::SPV_ALTERA_arbitrary_precision_floating_point);
2319 SPIRV::Capability::ArbitraryPrecisionFloatingPointALTERA);
2322 case SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL: {
2323 if (!
ST.canUseExtension(
2324 SPIRV::Extension::SPV_INTEL_subgroup_matrix_multiply_accumulate))
2326 "OpSubgroupMatrixMultiplyAccumulateINTEL instruction requires the "
2328 "extension: SPV_INTEL_subgroup_matrix_multiply_accumulate",
2331 SPIRV::Extension::SPV_INTEL_subgroup_matrix_multiply_accumulate);
2333 SPIRV::Capability::SubgroupMatrixMultiplyAccumulateINTEL);
2336 case SPIRV::OpBitwiseFunctionINTEL: {
2337 if (!
ST.canUseExtension(
2338 SPIRV::Extension::SPV_INTEL_ternary_bitwise_function))
2340 "OpBitwiseFunctionINTEL instruction requires the following SPIR-V "
2341 "extension: SPV_INTEL_ternary_bitwise_function",
2343 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_ternary_bitwise_function);
2344 Reqs.
addCapability(SPIRV::Capability::TernaryBitwiseFunctionINTEL);
2347 case SPIRV::OpCopyMemorySized: {
2352 case SPIRV::OpPredicatedLoadINTEL:
2353 case SPIRV::OpPredicatedStoreINTEL: {
2354 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_predicated_io))
2356 "OpPredicated[Load/Store]INTEL instructions require "
2357 "the following SPIR-V extension: SPV_INTEL_predicated_io",
2359 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_predicated_io);
2363 case SPIRV::OpFAddS:
2364 case SPIRV::OpFSubS:
2365 case SPIRV::OpFMulS:
2366 case SPIRV::OpFDivS:
2367 case SPIRV::OpFRemS:
2369 case SPIRV::OpFNegate:
2370 case SPIRV::OpFAddV:
2371 case SPIRV::OpFSubV:
2372 case SPIRV::OpFMulV:
2373 case SPIRV::OpFDivV:
2374 case SPIRV::OpFRemV:
2375 case SPIRV::OpFNegateV: {
2378 if (TypeDef->
getOpcode() == SPIRV::OpTypeVector)
2380 if (isBFloat16Type(TypeDef)) {
2381 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bfloat16_arithmetic))
2383 "Arithmetic instructions with bfloat16 arguments require the "
2384 "following SPIR-V extension: SPV_INTEL_bfloat16_arithmetic",
2386 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_bfloat16_arithmetic);
2387 Reqs.
addCapability(SPIRV::Capability::BFloat16ArithmeticINTEL);
2391 case SPIRV::OpOrdered:
2392 case SPIRV::OpUnordered:
2393 case SPIRV::OpFOrdEqual:
2394 case SPIRV::OpFOrdNotEqual:
2395 case SPIRV::OpFOrdLessThan:
2396 case SPIRV::OpFOrdLessThanEqual:
2397 case SPIRV::OpFOrdGreaterThan:
2398 case SPIRV::OpFOrdGreaterThanEqual:
2399 case SPIRV::OpFUnordEqual:
2400 case SPIRV::OpFUnordNotEqual:
2401 case SPIRV::OpFUnordLessThan:
2402 case SPIRV::OpFUnordLessThanEqual:
2403 case SPIRV::OpFUnordGreaterThan:
2404 case SPIRV::OpFUnordGreaterThanEqual: {
2408 if (TypeDef->
getOpcode() == SPIRV::OpTypeVector)
2410 if (isBFloat16Type(TypeDef)) {
2411 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bfloat16_arithmetic))
2413 "Relational instructions with bfloat16 arguments require the "
2414 "following SPIR-V extension: SPV_INTEL_bfloat16_arithmetic",
2416 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_bfloat16_arithmetic);
2417 Reqs.
addCapability(SPIRV::Capability::BFloat16ArithmeticINTEL);
2421 case SPIRV::OpDPdxCoarse:
2422 case SPIRV::OpDPdyCoarse:
2423 case SPIRV::OpDPdxFine:
2424 case SPIRV::OpDPdyFine: {
2428 case SPIRV::OpLoopControlINTEL: {
2429 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_unstructured_loop_controls);
2430 Reqs.
addCapability(SPIRV::Capability::UnstructuredLoopControlsINTEL);
2442 SPIRV::Capability::Shader);
2454 addInstrRequirements(
MI, MAI, ST);
2457 auto Node = M.getNamedMetadata(
"spirv.ExecutionMode");
2459 bool RequireFloatControls =
false, RequireIntelFloatControls2 =
false,
2460 RequireKHRFloatControls2 =
false,
2462 bool HasIntelFloatControls2 =
2463 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_float_controls2);
2464 bool HasKHRFloatControls2 =
2465 ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2);
2466 for (
unsigned i = 0; i <
Node->getNumOperands(); i++) {
2472 auto EM =
Const->getZExtValue();
2476 case SPIRV::ExecutionMode::DenormPreserve:
2477 case SPIRV::ExecutionMode::DenormFlushToZero:
2478 case SPIRV::ExecutionMode::RoundingModeRTE:
2479 case SPIRV::ExecutionMode::RoundingModeRTZ:
2480 RequireFloatControls = VerLower14;
2482 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
2484 case SPIRV::ExecutionMode::RoundingModeRTPINTEL:
2485 case SPIRV::ExecutionMode::RoundingModeRTNINTEL:
2486 case SPIRV::ExecutionMode::FloatingPointModeALTINTEL:
2487 case SPIRV::ExecutionMode::FloatingPointModeIEEEINTEL:
2488 if (HasIntelFloatControls2) {
2489 RequireIntelFloatControls2 =
true;
2491 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
2494 case SPIRV::ExecutionMode::FPFastMathDefault: {
2495 if (HasKHRFloatControls2) {
2496 RequireKHRFloatControls2 =
true;
2498 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
2502 case SPIRV::ExecutionMode::ContractionOff:
2503 case SPIRV::ExecutionMode::SignedZeroInfNanPreserve:
2504 if (HasKHRFloatControls2) {
2505 RequireKHRFloatControls2 =
true;
2507 SPIRV::OperandCategory::ExecutionModeOperand,
2508 SPIRV::ExecutionMode::FPFastMathDefault, ST);
2511 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
2516 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
2521 if (RequireFloatControls &&
2522 ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls))
2524 if (RequireIntelFloatControls2)
2526 if (RequireKHRFloatControls2)
2530 if (
F.isDeclaration())
2532 if (
F.getMetadata(
"reqd_work_group_size"))
2534 SPIRV::OperandCategory::ExecutionModeOperand,
2535 SPIRV::ExecutionMode::LocalSize, ST);
2536 if (
F.getFnAttribute(
"hlsl.numthreads").isValid()) {
2538 SPIRV::OperandCategory::ExecutionModeOperand,
2539 SPIRV::ExecutionMode::LocalSize, ST);
2541 if (
F.getFnAttribute(
"enable-maximal-reconvergence").getValueAsBool()) {
2544 if (
F.getMetadata(
"work_group_size_hint"))
2546 SPIRV::OperandCategory::ExecutionModeOperand,
2547 SPIRV::ExecutionMode::LocalSizeHint, ST);
2548 if (
F.getMetadata(
"intel_reqd_sub_group_size"))
2550 SPIRV::OperandCategory::ExecutionModeOperand,
2551 SPIRV::ExecutionMode::SubgroupSize, ST);
2552 if (
F.getMetadata(
"max_work_group_size"))
2554 SPIRV::OperandCategory::ExecutionModeOperand,
2555 SPIRV::ExecutionMode::MaxWorkgroupSizeINTEL, ST);
2556 if (
F.getMetadata(
"vec_type_hint"))
2558 SPIRV::OperandCategory::ExecutionModeOperand,
2559 SPIRV::ExecutionMode::VecTypeHint, ST);
2561 if (
F.hasOptNone()) {
2562 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_optnone)) {
2565 }
else if (
ST.canUseExtension(SPIRV::Extension::SPV_EXT_optnone)) {
2575 unsigned Flags = SPIRV::FPFastMathMode::None;
2576 bool CanUseKHRFloatControls2 =
2577 ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2);
2579 Flags |= SPIRV::FPFastMathMode::NotNaN;
2581 Flags |= SPIRV::FPFastMathMode::NotInf;
2583 Flags |= SPIRV::FPFastMathMode::NSZ;
2585 Flags |= SPIRV::FPFastMathMode::AllowRecip;
2587 Flags |= SPIRV::FPFastMathMode::AllowContract;
2589 if (CanUseKHRFloatControls2)
2597 Flags |= SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2598 SPIRV::FPFastMathMode::NSZ | SPIRV::FPFastMathMode::AllowRecip |
2599 SPIRV::FPFastMathMode::AllowTransform |
2600 SPIRV::FPFastMathMode::AllowReassoc |
2601 SPIRV::FPFastMathMode::AllowContract;
2603 Flags |= SPIRV::FPFastMathMode::Fast;
2606 if (CanUseKHRFloatControls2) {
2608 assert(!(Flags & SPIRV::FPFastMathMode::Fast) &&
2609 "SPIRV::FPFastMathMode::Fast is deprecated and should not be used "
2614 assert((!(Flags & SPIRV::FPFastMathMode::AllowTransform) ||
2615 ((Flags & SPIRV::FPFastMathMode::AllowReassoc &&
2616 Flags & SPIRV::FPFastMathMode::AllowContract))) &&
2617 "SPIRV::FPFastMathMode::AllowTransform requires AllowReassoc and "
2618 "AllowContract flags to be enabled as well.");
2629 return ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2);
2632static void handleMIFlagDecoration(
2637 getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
2638 SPIRV::Decoration::NoSignedWrap, ST, Reqs)
2641 SPIRV::Decoration::NoSignedWrap, {});
2644 getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
2645 SPIRV::Decoration::NoUnsignedWrap, ST,
2649 SPIRV::Decoration::NoUnsignedWrap, {});
2651 if (!
TII.canUseFastMathFlags(
2652 I,
ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)))
2655 unsigned FMFlags = getFastMathFlags(
I, ST);
2656 if (FMFlags == SPIRV::FPFastMathMode::None) {
2659 if (FPFastMathDefaultInfoVec.
empty())
2675 assert(
I.getNumOperands() >= 3 &&
"Expected at least 3 operands");
2676 Register ResReg =
I.getOpcode() == SPIRV::OpExtInst
2677 ?
I.getOperand(1).getReg()
2678 :
I.getOperand(2).getReg();
2686 if (Ty == Elem.Ty) {
2687 FMFlags = Elem.FastMathFlags;
2688 Emit = Elem.ContractionOff || Elem.SignedZeroInfNanPreserve ||
2689 Elem.FPFastMathDefault;
2694 if (FMFlags == SPIRV::FPFastMathMode::None && !Emit)
2697 if (isFastMathModeAvailable(ST)) {
2698 Register DstReg =
I.getOperand(0).getReg();
2714 for (
auto &
MBB : *MF)
2715 for (
auto &
MI :
MBB)
2716 handleMIFlagDecoration(
MI, ST,
TII, MAI.
Reqs, GR,
2733 for (
auto &
MBB : *MF) {
2734 if (!
MBB.hasName() ||
MBB.empty())
2753 for (
auto &
MBB : *MF) {
2755 MI.setDesc(
TII.get(SPIRV::OpPhi));
2758 MI.insert(
MI.operands_begin() + 1,
2759 {MachineOperand::CreateReg(ResTypeReg, false)});
2778 SPIRV::FPFastMathMode::None);
2780 SPIRV::FPFastMathMode::None);
2782 SPIRV::FPFastMathMode::None);
2789 size_t BitWidth = Ty->getScalarSizeInBits();
2793 assert(Index >= 0 && Index < 3 &&
2794 "Expected FPFastMathDefaultInfo for half, float, or double");
2795 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2796 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2797 return FPFastMathDefaultInfoVec[Index];
2800static void collectFPFastMathDefaults(
const Module &M,
2803 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2812 auto Node = M.getNamedMetadata(
"spirv.ExecutionMode");
2816 for (
unsigned i = 0; i <
Node->getNumOperands(); i++) {
2825 if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2827 "Expected 4 operands for FPFastMathDefault");
2838 Info.FastMathFlags = Flags;
2839 Info.FPFastMathDefault =
true;
2840 }
else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2842 "Expected no operands for ContractionOff");
2849 Info.ContractionOff =
true;
2851 }
else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2853 "Expected 1 operand for SignedZeroInfNanPreserve");
2854 unsigned TargetWidth =
2863 assert(Index >= 0 && Index < 3 &&
2864 "Expected FPFastMathDefaultInfo for half, float, or double");
2865 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2866 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2867 FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve =
true;
2878 SPIRVTargetMachine &TM =
2882 TII = ST->getInstrInfo();
2888 patchPhis(M, GR, *TII, MMI);
2890 addMBBNames(M, *TII, MMI, *ST,
MAI);
2891 collectFPFastMathDefaults(M,
MAI, *ST);
2892 addDecorations(M, *TII, MMI, *ST,
MAI, GR);
2894 collectReqs(M,
MAI, MMI, *ST);
2898 collectReqs(M,
MAI, MMI, *ST);
2899 collectDeclarations(M);
2902 numberRegistersGlobally(M);
2905 processOtherInstrs(M);
2909 MAI.Reqs.addCapability(SPIRV::Capability::Linkage);
2912 GR->setBound(
MAI.MaxID);
MachineInstrBuilder & UseMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
ReachingDefInfo InstSet & ToRemove
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
static Register UseReg(const MachineOperand &MO)
const HexagonInstrInfo * TII
Promote Memory to Register
MachineInstr unsigned OpIdx
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
static SPIRV::FPFastMathDefaultInfoVector & getOrCreateFPFastMathDefaultInfoVec(const Module &M, DenseMap< Function *, SPIRV::FPFastMathDefaultInfoVector > &FPFastMathDefaultInfoMap, Function *F)
static SPIRV::FPFastMathDefaultInfo & getFPFastMathDefaultInfo(SPIRV::FPFastMathDefaultInfoVector &FPFastMathDefaultInfoVec, const Type *Ty)
#define ATOM_FLT_REQ_EXT_MSG(ExtName)
static cl::opt< bool > SPVDumpDeps("spv-dump-deps", cl::desc("Dump MIR with SPIR-V dependencies info"), cl::Optional, cl::init(false))
static cl::list< SPIRV::Capability::Capability > AvoidCapabilities("avoid-spirv-capabilities", cl::desc("SPIR-V capabilities to avoid if there are " "other options enabling a feature"), cl::Hidden, cl::values(clEnumValN(SPIRV::Capability::Shader, "Shader", "SPIR-V Shader capability")))
#define SPIRV_BACKEND_SERVICE_FUN_NAME
Target-Independent Code Generator Pass Configuration Options pass.
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
bool isValid() const
Return true if the attribute is any kind of attribute.
This is the shared class of boolean and integer constants.
This is an important base class in LLVM.
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Wrapper class representing physical registers. Should be passed by value.
constexpr bool isValid() const
const MDOperand & getOperand(unsigned I) const
unsigned getNumOperands() const
Return number of MDNode operands.
Tracking metadata reference owned by Metadata.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const MachineFunctionProperties & getProperties() const
Get the function properties.
Register getReg(unsigned Idx) const
Get the register for the operand index.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const MachineOperand & getOperand(unsigned i) const
This class contains meta information specific to a module.
LLVM_ABI MachineFunction * getMachineFunction(const Function &F) const
Returns the MachineFunction associated to IR function F if there is one, otherwise nullptr.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
LLVM_ABI void print(raw_ostream &os, const TargetRegisterInfo *TRI=nullptr) const
Print the MachineOperand to os.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
LLVM_ABI void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
iterator_range< reg_instr_iterator > reg_instructions(Register Reg) const
iterator_range< use_instr_iterator > use_instructions(Register Reg) const
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
A Module instance is used to store all the information related to an LLVM module.
virtual void print(raw_ostream &OS, const Module *M) const
print - Print out the internal state of the pass.
AnalysisType & getAnalysis() const
getAnalysis<AnalysisType>() - This function is used by subclasses to get to the analysis information ...
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
unsigned getScalarOrVectorBitWidth(SPIRVTypeInst Type) const
const Type * getTypeForSPIRVType(SPIRVTypeInst Ty) const
Register getSPIRVTypeID(SPIRVTypeInst SpirvType) const
SPIRVTypeInst getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
bool isConstantInstr(const MachineInstr &MI) const
const SPIRVInstrInfo * getInstrInfo() const override
SPIRVGlobalRegistry * getSPIRVGlobalRegistry() const
const SPIRVSubtarget * getSubtargetImpl() const
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
bool contains(const T &V) const
Check if the SmallSet contains the given element.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
reference emplace_back(ArgTypes &&... Args)
void append(ItTy in_start, ItTy in_end)
Add the specified range to the end of the SmallVector.
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVectorTy() const
True if this is an instance of VectorType.
static LLVM_ABI Type * getDoubleTy(LLVMContext &C)
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
Represents a version number in the form major[.minor[.subminor[.build]]].
bool empty() const
Determine whether this version information is empty (e.g., all version components are zero).
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
SmallVector< const MachineInstr * > InstrList
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
std::enable_if_t< detail::IsValidPointer< X, Y >::value, X * > extract(Y &&MD)
Extract a Value from Metadata.
NodeAddr< InstrNode * > Instr
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
FunctionAddr VTableAddr Value
std::string getStringImm(const MachineInstr &MI, unsigned StartIndex)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
hash_code hash_value(const FixedPointSemantics &Val)
ExtensionList getSymbolicOperandExtensions(SPIRV::OperandCategory::OperandCategory Category, uint32_t Value)
CapabilityList getSymbolicOperandCapabilities(SPIRV::OperandCategory::OperandCategory Category, uint32_t Value)
SmallVector< SPIRV::Extension::Extension, 8 > ExtensionList
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
SmallVector< size_t > InstrSignature
VersionTuple getSymbolicOperandMaxVersion(SPIRV::OperandCategory::OperandCategory Category, uint32_t Value)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
CapabilityList getCapabilitiesEnabledByExtension(SPIRV::Extension::Extension Extension)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
std::string getSymbolicOperandMnemonic(SPIRV::OperandCategory::OperandCategory Category, int32_t Value)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
DWARFExpression::Operation Op
VersionTuple getSymbolicOperandMinVersion(SPIRV::OperandCategory::OperandCategory Category, uint32_t Value)
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
SmallVector< SPIRV::Capability::Capability, 8 > CapabilityList
std::set< InstrSignature > InstrTraces
hash_code hash_combine(const Ts &...args)
Combine values into a single hash_code.
std::map< SmallVector< size_t >, unsigned > InstrGRegsMap
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
SmallSet< SPIRV::Capability::Capability, 4 > S
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - This function should be overriden by passes that need analysis information to do t...
SPIRV::ModuleAnalysisInfo MAI
bool runOnModule(Module &M) override
runOnModule - Virtual method overriden by subclasses to process the module being operated on.
static size_t computeFPFastMathDefaultInfoVecIndex(size_t BitWidth)
void setSkipEmission(const MachineInstr *MI)
MCRegister getRegisterAlias(const MachineFunction *MF, Register Reg)
MCRegister getOrCreateMBBRegister(const MachineBasicBlock &MBB)
InstrList MS[NUM_MODULE_SECTIONS]
AddressingModel::AddressingModel Addr
void setRegisterAlias(const MachineFunction *MF, Register Reg, MCRegister AliasReg)
DenseMap< const Function *, SPIRV::FPFastMathDefaultInfoVector > FPFastMathDefaultInfoMap
void addCapabilities(const CapabilityList &ToAdd)
bool isCapabilityAvailable(Capability::Capability Cap) const
void checkSatisfiable(const SPIRVSubtarget &ST) const
void getAndAddRequirements(SPIRV::OperandCategory::OperandCategory Category, uint32_t i, const SPIRVSubtarget &ST)
void addExtension(Extension::Extension ToAdd)
void initAvailableCapabilities(const SPIRVSubtarget &ST)
void removeCapabilityIf(const Capability::Capability ToRemove, const Capability::Capability IfPresent)
void addCapability(Capability::Capability ToAdd)
void addAvailableCaps(const CapabilityList &ToAdd)
void addRequirements(const Requirements &Req)
const std::optional< Capability::Capability > Cap
const VersionTuple MinVer
const VersionTuple MaxVer