59 const auto &
Op = MdNode->getOperand(
OpIndex);
// Computes the requirements (capabilities, extensions, min/max SPIR-V
// version) for a symbolic operand of the given category.
// NOTE(review): the text in this view is fragmentary — several original
// lines are elided, so the control flow below is only partially visible.
66getSymbolicOperandRequirements(SPIRV::OperandCategory::OperandCategory Category,
// Seed the avoid-set: prefer not to select Shader/Kernel capabilities
// when an alternative exists (which one is avoided presumably depends
// on elided subtarget checks — TODO confirm against the full file).
72 AvoidCaps.
S.
insert(SPIRV::Capability::Shader);
74 AvoidCaps.
S.
insert(SPIRV::Capability::Kernel);
// Version window check: an empty subtarget version means "unknown"
// and is treated as compatible on both bounds.
79 bool MinVerOK = SPIRVVersion.
empty() || SPIRVVersion >= ReqMinVer;
81 ReqMaxVer.
empty() || SPIRVVersion.
empty() || SPIRVVersion <= ReqMaxVer;
// No capabilities required: succeed with empty cap/ext lists when the
// version window is satisfied.
84 if (ReqCaps.
empty()) {
85 if (ReqExts.
empty()) {
86 if (MinVerOK && MaxVerOK)
87 return {
true, {}, {}, ReqMinVer, ReqMaxVer};
90 }
else if (MinVerOK && MaxVerOK) {
// Exactly one required capability: take it directly.
91 if (ReqCaps.
size() == 1) {
92 auto Cap = ReqCaps[0];
95 SPIRV::OperandCategory::CapabilityOperand, Cap));
96 return {
true, {Cap}, std::move(ReqExts), ReqMinVer, ReqMaxVer};
// Multiple candidate capabilities: pick the first usable one that is
// not in the avoid-set; the last candidate is accepted unconditionally
// (i == Sz - 1) as a fallback.
106 for (
auto Cap : ReqCaps)
109 for (
size_t i = 0, Sz = UseCaps.
size(); i < Sz; ++i) {
110 auto Cap = UseCaps[i];
111 if (i == Sz - 1 || !AvoidCaps.
S.
contains(Cap)) {
113 SPIRV::OperandCategory::CapabilityOperand, Cap));
114 return {
true, {Cap}, std::move(ReqExts), ReqMinVer, ReqMaxVer};
// Otherwise the operand is usable only if every required extension is
// available on the subtarget.
122 if (
llvm::all_of(ReqExts, [&ST](
const SPIRV::Extension::Extension &Ext) {
123 return ST.canUseExtension(Ext);
// Initializes per-module analysis state (MAI): clears tables, derives the
// addressing/memory model and source language from module metadata or
// subtarget defaults, and records base requirements.
// NOTE(review): fragmentary view — some original lines are elided.
134void SPIRVModuleAnalysis::setBaseInfo(
const Module &M) {
// Reset all per-module tables before re-populating them.
138 MAI.RegisterAliasTable.clear();
139 MAI.InstrsToDelete.clear();
141 MAI.GlobalVarList.clear();
142 MAI.ExtInstSetMap.clear();
144 MAI.Reqs.initAvailableCapabilities(*ST);
// Prefer explicit "spirv.MemoryModel" module metadata when present:
// operand 0 is the addressing model, operand 1 the memory model.
147 if (
auto MemModel =
M.getNamedMetadata(
"spirv.MemoryModel")) {
148 auto MemMD = MemModel->getOperand(0);
149 MAI.Addr =
static_cast<SPIRV::AddressingModel::AddressingModel
>(
150 getMetadataUInt(MemMD, 0));
152 static_cast<SPIRV::MemoryModel::MemoryModel
>(getMetadataUInt(MemMD, 1));
// No metadata: derive defaults from the subtarget. Shader targets use
// GLSL450; otherwise OpenCL with an addressing model chosen from the
// pointer size.
155 MAI.Mem = ST->isShader() ? SPIRV::MemoryModel::GLSL450
156 : SPIRV::MemoryModel::OpenCL;
157 if (
MAI.Mem == SPIRV::MemoryModel::OpenCL) {
158 unsigned PtrSize = ST->getPointerSize();
159 MAI.Addr = PtrSize == 32 ? SPIRV::AddressingModel::Physical32
160 : PtrSize == 64 ? SPIRV::AddressingModel::Physical64
161 : SPIRV::AddressingModel::Logical;
164 MAI.Addr = SPIRV::AddressingModel::Logical;
// Source language: "opencl.ocl.version" metadata marks OpenCL C and
// encodes major/minor/revision into a single integer version.
169 if (
auto VerNode =
M.getNamedMetadata(
"opencl.ocl.version")) {
170 MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_C;
173 assert(VerNode->getNumOperands() > 0 &&
"Invalid SPIR");
174 auto VersionMD = VerNode->getOperand(0);
// Default major version is 2 when the operand is absent.
175 unsigned MajorNum = getMetadataUInt(VersionMD, 0, 2);
176 unsigned MinorNum = getMetadataUInt(VersionMD, 1);
177 unsigned RevNum = getMetadataUInt(VersionMD, 2);
// Encoding: (major*100 + minor)*1000 + revision, major clamped to >= 1.
180 (std::max(1U, MajorNum) * 100 + MinorNum) * 1000 + RevNum;
186 if (!ST->isShader()) {
187 MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_CPP;
188 MAI.SrcLangVersion = 100000;
190 MAI.SrcLang = SPIRV::SourceLanguage::Unknown;
191 MAI.SrcLangVersion = 0;
// Record OpenCL extensions used by the module (handling of each MD node
// is in elided lines).
195 if (
auto ExtNode =
M.getNamedMetadata(
"opencl.used.extensions")) {
196 for (
unsigned I = 0,
E = ExtNode->getNumOperands();
I !=
E; ++
I) {
197 MDNode *MD = ExtNode->getOperand(
I);
// Add requirements implied by the chosen memory model, source language
// and addressing model.
207 MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand,
209 MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::SourceLanguageOperand,
211 MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::AddressingModelOperand,
// Non-shader (OpenCL) targets get a pre-assigned global id for the
// OpenCL.std extended instruction set.
214 if (!ST->isShader()) {
216 MAI.ExtInstSetMap[
static_cast<unsigned>(
217 SPIRV::InstructionSet::OpenCL_std)] =
MAI.getNextIDRegister();
228 if (
UseMI.getOpcode() != SPIRV::OpDecorate &&
229 UseMI.getOpcode() != SPIRV::OpMemberDecorate)
232 for (
unsigned I = 0;
I <
UseMI.getNumOperands(); ++
I) {
250 for (
unsigned i = 0; i <
MI.getNumOperands(); ++i) {
259 unsigned Opcode =
MI.getOpcode();
260 if ((Opcode == SPIRV::OpDecorate) && i >= 2) {
261 unsigned DecorationID =
MI.getOperand(1).getImm();
262 if (DecorationID != SPIRV::Decoration::UserSemantic &&
263 DecorationID != SPIRV::Decoration::CacheControlLoadINTEL &&
264 DecorationID != SPIRV::Decoration::CacheControlStoreINTEL)
270 if (!UseDefReg && MO.
isDef()) {
278 dbgs() <<
"Unexpectedly, no global id found for the operand ";
280 dbgs() <<
"\nInstruction: ";
299 appendDecorationsForReg(
MI.getMF()->getRegInfo(), DefReg, Signature);
306 unsigned Opcode =
MI.getOpcode();
308 case SPIRV::OpTypeForwardPointer:
311 case SPIRV::OpVariable:
312 return static_cast<SPIRV::StorageClass::StorageClass
>(
313 MI.getOperand(2).
getImm()) != SPIRV::StorageClass::Function;
314 case SPIRV::OpFunction:
315 case SPIRV::OpFunctionParameter:
318 if (GR->hasConstFunPtr() && Opcode == SPIRV::OpUndef) {
320 for (MachineInstr &
UseMI :
MRI.use_instructions(DefReg)) {
321 if (
UseMI.getOpcode() != SPIRV::OpConstantFunctionPointerINTEL)
327 MAI.setSkipEmission(&
MI);
331 return TII->isTypeDeclInstr(
MI) || TII->isConstantInstr(
MI) ||
332 TII->isInlineAsmDefInstr(
MI);
// Resolves a use of a function pointer: walks from the use to the function
// definition (possibly in another MachineFunction), visits that definition's
// declaration section, and aliases the local register to the definition's
// global register. NOTE(review): fragmentary view — lines elided.
338void SPIRVModuleAnalysis::visitFunPtrUse(
340 std::map<const Value *, unsigned> &GlobalToGReg,
const MachineFunction *MF,
342 const MachineOperand *OpFunDef =
343 GR->getFunctionDefinitionByUse(&
MI.getOperand(2));
346 const MachineInstr *OpDefMI = OpFunDef->
getParent();
349 const MachineRegisterInfo &FunDefMRI = FunDefMF->
getRegInfo();
// Visit the definition (and, per the do/while below, the following
// OpFunction/OpFunctionParameter instructions) in the defining function.
351 visitDecl(FunDefMRI, SignatureToGReg, GlobalToGReg, FunDefMF, *OpDefMI);
353 }
while (OpDefMI && (OpDefMI->
getOpcode() == SPIRV::OpFunction ||
354 OpDefMI->
getOpcode() == SPIRV::OpFunctionParameter));
// By now the definition must have been assigned a global register.
356 MCRegister GlobalFunDefReg =
357 MAI.getRegisterAlias(FunDefMF, OpFunDef->
getReg());
359 "Function definition must refer to a global register");
360 MAI.setRegisterAlias(MF, OpReg, GlobalFunDefReg);
// Recursively visits a declaration-section instruction: first resolves all
// register operands (depth-first through their defining instructions), then
// assigns the instruction a global register via the appropriate handler and
// marks it to be skipped during normal emission.
// NOTE(review): fragmentary view — lines elided.
365void SPIRVModuleAnalysis::visitDecl(
367 std::map<const Value *, unsigned> &GlobalToGReg,
const MachineFunction *MF,
369 unsigned Opcode =
MI.getOpcode();
// Pass 1: resolve operands before handling MI itself.
372 for (
const MachineOperand &MO :
MI.operands()) {
// Function-pointer constants are resolved through the cross-function
// helper rather than a local def walk.
377 if (Opcode == SPIRV::OpConstantFunctionPointerINTEL &&
378 MRI.getRegClass(OpReg) == &SPIRV::pIDRegClass) {
379 visitFunPtrUse(OpReg, SignatureToGReg, GlobalToGReg, MF,
MI);
// Skip operands that already have a global alias.
383 if (
MAI.hasRegisterAlias(MF, MO.
getReg()))
// Recurse into the unique defining instruction if it also belongs to
// the declaration section.
386 if (
const MachineInstr *OpDefMI =
MRI.getUniqueVRegDef(OpReg)) {
387 if (isDeclSection(
MRI, *OpDefMI))
388 visitDecl(
MRI, SignatureToGReg, GlobalToGReg, MF, *OpDefMI);
// Debug output + report when no unique definition exists.
394 dbgs() <<
"Unexpectedly, no unique definition for the operand ";
396 dbgs() <<
"\nInstruction: ";
401 "No unique definition is found for the virtual register");
// Pass 2: dispatch on opcode to obtain the global register GReg.
405 bool IsFunDef =
false;
406 if (TII->isSpecConstantInstr(
MI)) {
407 GReg =
MAI.getNextIDRegister();
409 }
else if (Opcode == SPIRV::OpFunction ||
410 Opcode == SPIRV::OpFunctionParameter) {
411 GReg = handleFunctionOrParameter(MF,
MI, GlobalToGReg, IsFunDef);
412 }
else if (Opcode == SPIRV::OpTypeStruct ||
413 Opcode == SPIRV::OpConstantComposite) {
414 GReg = handleTypeDeclOrConstant(
MI, SignatureToGReg);
// INTEL "Continued" instructions that immediately follow a struct type
// or composite constant are folded into the same declaration and
// skipped for emission.
415 const MachineInstr *NextInstr =
MI.getNextNode();
417 ((Opcode == SPIRV::OpTypeStruct &&
418 NextInstr->
getOpcode() == SPIRV::OpTypeStructContinuedINTEL) ||
419 (Opcode == SPIRV::OpConstantComposite &&
421 SPIRV::OpConstantCompositeContinuedINTEL))) {
422 MCRegister Tmp = handleTypeDeclOrConstant(*NextInstr, SignatureToGReg);
424 MAI.setSkipEmission(NextInstr);
427 }
else if (TII->isTypeDeclInstr(
MI) || TII->isConstantInstr(
MI) ||
428 TII->isInlineAsmDefInstr(
MI)) {
429 GReg = handleTypeDeclOrConstant(
MI, SignatureToGReg);
430 }
else if (Opcode == SPIRV::OpVariable) {
431 GReg = handleVariable(MF,
MI, GlobalToGReg);
434 dbgs() <<
"\nInstruction: ";
// Record the alias for MI's def register and suppress its emission
// (re-emitted later from the global declaration section — presumably;
// confirm against the full file).
440 MAI.setRegisterAlias(MF,
MI.getOperand(0).getReg(), GReg);
442 MAI.setSkipEmission(&
MI);
// Assigns (or retrieves) the global register for an OpFunction /
// OpFunctionParameter, keyed by the underlying IR global object.
// Sets IsFunDef to whether the referenced function has a body.
// NOTE(review): fragmentary view — lines elided.
445MCRegister SPIRVModuleAnalysis::handleFunctionOrParameter(
447 std::map<const Value *, unsigned> &GlobalToGReg,
bool &IsFunDef) {
448 const Value *GObj = GR->getGlobalObject(MF,
MI.getOperand(0).getReg());
449 assert(GObj &&
"Unregistered global definition");
453 assert(
F &&
"Expected a reference to a function or an argument");
454 IsFunDef = !
F->isDeclaration();
// try_emplace: reuse the cached global register if the object was seen.
455 auto [It,
Inserted] = GlobalToGReg.try_emplace(GObj);
458 MCRegister GReg =
MAI.getNextIDRegister();
// Assigns (or retrieves) the global register for a type declaration or
// constant, deduplicated by the instruction's signature (MISign).
// NOTE(review): fragmentary view — lines elided.
466SPIRVModuleAnalysis::handleTypeDeclOrConstant(
const MachineInstr &
MI,
// try_emplace: identical signatures share one global register.
469 auto [It,
Inserted] = SignatureToGReg.try_emplace(MISign);
472 MCRegister GReg =
MAI.getNextIDRegister();
// Assigns (or retrieves) the global register for an OpVariable, keyed by
// its IR global object, and records the variable in GlobalVarList.
// NOTE(review): fragmentary view — lines elided.
478MCRegister SPIRVModuleAnalysis::handleVariable(
480 std::map<const Value *, unsigned> &GlobalToGReg) {
481 MAI.GlobalVarList.push_back(&
MI);
482 const Value *GObj = GR->getGlobalObject(MF,
MI.getOperand(0).getReg());
483 assert(GObj &&
"Unregistered global definition");
// try_emplace: reuse the cached register for an already-seen global.
484 auto [It,
Inserted] = GlobalToGReg.try_emplace(GObj);
487 MCRegister GReg =
MAI.getNextIDRegister();
// Walks every machine function in the module, collecting module-level
// declarations: OpExtension/OpCapability are recorded in the requirement
// handler and suppressed from emission; other declaration-section
// instructions are funneled through visitDecl for global numbering.
// NOTE(review): fragmentary view — lines elided.
493void SPIRVModuleAnalysis::collectDeclarations(
const Module &M) {
495 std::map<const Value *, unsigned> GlobalToGReg;
496 for (
auto F =
M.begin(),
E =
M.end();
F !=
E; ++
F) {
497 MachineFunction *MF = MMI->getMachineFunction(*
F);
// PastHeader tracks progress through the function header (OpFunction /
// parameters) — exact state transitions are in elided lines.
501 unsigned PastHeader = 0;
502 for (MachineBasicBlock &
MBB : *MF) {
503 for (MachineInstr &
MI :
MBB) {
504 if (
MI.getNumOperands() == 0)
506 unsigned Opcode =
MI.getOpcode();
507 if (Opcode == SPIRV::OpFunction) {
508 if (PastHeader == 0) {
512 }
else if (Opcode == SPIRV::OpFunctionParameter) {
515 }
else if (PastHeader > 0) {
519 const MachineOperand &DefMO =
MI.getOperand(0);
// Extensions/capabilities are module-level: record and skip emission.
521 case SPIRV::OpExtension:
522 MAI.Reqs.addExtension(SPIRV::Extension::Extension(DefMO.
getImm()));
523 MAI.setSkipEmission(&
MI);
525 case SPIRV::OpCapability:
526 MAI.Reqs.addCapability(SPIRV::Capability::Capability(DefMO.
getImm()));
527 MAI.setSkipEmission(&
MI);
// Declaration-section defs without a global alias yet are visited.
532 if (DefMO.
isReg() && isDeclSection(
MRI,
MI) &&
533 !
MAI.hasRegisterAlias(MF, DefMO.
getReg()))
534 visitDecl(
MRI, SignatureToGReg, GlobalToGReg, MF,
MI);
547 if (
MI.getOpcode() == SPIRV::OpDecorate) {
549 auto Dec =
MI.getOperand(1).getImm();
550 if (Dec == SPIRV::Decoration::LinkageAttributes) {
551 auto Lnk =
MI.getOperand(
MI.getNumOperands() - 1).getImm();
552 if (Lnk == SPIRV::LinkageType::Import) {
557 MAI.FuncMap[ImportedFunc] =
MAI.getRegisterAlias(
MI.getMF(), Target);
560 }
else if (
MI.getOpcode() == SPIRV::OpFunction) {
563 MCRegister GlobalReg =
MAI.getRegisterAlias(
MI.getMF(),
Reg);
565 MAI.FuncMap[
F] = GlobalReg;
574 bool Append =
true) {
577 auto FoundMI = IS.insert(std::move(MISign));
578 if (!FoundMI.second) {
579 if (
MI.getOpcode() == SPIRV::OpDecorate) {
581 "Decoration instructions must have at least 2 operands");
583 "Only OpDecorate instructions can be duplicates");
588 if (
MI.getOperand(1).getImm() != SPIRV::Decoration::FPFastMathMode)
593 if (instrToSignature(*OrigMI, MAI,
true) == MISign) {
594 assert(OrigMI->getNumOperands() ==
MI.getNumOperands() &&
595 "Original instruction must have the same number of operands");
597 OrigMI->getNumOperands() == 3 &&
598 "FPFastMathMode decoration must have 3 operands for OpDecorate");
599 unsigned OrigFlags = OrigMI->getOperand(2).getImm();
600 unsigned NewFlags =
MI.getOperand(2).getImm();
601 if (OrigFlags == NewFlags)
605 unsigned FinalFlags = OrigFlags | NewFlags;
607 <<
"Warning: Conflicting FPFastMathMode decoration flags "
609 << *OrigMI <<
"Original flags: " << OrigFlags
610 <<
", new flags: " << NewFlags
611 <<
". They will be merged on a best effort basis, but not "
612 "validated. Final flags: "
613 << FinalFlags <<
"\n";
620 assert(
false &&
"No original instruction found for the duplicate "
621 "OpDecorate, but we found one in IS.");
// Classifies remaining (non-declaration) instructions of every defined
// function into the module sections they belong to: debug strings/names,
// entry points, aliasing metadata, decorations, constants, functions, and
// forward pointers. NOTE(review): fragmentary view — the per-branch actions
// (which MAI section each instruction is appended to) are largely elided.
634void SPIRVModuleAnalysis::processOtherInstrs(
const Module &M) {
636 for (
auto F =
M.begin(),
E =
M.end();
F !=
E; ++
F) {
637 if (
F->isDeclaration())
639 MachineFunction *MF = MMI->getMachineFunction(*
F);
642 for (MachineBasicBlock &
MBB : *MF)
643 for (MachineInstr &
MI :
MBB) {
// Instructions already handled by the declaration pass are skipped.
644 if (
MAI.getSkipEmission(&
MI))
646 const unsigned OpCode =
MI.getOpcode();
647 if (OpCode == SPIRV::OpString) {
649 }
else if (OpCode == SPIRV::OpExtInst &&
MI.getOperand(2).isImm() &&
650 MI.getOperand(2).getImm() ==
651 SPIRV::InstructionSet::
652 NonSemantic_Shader_DebugInfo_100) {
// Non-semantic debug-info ext-instructions: a fixed set of opcodes is
// treated as module-global debug info (IsGlobalDI).
653 MachineOperand
Ins =
MI.getOperand(3);
654 namespace NS = SPIRV::NonSemanticExtInst;
655 static constexpr int64_t GlobalNonSemanticDITy[] = {
656 NS::DebugSource, NS::DebugCompilationUnit, NS::DebugInfoNone,
657 NS::DebugTypeBasic, NS::DebugTypePointer};
658 bool IsGlobalDI =
false;
659 for (
unsigned Idx = 0; Idx < std::size(GlobalNonSemanticDITy); ++Idx)
660 IsGlobalDI |=
Ins.getImm() == GlobalNonSemanticDITy[Idx];
663 }
else if (OpCode == SPIRV::OpName || OpCode == SPIRV::OpMemberName) {
665 }
else if (OpCode == SPIRV::OpEntryPoint) {
667 }
else if (TII->isAliasingInstr(
MI)) {
669 }
else if (TII->isDecorationInstr(
MI)) {
// Decorations may carry linkage info; harvest function names from them.
671 collectFuncNames(
MI, &*
F);
672 }
else if (TII->isConstantInstr(
MI)) {
676 }
else if (OpCode == SPIRV::OpFunction) {
677 collectFuncNames(
MI, &*
F);
678 }
else if (OpCode == SPIRV::OpTypeForwardPointer) {
// Assigns module-unique (global) register numbers to every virtual register
// that does not yet have an alias, and gives each OpExtInst instruction-set
// operand a module-level id register.
// NOTE(review): fragmentary view — lines elided.
688void SPIRVModuleAnalysis::numberRegistersGlobally(
const Module &M) {
689 for (
auto F =
M.begin(),
E =
M.end();
F !=
E; ++
F) {
690 if ((*F).isDeclaration())
692 MachineFunction *MF = MMI->getMachineFunction(*
F);
694 for (MachineBasicBlock &
MBB : *MF) {
695 for (MachineInstr &
MI :
MBB) {
696 for (MachineOperand &
Op :
MI.operands()) {
// Already numbered by the declaration pass — leave as-is.
700 if (
MAI.hasRegisterAlias(MF,
Reg))
702 MCRegister NewReg =
MAI.getNextIDRegister();
703 MAI.setRegisterAlias(MF,
Reg, NewReg);
// OpExtInst: ensure the referenced instruction set (operand 2) has a
// global id register in ExtInstSetMap.
705 if (
MI.getOpcode() != SPIRV::OpExtInst)
707 auto Set =
MI.getOperand(2).getImm();
708 auto [It,
Inserted] =
MAI.ExtInstSetMap.try_emplace(Set);
710 It->second =
MAI.getNextIDRegister();
718 SPIRV::OperandCategory::OperandCategory Category, uint32_t i,
720 addRequirements(getSymbolicOperandRequirements(Category, i, ST, *
this));
// Recursively adds a set of capabilities together with the capabilities
// they implicitly declare. NOTE(review): fragmentary view — lines elided.
723void SPIRV::RequirementHandler::recursiveAddCapabilities(
725 for (
const auto &Cap : ToPrune) {
// Recurse into the capabilities implicitly declared by Cap.
729 recursiveAddCapabilities(ImplicitDecls);
734 for (
const auto &Cap : ToAdd) {
735 bool IsNewlyInserted = AllCaps.insert(Cap).second;
736 if (!IsNewlyInserted)
740 recursiveAddCapabilities(ImplicitDecls);
741 MinimalCaps.push_back(Cap);
746 const SPIRV::Requirements &Req) {
750 if (Req.
Cap.has_value())
751 addCapabilities({Req.
Cap.value()});
753 addExtensions(Req.
Exts);
756 if (!MaxVersion.empty() && Req.
MinVer > MaxVersion) {
758 <<
" and <= " << MaxVersion <<
"\n");
762 if (MinVersion.empty() || Req.
MinVer > MinVersion)
767 if (!MinVersion.empty() && Req.
MaxVer < MinVersion) {
769 <<
" and >= " << MinVersion <<
"\n");
773 if (MaxVersion.empty() || Req.
MaxVer < MaxVersion)
779 const SPIRVSubtarget &ST)
const {
781 bool IsSatisfiable =
true;
782 auto TargetVer =
ST.getSPIRVVersion();
784 if (!MaxVersion.empty() && !TargetVer.empty() && MaxVersion < TargetVer) {
786 dbgs() <<
"Target SPIR-V version too high for required features\n"
787 <<
"Required max version: " << MaxVersion <<
" target version "
788 << TargetVer <<
"\n");
789 IsSatisfiable =
false;
792 if (!MinVersion.empty() && !TargetVer.empty() && MinVersion > TargetVer) {
793 LLVM_DEBUG(
dbgs() <<
"Target SPIR-V version too low for required features\n"
794 <<
"Required min version: " << MinVersion
795 <<
" target version " << TargetVer <<
"\n");
796 IsSatisfiable =
false;
799 if (!MinVersion.empty() && !MaxVersion.empty() && MinVersion > MaxVersion) {
802 <<
"Version is too low for some features and too high for others.\n"
803 <<
"Required SPIR-V min version: " << MinVersion
804 <<
" required SPIR-V max version " << MaxVersion <<
"\n");
805 IsSatisfiable =
false;
808 AvoidCapabilitiesSet AvoidCaps;
810 AvoidCaps.
S.
insert(SPIRV::Capability::Shader);
812 AvoidCaps.
S.
insert(SPIRV::Capability::Kernel);
814 for (
auto Cap : MinimalCaps) {
815 if (AvailableCaps.contains(Cap) && !AvoidCaps.
S.
contains(Cap))
819 OperandCategory::CapabilityOperand, Cap)
821 IsSatisfiable =
false;
824 for (
auto Ext : AllExtensions) {
825 if (
ST.canUseExtension(Ext))
829 OperandCategory::ExtensionOperand, Ext)
831 IsSatisfiable =
false;
840 for (
const auto Cap : ToAdd)
841 if (AvailableCaps.insert(Cap).second)
843 SPIRV::OperandCategory::CapabilityOperand, Cap));
847 const Capability::Capability
ToRemove,
848 const Capability::Capability IfPresent) {
849 if (AllCaps.contains(IfPresent))
857 addAvailableCaps({Capability::Shader, Capability::Linkage, Capability::Int8,
860 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 3)))
862 Capability::GroupNonUniformVote,
863 Capability::GroupNonUniformArithmetic,
864 Capability::GroupNonUniformBallot,
865 Capability::GroupNonUniformClustered,
866 Capability::GroupNonUniformShuffle,
867 Capability::GroupNonUniformShuffleRelative});
869 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
871 Capability::DotProductInput4x8Bit,
872 Capability::DotProductInput4x8BitPacked,
873 Capability::DemoteToHelperInvocation});
876 for (
auto Extension :
ST.getAllAvailableExtensions()) {
882 if (!
ST.isShader()) {
883 initAvailableCapabilitiesForOpenCL(ST);
888 initAvailableCapabilitiesForVulkan(ST);
// Registers the capabilities available on an OpenCL (kernel) subtarget,
// gated by OpenCL profile/image support and OpenCL / SPIR-V versions.
// NOTE(review): fragmentary view — some addAvailableCaps calls and their
// capability lists are elided.
895void RequirementHandler::initAvailableCapabilitiesForOpenCL(
896 const SPIRVSubtarget &ST) {
899 Capability::Kernel, Capability::Vector16,
900 Capability::Groups, Capability::GenericPointer,
901 Capability::StorageImageWriteWithoutFormat,
902 Capability::StorageImageReadWithoutFormat});
903 if (
ST.hasOpenCLFullProfile())
905 if (
ST.hasOpenCLImageSupport()) {
907 Capability::Image1D, Capability::SampledBuffer,
908 Capability::ImageBuffer});
909 if (
ST.isAtLeastOpenCLVer(VersionTuple(2, 0)))
912 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 1)) &&
913 ST.isAtLeastOpenCLVer(VersionTuple(2, 2)))
// SPIR-V 1.4+: float-controls related capabilities.
915 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 4)))
916 addAvailableCaps({Capability::DenormPreserve, Capability::DenormFlushToZero,
917 Capability::SignedZeroInfNanPreserve,
918 Capability::RoundingModeRTE,
919 Capability::RoundingModeRTZ});
// Registers the capabilities available on a Vulkan (shader) subtarget,
// with additional descriptor-indexing capabilities from SPIR-V 1.5 and
// format-less image access from SPIR-V 1.6.
// NOTE(review): fragmentary view — lines elided.
926void RequirementHandler::initAvailableCapabilitiesForVulkan(
927 const SPIRVSubtarget &ST) {
930 addAvailableCaps({Capability::Int64, Capability::Float16, Capability::Float64,
931 Capability::GroupNonUniform, Capability::Image1D,
932 Capability::SampledBuffer, Capability::ImageBuffer,
933 Capability::UniformBufferArrayDynamicIndexing,
934 Capability::SampledImageArrayDynamicIndexing,
935 Capability::StorageBufferArrayDynamicIndexing,
936 Capability::StorageImageArrayDynamicIndexing});
// SPIR-V 1.5 promoted the EXT descriptor-indexing capabilities to core.
939 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 5))) {
941 {Capability::ShaderNonUniformEXT, Capability::RuntimeDescriptorArrayEXT,
942 Capability::InputAttachmentArrayDynamicIndexingEXT,
943 Capability::UniformTexelBufferArrayDynamicIndexingEXT,
944 Capability::StorageTexelBufferArrayDynamicIndexingEXT,
945 Capability::UniformBufferArrayNonUniformIndexingEXT,
946 Capability::SampledImageArrayNonUniformIndexingEXT,
947 Capability::StorageBufferArrayNonUniformIndexingEXT,
948 Capability::StorageImageArrayNonUniformIndexingEXT,
949 Capability::InputAttachmentArrayNonUniformIndexingEXT,
950 Capability::UniformTexelBufferArrayNonUniformIndexingEXT,
951 Capability::StorageTexelBufferArrayNonUniformIndexingEXT});
955 if (
ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
957 Capability::StorageImageReadWithoutFormat});
// Adds capability/extension requirements implied by a single decoration
// operand of an OpDecorate/OpMemberDecorate instruction: the decoration's
// own requirements plus extensions implied by specific decoration kinds.
// NOTE(review): fragmentary view — lines elided.
965static void addOpDecorateReqs(
const MachineInstr &
MI,
unsigned DecIndex,
968 int64_t DecOp =
MI.getOperand(DecIndex).getImm();
969 auto Dec =
static_cast<SPIRV::Decoration::Decoration
>(DecOp);
// Requirements of the decoration itself.
971 SPIRV::OperandCategory::DecorationOperand, Dec, ST, Reqs));
// BuiltIn: also require what the built-in value demands (operand after
// the decoration id).
973 if (Dec == SPIRV::Decoration::BuiltIn) {
974 int64_t BuiltInOp =
MI.getOperand(DecIndex + 1).getImm();
975 auto BuiltIn =
static_cast<SPIRV::BuiltIn::BuiltIn
>(BuiltInOp);
977 SPIRV::OperandCategory::BuiltInOperand, BuiltIn, ST, Reqs));
// LinkageAttributes: the linkage type is the last operand; LinkOnceODR
// needs SPV_KHR_linkonce_odr.
978 }
else if (Dec == SPIRV::Decoration::LinkageAttributes) {
979 int64_t LinkageOp =
MI.getOperand(
MI.getNumOperands() - 1).getImm();
980 SPIRV::LinkageType::LinkageType LnkType =
981 static_cast<SPIRV::LinkageType::LinkageType
>(LinkageOp);
982 if (LnkType == SPIRV::LinkageType::LinkOnceODR)
983 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_linkonce_odr);
// Remaining branches map individual INTEL/KHR decorations to the
// extension that introduces them.
984 }
else if (Dec == SPIRV::Decoration::CacheControlLoadINTEL ||
985 Dec == SPIRV::Decoration::CacheControlStoreINTEL) {
986 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_cache_controls);
987 }
else if (Dec == SPIRV::Decoration::HostAccessINTEL) {
988 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_global_variable_host_access);
989 }
else if (Dec == SPIRV::Decoration::InitModeINTEL ||
990 Dec == SPIRV::Decoration::ImplementInRegisterMapINTEL) {
992 SPIRV::Extension::SPV_INTEL_global_variable_fpga_decorations);
993 }
else if (Dec == SPIRV::Decoration::NonUniformEXT) {
995 }
else if (Dec == SPIRV::Decoration::FPMaxErrorDecorationINTEL) {
997 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_fp_max_error);
998 }
else if (Dec == SPIRV::Decoration::FPFastMathMode) {
// FPFastMathMode is only legal with SPV_KHR_float_controls2 here.
999 if (
ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)) {
1001 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_float_controls2);
1010 assert(
MI.getNumOperands() >= 8 &&
"Insufficient operands for OpTypeImage");
1013 int64_t ImgFormatOp =
MI.getOperand(7).getImm();
1014 auto ImgFormat =
static_cast<SPIRV::ImageFormat::ImageFormat
>(ImgFormatOp);
1018 bool IsArrayed =
MI.getOperand(4).getImm() == 1;
1019 bool IsMultisampled =
MI.getOperand(5).getImm() == 1;
1020 bool NoSampler =
MI.getOperand(6).getImm() == 2;
1023 switch (
MI.getOperand(2).getImm()) {
1024 case SPIRV::Dim::DIM_1D:
1026 : SPIRV::Capability::Sampled1D);
1028 case SPIRV::Dim::DIM_2D:
1029 if (IsMultisampled && NoSampler)
1032 case SPIRV::Dim::DIM_Cube:
1036 : SPIRV::Capability::SampledCubeArray);
1038 case SPIRV::Dim::DIM_Rect:
1040 : SPIRV::Capability::SampledRect);
1042 case SPIRV::Dim::DIM_Buffer:
1044 : SPIRV::Capability::SampledBuffer);
1046 case SPIRV::Dim::DIM_SubpassData:
1052 if (!
ST.isShader()) {
1053 if (
MI.getNumOperands() > 8 &&
1054 MI.getOperand(8).getImm() == SPIRV::AccessQualifier::ReadWrite)
// Builds the error message for a missing SPV_EXT_shader_atomic_float*
// extension; ExtName is a string-literal suffix (e.g. "_add").
1062#define ATOM_FLT_REQ_EXT_MSG(ExtName) \
1063 "The atomic float instruction requires the following SPIR-V " \
1064 "extension: SPV_EXT_shader_atomic_float" ExtName
1069 "Expect register operand in atomic float instruction");
1070 Register TypeReg =
MI.getOperand(1).getReg();
1071 SPIRVType *TypeDef =
MI.getMF()->getRegInfo().getVRegDef(TypeReg);
1072 if (TypeDef->
getOpcode() != SPIRV::OpTypeFloat)
1074 "floating-point type scalar");
1077 unsigned Op =
MI.getOpcode();
1078 if (
Op == SPIRV::OpAtomicFAddEXT) {
1079 if (!
ST.canUseExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_add))
1081 Reqs.
addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_add);
1084 if (!
ST.canUseExtension(
1085 SPIRV::Extension::SPV_EXT_shader_atomic_float16_add))
1087 Reqs.
addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float16_add);
1098 "Unexpected floating-point type width in atomic float instruction");
1101 if (!
ST.canUseExtension(
1102 SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max))
1104 Reqs.
addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max);
1107 Reqs.
addCapability(SPIRV::Capability::AtomicFloat16MinMaxEXT);
1110 Reqs.
addCapability(SPIRV::Capability::AtomicFloat32MinMaxEXT);
1113 Reqs.
addCapability(SPIRV::Capability::AtomicFloat64MinMaxEXT);
1117 "Unexpected floating-point type width in atomic float instruction");
1123 if (ImageInst->
getOpcode() != SPIRV::OpTypeImage)
1127 return Dim == SPIRV::Dim::DIM_Buffer && Sampled == 1;
1131 if (ImageInst->
getOpcode() != SPIRV::OpTypeImage)
1135 return Dim == SPIRV::Dim::DIM_Buffer && Sampled == 2;
1139 if (ImageInst->
getOpcode() != SPIRV::OpTypeImage)
1143 return Dim != SPIRV::Dim::DIM_Buffer && Sampled == 1;
1147 if (ImageInst->
getOpcode() != SPIRV::OpTypeImage)
1151 return Dim == SPIRV::Dim::DIM_SubpassData && Sampled == 2;
1155 if (ImageInst->
getOpcode() != SPIRV::OpTypeImage)
1159 return Dim != SPIRV::Dim::DIM_Buffer && Sampled == 2;
// Returns whether the instruction declares an OpTypeSampledImage whose
// underlying image type qualifies as a sampled image.
// NOTE(review): fragmentary view — lines elided (ImageReg/MRI setup).
1162bool isCombinedImageSampler(
MachineInstr *SampledImageInst) {
1163 if (SampledImageInst->
getOpcode() != SPIRV::OpTypeSampledImage)
1168 auto *ImageInst =
MRI.getUniqueVRegDef(ImageReg);
1169 return isSampledImage(ImageInst);
1173 for (
const auto &
MI :
MRI.reg_instructions(
Reg)) {
1174 if (
MI.getOpcode() != SPIRV::OpDecorate)
1178 if (Dec == SPIRV::Decoration::NonUniformEXT)
1196 if (
StorageClass != SPIRV::StorageClass::StorageClass::UniformConstant &&
1197 StorageClass != SPIRV::StorageClass::StorageClass::Uniform &&
1198 StorageClass != SPIRV::StorageClass::StorageClass::StorageBuffer) {
1203 hasNonUniformDecoration(
Instr.getOperand(0).getReg(),
MRI);
1205 auto FirstIndexReg =
Instr.getOperand(3).getReg();
1206 bool FirstIndexIsConstant =
1209 if (
StorageClass == SPIRV::StorageClass::StorageClass::StorageBuffer) {
1212 SPIRV::Capability::StorageBufferArrayNonUniformIndexingEXT);
1213 else if (!FirstIndexIsConstant)
1215 SPIRV::Capability::StorageBufferArrayDynamicIndexing);
1221 if (PointeeType->
getOpcode() != SPIRV::OpTypeImage &&
1222 PointeeType->
getOpcode() != SPIRV::OpTypeSampledImage &&
1223 PointeeType->
getOpcode() != SPIRV::OpTypeSampler) {
1227 if (isUniformTexelBuffer(PointeeType)) {
1230 SPIRV::Capability::UniformTexelBufferArrayNonUniformIndexingEXT);
1231 else if (!FirstIndexIsConstant)
1233 SPIRV::Capability::UniformTexelBufferArrayDynamicIndexingEXT);
1234 }
else if (isInputAttachment(PointeeType)) {
1237 SPIRV::Capability::InputAttachmentArrayNonUniformIndexingEXT);
1238 else if (!FirstIndexIsConstant)
1240 SPIRV::Capability::InputAttachmentArrayDynamicIndexingEXT);
1241 }
else if (isStorageTexelBuffer(PointeeType)) {
1244 SPIRV::Capability::StorageTexelBufferArrayNonUniformIndexingEXT);
1245 else if (!FirstIndexIsConstant)
1247 SPIRV::Capability::StorageTexelBufferArrayDynamicIndexingEXT);
1248 }
else if (isSampledImage(PointeeType) ||
1249 isCombinedImageSampler(PointeeType) ||
1250 PointeeType->
getOpcode() == SPIRV::OpTypeSampler) {
1253 SPIRV::Capability::SampledImageArrayNonUniformIndexingEXT);
1254 else if (!FirstIndexIsConstant)
1256 SPIRV::Capability::SampledImageArrayDynamicIndexing);
1257 }
else if (isStorageImage(PointeeType)) {
1260 SPIRV::Capability::StorageImageArrayNonUniformIndexingEXT);
1261 else if (!FirstIndexIsConstant)
1263 SPIRV::Capability::StorageImageArrayDynamicIndexing);
// Returns whether the type instruction is an OpTypeImage with an unknown
// image format. NOTE(review): fragmentary view — the format check itself
// is elided.
1267static bool isImageTypeWithUnknownFormat(
SPIRVType *TypeInst) {
1268 if (TypeInst->
getOpcode() != SPIRV::OpTypeImage)
1277 if (
ST.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product))
1278 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_integer_dot_product);
1282 assert(
MI.getOperand(2).isReg() &&
"Unexpected operand in dot");
1286 assert(
Input->getOperand(1).isReg() &&
"Unexpected operand in dot input");
1290 if (TypeDef->
getOpcode() == SPIRV::OpTypeInt) {
1292 Reqs.
addCapability(SPIRV::Capability::DotProductInput4x8BitPacked);
1293 }
else if (TypeDef->
getOpcode() == SPIRV::OpTypeVector) {
1298 "Dot operand of 8-bit integer type requires 4 components");
1299 Reqs.
addCapability(SPIRV::Capability::DotProductInput4x8Bit);
1314 unsigned AddrSpace = ASOp.
getImm();
1315 if (AddrSpace != SPIRV::StorageClass::UniformConstant) {
1316 if (!
ST.canUseExtension(
1318 SPV_EXT_relaxed_printf_string_address_space)) {
1320 "required because printf uses a format string not "
1321 "in constant address space.",
1325 SPIRV::Extension::SPV_EXT_relaxed_printf_string_address_space);
// Returns whether the type is an OpTypeFloat encoding bfloat16.
// NOTE(review): fragmentary view — the rest of the conjunction (width /
// encoding operands, presumably) is elided.
1331static bool isBFloat16Type(
const SPIRVType *TypeDef) {
1333 TypeDef->
getOpcode() == SPIRV::OpTypeFloat &&
1342 switch (
MI.getOpcode()) {
1343 case SPIRV::OpMemoryModel: {
1344 int64_t Addr =
MI.getOperand(0).getImm();
1347 int64_t Mem =
MI.getOperand(1).getImm();
1352 case SPIRV::OpEntryPoint: {
1353 int64_t
Exe =
MI.getOperand(0).getImm();
1358 case SPIRV::OpExecutionMode:
1359 case SPIRV::OpExecutionModeId: {
1360 int64_t
Exe =
MI.getOperand(1).getImm();
1365 case SPIRV::OpTypeMatrix:
1368 case SPIRV::OpTypeInt: {
1369 unsigned BitWidth =
MI.getOperand(1).getImm();
1378 case SPIRV::OpDot: {
1381 if (isBFloat16Type(TypeDef))
1382 Reqs.
addCapability(SPIRV::Capability::BFloat16DotProductKHR);
1385 case SPIRV::OpTypeFloat: {
1386 unsigned BitWidth =
MI.getOperand(1).getImm();
1390 if (isBFloat16Type(&
MI)) {
1391 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_bfloat16))
1393 "following SPIR-V extension: SPV_KHR_bfloat16",
1403 case SPIRV::OpTypeVector: {
1404 unsigned NumComponents =
MI.getOperand(2).getImm();
1405 if (NumComponents == 8 || NumComponents == 16)
1409 case SPIRV::OpTypePointer: {
1410 auto SC =
MI.getOperand(1).getImm();
1421 (TypeDef->
getOpcode() == SPIRV::OpTypeFloat) &&
1426 case SPIRV::OpExtInst: {
1427 if (
MI.getOperand(2).getImm() ==
1428 static_cast<int64_t
>(
1429 SPIRV::InstructionSet::NonSemantic_Shader_DebugInfo_100)) {
1430 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_non_semantic_info);
1433 if (
MI.getOperand(3).getImm() ==
1434 static_cast<int64_t
>(SPIRV::OpenCLExtInst::printf)) {
1435 addPrintfRequirements(
MI, Reqs, ST);
1440 case SPIRV::OpAliasDomainDeclINTEL:
1441 case SPIRV::OpAliasScopeDeclINTEL:
1442 case SPIRV::OpAliasScopeListDeclINTEL: {
1443 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing);
1444 Reqs.
addCapability(SPIRV::Capability::MemoryAccessAliasingINTEL);
1447 case SPIRV::OpBitReverse:
1448 case SPIRV::OpBitFieldInsert:
1449 case SPIRV::OpBitFieldSExtract:
1450 case SPIRV::OpBitFieldUExtract:
1451 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
1455 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_bit_instructions);
1458 case SPIRV::OpTypeRuntimeArray:
1461 case SPIRV::OpTypeOpaque:
1462 case SPIRV::OpTypeEvent:
1465 case SPIRV::OpTypePipe:
1466 case SPIRV::OpTypeReserveId:
1469 case SPIRV::OpTypeDeviceEvent:
1470 case SPIRV::OpTypeQueue:
1471 case SPIRV::OpBuildNDRange:
1474 case SPIRV::OpDecorate:
1475 case SPIRV::OpDecorateId:
1476 case SPIRV::OpDecorateString:
1477 addOpDecorateReqs(
MI, 1, Reqs, ST);
1479 case SPIRV::OpMemberDecorate:
1480 case SPIRV::OpMemberDecorateString:
1481 addOpDecorateReqs(
MI, 2, Reqs, ST);
1483 case SPIRV::OpInBoundsPtrAccessChain:
1486 case SPIRV::OpConstantSampler:
1489 case SPIRV::OpInBoundsAccessChain:
1490 case SPIRV::OpAccessChain:
1491 addOpAccessChainReqs(
MI, Reqs, ST);
1493 case SPIRV::OpTypeImage:
1494 addOpTypeImageReqs(
MI, Reqs, ST);
1496 case SPIRV::OpTypeSampler:
1497 if (!
ST.isShader()) {
1501 case SPIRV::OpTypeForwardPointer:
1505 case SPIRV::OpAtomicFlagTestAndSet:
1506 case SPIRV::OpAtomicLoad:
1507 case SPIRV::OpAtomicStore:
1508 case SPIRV::OpAtomicExchange:
1509 case SPIRV::OpAtomicCompareExchange:
1510 case SPIRV::OpAtomicIIncrement:
1511 case SPIRV::OpAtomicIDecrement:
1512 case SPIRV::OpAtomicIAdd:
1513 case SPIRV::OpAtomicISub:
1514 case SPIRV::OpAtomicUMin:
1515 case SPIRV::OpAtomicUMax:
1516 case SPIRV::OpAtomicSMin:
1517 case SPIRV::OpAtomicSMax:
1518 case SPIRV::OpAtomicAnd:
1519 case SPIRV::OpAtomicOr:
1520 case SPIRV::OpAtomicXor: {
1523 if (
MI.getOpcode() == SPIRV::OpAtomicStore) {
1525 InstrPtr =
MRI.getVRegDef(
MI.getOperand(3).getReg());
1526 assert(InstrPtr &&
"Unexpected type instruction for OpAtomicStore");
1531 if (TypeDef->
getOpcode() == SPIRV::OpTypeInt) {
1538 case SPIRV::OpGroupNonUniformIAdd:
1539 case SPIRV::OpGroupNonUniformFAdd:
1540 case SPIRV::OpGroupNonUniformIMul:
1541 case SPIRV::OpGroupNonUniformFMul:
1542 case SPIRV::OpGroupNonUniformSMin:
1543 case SPIRV::OpGroupNonUniformUMin:
1544 case SPIRV::OpGroupNonUniformFMin:
1545 case SPIRV::OpGroupNonUniformSMax:
1546 case SPIRV::OpGroupNonUniformUMax:
1547 case SPIRV::OpGroupNonUniformFMax:
1548 case SPIRV::OpGroupNonUniformBitwiseAnd:
1549 case SPIRV::OpGroupNonUniformBitwiseOr:
1550 case SPIRV::OpGroupNonUniformBitwiseXor:
1551 case SPIRV::OpGroupNonUniformLogicalAnd:
1552 case SPIRV::OpGroupNonUniformLogicalOr:
1553 case SPIRV::OpGroupNonUniformLogicalXor: {
1555 int64_t GroupOp =
MI.getOperand(3).getImm();
1557 case SPIRV::GroupOperation::Reduce:
1558 case SPIRV::GroupOperation::InclusiveScan:
1559 case SPIRV::GroupOperation::ExclusiveScan:
1560 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformArithmetic);
1562 case SPIRV::GroupOperation::ClusteredReduce:
1563 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformClustered);
1565 case SPIRV::GroupOperation::PartitionedReduceNV:
1566 case SPIRV::GroupOperation::PartitionedInclusiveScanNV:
1567 case SPIRV::GroupOperation::PartitionedExclusiveScanNV:
1568 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformPartitionedNV);
1573 case SPIRV::OpGroupNonUniformShuffle:
1574 case SPIRV::OpGroupNonUniformShuffleXor:
1575 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformShuffle);
1577 case SPIRV::OpGroupNonUniformShuffleUp:
1578 case SPIRV::OpGroupNonUniformShuffleDown:
1579 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformShuffleRelative);
1581 case SPIRV::OpGroupAll:
1582 case SPIRV::OpGroupAny:
1583 case SPIRV::OpGroupBroadcast:
1584 case SPIRV::OpGroupIAdd:
1585 case SPIRV::OpGroupFAdd:
1586 case SPIRV::OpGroupFMin:
1587 case SPIRV::OpGroupUMin:
1588 case SPIRV::OpGroupSMin:
1589 case SPIRV::OpGroupFMax:
1590 case SPIRV::OpGroupUMax:
1591 case SPIRV::OpGroupSMax:
1594 case SPIRV::OpGroupNonUniformElect:
1597 case SPIRV::OpGroupNonUniformAll:
1598 case SPIRV::OpGroupNonUniformAny:
1599 case SPIRV::OpGroupNonUniformAllEqual:
1602 case SPIRV::OpGroupNonUniformBroadcast:
1603 case SPIRV::OpGroupNonUniformBroadcastFirst:
1604 case SPIRV::OpGroupNonUniformBallot:
1605 case SPIRV::OpGroupNonUniformInverseBallot:
1606 case SPIRV::OpGroupNonUniformBallotBitExtract:
1607 case SPIRV::OpGroupNonUniformBallotBitCount:
1608 case SPIRV::OpGroupNonUniformBallotFindLSB:
1609 case SPIRV::OpGroupNonUniformBallotFindMSB:
1610 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformBallot);
1612 case SPIRV::OpSubgroupShuffleINTEL:
1613 case SPIRV::OpSubgroupShuffleDownINTEL:
1614 case SPIRV::OpSubgroupShuffleUpINTEL:
1615 case SPIRV::OpSubgroupShuffleXorINTEL:
1616 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1617 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1618 Reqs.
addCapability(SPIRV::Capability::SubgroupShuffleINTEL);
1621 case SPIRV::OpSubgroupBlockReadINTEL:
1622 case SPIRV::OpSubgroupBlockWriteINTEL:
1623 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1624 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1625 Reqs.
addCapability(SPIRV::Capability::SubgroupBufferBlockIOINTEL);
1628 case SPIRV::OpSubgroupImageBlockReadINTEL:
1629 case SPIRV::OpSubgroupImageBlockWriteINTEL:
1630 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1631 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1632 Reqs.
addCapability(SPIRV::Capability::SubgroupImageBlockIOINTEL);
1635 case SPIRV::OpSubgroupImageMediaBlockReadINTEL:
1636 case SPIRV::OpSubgroupImageMediaBlockWriteINTEL:
1637 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
1638 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_media_block_io);
1639 Reqs.
addCapability(SPIRV::Capability::SubgroupImageMediaBlockIOINTEL);
1642 case SPIRV::OpAssumeTrueKHR:
1643 case SPIRV::OpExpectKHR:
1644 if (
ST.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume)) {
1645 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_expect_assume);
1649 case SPIRV::OpPtrCastToCrossWorkgroupINTEL:
1650 case SPIRV::OpCrossWorkgroupCastToPtrINTEL:
1651 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)) {
1652 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes);
1653 Reqs.
addCapability(SPIRV::Capability::USMStorageClassesINTEL);
1656 case SPIRV::OpConstantFunctionPointerINTEL:
1657 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
1658 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
1659 Reqs.
addCapability(SPIRV::Capability::FunctionPointersINTEL);
1662 case SPIRV::OpGroupNonUniformRotateKHR:
1663 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate))
1665 "following SPIR-V extension: SPV_KHR_subgroup_rotate",
1667 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate);
1668 Reqs.
addCapability(SPIRV::Capability::GroupNonUniformRotateKHR);
1671 case SPIRV::OpGroupIMulKHR:
1672 case SPIRV::OpGroupFMulKHR:
1673 case SPIRV::OpGroupBitwiseAndKHR:
1674 case SPIRV::OpGroupBitwiseOrKHR:
1675 case SPIRV::OpGroupBitwiseXorKHR:
1676 case SPIRV::OpGroupLogicalAndKHR:
1677 case SPIRV::OpGroupLogicalOrKHR:
1678 case SPIRV::OpGroupLogicalXorKHR:
1679 if (
ST.canUseExtension(
1680 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1681 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_uniform_group_instructions);
1682 Reqs.
addCapability(SPIRV::Capability::GroupUniformArithmeticKHR);
1685 case SPIRV::OpReadClockKHR:
1686 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock))
1688 "following SPIR-V extension: SPV_KHR_shader_clock",
1690 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_shader_clock);
1693 case SPIRV::OpFunctionPointerCallINTEL:
1694 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
1695 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
1696 Reqs.
addCapability(SPIRV::Capability::FunctionPointersINTEL);
1699 case SPIRV::OpAtomicFAddEXT:
1700 case SPIRV::OpAtomicFMinEXT:
1701 case SPIRV::OpAtomicFMaxEXT:
1702 AddAtomicFloatRequirements(
MI, Reqs, ST);
1704 case SPIRV::OpConvertBF16ToFINTEL:
1705 case SPIRV::OpConvertFToBF16INTEL:
1706 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bfloat16_conversion)) {
1707 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_bfloat16_conversion);
1708 Reqs.
addCapability(SPIRV::Capability::BFloat16ConversionINTEL);
1711 case SPIRV::OpRoundFToTF32INTEL:
1712 if (
ST.canUseExtension(
1713 SPIRV::Extension::SPV_INTEL_tensor_float32_conversion)) {
1714 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_tensor_float32_conversion);
1715 Reqs.
addCapability(SPIRV::Capability::TensorFloat32RoundingINTEL);
1718 case SPIRV::OpVariableLengthArrayINTEL:
1719 case SPIRV::OpSaveMemoryINTEL:
1720 case SPIRV::OpRestoreMemoryINTEL:
1721 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array)) {
1722 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_variable_length_array);
1723 Reqs.
addCapability(SPIRV::Capability::VariableLengthArrayINTEL);
1726 case SPIRV::OpAsmTargetINTEL:
1727 case SPIRV::OpAsmINTEL:
1728 case SPIRV::OpAsmCallINTEL:
1729 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_inline_assembly)) {
1730 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_inline_assembly);
1734 case SPIRV::OpTypeCooperativeMatrixKHR: {
1735 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
1737 "OpTypeCooperativeMatrixKHR type requires the "
1738 "following SPIR-V extension: SPV_KHR_cooperative_matrix",
1740 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1741 Reqs.
addCapability(SPIRV::Capability::CooperativeMatrixKHR);
1744 if (isBFloat16Type(TypeDef))
1745 Reqs.
addCapability(SPIRV::Capability::BFloat16CooperativeMatrixKHR);
1748 case SPIRV::OpArithmeticFenceEXT:
1749 if (!
ST.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
1751 "following SPIR-V extension: SPV_EXT_arithmetic_fence",
1753 Reqs.
addExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence);
1756 case SPIRV::OpControlBarrierArriveINTEL:
1757 case SPIRV::OpControlBarrierWaitINTEL:
1758 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
1759 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_split_barrier);
1763 case SPIRV::OpCooperativeMatrixMulAddKHR: {
1764 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
1766 "following SPIR-V extension: "
1767 "SPV_KHR_cooperative_matrix",
1769 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1770 Reqs.
addCapability(SPIRV::Capability::CooperativeMatrixKHR);
1771 constexpr unsigned MulAddMaxSize = 6;
1772 if (
MI.getNumOperands() != MulAddMaxSize)
1774 const int64_t CoopOperands =
MI.getOperand(MulAddMaxSize - 1).getImm();
1776 SPIRV::CooperativeMatrixOperands::MatrixAAndBTF32ComponentsINTEL) {
1777 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1779 "require the following SPIR-V extension: "
1780 "SPV_INTEL_joint_matrix",
1782 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1784 SPIRV::Capability::CooperativeMatrixTF32ComponentTypeINTEL);
1787 MatrixAAndBBFloat16ComponentsINTEL ||
1789 SPIRV::CooperativeMatrixOperands::MatrixCBFloat16ComponentsINTEL ||
1791 MatrixResultBFloat16ComponentsINTEL) {
1792 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1794 "require the following SPIR-V extension: "
1795 "SPV_INTEL_joint_matrix",
1797 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1799 SPIRV::Capability::CooperativeMatrixBFloat16ComponentTypeINTEL);
1803 case SPIRV::OpCooperativeMatrixLoadKHR:
1804 case SPIRV::OpCooperativeMatrixStoreKHR:
1805 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
1806 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
1807 case SPIRV::OpCooperativeMatrixPrefetchINTEL: {
1808 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
1810 "following SPIR-V extension: "
1811 "SPV_KHR_cooperative_matrix",
1813 Reqs.
addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1814 Reqs.
addCapability(SPIRV::Capability::CooperativeMatrixKHR);
1818 std::unordered_map<unsigned, unsigned> LayoutToInstMap = {
1819 {SPIRV::OpCooperativeMatrixLoadKHR, 3},
1820 {SPIRV::OpCooperativeMatrixStoreKHR, 2},
1821 {SPIRV::OpCooperativeMatrixLoadCheckedINTEL, 5},
1822 {SPIRV::OpCooperativeMatrixStoreCheckedINTEL, 4},
1823 {SPIRV::OpCooperativeMatrixPrefetchINTEL, 4}};
1825 const auto OpCode =
MI.getOpcode();
1826 const unsigned LayoutNum = LayoutToInstMap[OpCode];
1827 Register RegLayout =
MI.getOperand(LayoutNum).getReg();
1830 if (MILayout->
getOpcode() == SPIRV::OpConstantI) {
1833 static_cast<unsigned>(SPIRV::CooperativeMatrixLayout::PackedINTEL)) {
1834 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1836 "extension: SPV_INTEL_joint_matrix",
1838 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1839 Reqs.
addCapability(SPIRV::Capability::PackedCooperativeMatrixINTEL);
1844 if (OpCode == SPIRV::OpCooperativeMatrixLoadKHR ||
1845 OpCode == SPIRV::OpCooperativeMatrixStoreKHR)
1848 std::string InstName;
1850 case SPIRV::OpCooperativeMatrixPrefetchINTEL:
1851 InstName =
"OpCooperativeMatrixPrefetchINTEL";
1853 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
1854 InstName =
"OpCooperativeMatrixLoadCheckedINTEL";
1856 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
1857 InstName =
"OpCooperativeMatrixStoreCheckedINTEL";
1861 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix)) {
1862 const std::string ErrorMsg =
1863 InstName +
" instruction requires the "
1864 "following SPIR-V extension: SPV_INTEL_joint_matrix";
1867 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1868 if (OpCode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
1869 Reqs.
addCapability(SPIRV::Capability::CooperativeMatrixPrefetchINTEL);
1873 SPIRV::Capability::CooperativeMatrixCheckedInstructionsINTEL);
1876 case SPIRV::OpCooperativeMatrixConstructCheckedINTEL:
1877 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1879 "instructions require the following SPIR-V extension: "
1880 "SPV_INTEL_joint_matrix",
1882 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1884 SPIRV::Capability::CooperativeMatrixCheckedInstructionsINTEL);
1886 case SPIRV::OpCooperativeMatrixGetElementCoordINTEL:
1887 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1889 "following SPIR-V extension: SPV_INTEL_joint_matrix",
1891 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1893 SPIRV::Capability::CooperativeMatrixInvocationInstructionsINTEL);
1895 case SPIRV::OpConvertHandleToImageINTEL:
1896 case SPIRV::OpConvertHandleToSamplerINTEL:
1897 case SPIRV::OpConvertHandleToSampledImageINTEL: {
1898 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bindless_images))
1900 "instructions require the following SPIR-V extension: "
1901 "SPV_INTEL_bindless_images",
1904 SPIRV::AddressingModel::AddressingModel AddrModel = MAI.
Addr;
1906 if (
MI.getOpcode() == SPIRV::OpConvertHandleToImageINTEL &&
1907 TyDef->
getOpcode() != SPIRV::OpTypeImage) {
1909 "OpConvertHandleToImageINTEL",
1911 }
else if (
MI.getOpcode() == SPIRV::OpConvertHandleToSamplerINTEL &&
1912 TyDef->
getOpcode() != SPIRV::OpTypeSampler) {
1914 "OpConvertHandleToSamplerINTEL",
1916 }
else if (
MI.getOpcode() == SPIRV::OpConvertHandleToSampledImageINTEL &&
1917 TyDef->
getOpcode() != SPIRV::OpTypeSampledImage) {
1919 "OpConvertHandleToSampledImageINTEL",
1924 if (!(Bitwidth == 32 && AddrModel == SPIRV::AddressingModel::Physical32) &&
1925 !(Bitwidth == 64 && AddrModel == SPIRV::AddressingModel::Physical64)) {
1927 "Parameter value must be a 32-bit scalar in case of "
1928 "Physical32 addressing model or a 64-bit scalar in case of "
1929 "Physical64 addressing model",
1932 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_bindless_images);
1936 case SPIRV::OpSubgroup2DBlockLoadINTEL:
1937 case SPIRV::OpSubgroup2DBlockLoadTransposeINTEL:
1938 case SPIRV::OpSubgroup2DBlockLoadTransformINTEL:
1939 case SPIRV::OpSubgroup2DBlockPrefetchINTEL:
1940 case SPIRV::OpSubgroup2DBlockStoreINTEL: {
1941 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_2d_block_io))
1943 "Prefetch/Store]INTEL instructions require the "
1944 "following SPIR-V extension: SPV_INTEL_2d_block_io",
1946 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_2d_block_io);
1947 Reqs.
addCapability(SPIRV::Capability::Subgroup2DBlockIOINTEL);
1949 const auto OpCode =
MI.getOpcode();
1950 if (OpCode == SPIRV::OpSubgroup2DBlockLoadTransposeINTEL) {
1951 Reqs.
addCapability(SPIRV::Capability::Subgroup2DBlockTransposeINTEL);
1954 if (OpCode == SPIRV::OpSubgroup2DBlockLoadTransformINTEL) {
1955 Reqs.
addCapability(SPIRV::Capability::Subgroup2DBlockTransformINTEL);
1960 case SPIRV::OpKill: {
1963 case SPIRV::OpDemoteToHelperInvocation:
1964 Reqs.
addCapability(SPIRV::Capability::DemoteToHelperInvocation);
1966 if (
ST.canUseExtension(
1967 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation)) {
1970 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation);
1975 case SPIRV::OpSUDot:
1976 case SPIRV::OpSDotAccSat:
1977 case SPIRV::OpUDotAccSat:
1978 case SPIRV::OpSUDotAccSat:
1979 AddDotProductRequirements(
MI, Reqs, ST);
1981 case SPIRV::OpImageRead: {
1982 Register ImageReg =
MI.getOperand(2).getReg();
1983 SPIRVType *TypeDef =
ST.getSPIRVGlobalRegistry()->getResultType(
1991 if (isImageTypeWithUnknownFormat(TypeDef) &&
ST.isShader())
1992 Reqs.
addCapability(SPIRV::Capability::StorageImageReadWithoutFormat);
1995 case SPIRV::OpImageWrite: {
1996 Register ImageReg =
MI.getOperand(0).getReg();
1997 SPIRVType *TypeDef =
ST.getSPIRVGlobalRegistry()->getResultType(
2005 if (isImageTypeWithUnknownFormat(TypeDef) &&
ST.isShader())
2006 Reqs.
addCapability(SPIRV::Capability::StorageImageWriteWithoutFormat);
2009 case SPIRV::OpTypeStructContinuedINTEL:
2010 case SPIRV::OpConstantCompositeContinuedINTEL:
2011 case SPIRV::OpSpecConstantCompositeContinuedINTEL:
2012 case SPIRV::OpCompositeConstructContinuedINTEL: {
2013 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_long_composites))
2015 "Continued instructions require the "
2016 "following SPIR-V extension: SPV_INTEL_long_composites",
2018 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_long_composites);
2022 case SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL: {
2023 if (!
ST.canUseExtension(
2024 SPIRV::Extension::SPV_INTEL_subgroup_matrix_multiply_accumulate))
2026 "OpSubgroupMatrixMultiplyAccumulateINTEL instruction requires the "
2028 "extension: SPV_INTEL_subgroup_matrix_multiply_accumulate",
2031 SPIRV::Extension::SPV_INTEL_subgroup_matrix_multiply_accumulate);
2033 SPIRV::Capability::SubgroupMatrixMultiplyAccumulateINTEL);
2036 case SPIRV::OpBitwiseFunctionINTEL: {
2037 if (!
ST.canUseExtension(
2038 SPIRV::Extension::SPV_INTEL_ternary_bitwise_function))
2040 "OpBitwiseFunctionINTEL instruction requires the following SPIR-V "
2041 "extension: SPV_INTEL_ternary_bitwise_function",
2043 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_ternary_bitwise_function);
2044 Reqs.
addCapability(SPIRV::Capability::TernaryBitwiseFunctionINTEL);
2047 case SPIRV::OpCopyMemorySized: {
2052 case SPIRV::OpPredicatedLoadINTEL:
2053 case SPIRV::OpPredicatedStoreINTEL: {
2054 if (!
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_predicated_io))
2056 "OpPredicated[Load/Store]INTEL instructions require "
2057 "the following SPIR-V extension: SPV_INTEL_predicated_io",
2059 Reqs.
addExtension(SPIRV::Extension::SPV_INTEL_predicated_io);
2072 SPIRV::Capability::Shader);
2078 for (
auto F = M.begin(),
E = M.end();
F !=
E; ++
F) {
2084 addInstrRequirements(
MI, MAI, ST);
2087 auto Node = M.getNamedMetadata(
"spirv.ExecutionMode");
2089 bool RequireFloatControls =
false, RequireIntelFloatControls2 =
false,
2090 RequireKHRFloatControls2 =
false,
2092 bool HasIntelFloatControls2 =
2093 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_float_controls2);
2094 bool HasKHRFloatControls2 =
2095 ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2);
2096 for (
unsigned i = 0; i <
Node->getNumOperands(); i++) {
2102 auto EM =
Const->getZExtValue();
2106 case SPIRV::ExecutionMode::DenormPreserve:
2107 case SPIRV::ExecutionMode::DenormFlushToZero:
2108 case SPIRV::ExecutionMode::RoundingModeRTE:
2109 case SPIRV::ExecutionMode::RoundingModeRTZ:
2110 RequireFloatControls = VerLower14;
2112 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
2114 case SPIRV::ExecutionMode::RoundingModeRTPINTEL:
2115 case SPIRV::ExecutionMode::RoundingModeRTNINTEL:
2116 case SPIRV::ExecutionMode::FloatingPointModeALTINTEL:
2117 case SPIRV::ExecutionMode::FloatingPointModeIEEEINTEL:
2118 if (HasIntelFloatControls2) {
2119 RequireIntelFloatControls2 =
true;
2121 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
2124 case SPIRV::ExecutionMode::FPFastMathDefault: {
2125 if (HasKHRFloatControls2) {
2126 RequireKHRFloatControls2 =
true;
2128 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
2132 case SPIRV::ExecutionMode::ContractionOff:
2133 case SPIRV::ExecutionMode::SignedZeroInfNanPreserve:
2134 if (HasKHRFloatControls2) {
2135 RequireKHRFloatControls2 =
true;
2137 SPIRV::OperandCategory::ExecutionModeOperand,
2138 SPIRV::ExecutionMode::FPFastMathDefault, ST);
2141 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
2146 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
2151 if (RequireFloatControls &&
2152 ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls))
2154 if (RequireIntelFloatControls2)
2156 if (RequireKHRFloatControls2)
2159 for (
auto FI = M.begin(),
E = M.end(); FI !=
E; ++FI) {
2161 if (
F.isDeclaration())
2163 if (
F.getMetadata(
"reqd_work_group_size"))
2165 SPIRV::OperandCategory::ExecutionModeOperand,
2166 SPIRV::ExecutionMode::LocalSize, ST);
2167 if (
F.getFnAttribute(
"hlsl.numthreads").isValid()) {
2169 SPIRV::OperandCategory::ExecutionModeOperand,
2170 SPIRV::ExecutionMode::LocalSize, ST);
2172 if (
F.getFnAttribute(
"enable-maximal-reconvergence").getValueAsBool()) {
2175 if (
F.getMetadata(
"work_group_size_hint"))
2177 SPIRV::OperandCategory::ExecutionModeOperand,
2178 SPIRV::ExecutionMode::LocalSizeHint, ST);
2179 if (
F.getMetadata(
"intel_reqd_sub_group_size"))
2181 SPIRV::OperandCategory::ExecutionModeOperand,
2182 SPIRV::ExecutionMode::SubgroupSize, ST);
2183 if (
F.getMetadata(
"vec_type_hint"))
2185 SPIRV::OperandCategory::ExecutionModeOperand,
2186 SPIRV::ExecutionMode::VecTypeHint, ST);
2188 if (
F.hasOptNone()) {
2189 if (
ST.canUseExtension(SPIRV::Extension::SPV_INTEL_optnone)) {
2192 }
else if (
ST.canUseExtension(SPIRV::Extension::SPV_EXT_optnone)) {
2202 unsigned Flags = SPIRV::FPFastMathMode::None;
2203 bool CanUseKHRFloatControls2 =
2204 ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2);
2206 Flags |= SPIRV::FPFastMathMode::NotNaN;
2208 Flags |= SPIRV::FPFastMathMode::NotInf;
2210 Flags |= SPIRV::FPFastMathMode::NSZ;
2212 Flags |= SPIRV::FPFastMathMode::AllowRecip;
2214 Flags |= SPIRV::FPFastMathMode::AllowContract;
2216 if (CanUseKHRFloatControls2)
2224 Flags |= SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
2225 SPIRV::FPFastMathMode::NSZ | SPIRV::FPFastMathMode::AllowRecip |
2226 SPIRV::FPFastMathMode::AllowTransform |
2227 SPIRV::FPFastMathMode::AllowReassoc |
2228 SPIRV::FPFastMathMode::AllowContract;
2230 Flags |= SPIRV::FPFastMathMode::Fast;
2233 if (CanUseKHRFloatControls2) {
2235 assert(!(Flags & SPIRV::FPFastMathMode::Fast) &&
2236 "SPIRV::FPFastMathMode::Fast is deprecated and should not be used "
2241 assert((!(Flags & SPIRV::FPFastMathMode::AllowTransform) ||
2242 ((Flags & SPIRV::FPFastMathMode::AllowReassoc &&
2243 Flags & SPIRV::FPFastMathMode::AllowContract))) &&
2244 "SPIRV::FPFastMathMode::AllowTransform requires AllowReassoc and "
2245 "AllowContract flags to be enabled as well.");
2256 return ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2);
2259static void handleMIFlagDecoration(
2264 getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
2265 SPIRV::Decoration::NoSignedWrap, ST, Reqs)
2268 SPIRV::Decoration::NoSignedWrap, {});
2271 getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
2272 SPIRV::Decoration::NoUnsignedWrap, ST,
2276 SPIRV::Decoration::NoUnsignedWrap, {});
2278 if (!
TII.canUseFastMathFlags(
2279 I,
ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)))
2282 unsigned FMFlags = getFastMathFlags(
I, ST);
2283 if (FMFlags == SPIRV::FPFastMathMode::None) {
2286 if (FPFastMathDefaultInfoVec.
empty())
2302 assert(
I.getNumOperands() >= 3 &&
"Expected at least 3 operands");
2303 Register ResReg =
I.getOpcode() == SPIRV::OpExtInst
2304 ?
I.getOperand(1).getReg()
2305 :
I.getOperand(2).getReg();
2313 if (Ty == Elem.Ty) {
2314 FMFlags = Elem.FastMathFlags;
2315 Emit = Elem.ContractionOff || Elem.SignedZeroInfNanPreserve ||
2316 Elem.FPFastMathDefault;
2321 if (FMFlags == SPIRV::FPFastMathMode::None && !Emit)
2324 if (isFastMathModeAvailable(ST)) {
2325 Register DstReg =
I.getOperand(0).getReg();
2336 for (
auto F = M.begin(),
E = M.end();
F !=
E; ++
F) {
2341 for (
auto &
MBB : *MF)
2342 for (
auto &
MI :
MBB)
2343 handleMIFlagDecoration(
MI, ST,
TII, MAI.
Reqs, GR,
2351 for (
auto F = M.begin(),
E = M.end();
F !=
E; ++
F) {
2356 for (
auto &
MBB : *MF) {
2357 if (!
MBB.hasName() ||
MBB.empty())
2361 MRI.setRegClass(
Reg, &SPIRV::IDRegClass);
2372 for (
auto F = M.begin(),
E = M.end();
F !=
E; ++
F) {
2376 for (
auto &
MBB : *MF) {
2378 MI.setDesc(
TII.get(SPIRV::OpPhi));
2381 MI.insert(
MI.operands_begin() + 1,
2382 {MachineOperand::CreateReg(ResTypeReg, false)});
2401 SPIRV::FPFastMathMode::None);
2403 SPIRV::FPFastMathMode::None);
2405 SPIRV::FPFastMathMode::None);
2412 size_t BitWidth = Ty->getScalarSizeInBits();
2416 assert(Index >= 0 && Index < 3 &&
2417 "Expected FPFastMathDefaultInfo for half, float, or double");
2418 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2419 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2420 return FPFastMathDefaultInfoVec[Index];
2423static void collectFPFastMathDefaults(
const Module &M,
2426 if (!
ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
2435 auto Node = M.getNamedMetadata(
"spirv.ExecutionMode");
2439 for (
unsigned i = 0; i <
Node->getNumOperands(); i++) {
2448 if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
2450 "Expected 4 operands for FPFastMathDefault");
2461 Info.FastMathFlags = Flags;
2462 Info.FPFastMathDefault =
true;
2463 }
else if (EM == SPIRV::ExecutionMode::ContractionOff) {
2465 "Expected no operands for ContractionOff");
2472 Info.ContractionOff =
true;
2474 }
else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
2476 "Expected 1 operand for SignedZeroInfNanPreserve");
2477 unsigned TargetWidth =
2486 assert(Index >= 0 && Index < 3 &&
2487 "Expected FPFastMathDefaultInfo for half, float, or double");
2488 assert(FPFastMathDefaultInfoVec.
size() == 3 &&
2489 "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
2490 FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve =
true;