  const auto &Op = MdNode->getOperand(OpIndex);
getSymbolicOperandRequirements(SPIRV::OperandCategory::OperandCategory Category,
  AvoidCaps.S.insert(SPIRV::Capability::Shader);
  AvoidCaps.S.insert(SPIRV::Capability::Kernel);
  bool MinVerOK = SPIRVVersion.empty() || SPIRVVersion >= ReqMinVer;
      ReqMaxVer.empty() || SPIRVVersion.empty() || SPIRVVersion <= ReqMaxVer;
  if (ReqCaps.empty()) {
    if (ReqExts.empty()) {
      if (MinVerOK && MaxVerOK)
        return {true, {}, {}, ReqMinVer, ReqMaxVer};
  } else if (MinVerOK && MaxVerOK) {
    if (ReqCaps.size() == 1) {
      auto Cap = ReqCaps[0];
            SPIRV::OperandCategory::CapabilityOperand, Cap));
        return {true, {Cap}, std::move(ReqExts), ReqMinVer, ReqMaxVer};
      for (auto Cap : ReqCaps)
      for (size_t i = 0, Sz = UseCaps.size(); i < Sz; ++i) {
        auto Cap = UseCaps[i];
        if (i == Sz - 1 || !AvoidCaps.S.contains(Cap)) {
              SPIRV::OperandCategory::CapabilityOperand, Cap));
          return {true, {Cap}, std::move(ReqExts), ReqMinVer, ReqMaxVer};
  if (llvm::all_of(ReqExts, [&ST](const SPIRV::Extension::Extension &Ext) {
        return ST.canUseExtension(Ext);
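// setBaseInfo: reset the per-module tables in MAI, pick the addressing and
// memory model, record the source language/version from module metadata
// ("spirv.MemoryModel", "opencl.ocl.version", "opencl.used.extensions"),
// and register the requirements those choices imply.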
void SPIRVModuleAnalysis::setBaseInfo(const Module &M) {
  MAI.RegisterAliasTable.clear();
  MAI.InstrsToDelete.clear();
  MAI.GlobalVarList.clear();
  MAI.ExtInstSetMap.clear();
  MAI.Reqs.initAvailableCapabilities(*ST);
  if (auto MemModel = M.getNamedMetadata("spirv.MemoryModel")) {
    auto MemMD = MemModel->getOperand(0);
    MAI.Addr = static_cast<SPIRV::AddressingModel::AddressingModel>(
        getMetadataUInt(MemMD, 0));
        static_cast<SPIRV::MemoryModel::MemoryModel>(getMetadataUInt(MemMD, 1));
    MAI.Mem = ST->isShader() ? SPIRV::MemoryModel::GLSL450
                             : SPIRV::MemoryModel::OpenCL;
    if (MAI.Mem == SPIRV::MemoryModel::OpenCL) {
      unsigned PtrSize = ST->getPointerSize();
      MAI.Addr = PtrSize == 32   ? SPIRV::AddressingModel::Physical32
                 : PtrSize == 64 ? SPIRV::AddressingModel::Physical64
                                 : SPIRV::AddressingModel::Logical;
      MAI.Addr = SPIRV::AddressingModel::Logical;
  if (auto VerNode = M.getNamedMetadata("opencl.ocl.version")) {
    MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_C;
    assert(VerNode->getNumOperands() > 0 && "Invalid SPIR");
    auto VersionMD = VerNode->getOperand(0);
    unsigned MajorNum = getMetadataUInt(VersionMD, 0, 2);
    unsigned MinorNum = getMetadataUInt(VersionMD, 1);
    unsigned RevNum = getMetadataUInt(VersionMD, 2);
        (std::max(1U, MajorNum) * 100 + MinorNum) * 1000 + RevNum;
    if (!ST->isShader()) {
      MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_CPP;
      MAI.SrcLangVersion = 100000;
      MAI.SrcLang = SPIRV::SourceLanguage::Unknown;
      MAI.SrcLangVersion = 0;
  if (auto ExtNode = M.getNamedMetadata("opencl.used.extensions")) {
    for (unsigned I = 0, E = ExtNode->getNumOperands(); I != E; ++I) {
      MDNode *MD = ExtNode->getOperand(I);
  MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand,
  MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::SourceLanguageOperand,
  MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::AddressingModelOperand,
  if (!ST->isShader()) {
    MAI.ExtInstSetMap[static_cast<unsigned>(
        SPIRV::InstructionSet::OpenCL_std)] = MAI.getNextIDRegister();
    if (UseMI.getOpcode() != SPIRV::OpDecorate &&
        UseMI.getOpcode() != SPIRV::OpMemberDecorate)
    for (unsigned I = 0; I < UseMI.getNumOperands(); ++I) {
  for (unsigned i = 0; i < MI.getNumOperands(); ++i) {
    unsigned Opcode = MI.getOpcode();
    if ((Opcode == SPIRV::OpDecorate) && i >= 2) {
      unsigned DecorationID = MI.getOperand(1).getImm();
      if (DecorationID != SPIRV::Decoration::FuncParamAttr &&
          DecorationID != SPIRV::Decoration::UserSemantic &&
          DecorationID != SPIRV::Decoration::CacheControlLoadINTEL &&
          DecorationID != SPIRV::Decoration::CacheControlStoreINTEL)
    if (!UseDefReg && MO.isDef()) {
        dbgs() << "Unexpectedly, no global id found for the operand ";
        dbgs() << "\nInstruction: ";
    appendDecorationsForReg(MI.getMF()->getRegInfo(), DefReg, Signature);
  unsigned Opcode = MI.getOpcode();
  case SPIRV::OpTypeForwardPointer:
  case SPIRV::OpVariable:
    return static_cast<SPIRV::StorageClass::StorageClass>(
               MI.getOperand(2).getImm()) != SPIRV::StorageClass::Function;
  case SPIRV::OpFunction:
  case SPIRV::OpFunctionParameter:
    if (GR->hasConstFunPtr() && Opcode == SPIRV::OpUndef) {
      for (MachineInstr &UseMI : MRI.use_instructions(DefReg)) {
        if (UseMI.getOpcode() != SPIRV::OpConstantFunctionPointerINTEL)
      MAI.setSkipEmission(&MI);
  return TII->isTypeDeclInstr(MI) || TII->isConstantInstr(MI) ||
         TII->isInlineAsmDefInstr(MI);
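// visitFunPtrUse: resolve a use of a function pointer constant to the global
// register of the corresponding OpFunction definition, visiting that
// definition first if it has not been assigned a global id yet.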
void SPIRVModuleAnalysis::visitFunPtrUse(
    std::map<const Value *, unsigned> &GlobalToGReg, const MachineFunction *MF,
  const MachineOperand *OpFunDef =
      GR->getFunctionDefinitionByUse(&MI.getOperand(2));
  const MachineInstr *OpDefMI = OpFunDef->getParent();
  const MachineRegisterInfo &FunDefMRI = FunDefMF->getRegInfo();
    visitDecl(FunDefMRI, SignatureToGReg, GlobalToGReg, FunDefMF, *OpDefMI);
  } while (OpDefMI && (OpDefMI->getOpcode() == SPIRV::OpFunction ||
                       OpDefMI->getOpcode() == SPIRV::OpFunctionParameter));
  MCRegister GlobalFunDefReg =
      MAI.getRegisterAlias(FunDefMF, OpFunDef->getReg());
         "Function definition must refer to a global register");
  MAI.setRegisterAlias(MF, OpReg, GlobalFunDefReg);
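// visitDecl: depth-first walk over the operands of a declaration instruction
// (types, constants, global variables, functions); visit the defining
// instructions first, then assign a global register to this result.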
void SPIRVModuleAnalysis::visitDecl(
    std::map<const Value *, unsigned> &GlobalToGReg, const MachineFunction *MF,
  unsigned Opcode = MI.getOpcode();
  for (const MachineOperand &MO : MI.operands()) {
    if (Opcode == SPIRV::OpConstantFunctionPointerINTEL &&
        MRI.getRegClass(OpReg) == &SPIRV::pIDRegClass) {
      visitFunPtrUse(OpReg, SignatureToGReg, GlobalToGReg, MF, MI);
    if (MAI.hasRegisterAlias(MF, MO.getReg()))
    if (const MachineInstr *OpDefMI = MRI.getUniqueVRegDef(OpReg)) {
      if (isDeclSection(MRI, *OpDefMI))
        visitDecl(MRI, SignatureToGReg, GlobalToGReg, MF, *OpDefMI);
      dbgs() << "Unexpectedly, no unique definition for the operand ";
      dbgs() << "\nInstruction: ";
           "No unique definition is found for the virtual register");
  bool IsFunDef = false;
  if (TII->isSpecConstantInstr(MI)) {
    GReg = MAI.getNextIDRegister();
  } else if (Opcode == SPIRV::OpFunction ||
             Opcode == SPIRV::OpFunctionParameter) {
    GReg = handleFunctionOrParameter(MF, MI, GlobalToGReg, IsFunDef);
  } else if (Opcode == SPIRV::OpTypeStruct ||
             Opcode == SPIRV::OpConstantComposite) {
    GReg = handleTypeDeclOrConstant(MI, SignatureToGReg);
    const MachineInstr *NextInstr = MI.getNextNode();
        ((Opcode == SPIRV::OpTypeStruct &&
          NextInstr->getOpcode() == SPIRV::OpTypeStructContinuedINTEL) ||
         (Opcode == SPIRV::OpConstantComposite &&
              SPIRV::OpConstantCompositeContinuedINTEL))) {
      MCRegister Tmp = handleTypeDeclOrConstant(*NextInstr, SignatureToGReg);
      MAI.setSkipEmission(NextInstr);
  } else if (TII->isTypeDeclInstr(MI) || TII->isConstantInstr(MI) ||
             TII->isInlineAsmDefInstr(MI)) {
    GReg = handleTypeDeclOrConstant(MI, SignatureToGReg);
  } else if (Opcode == SPIRV::OpVariable) {
    GReg = handleVariable(MF, MI, GlobalToGReg);
    dbgs() << "\nInstruction: ";
  MAI.setRegisterAlias(MF, MI.getOperand(0).getReg(), GReg);
    MAI.setSkipEmission(&MI);
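// The handle* helpers below map a function (or parameter), a type/constant
// signature, or a global variable to a single module-wide register id,
// reusing an existing id when the same entity has been seen before.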
MCRegister SPIRVModuleAnalysis::handleFunctionOrParameter(
    std::map<const Value *, unsigned> &GlobalToGReg, bool &IsFunDef) {
  const Value *GObj = GR->getGlobalObject(MF, MI.getOperand(0).getReg());
  assert(GObj && "Unregistered global definition");
  assert(F && "Expected a reference to a function or an argument");
  IsFunDef = !F->isDeclaration();
  auto [It, Inserted] = GlobalToGReg.try_emplace(GObj);
  MCRegister GReg = MAI.getNextIDRegister();
SPIRVModuleAnalysis::handleTypeDeclOrConstant(const MachineInstr &MI,
  auto [It, Inserted] = SignatureToGReg.try_emplace(MISign);
  MCRegister GReg = MAI.getNextIDRegister();
MCRegister SPIRVModuleAnalysis::handleVariable(
    std::map<const Value *, unsigned> &GlobalToGReg) {
  MAI.GlobalVarList.push_back(&MI);
  const Value *GObj = GR->getGlobalObject(MF, MI.getOperand(0).getReg());
  assert(GObj && "Unregistered global definition");
  auto [It, Inserted] = GlobalToGReg.try_emplace(GObj);
  MCRegister GReg = MAI.getNextIDRegister();
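// collectDeclarations: scan every machine function and route all instructions
// that belong to the module-level declaration section (capabilities,
// extensions, types, constants, global variables) through visitDecl.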
void SPIRVModuleAnalysis::collectDeclarations(const Module &M) {
  std::map<const Value *, unsigned> GlobalToGReg;
  for (const Function &F : M) {
    MachineFunction *MF = MMI->getMachineFunction(F);
    unsigned PastHeader = 0;
    for (MachineBasicBlock &MBB : *MF) {
      for (MachineInstr &MI : MBB) {
        if (MI.getNumOperands() == 0)
        unsigned Opcode = MI.getOpcode();
        if (Opcode == SPIRV::OpFunction) {
          if (PastHeader == 0) {
        } else if (Opcode == SPIRV::OpFunctionParameter) {
        } else if (PastHeader > 0) {
        const MachineOperand &DefMO = MI.getOperand(0);
        case SPIRV::OpExtension:
          MAI.Reqs.addExtension(SPIRV::Extension::Extension(DefMO.getImm()));
          MAI.setSkipEmission(&MI);
        case SPIRV::OpCapability:
          MAI.Reqs.addCapability(SPIRV::Capability::Capability(DefMO.getImm()));
          MAI.setSkipEmission(&MI);
        if (DefMO.isReg() && isDeclSection(MRI, MI) &&
            !MAI.hasRegisterAlias(MF, DefMO.getReg()))
          visitDecl(MRI, SignatureToGReg, GlobalToGReg, MF, MI);
  if (MI.getOpcode() == SPIRV::OpDecorate) {
    auto Dec = MI.getOperand(1).getImm();
    if (Dec == SPIRV::Decoration::LinkageAttributes) {
      auto Lnk = MI.getOperand(MI.getNumOperands() - 1).getImm();
      if (Lnk == SPIRV::LinkageType::Import) {
        MAI.FuncMap[ImportedFunc] = MAI.getRegisterAlias(MI.getMF(), Target);
  } else if (MI.getOpcode() == SPIRV::OpFunction) {
    MCRegister GlobalReg = MAI.getRegisterAlias(MI.getMF(), Reg);
    MAI.FuncMap[F] = GlobalReg;
                              bool Append = true) {
  auto FoundMI = IS.insert(std::move(MISign));
  if (!FoundMI.second) {
    if (MI.getOpcode() == SPIRV::OpDecorate) {
             "Decoration instructions must have at least 2 operands");
             "Only OpDecorate instructions can be duplicates");
      if (MI.getOperand(1).getImm() != SPIRV::Decoration::FPFastMathMode)
        if (instrToSignature(*OrigMI, MAI, true) == MISign) {
          assert(OrigMI->getNumOperands() == MI.getNumOperands() &&
                 "Original instruction must have the same number of operands");
                     OrigMI->getNumOperands() == 3 &&
                 "FPFastMathMode decoration must have 3 operands for OpDecorate");
          unsigned OrigFlags = OrigMI->getOperand(2).getImm();
          unsigned NewFlags = MI.getOperand(2).getImm();
          if (OrigFlags == NewFlags)
          unsigned FinalFlags = OrigFlags | NewFlags;
              << "Warning: Conflicting FPFastMathMode decoration flags "
              << *OrigMI << "Original flags: " << OrigFlags
              << ", new flags: " << NewFlags
              << ". They will be merged on a best effort basis, but not "
                 "validated. Final flags: "
              << FinalFlags << "\n";
      assert(false && "No original instruction found for the duplicate "
                      "OpDecorate, but we found one in IS.");
void SPIRVModuleAnalysis::processOtherInstrs(const Module &M) {
  for (const Function &F : M) {
    if (F.isDeclaration())
    MachineFunction *MF = MMI->getMachineFunction(F);
    for (MachineBasicBlock &MBB : *MF)
      for (MachineInstr &MI : MBB) {
        if (MAI.getSkipEmission(&MI))
        const unsigned OpCode = MI.getOpcode();
        if (OpCode == SPIRV::OpString) {
        } else if (OpCode == SPIRV::OpExtInst && MI.getOperand(2).isImm() &&
                   MI.getOperand(2).getImm() ==
                       SPIRV::InstructionSet::
                           NonSemantic_Shader_DebugInfo_100) {
          MachineOperand Ins = MI.getOperand(3);
          namespace NS = SPIRV::NonSemanticExtInst;
          static constexpr int64_t GlobalNonSemanticDITy[] = {
              NS::DebugSource, NS::DebugCompilationUnit, NS::DebugInfoNone,
              NS::DebugTypeBasic, NS::DebugTypePointer};
          bool IsGlobalDI = false;
          for (unsigned Idx = 0; Idx < std::size(GlobalNonSemanticDITy); ++Idx)
            IsGlobalDI |= Ins.getImm() == GlobalNonSemanticDITy[Idx];
        } else if (OpCode == SPIRV::OpName || OpCode == SPIRV::OpMemberName) {
        } else if (OpCode == SPIRV::OpEntryPoint) {
        } else if (TII->isAliasingInstr(MI)) {
        } else if (TII->isDecorationInstr(MI)) {
          collectFuncNames(MI, &F);
        } else if (TII->isConstantInstr(MI)) {
        } else if (OpCode == SPIRV::OpFunction) {
          collectFuncNames(MI, &F);
        } else if (OpCode == SPIRV::OpTypeForwardPointer) {
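// numberRegistersGlobally: assign a module-wide id to every virtual register
// that does not yet have a global alias, and register OpExtInst set operands
// in ExtInstSetMap.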
void SPIRVModuleAnalysis::numberRegistersGlobally(const Module &M) {
  for (const Function &F : M) {
    if (F.isDeclaration())
    MachineFunction *MF = MMI->getMachineFunction(F);
    for (MachineBasicBlock &MBB : *MF) {
      for (MachineInstr &MI : MBB) {
        for (MachineOperand &Op : MI.operands()) {
          if (MAI.hasRegisterAlias(MF, Reg))
          MCRegister NewReg = MAI.getNextIDRegister();
          MAI.setRegisterAlias(MF, Reg, NewReg);
        if (MI.getOpcode() != SPIRV::OpExtInst)
        auto Set = MI.getOperand(2).getImm();
        auto [It, Inserted] = MAI.ExtInstSetMap.try_emplace(Set);
          It->second = MAI.getNextIDRegister();
    SPIRV::OperandCategory::OperandCategory Category, uint32_t i,
  addRequirements(getSymbolicOperandRequirements(Category, i, ST, *this));
void SPIRV::RequirementHandler::recursiveAddCapabilities(
  for (const auto &Cap : ToPrune) {
      recursiveAddCapabilities(ImplicitDecls);
  for (const auto &Cap : ToAdd) {
    bool IsNewlyInserted = AllCaps.insert(Cap).second;
    if (!IsNewlyInserted)
    recursiveAddCapabilities(ImplicitDecls);
    MinimalCaps.push_back(Cap);
    const SPIRV::Requirements &Req) {
  if (Req.Cap.has_value())
    addCapabilities({Req.Cap.value()});
  addExtensions(Req.Exts);
  if (!MaxVersion.empty() && Req.MinVer > MaxVersion) {
               << " and <= " << MaxVersion << "\n");
  if (MinVersion.empty() || Req.MinVer > MinVersion)
  if (!MinVersion.empty() && Req.MaxVer < MinVersion) {
               << " and >= " << MinVersion << "\n");
  if (MaxVersion.empty() || Req.MaxVer < MaxVersion)
    const SPIRVSubtarget &ST) const {
  bool IsSatisfiable = true;
  auto TargetVer = ST.getSPIRVVersion();
  if (!MaxVersion.empty() && !TargetVer.empty() && MaxVersion < TargetVer) {
        dbgs() << "Target SPIR-V version too high for required features\n"
               << "Required max version: " << MaxVersion << " target version "
               << TargetVer << "\n");
    IsSatisfiable = false;
  if (!MinVersion.empty() && !TargetVer.empty() && MinVersion > TargetVer) {
    LLVM_DEBUG(dbgs() << "Target SPIR-V version too low for required features\n"
                      << "Required min version: " << MinVersion
                      << " target version " << TargetVer << "\n");
    IsSatisfiable = false;
  if (!MinVersion.empty() && !MaxVersion.empty() && MinVersion > MaxVersion) {
        << "Version is too low for some features and too high for others.\n"
        << "Required SPIR-V min version: " << MinVersion
        << " required SPIR-V max version " << MaxVersion << "\n");
    IsSatisfiable = false;
  AvoidCapabilitiesSet AvoidCaps;
    AvoidCaps.S.insert(SPIRV::Capability::Shader);
    AvoidCaps.S.insert(SPIRV::Capability::Kernel);
  for (auto Cap : MinimalCaps) {
    if (AvailableCaps.contains(Cap) && !AvoidCaps.S.contains(Cap))
                          OperandCategory::CapabilityOperand, Cap)
    IsSatisfiable = false;
  for (auto Ext : AllExtensions) {
    if (ST.canUseExtension(Ext))
                          OperandCategory::ExtensionOperand, Ext)
    IsSatisfiable = false;
  for (const auto Cap : ToAdd)
    if (AvailableCaps.insert(Cap).second)
          SPIRV::OperandCategory::CapabilityOperand, Cap));
    const Capability::Capability ToRemove,
    const Capability::Capability IfPresent) {
  if (AllCaps.contains(IfPresent))
  addAvailableCaps({Capability::Shader, Capability::Linkage, Capability::Int8,
  if (ST.isAtLeastSPIRVVer(VersionTuple(1, 3)))
                      Capability::GroupNonUniformVote,
                      Capability::GroupNonUniformArithmetic,
                      Capability::GroupNonUniformBallot,
                      Capability::GroupNonUniformClustered,
                      Capability::GroupNonUniformShuffle,
                      Capability::GroupNonUniformShuffleRelative});
  if (ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
                      Capability::DotProductInput4x8Bit,
                      Capability::DotProductInput4x8BitPacked,
                      Capability::DemoteToHelperInvocation});
  for (auto Extension : ST.getAllAvailableExtensions()) {
  if (!ST.isShader()) {
    initAvailableCapabilitiesForOpenCL(ST);
  initAvailableCapabilitiesForVulkan(ST);
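// Capabilities available when targeting an OpenCL (kernel) environment, gated
// on the OpenCL profile, image support, and the OpenCL/SPIR-V versions.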
void RequirementHandler::initAvailableCapabilitiesForOpenCL(
    const SPIRVSubtarget &ST) {
                    Capability::Kernel, Capability::Vector16,
                    Capability::Groups, Capability::GenericPointer,
                    Capability::StorageImageWriteWithoutFormat,
                    Capability::StorageImageReadWithoutFormat});
  if (ST.hasOpenCLFullProfile())
  if (ST.hasOpenCLImageSupport()) {
                      Capability::Image1D, Capability::SampledBuffer,
                      Capability::ImageBuffer});
    if (ST.isAtLeastOpenCLVer(VersionTuple(2, 0)))
  if (ST.isAtLeastSPIRVVer(VersionTuple(1, 1)) &&
      ST.isAtLeastOpenCLVer(VersionTuple(2, 2)))
  if (ST.isAtLeastSPIRVVer(VersionTuple(1, 4)))
    addAvailableCaps({Capability::DenormPreserve, Capability::DenormFlushToZero,
                      Capability::SignedZeroInfNanPreserve,
                      Capability::RoundingModeRTE,
                      Capability::RoundingModeRTZ});
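// Capabilities available when targeting a Vulkan (shader) environment, with
// additional descriptor-indexing capabilities from SPIR-V 1.5 onwards.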
void RequirementHandler::initAvailableCapabilitiesForVulkan(
    const SPIRVSubtarget &ST) {
  addAvailableCaps({Capability::Int64, Capability::Float16, Capability::Float64,
                    Capability::GroupNonUniform, Capability::Image1D,
                    Capability::SampledBuffer, Capability::ImageBuffer,
                    Capability::UniformBufferArrayDynamicIndexing,
                    Capability::SampledImageArrayDynamicIndexing,
                    Capability::StorageBufferArrayDynamicIndexing,
                    Capability::StorageImageArrayDynamicIndexing,
                    Capability::DerivativeControl});
  if (ST.isAtLeastSPIRVVer(VersionTuple(1, 5))) {
        {Capability::ShaderNonUniformEXT, Capability::RuntimeDescriptorArrayEXT,
         Capability::InputAttachmentArrayDynamicIndexingEXT,
         Capability::UniformTexelBufferArrayDynamicIndexingEXT,
         Capability::StorageTexelBufferArrayDynamicIndexingEXT,
         Capability::UniformBufferArrayNonUniformIndexingEXT,
         Capability::SampledImageArrayNonUniformIndexingEXT,
         Capability::StorageBufferArrayNonUniformIndexingEXT,
         Capability::StorageImageArrayNonUniformIndexingEXT,
         Capability::InputAttachmentArrayNonUniformIndexingEXT,
         Capability::UniformTexelBufferArrayNonUniformIndexingEXT,
         Capability::StorageTexelBufferArrayNonUniformIndexingEXT});
  if (ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
                      Capability::StorageImageReadWithoutFormat});
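// addOpDecorateReqs: record the capabilities and extensions implied by a
// decoration operand of OpDecorate/OpMemberDecorate (BuiltIn,
// LinkageAttributes, the various INTEL decorations, FPFastMathMode, ...).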
static void addOpDecorateReqs(const MachineInstr &MI, unsigned DecIndex,
  int64_t DecOp = MI.getOperand(DecIndex).getImm();
  auto Dec = static_cast<SPIRV::Decoration::Decoration>(DecOp);
      SPIRV::OperandCategory::DecorationOperand, Dec, ST, Reqs));
  if (Dec == SPIRV::Decoration::BuiltIn) {
    int64_t BuiltInOp = MI.getOperand(DecIndex + 1).getImm();
    auto BuiltIn = static_cast<SPIRV::BuiltIn::BuiltIn>(BuiltInOp);
        SPIRV::OperandCategory::BuiltInOperand, BuiltIn, ST, Reqs));
  } else if (Dec == SPIRV::Decoration::LinkageAttributes) {
    int64_t LinkageOp = MI.getOperand(MI.getNumOperands() - 1).getImm();
    SPIRV::LinkageType::LinkageType LnkType =
        static_cast<SPIRV::LinkageType::LinkageType>(LinkageOp);
    if (LnkType == SPIRV::LinkageType::LinkOnceODR)
      Reqs.addExtension(SPIRV::Extension::SPV_KHR_linkonce_odr);
  } else if (Dec == SPIRV::Decoration::CacheControlLoadINTEL ||
             Dec == SPIRV::Decoration::CacheControlStoreINTEL) {
    Reqs.addExtension(SPIRV::Extension::SPV_INTEL_cache_controls);
  } else if (Dec == SPIRV::Decoration::HostAccessINTEL) {
    Reqs.addExtension(SPIRV::Extension::SPV_INTEL_global_variable_host_access);
  } else if (Dec == SPIRV::Decoration::InitModeINTEL ||
             Dec == SPIRV::Decoration::ImplementInRegisterMapINTEL) {
        SPIRV::Extension::SPV_INTEL_global_variable_fpga_decorations);
  } else if (Dec == SPIRV::Decoration::NonUniformEXT) {
  } else if (Dec == SPIRV::Decoration::FPMaxErrorDecorationINTEL) {
    Reqs.addExtension(SPIRV::Extension::SPV_INTEL_fp_max_error);
  } else if (Dec == SPIRV::Decoration::FPFastMathMode) {
    if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)) {
      Reqs.addExtension(SPIRV::Extension::SPV_KHR_float_controls2);
  assert(MI.getNumOperands() >= 8 && "Insufficient operands for OpTypeImage");
  int64_t ImgFormatOp = MI.getOperand(7).getImm();
  auto ImgFormat = static_cast<SPIRV::ImageFormat::ImageFormat>(ImgFormatOp);
  bool IsArrayed = MI.getOperand(4).getImm() == 1;
  bool IsMultisampled = MI.getOperand(5).getImm() == 1;
  bool NoSampler = MI.getOperand(6).getImm() == 2;
  switch (MI.getOperand(2).getImm()) {
  case SPIRV::Dim::DIM_1D:
                                  : SPIRV::Capability::Sampled1D);
  case SPIRV::Dim::DIM_2D:
    if (IsMultisampled && NoSampler)
  case SPIRV::Dim::DIM_Cube:
                                  : SPIRV::Capability::SampledCubeArray);
  case SPIRV::Dim::DIM_Rect:
                                  : SPIRV::Capability::SampledRect);
  case SPIRV::Dim::DIM_Buffer:
                                  : SPIRV::Capability::SampledBuffer);
  case SPIRV::Dim::DIM_SubpassData:
  if (!ST.isShader()) {
    if (MI.getNumOperands() > 8 &&
        MI.getOperand(8).getImm() == SPIRV::AccessQualifier::ReadWrite)
static bool isBFloat16Type(const SPIRVType *TypeDef) {
         TypeDef->getOpcode() == SPIRV::OpTypeFloat &&
#define ATOM_FLT_REQ_EXT_MSG(ExtName)                                          \
  "The atomic float instruction requires the following SPIR-V "               \
  "extension: SPV_EXT_shader_atomic_float" ExtName
static void AddAtomicVectorFloatRequirements(const MachineInstr &MI,
      MI.getMF()->getRegInfo().getVRegDef(MI.getOperand(1).getReg());
  if (Rank != 2 && Rank != 4)
                       "must be a 2-component or 4 component vector");
  if (EltTypeDef->getOpcode() != SPIRV::OpTypeFloat ||
        "The element type for the result type of an atomic vector float "
        "instruction must be a 16-bit floating-point scalar");
  if (isBFloat16Type(EltTypeDef))
        "The element type for the result type of an atomic vector float "
        "instruction cannot be a bfloat16 scalar");
  if (!ST.canUseExtension(SPIRV::Extension::SPV_NV_shader_atomic_fp16_vector))
        "The atomic float16 vector instruction requires the following SPIR-V "
        "extension: SPV_NV_shader_atomic_fp16_vector");
  Reqs.addExtension(SPIRV::Extension::SPV_NV_shader_atomic_fp16_vector);
  Reqs.addCapability(SPIRV::Capability::AtomicFloat16VectorNV);
         "Expect register operand in atomic float instruction");
  Register TypeReg = MI.getOperand(1).getReg();
  SPIRVType *TypeDef = MI.getMF()->getRegInfo().getVRegDef(TypeReg);
  if (TypeDef->getOpcode() == SPIRV::OpTypeVector)
    return AddAtomicVectorFloatRequirements(MI, Reqs, ST);
  if (TypeDef->getOpcode() != SPIRV::OpTypeFloat)
                       "floating-point type scalar");
  unsigned Op = MI.getOpcode();
  if (Op == SPIRV::OpAtomicFAddEXT) {
    if (!ST.canUseExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_add))
    Reqs.addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_add);
      if (isBFloat16Type(TypeDef)) {
        if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics))
              "The atomic bfloat16 instruction requires the following SPIR-V "
              "extension: SPV_INTEL_16bit_atomics",
        Reqs.addExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics);
        Reqs.addCapability(SPIRV::Capability::AtomicBFloat16AddINTEL);
        if (!ST.canUseExtension(
                SPIRV::Extension::SPV_EXT_shader_atomic_float16_add))
        Reqs.addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float16_add);
          "Unexpected floating-point type width in atomic float instruction");
    if (!ST.canUseExtension(
            SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max))
    Reqs.addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max);
      if (isBFloat16Type(TypeDef)) {
        if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics))
              "The atomic bfloat16 instruction requires the following SPIR-V "
              "extension: SPV_INTEL_16bit_atomics",
        Reqs.addExtension(SPIRV::Extension::SPV_INTEL_16bit_atomics);
        Reqs.addCapability(SPIRV::Capability::AtomicBFloat16MinMaxINTEL);
        Reqs.addCapability(SPIRV::Capability::AtomicFloat16MinMaxEXT);
      Reqs.addCapability(SPIRV::Capability::AtomicFloat32MinMaxEXT);
      Reqs.addCapability(SPIRV::Capability::AtomicFloat64MinMaxEXT);
          "Unexpected floating-point type width in atomic float instruction");
  if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
  return Dim == SPIRV::Dim::DIM_Buffer && Sampled == 1;
  if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
  return Dim == SPIRV::Dim::DIM_Buffer && Sampled == 2;
  if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
  return Dim != SPIRV::Dim::DIM_Buffer && Sampled == 1;
  if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
  return Dim == SPIRV::Dim::DIM_SubpassData && Sampled == 2;
  if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
  return Dim != SPIRV::Dim::DIM_Buffer && Sampled == 2;
bool isCombinedImageSampler(MachineInstr *SampledImageInst) {
  if (SampledImageInst->getOpcode() != SPIRV::OpTypeSampledImage)
  auto *ImageInst = MRI.getUniqueVRegDef(ImageReg);
  return isSampledImage(ImageInst);
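// Access chains into descriptor arrays need the matching NonUniformIndexing
// capability when the result is decorated NonUniformEXT, or the corresponding
// DynamicIndexing capability when the first index is not a constant.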
  for (const auto &MI : MRI.reg_instructions(Reg)) {
    if (MI.getOpcode() != SPIRV::OpDecorate)
    if (Dec == SPIRV::Decoration::NonUniformEXT)
  if (StorageClass != SPIRV::StorageClass::StorageClass::UniformConstant &&
      StorageClass != SPIRV::StorageClass::StorageClass::Uniform &&
      StorageClass != SPIRV::StorageClass::StorageClass::StorageBuffer) {
      hasNonUniformDecoration(Instr.getOperand(0).getReg(), MRI);
  auto FirstIndexReg = Instr.getOperand(3).getReg();
  bool FirstIndexIsConstant =
  if (StorageClass == SPIRV::StorageClass::StorageClass::StorageBuffer) {
          SPIRV::Capability::StorageBufferArrayNonUniformIndexingEXT);
    else if (!FirstIndexIsConstant)
          SPIRV::Capability::StorageBufferArrayDynamicIndexing);
  if (PointeeType->getOpcode() != SPIRV::OpTypeImage &&
      PointeeType->getOpcode() != SPIRV::OpTypeSampledImage &&
      PointeeType->getOpcode() != SPIRV::OpTypeSampler) {
  if (isUniformTexelBuffer(PointeeType)) {
          SPIRV::Capability::UniformTexelBufferArrayNonUniformIndexingEXT);
    else if (!FirstIndexIsConstant)
          SPIRV::Capability::UniformTexelBufferArrayDynamicIndexingEXT);
  } else if (isInputAttachment(PointeeType)) {
          SPIRV::Capability::InputAttachmentArrayNonUniformIndexingEXT);
    else if (!FirstIndexIsConstant)
          SPIRV::Capability::InputAttachmentArrayDynamicIndexingEXT);
  } else if (isStorageTexelBuffer(PointeeType)) {
          SPIRV::Capability::StorageTexelBufferArrayNonUniformIndexingEXT);
    else if (!FirstIndexIsConstant)
          SPIRV::Capability::StorageTexelBufferArrayDynamicIndexingEXT);
  } else if (isSampledImage(PointeeType) ||
             isCombinedImageSampler(PointeeType) ||
             PointeeType->getOpcode() == SPIRV::OpTypeSampler) {
          SPIRV::Capability::SampledImageArrayNonUniformIndexingEXT);
    else if (!FirstIndexIsConstant)
          SPIRV::Capability::SampledImageArrayDynamicIndexing);
  } else if (isStorageImage(PointeeType)) {
          SPIRV::Capability::StorageImageArrayNonUniformIndexingEXT);
    else if (!FirstIndexIsConstant)
          SPIRV::Capability::StorageImageArrayDynamicIndexing);
static bool isImageTypeWithUnknownFormat(SPIRVType *TypeInst) {
  if (TypeInst->getOpcode() != SPIRV::OpTypeImage)
  if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product))
    Reqs.addExtension(SPIRV::Extension::SPV_KHR_integer_dot_product);
  assert(MI.getOperand(2).isReg() && "Unexpected operand in dot");
  assert(Input->getOperand(1).isReg() && "Unexpected operand in dot input");
  if (TypeDef->getOpcode() == SPIRV::OpTypeInt) {
    Reqs.addCapability(SPIRV::Capability::DotProductInput4x8BitPacked);
  } else if (TypeDef->getOpcode() == SPIRV::OpTypeVector) {
           "Dot operand of 8-bit integer type requires 4 components");
    Reqs.addCapability(SPIRV::Capability::DotProductInput4x8Bit);
  unsigned AddrSpace = ASOp.getImm();
  if (AddrSpace != SPIRV::StorageClass::UniformConstant) {
    if (!ST.canUseExtension(
            SPV_EXT_relaxed_printf_string_address_space)) {
          "required because printf uses a format string not "
          "in constant address space.",
        SPIRV::Extension::SPV_EXT_relaxed_printf_string_address_space);
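// Per-instruction requirements: the switch below records the capabilities,
// extensions, and version bounds implied by each SPIR-V opcode.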
  switch (MI.getOpcode()) {
  case SPIRV::OpMemoryModel: {
    int64_t Addr = MI.getOperand(0).getImm();
    int64_t Mem = MI.getOperand(1).getImm();
  case SPIRV::OpEntryPoint: {
    int64_t Exe = MI.getOperand(0).getImm();
  case SPIRV::OpExecutionMode:
  case SPIRV::OpExecutionModeId: {
    int64_t Exe = MI.getOperand(1).getImm();
  case SPIRV::OpTypeMatrix:
  case SPIRV::OpTypeInt: {
    unsigned BitWidth = MI.getOperand(1).getImm();
  case SPIRV::OpDot: {
    if (isBFloat16Type(TypeDef))
      Reqs.addCapability(SPIRV::Capability::BFloat16DotProductKHR);
  case SPIRV::OpTypeFloat: {
    unsigned BitWidth = MI.getOperand(1).getImm();
    if (isBFloat16Type(&MI)) {
      if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_bfloat16))
            "following SPIR-V extension: SPV_KHR_bfloat16",
  case SPIRV::OpTypeVector: {
    unsigned NumComponents = MI.getOperand(2).getImm();
    if (NumComponents == 8 || NumComponents == 16)
  case SPIRV::OpTypePointer: {
    auto SC = MI.getOperand(1).getImm();
        (TypeDef->getOpcode() == SPIRV::OpTypeFloat) &&
  case SPIRV::OpExtInst: {
    if (MI.getOperand(2).getImm() ==
        static_cast<int64_t>(
            SPIRV::InstructionSet::NonSemantic_Shader_DebugInfo_100)) {
      Reqs.addExtension(SPIRV::Extension::SPV_KHR_non_semantic_info);
    if (MI.getOperand(3).getImm() ==
        static_cast<int64_t>(SPIRV::OpenCLExtInst::printf)) {
      addPrintfRequirements(MI, Reqs, ST);
  case SPIRV::OpAliasDomainDeclINTEL:
  case SPIRV::OpAliasScopeDeclINTEL:
  case SPIRV::OpAliasScopeListDeclINTEL: {
    Reqs.addExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing);
    Reqs.addCapability(SPIRV::Capability::MemoryAccessAliasingINTEL);
  case SPIRV::OpBitReverse:
  case SPIRV::OpBitFieldInsert:
  case SPIRV::OpBitFieldSExtract:
  case SPIRV::OpBitFieldUExtract:
    if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
    Reqs.addExtension(SPIRV::Extension::SPV_KHR_bit_instructions);
  case SPIRV::OpTypeRuntimeArray:
  case SPIRV::OpTypeOpaque:
  case SPIRV::OpTypeEvent:
  case SPIRV::OpTypePipe:
  case SPIRV::OpTypeReserveId:
  case SPIRV::OpTypeDeviceEvent:
  case SPIRV::OpTypeQueue:
  case SPIRV::OpBuildNDRange:
  case SPIRV::OpDecorate:
  case SPIRV::OpDecorateId:
  case SPIRV::OpDecorateString:
    addOpDecorateReqs(MI, 1, Reqs, ST);
  case SPIRV::OpMemberDecorate:
  case SPIRV::OpMemberDecorateString:
    addOpDecorateReqs(MI, 2, Reqs, ST);
  case SPIRV::OpInBoundsPtrAccessChain:
  case SPIRV::OpConstantSampler:
  case SPIRV::OpInBoundsAccessChain:
  case SPIRV::OpAccessChain:
    addOpAccessChainReqs(MI, Reqs, ST);
  case SPIRV::OpTypeImage:
    addOpTypeImageReqs(MI, Reqs, ST);
  case SPIRV::OpTypeSampler:
    if (!ST.isShader()) {
  case SPIRV::OpTypeForwardPointer:
  case SPIRV::OpAtomicFlagTestAndSet:
  case SPIRV::OpAtomicLoad:
  case SPIRV::OpAtomicStore:
  case SPIRV::OpAtomicExchange:
  case SPIRV::OpAtomicCompareExchange:
  case SPIRV::OpAtomicIIncrement:
  case SPIRV::OpAtomicIDecrement:
  case SPIRV::OpAtomicIAdd:
  case SPIRV::OpAtomicISub:
  case SPIRV::OpAtomicUMin:
  case SPIRV::OpAtomicUMax:
  case SPIRV::OpAtomicSMin:
  case SPIRV::OpAtomicSMax:
  case SPIRV::OpAtomicAnd:
  case SPIRV::OpAtomicOr:
  case SPIRV::OpAtomicXor: {
    if (MI.getOpcode() == SPIRV::OpAtomicStore) {
      InstrPtr = MRI.getVRegDef(MI.getOperand(3).getReg());
      assert(InstrPtr && "Unexpected type instruction for OpAtomicStore");
    if (TypeDef->getOpcode() == SPIRV::OpTypeInt) {
  case SPIRV::OpGroupNonUniformIAdd:
  case SPIRV::OpGroupNonUniformFAdd:
  case SPIRV::OpGroupNonUniformIMul:
  case SPIRV::OpGroupNonUniformFMul:
  case SPIRV::OpGroupNonUniformSMin:
  case SPIRV::OpGroupNonUniformUMin:
  case SPIRV::OpGroupNonUniformFMin:
  case SPIRV::OpGroupNonUniformSMax:
  case SPIRV::OpGroupNonUniformUMax:
  case SPIRV::OpGroupNonUniformFMax:
  case SPIRV::OpGroupNonUniformBitwiseAnd:
  case SPIRV::OpGroupNonUniformBitwiseOr:
  case SPIRV::OpGroupNonUniformBitwiseXor:
  case SPIRV::OpGroupNonUniformLogicalAnd:
  case SPIRV::OpGroupNonUniformLogicalOr:
  case SPIRV::OpGroupNonUniformLogicalXor: {
    int64_t GroupOp = MI.getOperand(3).getImm();
    case SPIRV::GroupOperation::Reduce:
    case SPIRV::GroupOperation::InclusiveScan:
    case SPIRV::GroupOperation::ExclusiveScan:
      Reqs.addCapability(SPIRV::Capability::GroupNonUniformArithmetic);
    case SPIRV::GroupOperation::ClusteredReduce:
      Reqs.addCapability(SPIRV::Capability::GroupNonUniformClustered);
    case SPIRV::GroupOperation::PartitionedReduceNV:
    case SPIRV::GroupOperation::PartitionedInclusiveScanNV:
    case SPIRV::GroupOperation::PartitionedExclusiveScanNV:
      Reqs.addCapability(SPIRV::Capability::GroupNonUniformPartitionedNV);
  case SPIRV::OpGroupNonUniformShuffle:
  case SPIRV::OpGroupNonUniformShuffleXor:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniformShuffle);
  case SPIRV::OpGroupNonUniformShuffleUp:
  case SPIRV::OpGroupNonUniformShuffleDown:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniformShuffleRelative);
  case SPIRV::OpGroupAll:
  case SPIRV::OpGroupAny:
  case SPIRV::OpGroupBroadcast:
  case SPIRV::OpGroupIAdd:
  case SPIRV::OpGroupFAdd:
  case SPIRV::OpGroupFMin:
  case SPIRV::OpGroupUMin:
  case SPIRV::OpGroupSMin:
  case SPIRV::OpGroupFMax:
  case SPIRV::OpGroupUMax:
  case SPIRV::OpGroupSMax:
  case SPIRV::OpGroupNonUniformElect:
  case SPIRV::OpGroupNonUniformAll:
  case SPIRV::OpGroupNonUniformAny:
  case SPIRV::OpGroupNonUniformAllEqual:
  case SPIRV::OpGroupNonUniformBroadcast:
  case SPIRV::OpGroupNonUniformBroadcastFirst:
  case SPIRV::OpGroupNonUniformBallot:
  case SPIRV::OpGroupNonUniformInverseBallot:
  case SPIRV::OpGroupNonUniformBallotBitExtract:
  case SPIRV::OpGroupNonUniformBallotBitCount:
  case SPIRV::OpGroupNonUniformBallotFindLSB:
  case SPIRV::OpGroupNonUniformBallotFindMSB:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniformBallot);
  case SPIRV::OpSubgroupShuffleINTEL:
  case SPIRV::OpSubgroupShuffleDownINTEL:
  case SPIRV::OpSubgroupShuffleUpINTEL:
  case SPIRV::OpSubgroupShuffleXorINTEL:
    if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
      Reqs.addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
      Reqs.addCapability(SPIRV::Capability::SubgroupShuffleINTEL);
  case SPIRV::OpSubgroupBlockReadINTEL:
  case SPIRV::OpSubgroupBlockWriteINTEL:
    if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
      Reqs.addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
      Reqs.addCapability(SPIRV::Capability::SubgroupBufferBlockIOINTEL);
  case SPIRV::OpSubgroupImageBlockReadINTEL:
  case SPIRV::OpSubgroupImageBlockWriteINTEL:
    if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
      Reqs.addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
      Reqs.addCapability(SPIRV::Capability::SubgroupImageBlockIOINTEL);
  case SPIRV::OpSubgroupImageMediaBlockReadINTEL:
  case SPIRV::OpSubgroupImageMediaBlockWriteINTEL:
    if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
      Reqs.addExtension(SPIRV::Extension::SPV_INTEL_media_block_io);
      Reqs.addCapability(SPIRV::Capability::SubgroupImageMediaBlockIOINTEL);
  case SPIRV::OpAssumeTrueKHR:
  case SPIRV::OpExpectKHR:
    if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume)) {
      Reqs.addExtension(SPIRV::Extension::SPV_KHR_expect_assume);
  case SPIRV::OpPtrCastToCrossWorkgroupINTEL:
  case SPIRV::OpCrossWorkgroupCastToPtrINTEL:
    if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)) {
      Reqs.addExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes);
      Reqs.addCapability(SPIRV::Capability::USMStorageClassesINTEL);
  case SPIRV::OpConstantFunctionPointerINTEL:
    if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
      Reqs.addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
      Reqs.addCapability(SPIRV::Capability::FunctionPointersINTEL);
  case SPIRV::OpGroupNonUniformRotateKHR:
    if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate))
          "following SPIR-V extension: SPV_KHR_subgroup_rotate",
    Reqs.addExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate);
    Reqs.addCapability(SPIRV::Capability::GroupNonUniformRotateKHR);
  case SPIRV::OpFixedCosALTERA:
  case SPIRV::OpFixedSinALTERA:
  case SPIRV::OpFixedCosPiALTERA:
  case SPIRV::OpFixedSinPiALTERA:
  case SPIRV::OpFixedExpALTERA:
  case SPIRV::OpFixedLogALTERA:
  case SPIRV::OpFixedRecipALTERA:
  case SPIRV::OpFixedSqrtALTERA:
  case SPIRV::OpFixedSinCosALTERA:
  case SPIRV::OpFixedSinCosPiALTERA:
  case SPIRV::OpFixedRsqrtALTERA:
    if (!ST.canUseExtension(
            SPIRV::Extension::SPV_ALTERA_arbitrary_precision_fixed_point))
          "following SPIR-V extension: "
          "SPV_ALTERA_arbitrary_precision_fixed_point",
        SPIRV::Extension::SPV_ALTERA_arbitrary_precision_fixed_point);
    Reqs.addCapability(SPIRV::Capability::ArbitraryPrecisionFixedPointALTERA);
  case SPIRV::OpGroupIMulKHR:
  case SPIRV::OpGroupFMulKHR:
  case SPIRV::OpGroupBitwiseAndKHR:
  case SPIRV::OpGroupBitwiseOrKHR:
  case SPIRV::OpGroupBitwiseXorKHR:
  case SPIRV::OpGroupLogicalAndKHR:
  case SPIRV::OpGroupLogicalOrKHR:
  case SPIRV::OpGroupLogicalXorKHR:
    if (ST.canUseExtension(
            SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
      Reqs.addExtension(SPIRV::Extension::SPV_KHR_uniform_group_instructions);
      Reqs.addCapability(SPIRV::Capability::GroupUniformArithmeticKHR);
  case SPIRV::OpReadClockKHR:
    if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock))
          "following SPIR-V extension: SPV_KHR_shader_clock",
    Reqs.addExtension(SPIRV::Extension::SPV_KHR_shader_clock);
  case SPIRV::OpFunctionPointerCallINTEL:
    if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
      Reqs.addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
      Reqs.addCapability(SPIRV::Capability::FunctionPointersINTEL);
  case SPIRV::OpAtomicFAddEXT:
  case SPIRV::OpAtomicFMinEXT:
  case SPIRV::OpAtomicFMaxEXT:
    AddAtomicFloatRequirements(MI, Reqs, ST);
  case SPIRV::OpConvertBF16ToFINTEL:
  case SPIRV::OpConvertFToBF16INTEL:
    if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bfloat16_conversion)) {
      Reqs.addExtension(SPIRV::Extension::SPV_INTEL_bfloat16_conversion);
      Reqs.addCapability(SPIRV::Capability::BFloat16ConversionINTEL);
  case SPIRV::OpRoundFToTF32INTEL:
    if (ST.canUseExtension(
            SPIRV::Extension::SPV_INTEL_tensor_float32_conversion)) {
      Reqs.addExtension(SPIRV::Extension::SPV_INTEL_tensor_float32_conversion);
      Reqs.addCapability(SPIRV::Capability::TensorFloat32RoundingINTEL);
  case SPIRV::OpVariableLengthArrayINTEL:
  case SPIRV::OpSaveMemoryINTEL:
  case SPIRV::OpRestoreMemoryINTEL:
    if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array)) {
      Reqs.addExtension(SPIRV::Extension::SPV_INTEL_variable_length_array);
      Reqs.addCapability(SPIRV::Capability::VariableLengthArrayINTEL);
  case SPIRV::OpAsmTargetINTEL:
  case SPIRV::OpAsmINTEL:
  case SPIRV::OpAsmCallINTEL:
    if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_inline_assembly)) {
      Reqs.addExtension(SPIRV::Extension::SPV_INTEL_inline_assembly);
  case SPIRV::OpTypeCooperativeMatrixKHR: {
    if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
          "OpTypeCooperativeMatrixKHR type requires the "
          "following SPIR-V extension: SPV_KHR_cooperative_matrix",
    Reqs.addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
    Reqs.addCapability(SPIRV::Capability::CooperativeMatrixKHR);
    if (isBFloat16Type(TypeDef))
      Reqs.addCapability(SPIRV::Capability::BFloat16CooperativeMatrixKHR);
  case SPIRV::OpArithmeticFenceEXT:
    if (!ST.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
          "following SPIR-V extension: SPV_EXT_arithmetic_fence",
    Reqs.addExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence);
  case SPIRV::OpControlBarrierArriveINTEL:
  case SPIRV::OpControlBarrierWaitINTEL:
    if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
      Reqs.addExtension(SPIRV::Extension::SPV_INTEL_split_barrier);
  case SPIRV::OpCooperativeMatrixMulAddKHR: {
    if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
          "following SPIR-V extension: "
          "SPV_KHR_cooperative_matrix",
    Reqs.addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
    Reqs.addCapability(SPIRV::Capability::CooperativeMatrixKHR);
    constexpr unsigned MulAddMaxSize = 6;
    if (MI.getNumOperands() != MulAddMaxSize)
    const int64_t CoopOperands = MI.getOperand(MulAddMaxSize - 1).getImm();
        SPIRV::CooperativeMatrixOperands::MatrixAAndBTF32ComponentsINTEL) {
      if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
            "require the following SPIR-V extension: "
            "SPV_INTEL_joint_matrix",
      Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
          SPIRV::Capability::CooperativeMatrixTF32ComponentTypeINTEL);
                MatrixAAndBBFloat16ComponentsINTEL ||
            SPIRV::CooperativeMatrixOperands::MatrixCBFloat16ComponentsINTEL ||
                MatrixResultBFloat16ComponentsINTEL) {
      if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
            "require the following SPIR-V extension: "
            "SPV_INTEL_joint_matrix",
      Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
          SPIRV::Capability::CooperativeMatrixBFloat16ComponentTypeINTEL);
  case SPIRV::OpCooperativeMatrixLoadKHR:
  case SPIRV::OpCooperativeMatrixStoreKHR:
  case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
  case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
  case SPIRV::OpCooperativeMatrixPrefetchINTEL: {
    if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
          "following SPIR-V extension: "
          "SPV_KHR_cooperative_matrix",
    Reqs.addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
    Reqs.addCapability(SPIRV::Capability::CooperativeMatrixKHR);
    std::unordered_map<unsigned, unsigned> LayoutToInstMap = {
        {SPIRV::OpCooperativeMatrixLoadKHR, 3},
        {SPIRV::OpCooperativeMatrixStoreKHR, 2},
        {SPIRV::OpCooperativeMatrixLoadCheckedINTEL, 5},
        {SPIRV::OpCooperativeMatrixStoreCheckedINTEL, 4},
        {SPIRV::OpCooperativeMatrixPrefetchINTEL, 4}};
    const auto OpCode = MI.getOpcode();
    const unsigned LayoutNum = LayoutToInstMap[OpCode];
    Register RegLayout = MI.getOperand(LayoutNum).getReg();
    if (MILayout->getOpcode() == SPIRV::OpConstantI) {
          static_cast<unsigned>(SPIRV::CooperativeMatrixLayout::PackedINTEL)) {
        if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
              "extension: SPV_INTEL_joint_matrix",
        Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
        Reqs.addCapability(SPIRV::Capability::PackedCooperativeMatrixINTEL);
    if (OpCode == SPIRV::OpCooperativeMatrixLoadKHR ||
        OpCode == SPIRV::OpCooperativeMatrixStoreKHR)
    std::string InstName;
    case SPIRV::OpCooperativeMatrixPrefetchINTEL:
      InstName = "OpCooperativeMatrixPrefetchINTEL";
    case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
      InstName = "OpCooperativeMatrixLoadCheckedINTEL";
    case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
      InstName = "OpCooperativeMatrixStoreCheckedINTEL";
    if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix)) {
      const std::string ErrorMsg =
          InstName + " instruction requires the "
                     "following SPIR-V extension: SPV_INTEL_joint_matrix";
    Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
    if (OpCode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
      Reqs.addCapability(SPIRV::Capability::CooperativeMatrixPrefetchINTEL);
        SPIRV::Capability::CooperativeMatrixCheckedInstructionsINTEL);
  case SPIRV::OpCooperativeMatrixConstructCheckedINTEL:
    if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
          "instructions require the following SPIR-V extension: "
          "SPV_INTEL_joint_matrix",
    Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
        SPIRV::Capability::CooperativeMatrixCheckedInstructionsINTEL);
  case SPIRV::OpReadPipeBlockingALTERA:
  case SPIRV::OpWritePipeBlockingALTERA:
    if (ST.canUseExtension(SPIRV::Extension::SPV_ALTERA_blocking_pipes)) {
      Reqs.addExtension(SPIRV::Extension::SPV_ALTERA_blocking_pipes);
  case SPIRV::OpCooperativeMatrixGetElementCoordINTEL:
    if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
          "following SPIR-V extension: SPV_INTEL_joint_matrix",
    Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
        SPIRV::Capability::CooperativeMatrixInvocationInstructionsINTEL);
  case SPIRV::OpConvertHandleToImageINTEL:
  case SPIRV::OpConvertHandleToSamplerINTEL:
  case SPIRV::OpConvertHandleToSampledImageINTEL: {
    if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bindless_images))
          "instructions require the following SPIR-V extension: "
          "SPV_INTEL_bindless_images",
    SPIRV::AddressingModel::AddressingModel AddrModel = MAI.Addr;
    if (MI.getOpcode() == SPIRV::OpConvertHandleToImageINTEL &&
        TyDef->getOpcode() != SPIRV::OpTypeImage) {
          "OpConvertHandleToImageINTEL",
    } else if (MI.getOpcode() == SPIRV::OpConvertHandleToSamplerINTEL &&
               TyDef->getOpcode() != SPIRV::OpTypeSampler) {
          "OpConvertHandleToSamplerINTEL",
    } else if (MI.getOpcode() == SPIRV::OpConvertHandleToSampledImageINTEL &&
               TyDef->getOpcode() != SPIRV::OpTypeSampledImage) {
          "OpConvertHandleToSampledImageINTEL",
    if (!(Bitwidth == 32 && AddrModel == SPIRV::AddressingModel::Physical32) &&
        !(Bitwidth == 64 && AddrModel == SPIRV::AddressingModel::Physical64)) {
          "Parameter value must be a 32-bit scalar in case of "
          "Physical32 addressing model or a 64-bit scalar in case of "
          "Physical64 addressing model",
    Reqs.addExtension(SPIRV::Extension::SPV_INTEL_bindless_images);
  case SPIRV::OpSubgroup2DBlockLoadINTEL:
  case SPIRV::OpSubgroup2DBlockLoadTransposeINTEL:
  case SPIRV::OpSubgroup2DBlockLoadTransformINTEL:
  case SPIRV::OpSubgroup2DBlockPrefetchINTEL:
  case SPIRV::OpSubgroup2DBlockStoreINTEL: {
    if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_2d_block_io))
          "Prefetch/Store]INTEL instructions require the "
          "following SPIR-V extension: SPV_INTEL_2d_block_io",
    Reqs.addExtension(SPIRV::Extension::SPV_INTEL_2d_block_io);
    Reqs.addCapability(SPIRV::Capability::Subgroup2DBlockIOINTEL);
    const auto OpCode = MI.getOpcode();
    if (OpCode == SPIRV::OpSubgroup2DBlockLoadTransposeINTEL) {
      Reqs.addCapability(SPIRV::Capability::Subgroup2DBlockTransposeINTEL);
    if (OpCode == SPIRV::OpSubgroup2DBlockLoadTransformINTEL) {
      Reqs.addCapability(SPIRV::Capability::Subgroup2DBlockTransformINTEL);
  case SPIRV::OpKill: {
  case SPIRV::OpDemoteToHelperInvocation:
    Reqs.addCapability(SPIRV::Capability::DemoteToHelperInvocation);
    if (ST.canUseExtension(
            SPIRV::Extension::SPV_EXT_demote_to_helper_invocation)) {
          SPIRV::Extension::SPV_EXT_demote_to_helper_invocation);
  case SPIRV::OpSUDot:
  case SPIRV::OpSDotAccSat:
  case SPIRV::OpUDotAccSat:
  case SPIRV::OpSUDotAccSat:
    AddDotProductRequirements(MI, Reqs, ST);
  case SPIRV::OpImageRead: {
    Register ImageReg = MI.getOperand(2).getReg();
    SPIRVType *TypeDef = ST.getSPIRVGlobalRegistry()->getResultType(
    if (isImageTypeWithUnknownFormat(TypeDef) && ST.isShader())
      Reqs.addCapability(SPIRV::Capability::StorageImageReadWithoutFormat);
  case SPIRV::OpImageWrite: {
    Register ImageReg = MI.getOperand(0).getReg();
    SPIRVType *TypeDef = ST.getSPIRVGlobalRegistry()->getResultType(
    if (isImageTypeWithUnknownFormat(TypeDef) && ST.isShader())
      Reqs.addCapability(SPIRV::Capability::StorageImageWriteWithoutFormat);
  case SPIRV::OpTypeStructContinuedINTEL:
  case SPIRV::OpConstantCompositeContinuedINTEL:
  case SPIRV::OpSpecConstantCompositeContinuedINTEL:
  case SPIRV::OpCompositeConstructContinuedINTEL: {
    if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_long_composites))
          "Continued instructions require the "
          "following SPIR-V extension: SPV_INTEL_long_composites",
    Reqs.addExtension(SPIRV::Extension::SPV_INTEL_long_composites);
  case SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL: {
    if (!ST.canUseExtension(
            SPIRV::Extension::SPV_INTEL_subgroup_matrix_multiply_accumulate))
          "OpSubgroupMatrixMultiplyAccumulateINTEL instruction requires the "
          "extension: SPV_INTEL_subgroup_matrix_multiply_accumulate",
        SPIRV::Extension::SPV_INTEL_subgroup_matrix_multiply_accumulate);
        SPIRV::Capability::SubgroupMatrixMultiplyAccumulateINTEL);
  case SPIRV::OpBitwiseFunctionINTEL: {
    if (!ST.canUseExtension(
            SPIRV::Extension::SPV_INTEL_ternary_bitwise_function))
          "OpBitwiseFunctionINTEL instruction requires the following SPIR-V "
          "extension: SPV_INTEL_ternary_bitwise_function",
    Reqs.addExtension(SPIRV::Extension::SPV_INTEL_ternary_bitwise_function);
    Reqs.addCapability(SPIRV::Capability::TernaryBitwiseFunctionINTEL);
  case SPIRV::OpCopyMemorySized: {
  case SPIRV::OpPredicatedLoadINTEL:
  case SPIRV::OpPredicatedStoreINTEL: {
    if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_predicated_io))
          "OpPredicated[Load/Store]INTEL instructions require "
          "the following SPIR-V extension: SPV_INTEL_predicated_io",
    Reqs.addExtension(SPIRV::Extension::SPV_INTEL_predicated_io);
  case SPIRV::OpFAddS:
  case SPIRV::OpFSubS:
  case SPIRV::OpFMulS:
  case SPIRV::OpFDivS:
  case SPIRV::OpFRemS:
  case SPIRV::OpFNegate:
  case SPIRV::OpFAddV:
  case SPIRV::OpFSubV:
  case SPIRV::OpFMulV:
  case SPIRV::OpFDivV:
  case SPIRV::OpFRemV:
  case SPIRV::OpFNegateV: {
    if (TypeDef->getOpcode() == SPIRV::OpTypeVector)
    if (isBFloat16Type(TypeDef)) {
      if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bfloat16_arithmetic))
            "Arithmetic instructions with bfloat16 arguments require the "
            "following SPIR-V extension: SPV_INTEL_bfloat16_arithmetic",
      Reqs.addExtension(SPIRV::Extension::SPV_INTEL_bfloat16_arithmetic);
      Reqs.addCapability(SPIRV::Capability::BFloat16ArithmeticINTEL);
  case SPIRV::OpOrdered:
  case SPIRV::OpUnordered:
  case SPIRV::OpFOrdEqual:
  case SPIRV::OpFOrdNotEqual:
  case SPIRV::OpFOrdLessThan:
  case SPIRV::OpFOrdLessThanEqual:
  case SPIRV::OpFOrdGreaterThan:
  case SPIRV::OpFOrdGreaterThanEqual:
  case SPIRV::OpFUnordEqual:
  case SPIRV::OpFUnordNotEqual:
  case SPIRV::OpFUnordLessThan:
  case SPIRV::OpFUnordLessThanEqual:
  case SPIRV::OpFUnordGreaterThan:
  case SPIRV::OpFUnordGreaterThanEqual: {
    if (TypeDef->getOpcode() == SPIRV::OpTypeVector)
    if (isBFloat16Type(TypeDef)) {
      if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bfloat16_arithmetic))
            "Relational instructions with bfloat16 arguments require the "
            "following SPIR-V extension: SPV_INTEL_bfloat16_arithmetic",
      Reqs.addExtension(SPIRV::Extension::SPV_INTEL_bfloat16_arithmetic);
      Reqs.addCapability(SPIRV::Capability::BFloat16ArithmeticINTEL);
  case SPIRV::OpDPdxCoarse:
  case SPIRV::OpDPdyCoarse: {
                      SPIRV::Capability::Shader);
        addInstrRequirements(MI, MAI, ST);
  auto Node = M.getNamedMetadata("spirv.ExecutionMode");
  bool RequireFloatControls = false, RequireIntelFloatControls2 = false,
       RequireKHRFloatControls2 = false,
  bool HasIntelFloatControls2 =
      ST.canUseExtension(SPIRV::Extension::SPV_INTEL_float_controls2);
  bool HasKHRFloatControls2 =
      ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2);
  for (unsigned i = 0; i < Node->getNumOperands(); i++) {
    auto EM = Const->getZExtValue();
    case SPIRV::ExecutionMode::DenormPreserve:
    case SPIRV::ExecutionMode::DenormFlushToZero:
    case SPIRV::ExecutionMode::RoundingModeRTE:
    case SPIRV::ExecutionMode::RoundingModeRTZ:
      RequireFloatControls = VerLower14;
          SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
    case SPIRV::ExecutionMode::RoundingModeRTPINTEL:
    case SPIRV::ExecutionMode::RoundingModeRTNINTEL:
    case SPIRV::ExecutionMode::FloatingPointModeALTINTEL:
    case SPIRV::ExecutionMode::FloatingPointModeIEEEINTEL:
      if (HasIntelFloatControls2) {
        RequireIntelFloatControls2 = true;
            SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
    case SPIRV::ExecutionMode::FPFastMathDefault: {
      if (HasKHRFloatControls2) {
        RequireKHRFloatControls2 = true;
            SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
    case SPIRV::ExecutionMode::ContractionOff:
    case SPIRV::ExecutionMode::SignedZeroInfNanPreserve:
      if (HasKHRFloatControls2) {
        RequireKHRFloatControls2 = true;
            SPIRV::OperandCategory::ExecutionModeOperand,
            SPIRV::ExecutionMode::FPFastMathDefault, ST);
            SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
          SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
  if (RequireFloatControls &&
      ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls))
  if (RequireIntelFloatControls2)
  if (RequireKHRFloatControls2)
    if (F.isDeclaration())
    if (F.getMetadata("reqd_work_group_size"))
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::LocalSize, ST);
    if (F.getFnAttribute("hlsl.numthreads").isValid()) {
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::LocalSize, ST);
    if (F.getFnAttribute("enable-maximal-reconvergence").getValueAsBool()) {
    if (F.getMetadata("work_group_size_hint"))
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::LocalSizeHint, ST);
    if (F.getMetadata("intel_reqd_sub_group_size"))
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::SubgroupSize, ST);
    if (F.getMetadata("max_work_group_size"))
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::MaxWorkgroupSizeINTEL, ST);
    if (F.getMetadata("vec_type_hint"))
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::VecTypeHint, ST);
    if (F.hasOptNone()) {
      if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_optnone)) {
      } else if (ST.canUseExtension(SPIRV::Extension::SPV_EXT_optnone)) {
  unsigned Flags = SPIRV::FPFastMathMode::None;
  bool CanUseKHRFloatControls2 =
      ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2);
    Flags |= SPIRV::FPFastMathMode::NotNaN;
    Flags |= SPIRV::FPFastMathMode::NotInf;
    Flags |= SPIRV::FPFastMathMode::NSZ;
    Flags |= SPIRV::FPFastMathMode::AllowRecip;
    Flags |= SPIRV::FPFastMathMode::AllowContract;
    if (CanUseKHRFloatControls2)
      Flags |= SPIRV::FPFastMathMode::NotNaN | SPIRV::FPFastMathMode::NotInf |
               SPIRV::FPFastMathMode::NSZ | SPIRV::FPFastMathMode::AllowRecip |
               SPIRV::FPFastMathMode::AllowTransform |
               SPIRV::FPFastMathMode::AllowReassoc |
               SPIRV::FPFastMathMode::AllowContract;
      Flags |= SPIRV::FPFastMathMode::Fast;
  if (CanUseKHRFloatControls2) {
    assert(!(Flags & SPIRV::FPFastMathMode::Fast) &&
           "SPIRV::FPFastMathMode::Fast is deprecated and should not be used "
    assert((!(Flags & SPIRV::FPFastMathMode::AllowTransform) ||
            ((Flags & SPIRV::FPFastMathMode::AllowReassoc &&
              Flags & SPIRV::FPFastMathMode::AllowContract))) &&
           "SPIRV::FPFastMathMode::AllowTransform requires AllowReassoc and "
           "AllowContract flags to be enabled as well.");
  return ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2);
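// handleMIFlagDecoration: translate LLVM MI flags into SPIR-V decorations:
// NoSignedWrap/NoUnsignedWrap where the decoration is supported, and an
// FPFastMathMode decoration built from the instruction's fast-math flags and
// the per-type FPFastMathDefault info.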
static void handleMIFlagDecoration(
      getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
                                     SPIRV::Decoration::NoSignedWrap, ST, Reqs)
                     SPIRV::Decoration::NoSignedWrap, {});
      getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
                                     SPIRV::Decoration::NoUnsignedWrap, ST,
                     SPIRV::Decoration::NoUnsignedWrap, {});
  if (!TII.canUseFastMathFlags(
          I, ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2)))
  unsigned FMFlags = getFastMathFlags(I, ST);
  if (FMFlags == SPIRV::FPFastMathMode::None) {
    if (FPFastMathDefaultInfoVec.empty())
    assert(I.getNumOperands() >= 3 && "Expected at least 3 operands");
    Register ResReg = I.getOpcode() == SPIRV::OpExtInst
                          ? I.getOperand(1).getReg()
                          : I.getOperand(2).getReg();
      if (Ty == Elem.Ty) {
        FMFlags = Elem.FastMathFlags;
        Emit = Elem.ContractionOff || Elem.SignedZeroInfNanPreserve ||
               Elem.FPFastMathDefault;
    if (FMFlags == SPIRV::FPFastMathMode::None && !Emit)
  if (isFastMathModeAvailable(ST)) {
    Register DstReg = I.getOperand(0).getReg();
  for (auto &MBB : *MF)
    for (auto &MI : MBB)
      handleMIFlagDecoration(MI, ST, TII, MAI.Reqs, GR,
  for (auto &MBB : *MF) {
    if (!MBB.hasName() || MBB.empty())
    MRI.setRegClass(Reg, &SPIRV::IDRegClass);
  for (auto &MBB : *MF) {
      MI.setDesc(TII.get(SPIRV::OpPhi));
      MI.insert(MI.operands_begin() + 1,
                {MachineOperand::CreateReg(ResTypeReg, false)});
                            SPIRV::FPFastMathMode::None);
                            SPIRV::FPFastMathMode::None);
                            SPIRV::FPFastMathMode::None);
  size_t BitWidth = Ty->getScalarSizeInBits();
  assert(Index >= 0 && Index < 3 &&
         "Expected FPFastMathDefaultInfo for half, float, or double");
  assert(FPFastMathDefaultInfoVec.size() == 3 &&
         "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
  return FPFastMathDefaultInfoVec[Index];
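// collectFPFastMathDefaults: gather per-type FPFastMathDefault information
// from the module's "spirv.ExecutionMode" metadata (FPFastMathDefault,
// ContractionOff, SignedZeroInfNanPreserve) when SPV_KHR_float_controls2 is
// enabled.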
static void collectFPFastMathDefaults(const Module &M,
  if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2))
  auto Node = M.getNamedMetadata("spirv.ExecutionMode");
  for (unsigned i = 0; i < Node->getNumOperands(); i++) {
    if (EM == SPIRV::ExecutionMode::FPFastMathDefault) {
             "Expected 4 operands for FPFastMathDefault");
      Info.FastMathFlags = Flags;
      Info.FPFastMathDefault = true;
    } else if (EM == SPIRV::ExecutionMode::ContractionOff) {
             "Expected no operands for ContractionOff");
        Info.ContractionOff = true;
    } else if (EM == SPIRV::ExecutionMode::SignedZeroInfNanPreserve) {
             "Expected 1 operand for SignedZeroInfNanPreserve");
      unsigned TargetWidth =
      assert(Index >= 0 && Index < 3 &&
             "Expected FPFastMathDefaultInfo for half, float, or double");
      assert(FPFastMathDefaultInfoVec.size() == 3 &&
             "Expected FPFastMathDefaultInfoVec to have exactly 3 elements");
      FPFastMathDefaultInfoVec[Index].SignedZeroInfNanPreserve = true;