#define DEBUG_TYPE "spirv-module-analysis"

static cl::opt<bool>
    SPVDumpDeps("spv-dump-deps",
                cl::desc("Dump MIR with SPIR-V dependencies info"),
                cl::Optional, cl::init(false));
static unsigned getMetadataUInt(MDNode *MdNode, unsigned OpIndex,
                                unsigned DefaultVal = 0) {
  if (MdNode && OpIndex < MdNode->getNumOperands()) {
    const auto &Op = MdNode->getOperand(OpIndex);
    return mdconst::extract<ConstantInt>(Op)->getZExtValue();
  }
  return DefaultVal;
}
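// Resolve the requirements of a single symbolic operand: check the SPIR-V
// version range declared for it, prefer a capability the target already makes
// available, and otherwise fall back to the operand's extensions when the
// subtarget can use all of them. The returned Requirements record what must be
// declared in the module for the operand to be legal.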
static SPIRV::Requirements
getSymbolicOperandRequirements(SPIRV::OperandCategory::OperandCategory Category,
                               uint32_t i, const SPIRVSubtarget &ST,
                               SPIRV::RequirementHandler &Reqs) {
  unsigned ReqMinVer = getSymbolicOperandMinVersion(Category, i);
  unsigned ReqMaxVer = getSymbolicOperandMaxVersion(Category, i);
  unsigned TargetVer = ST.getSPIRVVersion();
  bool MinVerOK = !ReqMinVer || !TargetVer || TargetVer >= ReqMinVer;
  bool MaxVerOK = !ReqMaxVer || !TargetVer || TargetVer <= ReqMaxVer;
  CapabilityList ReqCaps = getSymbolicOperandCapabilities(Category, i);
  ExtensionList ReqExts = getSymbolicOperandExtensions(Category, i);
  if (ReqCaps.empty()) {
    if (ReqExts.empty()) {
      if (MinVerOK && MaxVerOK)
        return {true, {}, {}, ReqMinVer, ReqMaxVer};
      return {false, {}, {}, 0, 0};
    }
  } else if (MinVerOK && MaxVerOK) {
    // Only one of the requested capabilities needs to be available.
    for (auto Cap : ReqCaps) {
      if (Reqs.isCapabilityAvailable(Cap))
        return {true, {Cap}, {}, ReqMinVer, ReqMaxVer};
    }
  }
  // Fall back to the required extensions if the subtarget supports them all.
  if (llvm::all_of(ReqExts, [&ST](const SPIRV::Extension::Extension &Ext) {
        return ST.canUseExtension(Ext);
      }))
    return {true, {}, ReqExts, 0, 0};
  return {false, {}, {}, 0, 0};
}
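// setBaseInfo() seeds MAI with the module-level addressing model, memory
// model, source language and OpenCL extension info. When the producer has
// already recorded its choices, they arrive as named metadata; a sketch of the
// shape this code expects (operand meanings follow the reads below, the
// concrete constants depend on the front end and are only illustrative):
//
//   !spirv.MemoryModel = !{!0}
//   !0 = !{i32 2, i32 2}        ; addressing model, memory model
//   !opencl.ocl.version = !{!1}
//   !1 = !{i32 2, i32 0}        ; OpenCL C major, minor version
//
// Without the metadata, defaults are derived from the subtarget (OpenCL vs.
// GLSL environment, pointer size).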
void SPIRVModuleAnalysis::setBaseInfo(const Module &M) {
  if (auto MemModel = M.getNamedMetadata("spirv.MemoryModel")) {
    auto MemMD = MemModel->getOperand(0);
    MAI.Addr = static_cast<SPIRV::AddressingModel::AddressingModel>(
        getMetadataUInt(MemMD, 0));
    MAI.Mem =
        static_cast<SPIRV::MemoryModel::MemoryModel>(getMetadataUInt(MemMD, 1));
  } else {
    // The default memory model and addressing model when no metadata is given.
    MAI.Mem = ST->isOpenCLEnv() ? SPIRV::MemoryModel::OpenCL
                                : SPIRV::MemoryModel::GLSL450;
    if (MAI.Mem == SPIRV::MemoryModel::OpenCL) {
      unsigned PtrSize = ST->getPointerSize();
      MAI.Addr = PtrSize == 32   ? SPIRV::AddressingModel::Physical32
                 : PtrSize == 64 ? SPIRV::AddressingModel::Physical64
                                 : SPIRV::AddressingModel::Logical;
    } else {
      MAI.Addr = SPIRV::AddressingModel::Logical;
    }
  }
  // Get the OpenCL version number from metadata.
  if (auto VerNode = M.getNamedMetadata("opencl.ocl.version")) {
    MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_C;
    // Construct the version literal in accordance with SPIRV-LLVM-Translator.
    assert(VerNode->getNumOperands() > 0 && "Invalid SPIR");
    auto VersionMD = VerNode->getOperand(0);
    unsigned MajorNum = getMetadataUInt(VersionMD, 0, 2);
    unsigned MinorNum = getMetadataUInt(VersionMD, 1);
    unsigned RevNum = getMetadataUInt(VersionMD, 2);
    MAI.SrcLangVersion = (MajorNum * 100 + MinorNum) * 1000 + RevNum;
  }
  // Collect the names of the used OpenCL extensions.
  if (auto ExtNode = M.getNamedMetadata("opencl.used.extensions")) {
    for (unsigned I = 0, E = ExtNode->getNumOperands(); I != E; ++I) {
      MDNode *MD = ExtNode->getOperand(I);
      if (!MD || MD->getNumOperands() == 0)
        continue;
      for (unsigned J = 0, N = MD->getNumOperands(); J != N; ++J)
        MAI.SrcExt.insert(cast<MDString>(MD->getOperand(J))->getString());
    }
  }
  if (ST->isOpenCLEnv()) {
    // Register the OpenCL.std extended instruction set up front.
    MAI.ExtInstSetMap[static_cast<unsigned>(
        SPIRV::InstructionSet::OpenCL_std)] =
        Register::index2VirtReg(MAI.getNextID());
  }
}
static void collectDefInstr(Register Reg, const MachineFunction *MF,
                            SPIRV::ModuleAnalysisInfo *MAI,
                            SPIRV::ModuleSectionType MSType,
                            bool DoInsert = true) {
  MachineInstr *MI = MF->getRegInfo().getUniqueVRegDef(Reg);
  assert(MI && "There should be an instruction that defines the register");
  MAI->setSkipEmission(MI);
  if (DoInsert)
    MAI->MS[MSType].push_back(MI);
}
void SPIRVModuleAnalysis::collectGlobalEntities(
    const std::vector<SPIRV::DTSortableEntry *> &DepsGraph,
    SPIRV::ModuleSectionType MSType,
    std::function<bool(const SPIRV::DTSortableEntry *)> Pred,
    bool UsePreOrder = false) {
  DenseSet<const SPIRV::DTSortableEntry *> Visited;
  for (const auto *E : DepsGraph) {
    std::function<void(const SPIRV::DTSortableEntry *)> RecHoistUtil;
    RecHoistUtil = [MSType, UsePreOrder, &Visited, &Pred,
                    &RecHoistUtil](const SPIRV::DTSortableEntry *E) {
      if (Visited.count(E) || !Pred(E))
        return;
      Visited.insert(E);
      // Post-order emits dependencies before their users; pre-order is
      // required for function declarations and their arguments.
      if (!UsePreOrder)
        for (auto *S : E->getDeps())
          RecHoistUtil(S);
      // Assign one module-level register to every local alias of the entity
      // and collect the defining instruction once.
      Register GlobalReg = Register::index2VirtReg(MAI.getNextID());
      bool IsFirst = true;
      for (auto &U : *E) {
        const MachineFunction *MF = U.first;
        Register Reg = U.second;
        MAI.setRegisterAlias(MF, Reg, GlobalReg);
        if (!MF->getRegInfo().getUniqueVRegDef(Reg))
          continue;
        collectDefInstr(Reg, MF, &MAI, MSType, IsFirst);
        IsFirst = false;
      }
      if (UsePreOrder)
        for (auto *S : E->getDeps())
          RecHoistUtil(S);
    };
    RecHoistUtil(E);
  }
}
void SPIRVModuleAnalysis::processDefInstrs(const Module &M) {
  std::vector<SPIRV::DTSortableEntry *> DepsGraph;

  GR->buildDepsGraph(DepsGraph, SPVDumpDeps ? MMI : nullptr);

  collectGlobalEntities(
      DepsGraph, SPIRV::MB_TypeConstVars,
      [](const SPIRV::DTSortableEntry *E) { return !E->getIsFunc(); });

  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    MachineFunction *MF = MMI->getMachineFunction(*F);
    if (!MF)
      continue;
    // Collect explicit OpExtension/OpCapability instructions.
    for (MachineBasicBlock &MBB : *MF)
      for (MachineInstr &MI : MBB) {
        if (MI.getOpcode() == SPIRV::OpExtension) {
          // Here, OpExtension has a single enum operand, not a string.
          auto Ext = SPIRV::Extension::Extension(MI.getOperand(0).getImm());
          MAI.Reqs.addExtension(Ext);
          MAI.setSkipEmission(&MI);
        } else if (MI.getOpcode() == SPIRV::OpCapability) {
          auto Cap = SPIRV::Capability::Capability(MI.getOperand(0).getImm());
          MAI.Reqs.addCapability(Cap);
          MAI.setSkipEmission(&MI);
        }
      }
  }

  collectGlobalEntities(
      DepsGraph, SPIRV::MB_ExtFuncDecls,
      [](const SPIRV::DTSortableEntry *E) { return E->getIsFunc(); }, true);
}
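// findSameInstrInMS returns true when the given module section already
// contains an instruction whose operands all match A's: register operands are
// compared through their global aliases, everything else via isIdenticalTo.
// This is how duplicate module-level declarations are skipped.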
static bool findSameInstrInMS(const MachineInstr &A,
                              SPIRV::ModuleSectionType MSType,
                              SPIRV::ModuleAnalysisInfo &MAI,
                              unsigned StartOpIndex = 0) {
  for (const auto *B : MAI.MS[MSType]) {
    const unsigned NumAOps = A.getNumOperands();
    if (NumAOps != B->getNumOperands() || A.getNumDefs() != B->getNumDefs())
      continue;
    bool AllOpsMatch = true;
    for (unsigned i = StartOpIndex; i < NumAOps && AllOpsMatch; ++i) {
      if (A.getOperand(i).isReg() && B->getOperand(i).isReg()) {
        Register RegA = A.getOperand(i).getReg();
        Register RegB = B->getOperand(i).getReg();
        AllOpsMatch = MAI.getRegisterAlias(A.getMF(), RegA) ==
                      MAI.getRegisterAlias(B->getMF(), RegB);
      } else {
        AllOpsMatch = A.getOperand(i).isIdenticalTo(B->getOperand(i));
      }
    }
    if (AllOpsMatch)
      return true;
  }
  return false;
}
void SPIRVModuleAnalysis::collectFuncNames(MachineInstr &MI,
                                           const Function *F) {
  if (MI.getOpcode() == SPIRV::OpDecorate) {
    // If it's got Import linkage.
    auto Dec = MI.getOperand(1).getImm();
    if (Dec == static_cast<unsigned>(SPIRV::Decoration::LinkageAttributes)) {
      auto Lnk = MI.getOperand(MI.getNumOperands() - 1).getImm();
      if (Lnk == static_cast<unsigned>(SPIRV::LinkageType::Import)) {
        // Map the imported function name to the function's ID register.
        const Function *ImportedFunc =
            F->getParent()->getFunction(getStringImm(MI, 2));
        Register Target = MI.getOperand(0).getReg();
        MAI.FuncMap[ImportedFunc] = MAI.getRegisterAlias(MI.getMF(), Target);
      }
    }
  } else if (MI.getOpcode() == SPIRV::OpFunction) {
    // Record all internal OpFunction declarations.
    Register Reg = MI.defs().begin()->getReg();
    Register GlobalReg = MAI.getRegisterAlias(MI.getMF(), Reg);
    assert(GlobalReg.isValid());
    MAI.FuncMap[F] = GlobalReg;
  }
}

// Collect the given instruction in the specified module section, skipping it
// if an identical instruction (modulo global register aliases) is already
// there. Global register numbering is assumed to have happened already.
static void collectOtherInstr(MachineInstr &MI, SPIRV::ModuleAnalysisInfo &MAI,
                              SPIRV::ModuleSectionType MSType,
                              bool Append = true) {
  MAI.setSkipEmission(&MI);
  if (findSameInstrInMS(MI, MSType, MAI))
    return; // Found a duplicate, so don't add it.
  // No duplicates, so add it.
  if (Append)
    MAI.MS[MSType].push_back(&MI);
  else
    MAI.MS[MSType].insert(MAI.MS[MSType].begin(), &MI);
}
void SPIRVModuleAnalysis::processOtherInstrs(const Module &M) {
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    if ((*F).isDeclaration())
      continue;
    MachineFunction *MF = MMI->getMachineFunction(*F);
    assert(MF);
    for (MachineBasicBlock &MBB : *MF)
      for (MachineInstr &MI : MBB) {
        if (MAI.getSkipEmission(&MI))
          continue;
        const unsigned OpCode = MI.getOpcode();
        if (OpCode == SPIRV::OpName || OpCode == SPIRV::OpMemberName) {
          collectOtherInstr(MI, MAI, SPIRV::MB_DebugNames);
        } else if (OpCode == SPIRV::OpEntryPoint) {
          collectOtherInstr(MI, MAI, SPIRV::MB_EntryPoints);
          collectFuncNames(MI, &*F);
        } else if (TII->isDecorationInstr(MI)) {
          collectOtherInstr(MI, MAI, SPIRV::MB_Annotations);
          collectFuncNames(MI, &*F);
        } else if (TII->isConstantInstr(MI)) {
          collectOtherInstr(MI, MAI, SPIRV::MB_TypeConstVars);
        } else if (OpCode == SPIRV::OpFunction) {
          collectFuncNames(MI, &*F);
        } else if (OpCode == SPIRV::OpTypeForwardPointer) {
          collectOtherInstr(MI, MAI, SPIRV::MB_TypeConstVars, false);
        }
      }
  }
}
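// numberRegistersGlobally gives every virtual register that has no global
// alias yet a fresh module-level ID, so the same entity is referred to by one
// ID across machine functions, and records the extended-instruction-set
// register used by each OpExtInst.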
void SPIRVModuleAnalysis::numberRegistersGlobally(const Module &M) {
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    if ((*F).isDeclaration())
      continue;
    MachineFunction *MF = MMI->getMachineFunction(*F);
    assert(MF);
    for (MachineBasicBlock &MBB : *MF) {
      for (MachineInstr &MI : MBB) {
        for (MachineOperand &Op : MI.operands()) {
          if (!Op.isReg())
            continue;
          Register Reg = Op.getReg();
          if (MAI.hasRegisterAlias(MF, Reg))
            continue;
          Register NewReg = Register::index2VirtReg(MAI.getNextID());
          MAI.setRegisterAlias(MF, Reg, NewReg);
        }
        if (MI.getOpcode() != SPIRV::OpExtInst)
          continue;
        auto Set = MI.getOperand(2).getImm();
        if (!MAI.ExtInstSetMap.contains(Set))
          MAI.ExtInstSetMap[Set] = Register::index2VirtReg(MAI.getNextID());
      }
    }
  }
}
void SPIRV::RequirementHandler::getAndAddRequirements(
    SPIRV::OperandCategory::OperandCategory Category, uint32_t i,
    const SPIRVSubtarget &ST) {
  addRequirements(getSymbolicOperandRequirements(Category, i, ST, *this));
}
void SPIRV::RequirementHandler::pruneCapabilities(
    const CapabilityList &ToPrune) {
  for (const auto &Cap : ToPrune) {
    AllCaps.insert(Cap);
    auto FoundIndex = std::find(MinimalCaps.begin(), MinimalCaps.end(), Cap);
    if (FoundIndex != MinimalCaps.end())
      MinimalCaps.erase(FoundIndex);
    CapabilityList ImplicitDecls =
        getSymbolicOperandCapabilities(OperandCategory::CapabilityOperand, Cap);
    pruneCapabilities(ImplicitDecls);
  }
}
void SPIRV::RequirementHandler::addCapabilities(const CapabilityList &ToAdd) {
  for (const auto &Cap : ToAdd) {
    bool IsNewlyInserted = AllCaps.insert(Cap).second;
    if (!IsNewlyInserted) // Don't re-add if it's already been declared.
      continue;
    CapabilityList ImplicitDecls =
        getSymbolicOperandCapabilities(OperandCategory::CapabilityOperand, Cap);
    pruneCapabilities(ImplicitDecls);
    MinimalCaps.push_back(Cap);
  }
}
void SPIRV::RequirementHandler::addRequirements(
    const SPIRV::Requirements &Req) {
  if (Req.Cap.has_value())
    addCapabilities({Req.Cap.value()});

  addExtensions(Req.Exts);

  if (Req.MinVer) {
    if (MaxVersion && Req.MinVer > MaxVersion) {
      LLVM_DEBUG(dbgs() << "Conflicting version requirements: >= " << Req.MinVer
                        << " and <= " << MaxVersion << "\n");
      report_fatal_error("Adding SPIR-V requirements that can't be satisfied.");
    }
    if (MinVersion == 0 || Req.MinVer > MinVersion)
      MinVersion = Req.MinVer;
  }

  if (Req.MaxVer) {
    if (MinVersion && Req.MaxVer < MinVersion) {
      LLVM_DEBUG(dbgs() << "Conflicting version requirements: <= " << Req.MaxVer
                        << " and >= " << MinVersion << "\n");
      report_fatal_error("Adding SPIR-V requirements that can't be satisfied.");
    }
    if (MaxVersion == 0 || Req.MaxVer < MaxVersion)
      MaxVersion = Req.MaxVer;
  }
}
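// checkSatisfiable is the final sanity check: the accumulated minimum/maximum
// SPIR-V versions must bracket the target's version, every minimal capability
// must be available on the subtarget, and every referenced extension must be
// enabled; otherwise IsSatisfiable is cleared and the failure is reported.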
void SPIRV::RequirementHandler::checkSatisfiable(
    const SPIRVSubtarget &ST) const {
  // Report as many errors as possible before aborting the compilation.
  bool IsSatisfiable = true;
  auto TargetVer = ST.getSPIRVVersion();

  if (MaxVersion && TargetVer && MaxVersion < TargetVer) {
    LLVM_DEBUG(
        dbgs() << "Target SPIR-V version too high for required features\n"
               << "Required max version: " << MaxVersion << " target version "
               << TargetVer << "\n");
    IsSatisfiable = false;
  }

  if (MinVersion && TargetVer && MinVersion > TargetVer) {
    LLVM_DEBUG(dbgs() << "Target SPIR-V version too low for required features\n"
                      << "Required min version: " << MinVersion
                      << " target version " << TargetVer << "\n");
    IsSatisfiable = false;
  }

  if (MinVersion && MaxVersion && MinVersion > MaxVersion) {
    LLVM_DEBUG(
        dbgs()
        << "Version is too low for some features and too high for others.\n"
        << "Required SPIR-V min version: " << MinVersion
        << " required SPIR-V max version " << MaxVersion << "\n");
    IsSatisfiable = false;
  }

  for (auto Cap : MinimalCaps) {
    if (AvailableCaps.contains(Cap))
      continue;
    LLVM_DEBUG(dbgs() << "Capability not supported: "
                      << getSymbolicOperandMnemonic(
                             OperandCategory::CapabilityOperand, Cap)
                      << "\n");
    IsSatisfiable = false;
  }

  for (auto Ext : AllExtensions) {
    if (ST.canUseExtension(Ext))
      continue;
    LLVM_DEBUG(dbgs() << "Extension not supported: "
                      << getSymbolicOperandMnemonic(
                             OperandCategory::ExtensionOperand, Ext)
                      << "\n");
    IsSatisfiable = false;
  }

  if (!IsSatisfiable)
    report_fatal_error("Unable to meet SPIR-V requirements for this target.");
}
void SPIRV::RequirementHandler::addAvailableCaps(const CapabilityList &ToAdd) {
  for (const auto Cap : ToAdd)
    if (AvailableCaps.insert(Cap).second)
      addAvailableCaps(getSymbolicOperandCapabilities(
          SPIRV::OperandCategory::CapabilityOperand, Cap));
}
void SPIRV::RequirementHandler::removeCapabilityIf(
    const Capability::Capability ToRemove,
    const Capability::Capability IfPresent) {
  if (AvailableCaps.contains(IfPresent))
    AvailableCaps.erase(ToRemove);
}
void RequirementHandler::initAvailableCapabilities(const SPIRVSubtarget &ST) {
  if (ST.isOpenCLEnv()) {
    initAvailableCapabilitiesForOpenCL(ST);
    return;
  }

  if (ST.isVulkanEnv()) {
    initAvailableCapabilitiesForVulkan(ST);
    return;
  }

  report_fatal_error("Unimplemented environment for SPIR-V generation.");
}
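// Populate AvailableCaps for an OpenCL (kernel) environment. The baseline
// capabilities are unconditional; further groups are gated on the OpenCL
// profile, image support, and the OpenCL/SPIR-V versions the subtarget
// reports, and each enabled extension contributes the capabilities it implies.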
void RequirementHandler::initAvailableCapabilitiesForOpenCL(
    const SPIRVSubtarget &ST) {
  // Add the min requirements for different OpenCL and SPIR-V versions.
  addAvailableCaps({Capability::Addresses, Capability::Float16Buffer,
                    Capability::Int16, Capability::Int8, Capability::Kernel,
                    Capability::Linkage, Capability::Vector16,
                    Capability::Groups, Capability::GenericPointer,
                    Capability::Shader});
  if (ST.hasOpenCLFullProfile())
    addAvailableCaps({Capability::Int64, Capability::Int64Atomics});
  if (ST.hasOpenCLImageSupport()) {
    addAvailableCaps({Capability::ImageBasic, Capability::LiteralSampler,
                      Capability::Image1D, Capability::SampledBuffer,
                      Capability::ImageBuffer});
    if (ST.isAtLeastOpenCLVer(20))
      addAvailableCaps({Capability::ImageReadWrite});
  }
  if (ST.isAtLeastSPIRVVer(11) && ST.isAtLeastOpenCLVer(22))
    addAvailableCaps({Capability::SubgroupDispatch, Capability::PipeStorage});
  if (ST.isAtLeastSPIRVVer(13))
    addAvailableCaps({Capability::GroupNonUniform,
                      Capability::GroupNonUniformVote,
                      Capability::GroupNonUniformArithmetic,
                      Capability::GroupNonUniformBallot,
                      Capability::GroupNonUniformClustered,
                      Capability::GroupNonUniformShuffle,
                      Capability::GroupNonUniformShuffleRelative});
  if (ST.isAtLeastSPIRVVer(14))
    addAvailableCaps({Capability::DenormPreserve, Capability::DenormFlushToZero,
                      Capability::SignedZeroInfNanPreserve,
                      Capability::RoundingModeRTE,
                      Capability::RoundingModeRTZ});
  addAvailableCaps({Capability::Float16, Capability::Float64});

  // Add capabilities enabled by extensions.
  for (auto Extension : ST.getAllAvailableExtensions()) {
    CapabilityList EnabledCapabilities =
        getCapabilitiesEnabledByExtension(Extension);
    addAvailableCaps(EnabledCapabilities);
  }
}
void RequirementHandler::initAvailableCapabilitiesForVulkan(
    const SPIRVSubtarget &ST) {
  addAvailableCaps({Capability::Shader, Capability::Linkage});
  addAvailableCaps({Capability::Int16, Capability::Int64, Capability::Float64});
}
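// The helpers below derive requirements from individual instructions.
// addOpDecorateReqs handles OpDecorate-style instructions: the decoration
// enum itself can demand capabilities/extensions, and a BuiltIn decoration
// additionally pulls in the requirements of the decorated built-in.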
static void addOpDecorateReqs(const MachineInstr &MI, unsigned DecIndex,
                              SPIRV::RequirementHandler &Reqs,
                              const SPIRVSubtarget &ST) {
  int64_t DecOp = MI.getOperand(DecIndex).getImm();
  auto Dec = static_cast<SPIRV::Decoration::Decoration>(DecOp);
  Reqs.addRequirements(getSymbolicOperandRequirements(
      SPIRV::OperandCategory::DecorationOperand, Dec, ST, Reqs));

  if (Dec == SPIRV::Decoration::BuiltIn) {
    int64_t BuiltInOp = MI.getOperand(DecIndex + 1).getImm();
    auto BuiltIn = static_cast<SPIRV::BuiltIn::BuiltIn>(BuiltInOp);
    Reqs.addRequirements(getSymbolicOperandRequirements(
        SPIRV::OperandCategory::BuiltInOperand, BuiltIn, ST, Reqs));
  }
}
// Add requirements for image handling.
static void addOpTypeImageReqs(const MachineInstr &MI,
                               SPIRV::RequirementHandler &Reqs,
                               const SPIRVSubtarget &ST) {
  assert(MI.getNumOperands() >= 8 && "Insufficient operands for OpTypeImage");
  // The image format operand may require capabilities or extensions of its
  // own.
  int64_t ImgFormatOp = MI.getOperand(7).getImm();
  auto ImgFormat = static_cast<SPIRV::ImageFormat::ImageFormat>(ImgFormatOp);
  Reqs.getAndAddRequirements(SPIRV::OperandCategory::ImageFormatOperand,
                             ImgFormat, ST);

  bool IsArrayed = MI.getOperand(4).getImm() == 1;
  bool IsMultisampled = MI.getOperand(5).getImm() == 1;
  bool NoSampler = MI.getOperand(6).getImm() == 2;
  // Add dimension requirements.
  switch (MI.getOperand(2).getImm()) {
  case SPIRV::Dim::DIM_1D:
    Reqs.addRequirements(NoSampler ? SPIRV::Capability::Image1D
                                   : SPIRV::Capability::Sampled1D);
    break;
  case SPIRV::Dim::DIM_2D:
    if (IsMultisampled && NoSampler)
      Reqs.addRequirements(SPIRV::Capability::ImageMSArray);
    break;
  case SPIRV::Dim::DIM_Cube:
    Reqs.addRequirements(SPIRV::Capability::Shader);
    if (IsArrayed)
      Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageCubeArray
                                     : SPIRV::Capability::SampledCubeArray);
    break;
  case SPIRV::Dim::DIM_Rect:
    Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageRect
                                   : SPIRV::Capability::SampledRect);
    break;
  case SPIRV::Dim::DIM_Buffer:
    Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageBuffer
                                   : SPIRV::Capability::SampledBuffer);
    break;
  case SPIRV::Dim::DIM_SubpassData:
    Reqs.addRequirements(SPIRV::Capability::InputAttachment);
    break;
  }

  // Has optional access qualifier.
  if (MI.getNumOperands() > 8 &&
      MI.getOperand(8).getImm() == SPIRV::AccessQualifier::ReadWrite)
    Reqs.addRequirements(SPIRV::Capability::ImageReadWrite);
}
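// addInstrRequirements is the per-instruction driver: it switches on the
// opcode and records the capabilities and extensions each SPIR-V instruction,
// type or operand implies (memory model, entry points, int/float widths,
// atomics, group and non-uniform group operations, and so on).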
static void addInstrRequirements(const MachineInstr &MI,
                                 SPIRV::RequirementHandler &Reqs,
                                 const SPIRVSubtarget &ST) {
  switch (MI.getOpcode()) {
  case SPIRV::OpMemoryModel: {
    int64_t Addr = MI.getOperand(0).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::AddressingModelOperand,
                               Addr, ST);
    int64_t Mem = MI.getOperand(1).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand, Mem,
                               ST);
    break;
  }
  case SPIRV::OpEntryPoint: {
    int64_t Exe = MI.getOperand(0).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::ExecutionModelOperand,
                               Exe, ST);
    break;
  }
  case SPIRV::OpExecutionMode:
  case SPIRV::OpExecutionModeId: {
    int64_t Exe = MI.getOperand(1).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::ExecutionModeOperand,
                               Exe, ST);
    break;
  }
  case SPIRV::OpTypeMatrix:
    Reqs.addCapability(SPIRV::Capability::Matrix);
    break;
  case SPIRV::OpTypeInt: {
    unsigned BitWidth = MI.getOperand(1).getImm();
    if (BitWidth == 64)
      Reqs.addCapability(SPIRV::Capability::Int64);
    else if (BitWidth == 16)
      Reqs.addCapability(SPIRV::Capability::Int16);
    else if (BitWidth == 8)
      Reqs.addCapability(SPIRV::Capability::Int8);
    break;
  }
  case SPIRV::OpTypeFloat: {
    unsigned BitWidth = MI.getOperand(1).getImm();
    if (BitWidth == 64)
      Reqs.addCapability(SPIRV::Capability::Float64);
    else if (BitWidth == 16)
      Reqs.addCapability(SPIRV::Capability::Float16);
    break;
  }
  case SPIRV::OpTypeVector: {
    unsigned NumComponents = MI.getOperand(2).getImm();
    if (NumComponents == 8 || NumComponents == 16)
      Reqs.addCapability(SPIRV::Capability::Vector16);
    break;
  }
  case SPIRV::OpTypePointer: {
    auto SC = MI.getOperand(1).getImm();
    Reqs.getAndAddRequirements(SPIRV::OperandCategory::StorageClassOperand, SC,
                               ST);
    // If it's a pointer to float16, add the Float16Buffer capability.
    const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
    const MachineInstr *TypeDef = MRI.getVRegDef(MI.getOperand(2).getReg());
    if (TypeDef->getOpcode() == SPIRV::OpTypeFloat &&
        TypeDef->getOperand(1).getImm() == 16)
      Reqs.addCapability(SPIRV::Capability::Float16Buffer);
    break;
  }
  case SPIRV::OpBitReverse:
  case SPIRV::OpBitFieldInsert:
  case SPIRV::OpBitFieldSExtract:
  case SPIRV::OpBitFieldUExtract:
    if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
      Reqs.addCapability(SPIRV::Capability::Shader);
      break;
    }
    Reqs.addExtension(SPIRV::Extension::SPV_KHR_bit_instructions);
    Reqs.addCapability(SPIRV::Capability::BitInstructions);
    break;
  case SPIRV::OpTypeRuntimeArray:
    Reqs.addCapability(SPIRV::Capability::Shader);
    break;
  case SPIRV::OpTypeOpaque:
  case SPIRV::OpTypeEvent:
    Reqs.addCapability(SPIRV::Capability::Kernel);
    break;
  case SPIRV::OpTypePipe:
  case SPIRV::OpTypeReserveId:
    Reqs.addCapability(SPIRV::Capability::Pipes);
    break;
  case SPIRV::OpTypeDeviceEvent:
  case SPIRV::OpTypeQueue:
  case SPIRV::OpBuildNDRange:
    Reqs.addCapability(SPIRV::Capability::DeviceEnqueue);
    break;
  case SPIRV::OpDecorate:
  case SPIRV::OpDecorateId:
  case SPIRV::OpDecorateString:
    addOpDecorateReqs(MI, 1, Reqs, ST);
    break;
  case SPIRV::OpMemberDecorate:
  case SPIRV::OpMemberDecorateString:
    addOpDecorateReqs(MI, 2, Reqs, ST);
    break;
  case SPIRV::OpInBoundsPtrAccessChain:
    Reqs.addCapability(SPIRV::Capability::Addresses);
    break;
  case SPIRV::OpConstantSampler:
    Reqs.addCapability(SPIRV::Capability::LiteralSampler);
    break;
  case SPIRV::OpTypeImage:
    addOpTypeImageReqs(MI, Reqs, ST);
    break;
  case SPIRV::OpTypeSampler:
    Reqs.addCapability(SPIRV::Capability::ImageBasic);
    break;
  case SPIRV::OpTypeForwardPointer:
    Reqs.addCapability(SPIRV::Capability::Addresses);
    break;
  case SPIRV::OpAtomicFlagTestAndSet:
  case SPIRV::OpAtomicLoad:
  case SPIRV::OpAtomicStore:
  case SPIRV::OpAtomicExchange:
  case SPIRV::OpAtomicCompareExchange:
  case SPIRV::OpAtomicIIncrement:
  case SPIRV::OpAtomicIDecrement:
  case SPIRV::OpAtomicIAdd:
  case SPIRV::OpAtomicISub:
  case SPIRV::OpAtomicUMin:
  case SPIRV::OpAtomicUMax:
  case SPIRV::OpAtomicSMin:
  case SPIRV::OpAtomicSMax:
  case SPIRV::OpAtomicAnd:
  case SPIRV::OpAtomicOr:
  case SPIRV::OpAtomicXor: {
    const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
    const MachineInstr *InstrPtr = &MI;
    if (MI.getOpcode() == SPIRV::OpAtomicStore) {
      InstrPtr = MRI.getVRegDef(MI.getOperand(3).getReg());
      assert(InstrPtr && "Unexpected type instruction for OpAtomicStore");
    }
    // 64-bit integer atomics need the Int64Atomics capability.
    Register TypeReg = InstrPtr->getOperand(1).getReg();
    const MachineInstr *TypeDef = MRI.getVRegDef(TypeReg);
    if (TypeDef->getOpcode() == SPIRV::OpTypeInt) {
      unsigned BitWidth = TypeDef->getOperand(1).getImm();
      if (BitWidth == 64)
        Reqs.addCapability(SPIRV::Capability::Int64Atomics);
    }
    break;
  }
  case SPIRV::OpGroupNonUniformIAdd:
  case SPIRV::OpGroupNonUniformFAdd:
  case SPIRV::OpGroupNonUniformIMul:
  case SPIRV::OpGroupNonUniformFMul:
  case SPIRV::OpGroupNonUniformSMin:
  case SPIRV::OpGroupNonUniformUMin:
  case SPIRV::OpGroupNonUniformFMin:
  case SPIRV::OpGroupNonUniformSMax:
  case SPIRV::OpGroupNonUniformUMax:
  case SPIRV::OpGroupNonUniformFMax:
  case SPIRV::OpGroupNonUniformBitwiseAnd:
  case SPIRV::OpGroupNonUniformBitwiseOr:
  case SPIRV::OpGroupNonUniformBitwiseXor:
  case SPIRV::OpGroupNonUniformLogicalAnd:
  case SPIRV::OpGroupNonUniformLogicalOr:
  case SPIRV::OpGroupNonUniformLogicalXor: {
    int64_t GroupOp = MI.getOperand(3).getImm();
    switch (GroupOp) {
    case SPIRV::GroupOperation::Reduce:
    case SPIRV::GroupOperation::InclusiveScan:
    case SPIRV::GroupOperation::ExclusiveScan:
      Reqs.addCapability(SPIRV::Capability::GroupNonUniformArithmetic);
      Reqs.addCapability(SPIRV::Capability::GroupNonUniformBallot);
      break;
    case SPIRV::GroupOperation::ClusteredReduce:
      Reqs.addCapability(SPIRV::Capability::GroupNonUniformClustered);
      break;
    case SPIRV::GroupOperation::PartitionedReduceNV:
    case SPIRV::GroupOperation::PartitionedInclusiveScanNV:
    case SPIRV::GroupOperation::PartitionedExclusiveScanNV:
      Reqs.addCapability(SPIRV::Capability::GroupNonUniformPartitionedNV);
      break;
    }
    break;
  }
  case SPIRV::OpGroupNonUniformShuffle:
  case SPIRV::OpGroupNonUniformShuffleXor:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniformShuffle);
    break;
  case SPIRV::OpGroupNonUniformShuffleUp:
  case SPIRV::OpGroupNonUniformShuffleDown:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniformShuffleRelative);
    break;
  case SPIRV::OpGroupAll:
  case SPIRV::OpGroupAny:
  case SPIRV::OpGroupBroadcast:
  case SPIRV::OpGroupIAdd:
  case SPIRV::OpGroupFAdd:
  case SPIRV::OpGroupFMin:
  case SPIRV::OpGroupUMin:
  case SPIRV::OpGroupSMin:
  case SPIRV::OpGroupFMax:
  case SPIRV::OpGroupUMax:
  case SPIRV::OpGroupSMax:
    Reqs.addCapability(SPIRV::Capability::Groups);
    break;
  case SPIRV::OpGroupNonUniformElect:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniform);
    break;
  case SPIRV::OpGroupNonUniformAll:
  case SPIRV::OpGroupNonUniformAny:
  case SPIRV::OpGroupNonUniformAllEqual:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniformVote);
    break;
  case SPIRV::OpGroupNonUniformBroadcast:
  case SPIRV::OpGroupNonUniformBroadcastFirst:
  case SPIRV::OpGroupNonUniformBallot:
  case SPIRV::OpGroupNonUniformInverseBallot:
  case SPIRV::OpGroupNonUniformBallotBitExtract:
  case SPIRV::OpGroupNonUniformBallotBitCount:
  case SPIRV::OpGroupNonUniformBallotFindLSB:
  case SPIRV::OpGroupNonUniformBallotFindMSB:
    Reqs.addCapability(SPIRV::Capability::GroupNonUniformBallot);
    break;
  default:
    break;
  }

  // If the Shader capability is required, it already covers what
  // BitInstructions provides, so the explicit BitInstructions requirement can
  // be dropped.
  Reqs.removeCapabilityIf(SPIRV::Capability::BitInstructions,
                          SPIRV::Capability::Shader);
}
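// collectReqs walks every emitted MachineInstr to gather instruction-level
// requirements, then folds in module-level execution modes: entries of the
// "spirv.ExecutionMode" named metadata, plus per-function attributes and
// metadata such as reqd_work_group_size, work_group_size_hint,
// intel_reqd_sub_group_size, vec_type_hint and hlsl.numthreads, each of which
// maps to a SPIR-V ExecutionMode operand with its own requirements.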
static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI,
                        MachineModuleInfo *MMI, const SPIRVSubtarget &ST) {
  // Collect requirements for existing instructions.
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    MachineFunction *MF = MMI->getMachineFunction(*F);
    if (!MF)
      continue;
    for (const MachineBasicBlock &MBB : *MF)
      for (const MachineInstr &MI : MBB)
        addInstrRequirements(MI, MAI.Reqs, ST);
  }
  // Collect requirements for OpExecutionMode instructions.
  auto Node = M.getNamedMetadata("spirv.ExecutionMode");
  if (Node) {
    for (unsigned i = 0; i < Node->getNumOperands(); i++) {
      MDNode *MDN = cast<MDNode>(Node->getOperand(i));
      const MDOperand &MDOp = MDN->getOperand(1);
      if (auto *CMeta = dyn_cast<ConstantAsMetadata>(MDOp)) {
        if (auto *Const = dyn_cast<ConstantInt>(CMeta->getValue())) {
          auto EM = Const->getZExtValue();
          MAI.Reqs.getAndAddRequirements(
              SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
        }
      }
    }
  }
  for (auto FI = M.begin(), E = M.end(); FI != E; ++FI) {
    const Function &F = *FI;
    if (F.isDeclaration())
      continue;
    if (F.getMetadata("reqd_work_group_size"))
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::LocalSize, ST);
    if (F.getFnAttribute("hlsl.numthreads").isValid()) {
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::LocalSize, ST);
    }
    if (F.getMetadata("work_group_size_hint"))
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::LocalSizeHint, ST);
    if (F.getMetadata("intel_reqd_sub_group_size"))
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::SubgroupSize, ST);
    if (F.getMetadata("vec_type_hint"))
      MAI.Reqs.getAndAddRequirements(
          SPIRV::OperandCategory::ExecutionModeOperand,
          SPIRV::ExecutionMode::VecTypeHint, ST);

    if (F.hasOptNone() &&
        ST.canUseExtension(SPIRV::Extension::SPV_INTEL_optnone)) {
      // Output OpCapability OptNoneINTEL.
      MAI.Reqs.addExtension(SPIRV::Extension::SPV_INTEL_optnone);
      MAI.Reqs.addCapability(SPIRV::Capability::OptNoneINTEL);
    }
  }
}
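// getFastMathFlags translates the fast-math flags attached to a MachineInstr
// into the SPIR-V FPFastMathMode bitmask that an FPFastMathMode decoration can
// carry; the result feeds handleMIFlagDecoration below, which also turns
// no-signed-wrap and no-unsigned-wrap flags into the corresponding
// decorations when their requirements are satisfiable.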
static unsigned getFastMathFlags(const MachineInstr &I) {
  unsigned Flags = SPIRV::FPFastMathMode::None;
  if (I.getFlag(MachineInstr::MIFlag::FmNoNans))
    Flags |= SPIRV::FPFastMathMode::NotNaN;
  if (I.getFlag(MachineInstr::MIFlag::FmNoInfs))
    Flags |= SPIRV::FPFastMathMode::NotInf;
  if (I.getFlag(MachineInstr::MIFlag::FmNsz))
    Flags |= SPIRV::FPFastMathMode::NSZ;
  if (I.getFlag(MachineInstr::MIFlag::FmArcp))
    Flags |= SPIRV::FPFastMathMode::AllowRecip;
  if (I.getFlag(MachineInstr::MIFlag::FmReassoc))
    Flags |= SPIRV::FPFastMathMode::Fast;
  return Flags;
}
static void handleMIFlagDecoration(MachineInstr &I, const SPIRVSubtarget &ST,
                                   const SPIRVInstrInfo &TII,
                                   SPIRV::RequirementHandler &Reqs) {
  if (I.getFlag(MachineInstr::MIFlag::NoSWrap) && TII.canUseNSW(I) &&
      getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
                                     SPIRV::Decoration::NoSignedWrap, ST, Reqs)
          .IsSatisfiable) {
    buildOpDecorate(I.getOperand(0).getReg(), I, TII,
                    SPIRV::Decoration::NoSignedWrap, {});
  }
  if (I.getFlag(MachineInstr::MIFlag::NoUWrap) && TII.canUseNUW(I) &&
      getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
                                     SPIRV::Decoration::NoUnsignedWrap, ST,
                                     Reqs)
          .IsSatisfiable) {
    buildOpDecorate(I.getOperand(0).getReg(), I, TII,
                    SPIRV::Decoration::NoUnsignedWrap, {});
  }
  if (!TII.canUseFastMathFlags(I))
    return;
  unsigned FMFlags = getFastMathFlags(I);
  if (FMFlags == SPIRV::FPFastMathMode::None)
    return;
  Register DstReg = I.getOperand(0).getReg();
  buildOpDecorate(DstReg, I, TII, SPIRV::Decoration::FPFastMathMode, {FMFlags});
}
// Walk all functions and add decorations related to MI flags.
static void addDecorations(const Module &M, const SPIRVInstrInfo &TII,
                           MachineModuleInfo *MMI, const SPIRVSubtarget &ST,
                           SPIRV::ModuleAnalysisInfo &MAI) {
  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    MachineFunction *MF = MMI->getMachineFunction(*F);
    if (!MF)
      continue;
    for (auto &MBB : *MF)
      for (auto &MI : MBB)
        handleMIFlagDecoration(MI, ST, TII, MAI.Reqs);
  }
}
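// runOnModule drives the analysis: it caches the subtarget, global registry
// and instruction info, adds MI-flag decorations, collects the module's
// capability/extension requirements, hoists global definitions, numbers
// registers globally and finally collects the remaining module-level
// instructions into MAI for the SPIR-V asm printer to emit.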
bool SPIRVModuleAnalysis::runOnModule(Module &M) {
  SPIRVTargetMachine &TM =
      getAnalysis<TargetPassConfig>().getTM<SPIRVTargetMachine>();
  ST = TM.getSubtargetImpl();
  GR = ST->getSPIRVGlobalRegistry();
  TII = ST->getInstrInfo();
  MMI = &getAnalysis<MachineModuleInfoWrapperPass>().getMMI();

  setBaseInfo(M);

  addDecorations(M, *TII, MMI, *ST, MAI);

  collectReqs(M, MAI, MMI, *ST);

  // Hoist global definitions and number their registers first, then the rest.
  processDefInstrs(M);
  numberRegistersGlobally(M);
  processOtherInstrs(M);

  return false;
}