1//===- SPIRVModuleAnalysis.cpp - analysis of global instrs & regs - C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The analysis collects instructions that should be output at the module level
10// and performs the global register numbering.
11//
12// The results of this analysis are used in AsmPrinter to rename registers
13// globally and to output required instructions at the module level.
14//
15//===----------------------------------------------------------------------===//
16
17#include "SPIRVModuleAnalysis.h"
18#include "MCTargetDesc/SPIRVBaseInfo.h"
19#include "MCTargetDesc/SPIRVMCTargetDesc.h"
20#include "SPIRV.h"
21#include "SPIRVSubtarget.h"
22#include "SPIRVTargetMachine.h"
23#include "SPIRVUtils.h"
24#include "llvm/ADT/STLExtras.h"
25#include "llvm/CodeGen/MachineModuleInfo.h"
26#include "llvm/CodeGen/TargetPassConfig.h"
27
28using namespace llvm;
29
30#define DEBUG_TYPE "spirv-module-analysis"
31
32static cl::opt<bool>
33 SPVDumpDeps("spv-dump-deps",
34 cl::desc("Dump MIR with SPIR-V dependencies info"),
35 cl::Optional, cl::init(false));
36
37static cl::list<SPIRV::Capability::Capability>
38    AvoidCapabilities("avoid-spirv-capabilities",
39                      cl::desc("SPIR-V capabilities to avoid if there are "
40                               "other options enabling a feature"),
41                      cl::ZeroOrMore, cl::Hidden,
42                      cl::values(clEnumValN(SPIRV::Capability::Shader, "Shader",
43                                            "SPIR-V Shader capability")));
44// Use sets instead of cl::list to check the "if contains" condition.
45struct AvoidCapabilitiesSet {
46  SmallSet<SPIRV::Capability::Capability, 4> S;
47  AvoidCapabilitiesSet() {
48    for (auto Cap : AvoidCapabilities)
49      S.insert(Cap);
50  }
51};
52
53char llvm::SPIRVModuleAnalysis::ID = 0;
54
55namespace llvm {
56void initializeSPIRVModuleAnalysisPass(PassRegistry &);
57} // namespace llvm
58
59INITIALIZE_PASS(SPIRVModuleAnalysis, DEBUG_TYPE, "SPIRV module analysis", true,
60 true)
61
62// Retrieve an unsigned from an MDNode with a list of them as operands.
63static unsigned getMetadataUInt(MDNode *MdNode, unsigned OpIndex,
64 unsigned DefaultVal = 0) {
65 if (MdNode && OpIndex < MdNode->getNumOperands()) {
66 const auto &Op = MdNode->getOperand(OpIndex);
67 return mdconst::extract<ConstantInt>(Op)->getZExtValue();
68 }
69 return DefaultVal;
70}
71
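// Compute whether the symbolic operand (Category, value i) can be used on this
// subtarget and, if usable, which capabilities, extensions and SPIR-V version
// bounds its use entails.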
72static SPIRV::Requirements
73getSymbolicOperandRequirements(SPIRV::OperandCategory::OperandCategory Category,
74                               unsigned i, const SPIRVSubtarget &ST,
75                               SPIRV::RequirementHandler &Reqs) {
76  static AvoidCapabilitiesSet
77      AvoidCaps; // contains capabilities to avoid if there is another option
78
79 VersionTuple ReqMinVer = getSymbolicOperandMinVersion(Category, i);
80 VersionTuple ReqMaxVer = getSymbolicOperandMaxVersion(Category, i);
81 VersionTuple SPIRVVersion = ST.getSPIRVVersion();
82 bool MinVerOK = SPIRVVersion.empty() || SPIRVVersion >= ReqMinVer;
83 bool MaxVerOK =
84 ReqMaxVer.empty() || SPIRVVersion.empty() || SPIRVVersion <= ReqMaxVer;
85  CapabilityList ReqCaps = getSymbolicOperandCapabilities(Category, i);
86  ExtensionList ReqExts = getSymbolicOperandExtensions(Category, i);
87 if (ReqCaps.empty()) {
88 if (ReqExts.empty()) {
89 if (MinVerOK && MaxVerOK)
90 return {true, {}, {}, ReqMinVer, ReqMaxVer};
91 return {false, {}, {}, VersionTuple(), VersionTuple()};
92 }
93 } else if (MinVerOK && MaxVerOK) {
94 if (ReqCaps.size() == 1) {
95 auto Cap = ReqCaps[0];
96 if (Reqs.isCapabilityAvailable(Cap))
97 return {true, {Cap}, ReqExts, ReqMinVer, ReqMaxVer};
98 } else {
99 // By SPIR-V specification: "If an instruction, enumerant, or other
100 // feature specifies multiple enabling capabilities, only one such
101 // capability needs to be declared to use the feature." However, one
102 // capability may be preferred over another. We use command line
103 // argument(s) and AvoidCapabilities to avoid selection of certain
104 // capabilities if there are other options.
105 CapabilityList UseCaps;
106 for (auto Cap : ReqCaps)
107 if (Reqs.isCapabilityAvailable(Cap))
108 UseCaps.push_back(Cap);
109 for (size_t i = 0, Sz = UseCaps.size(); i < Sz; ++i) {
110 auto Cap = UseCaps[i];
111 if (i == Sz - 1 || !AvoidCaps.S.contains(Cap))
112 return {true, {Cap}, ReqExts, ReqMinVer, ReqMaxVer};
113 }
114 }
115 }
116 // If there are no capabilities, or we can't satisfy the version or
117 // capability requirements, use the list of extensions (if the subtarget
118 // can handle them all).
119 if (llvm::all_of(ReqExts, [&ST](const SPIRV::Extension::Extension &Ext) {
120 return ST.canUseExtension(Ext);
121 })) {
122 return {true,
123 {},
124 ReqExts,
125 VersionTuple(),
126 VersionTuple()}; // TODO: add versions to extensions.
127 }
128 return {false, {}, {}, VersionTuple(), VersionTuple()};
129}
130
131void SPIRVModuleAnalysis::setBaseInfo(const Module &M) {
132 MAI.MaxID = 0;
133 for (int i = 0; i < SPIRV::NUM_MODULE_SECTIONS; i++)
134 MAI.MS[i].clear();
135 MAI.RegisterAliasTable.clear();
136 MAI.InstrsToDelete.clear();
137 MAI.FuncMap.clear();
138 MAI.GlobalVarList.clear();
139 MAI.ExtInstSetMap.clear();
140  MAI.Reqs.clear();
141  MAI.Reqs.initAvailableCapabilities(*ST);
142
143  // TODO: determine memory model and source language from the configuration.
144 if (auto MemModel = M.getNamedMetadata("spirv.MemoryModel")) {
145 auto MemMD = MemModel->getOperand(0);
146 MAI.Addr = static_cast<SPIRV::AddressingModel::AddressingModel>(
147 getMetadataUInt(MemMD, 0));
148 MAI.Mem =
149 static_cast<SPIRV::MemoryModel::MemoryModel>(getMetadataUInt(MemMD, 1));
150 } else {
151 // TODO: Add support for VulkanMemoryModel.
152 MAI.Mem = ST->isOpenCLEnv() ? SPIRV::MemoryModel::OpenCL
153 : SPIRV::MemoryModel::GLSL450;
154 if (MAI.Mem == SPIRV::MemoryModel::OpenCL) {
155 unsigned PtrSize = ST->getPointerSize();
156 MAI.Addr = PtrSize == 32 ? SPIRV::AddressingModel::Physical32
157 : PtrSize == 64 ? SPIRV::AddressingModel::Physical64
158 : SPIRV::AddressingModel::Logical;
159 } else {
160 // TODO: Add support for PhysicalStorageBufferAddress.
161 MAI.Addr = SPIRV::AddressingModel::Logical;
162 }
163 }
164 // Get the OpenCL version number from metadata.
165 // TODO: support other source languages.
166 if (auto VerNode = M.getNamedMetadata("opencl.ocl.version")) {
167 MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_C;
168 // Construct version literal in accordance with SPIRV-LLVM-Translator.
169 // TODO: support multiple OCL version metadata.
170 assert(VerNode->getNumOperands() > 0 && "Invalid SPIR");
171 auto VersionMD = VerNode->getOperand(0);
172 unsigned MajorNum = getMetadataUInt(VersionMD, 0, 2);
173 unsigned MinorNum = getMetadataUInt(VersionMD, 1);
174 unsigned RevNum = getMetadataUInt(VersionMD, 2);
175    // Prevent the major part of the OpenCL version from being 0.
176    MAI.SrcLangVersion =
177        (std::max(1U, MajorNum) * 100 + MinorNum) * 1000 + RevNum;
178 } else {
179    // If there is no information about the OpenCL version, we are forced to
180    // generate OpenCL 1.0 by default for the OpenCL environment, to avoid
181    // puzzling run-times with an Unknown/0.0 version output. For reference,
182    // the LLVM-SPIRV Translator avoids run-time issues in a similar manner.
183 if (ST->isOpenCLEnv()) {
184 MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_CPP;
185 MAI.SrcLangVersion = 100000;
186 } else {
187      MAI.SrcLang = SPIRV::SourceLanguage::Unknown;
188      MAI.SrcLangVersion = 0;
189    }
190 }
191
192 if (auto ExtNode = M.getNamedMetadata("opencl.used.extensions")) {
193 for (unsigned I = 0, E = ExtNode->getNumOperands(); I != E; ++I) {
194 MDNode *MD = ExtNode->getOperand(I);
195 if (!MD || MD->getNumOperands() == 0)
196 continue;
197 for (unsigned J = 0, N = MD->getNumOperands(); J != N; ++J)
198 MAI.SrcExt.insert(cast<MDString>(MD->getOperand(J))->getString());
199 }
200 }
201
202 // Update required capabilities for this memory model, addressing model and
203 // source language.
204 MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand,
205 MAI.Mem, *ST);
206 MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::SourceLanguageOperand,
207 MAI.SrcLang, *ST);
208 MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::AddressingModelOperand,
209 MAI.Addr, *ST);
210
211 if (ST->isOpenCLEnv()) {
212 // TODO: check if it's required by default.
213 MAI.ExtInstSetMap[static_cast<unsigned>(
214        SPIRV::InstructionSet::OpenCL_std)] =
215        Register::index2VirtReg(MAI.getNextID());
216  }
217}
218
219// Returns a representation of an instruction as a vector of MachineOperand
220// hash values, see llvm::hash_value(const MachineOperand &MO) for details.
221// This creates a signature of the instruction with the same content
222// that MachineOperand::isIdenticalTo uses for comparison.
223static InstrSignature instrToSignature(const MachineInstr &MI,
224                                       SPIRV::ModuleAnalysisInfo &MAI,
225                                       bool UseDefReg) {
226 InstrSignature Signature{MI.getOpcode()};
227 for (unsigned i = 0; i < MI.getNumOperands(); ++i) {
228 const MachineOperand &MO = MI.getOperand(i);
229 size_t h;
230 if (MO.isReg()) {
231 if (!UseDefReg && MO.isDef())
232 continue;
233 Register RegAlias = MAI.getRegisterAlias(MI.getMF(), MO.getReg());
234 if (!RegAlias.isValid()) {
235 LLVM_DEBUG({
236 dbgs() << "Unexpectedly, no global id found for the operand ";
237 MO.print(dbgs());
238 dbgs() << "\nInstruction: ";
239 MI.print(dbgs());
240 dbgs() << "\n";
241 });
242 report_fatal_error("All v-regs must have been mapped to global id's");
243 }
244 // mimic llvm::hash_value(const MachineOperand &MO)
245 h = hash_combine(MO.getType(), (unsigned)RegAlias, MO.getSubReg(),
246 MO.isDef());
247 } else {
248 h = hash_value(MO);
249 }
250 Signature.push_back(h);
251 }
252 return Signature;
253}
254
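// Return true if MI defines something that belongs to the module-level
// declaration sections (types, constants, module-scope OpVariables, and
// OpFunction/OpFunctionParameter headers) rather than to a function body.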
255bool SPIRVModuleAnalysis::isDeclSection(const MachineRegisterInfo &MRI,
256 const MachineInstr &MI) {
257 unsigned Opcode = MI.getOpcode();
258 switch (Opcode) {
259 case SPIRV::OpTypeForwardPointer:
260 // omit now, collect later
261 return false;
262 case SPIRV::OpVariable:
263 return static_cast<SPIRV::StorageClass::StorageClass>(
264 MI.getOperand(2).getImm()) != SPIRV::StorageClass::Function;
265 case SPIRV::OpFunction:
266 case SPIRV::OpFunctionParameter:
267 return true;
268 }
269 if (GR->hasConstFunPtr() && Opcode == SPIRV::OpUndef) {
270 Register DefReg = MI.getOperand(0).getReg();
271 for (MachineInstr &UseMI : MRI.use_instructions(DefReg)) {
272 if (UseMI.getOpcode() != SPIRV::OpConstantFunctionPointerINTEL)
273 continue;
274 // it's a dummy definition, FP constant refers to a function,
275 // and this is resolved in another way; let's skip this definition
276      assert(UseMI.getOperand(2).isReg() &&
277             UseMI.getOperand(2).getReg() == DefReg);
278      MAI.setSkipEmission(&MI);
279      return false;
280 }
281 }
282  return TII->isTypeDeclInstr(MI) || TII->isConstantInstr(MI) ||
283         TII->isInlineAsmDefInstr(MI);
284}
285
286// This is a special case of a function pointer referring to a possibly
287// forward function declaration. The operand is a dummy OpUndef that
288// requires special treatment.
289void SPIRVModuleAnalysis::visitFunPtrUse(
290 Register OpReg, InstrGRegsMap &SignatureToGReg,
291 std::map<const Value *, unsigned> &GlobalToGReg, const MachineFunction *MF,
292 const MachineInstr &MI) {
293 const MachineOperand *OpFunDef =
294 GR->getFunctionDefinitionByUse(&MI.getOperand(2));
295 assert(OpFunDef && OpFunDef->isReg());
296 // find the actual function definition and number it globally in advance
297 const MachineInstr *OpDefMI = OpFunDef->getParent();
298 assert(OpDefMI && OpDefMI->getOpcode() == SPIRV::OpFunction);
299 const MachineFunction *FunDefMF = OpDefMI->getParent()->getParent();
300 const MachineRegisterInfo &FunDefMRI = FunDefMF->getRegInfo();
301 do {
302 visitDecl(FunDefMRI, SignatureToGReg, GlobalToGReg, FunDefMF, *OpDefMI);
303 OpDefMI = OpDefMI->getNextNode();
304 } while (OpDefMI && (OpDefMI->getOpcode() == SPIRV::OpFunction ||
305 OpDefMI->getOpcode() == SPIRV::OpFunctionParameter));
306 // associate the function pointer with the newly assigned global number
307 Register GlobalFunDefReg = MAI.getRegisterAlias(FunDefMF, OpFunDef->getReg());
308 assert(GlobalFunDefReg.isValid() &&
309 "Function definition must refer to a global register");
310 MAI.setRegisterAlias(MF, OpReg, GlobalFunDefReg);
311}
312
313// Depth first recursive traversal of dependencies. Repeated visits are guarded
314// by MAI.hasRegisterAlias().
315void SPIRVModuleAnalysis::visitDecl(
316 const MachineRegisterInfo &MRI, InstrGRegsMap &SignatureToGReg,
317 std::map<const Value *, unsigned> &GlobalToGReg, const MachineFunction *MF,
318 const MachineInstr &MI) {
319 unsigned Opcode = MI.getOpcode();
321
322 // Process each operand of the instruction to resolve dependencies
323 for (const MachineOperand &MO : MI.operands()) {
324 if (!MO.isReg() || MO.isDef())
325 continue;
326 Register OpReg = MO.getReg();
327 // Handle function pointers special case
328 if (Opcode == SPIRV::OpConstantFunctionPointerINTEL &&
329 MRI.getRegClass(OpReg) == &SPIRV::pIDRegClass) {
330 visitFunPtrUse(OpReg, SignatureToGReg, GlobalToGReg, MF, MI);
331 continue;
332 }
333 // Skip already processed instructions
334 if (MAI.hasRegisterAlias(MF, MO.getReg()))
335 continue;
336 // Recursively visit dependencies
337 if (const MachineInstr *OpDefMI = MRI.getUniqueVRegDef(OpReg)) {
338 if (isDeclSection(MRI, *OpDefMI))
339 visitDecl(MRI, SignatureToGReg, GlobalToGReg, MF, *OpDefMI);
340 continue;
341 }
342 // Handle the unexpected case of no unique definition for the SPIR-V
343 // instruction
344 LLVM_DEBUG({
345 dbgs() << "Unexpectedly, no unique definition for the operand ";
346 MO.print(dbgs());
347 dbgs() << "\nInstruction: ";
348 MI.print(dbgs());
349 dbgs() << "\n";
350    });
351    report_fatal_error(
352        "No unique definition is found for the virtual register");
353 }
354
355 Register GReg;
356 bool IsFunDef = false;
357  if (TII->isSpecConstantInstr(MI)) {
358    GReg = Register::index2VirtReg(MAI.getNextID());
359    MAI.setSkipEmission(&MI);
360  } else if (Opcode == SPIRV::OpFunction ||
361 Opcode == SPIRV::OpFunctionParameter) {
362 GReg = handleFunctionOrParameter(MF, MI, GlobalToGReg, IsFunDef);
363 } else if (TII->isTypeDeclInstr(MI) || TII->isConstantInstr(MI) ||
364 TII->isInlineAsmDefInstr(MI)) {
365 GReg = handleTypeDeclOrConstant(MI, SignatureToGReg);
366 } else if (Opcode == SPIRV::OpVariable) {
367 GReg = handleVariable(MF, MI, GlobalToGReg);
368 } else {
369 LLVM_DEBUG({
370 dbgs() << "\nInstruction: ";
371 MI.print(dbgs());
372 dbgs() << "\n";
373 });
374 llvm_unreachable("Unexpected instruction is visited");
375 }
376 MAI.setRegisterAlias(MF, MI.getOperand(0).getReg(), GReg);
377  if (!IsFunDef)
378    MAI.setSkipEmission(&MI);
379}
380
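// Assign (or reuse) a global register for the IR function or argument behind
// an OpFunction/OpFunctionParameter; IsFunDef reports whether the underlying
// function is a definition rather than a declaration.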
381Register SPIRVModuleAnalysis::handleFunctionOrParameter(
382 const MachineFunction *MF, const MachineInstr &MI,
383 std::map<const Value *, unsigned> &GlobalToGReg, bool &IsFunDef) {
384 const Value *GObj = GR->getGlobalObject(MF, MI.getOperand(0).getReg());
385 assert(GObj && "Unregistered global definition");
386 const Function *F = dyn_cast<Function>(GObj);
387 if (!F)
388 F = dyn_cast<Argument>(GObj)->getParent();
389 assert(F && "Expected a reference to a function or an argument");
390 IsFunDef = !F->isDeclaration();
391 auto It = GlobalToGReg.find(GObj);
392 if (It != GlobalToGReg.end())
393    return It->second;
394  Register GReg = Register::index2VirtReg(MAI.getNextID());
395  GlobalToGReg[GObj] = GReg;
396  if (!IsFunDef)
397    MAI.MS[SPIRV::MB_ExtFuncDecls].push_back(&MI);
398  return GReg;
399}
400
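// Deduplicate type declarations, constants and inline-asm definitions by their
// operand signature and return the global register of the canonical instance.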
401Register
402SPIRVModuleAnalysis::handleTypeDeclOrConstant(const MachineInstr &MI,
403                                              InstrGRegsMap &SignatureToGReg) {
404 InstrSignature MISign = instrToSignature(MI, MAI, false);
405 auto It = SignatureToGReg.find(MISign);
406 if (It != SignatureToGReg.end())
407    return It->second;
408  Register GReg = Register::index2VirtReg(MAI.getNextID());
409  SignatureToGReg[MISign] = GReg;
410  MAI.MS[SPIRV::MB_TypeConstVars].push_back(&MI);
411  return GReg;
412}
413
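// Assign (or reuse) a global register for a module-scope OpVariable, keyed by
// its underlying IR global object, and record the instruction in GlobalVarList.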
414Register SPIRVModuleAnalysis::handleVariable(
415 const MachineFunction *MF, const MachineInstr &MI,
416 std::map<const Value *, unsigned> &GlobalToGReg) {
417 MAI.GlobalVarList.push_back(&MI);
418 const Value *GObj = GR->getGlobalObject(MF, MI.getOperand(0).getReg());
419 assert(GObj && "Unregistered global definition");
420 auto It = GlobalToGReg.find(GObj);
421 if (It != GlobalToGReg.end())
422    return It->second;
423  Register GReg = Register::index2VirtReg(MAI.getNextID());
424  GlobalToGReg[GObj] = GReg;
425  MAI.MS[SPIRV::MB_TypeConstVars].push_back(&MI);
426  return GReg;
427}
428
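// Walk every machine function and number all module-level declarations
// globally via visitDecl; PastHeader tracks whether the scan is still inside
// the function's own OpFunction header (0 = not seen, 1 = header, 2 = past it).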
429void SPIRVModuleAnalysis::collectDeclarations(const Module &M) {
430 InstrGRegsMap SignatureToGReg;
431 std::map<const Value *, unsigned> GlobalToGReg;
432  for (auto F = M.begin(), E = M.end(); F != E; ++F) {
433    MachineFunction *MF = MMI->getMachineFunction(*F);
434    if (!MF)
435 continue;
436 const MachineRegisterInfo &MRI = MF->getRegInfo();
437 unsigned PastHeader = 0;
438 for (MachineBasicBlock &MBB : *MF) {
439 for (MachineInstr &MI : MBB) {
440 if (MI.getNumOperands() == 0)
441 continue;
442 unsigned Opcode = MI.getOpcode();
443 if (Opcode == SPIRV::OpFunction) {
444 if (PastHeader == 0) {
445 PastHeader = 1;
446 continue;
447 }
448 } else if (Opcode == SPIRV::OpFunctionParameter) {
449 if (PastHeader < 2)
450 continue;
451 } else if (PastHeader > 0) {
452 PastHeader = 2;
453 }
454
455 const MachineOperand &DefMO = MI.getOperand(0);
456 switch (Opcode) {
457 case SPIRV::OpExtension:
458          MAI.Reqs.addExtension(SPIRV::Extension::Extension(DefMO.getImm()));
459          MAI.setSkipEmission(&MI);
460          break;
461        case SPIRV::OpCapability:
462          MAI.Reqs.addCapability(SPIRV::Capability::Capability(DefMO.getImm()));
463          MAI.setSkipEmission(&MI);
464 if (PastHeader > 0)
465 PastHeader = 2;
466 break;
467 default:
468 if (DefMO.isReg() && isDeclSection(MRI, MI) &&
469 !MAI.hasRegisterAlias(MF, DefMO.getReg()))
470 visitDecl(MRI, SignatureToGReg, GlobalToGReg, MF, MI);
471 }
472 }
473 }
474 }
475}
476
477// Look for IDs declared with Import linkage, and map the corresponding function
478// to the register defining that variable (which will usually be the result of
479// an OpFunction). This lets us call externally imported functions using
480// the correct ID registers.
481void SPIRVModuleAnalysis::collectFuncNames(MachineInstr &MI,
482 const Function *F) {
483 if (MI.getOpcode() == SPIRV::OpDecorate) {
484 // If it's got Import linkage.
485 auto Dec = MI.getOperand(1).getImm();
486 if (Dec == static_cast<unsigned>(SPIRV::Decoration::LinkageAttributes)) {
487 auto Lnk = MI.getOperand(MI.getNumOperands() - 1).getImm();
488 if (Lnk == static_cast<unsigned>(SPIRV::LinkageType::Import)) {
489 // Map imported function name to function ID register.
490 const Function *ImportedFunc =
491 F->getParent()->getFunction(getStringImm(MI, 2));
492 Register Target = MI.getOperand(0).getReg();
493 MAI.FuncMap[ImportedFunc] = MAI.getRegisterAlias(MI.getMF(), Target);
494 }
495 }
496 } else if (MI.getOpcode() == SPIRV::OpFunction) {
497 // Record all internal OpFunction declarations.
498 Register Reg = MI.defs().begin()->getReg();
499 Register GlobalReg = MAI.getRegisterAlias(MI.getMF(), Reg);
500 assert(GlobalReg.isValid());
501 MAI.FuncMap[F] = GlobalReg;
502 }
503}
504
505// Collect the given instruction in the specified MS. We assume global register
506// numbering has already occurred by this point. We can directly compare reg
507// arguments when detecting duplicates.
508static void collectOtherInstr(MachineInstr &MI, SPIRV::ModuleAnalysisInfo &MAI,
509                              SPIRV::ModuleSectionType MSType, InstrTraces &IS,
510                              bool Append = true) {
511 MAI.setSkipEmission(&MI);
512 InstrSignature MISign = instrToSignature(MI, MAI, true);
513 auto FoundMI = IS.insert(MISign);
514 if (!FoundMI.second)
515 return; // insert failed, so we found a duplicate; don't add it to MAI.MS
516 // No duplicates, so add it.
517 if (Append)
518 MAI.MS[MSType].push_back(&MI);
519 else
520 MAI.MS[MSType].insert(MAI.MS[MSType].begin(), &MI);
521}
522
523// Some global instructions make reference to function-local ID regs, so cannot
524// be correctly collected until these registers are globally numbered.
525void SPIRVModuleAnalysis::processOtherInstrs(const Module &M) {
526 InstrTraces IS;
527 for (auto F = M.begin(), E = M.end(); F != E; ++F) {
528 if ((*F).isDeclaration())
529      continue;
530    MachineFunction *MF = MMI->getMachineFunction(*F);
531    assert(MF);
532
533 for (MachineBasicBlock &MBB : *MF)
534 for (MachineInstr &MI : MBB) {
535 if (MAI.getSkipEmission(&MI))
536 continue;
537 const unsigned OpCode = MI.getOpcode();
538 if (OpCode == SPIRV::OpString) {
539 collectOtherInstr(MI, MAI, SPIRV::MB_DebugStrings, IS);
540 } else if (OpCode == SPIRV::OpExtInst && MI.getOperand(2).isImm() &&
541 MI.getOperand(2).getImm() ==
542 SPIRV::InstructionSet::
543 NonSemantic_Shader_DebugInfo_100) {
544 MachineOperand Ins = MI.getOperand(3);
545 namespace NS = SPIRV::NonSemanticExtInst;
546 static constexpr int64_t GlobalNonSemanticDITy[] = {
547 NS::DebugSource, NS::DebugCompilationUnit, NS::DebugInfoNone,
548 NS::DebugTypeBasic, NS::DebugTypePointer};
549 bool IsGlobalDI = false;
550 for (unsigned Idx = 0; Idx < std::size(GlobalNonSemanticDITy); ++Idx)
551 IsGlobalDI |= Ins.getImm() == GlobalNonSemanticDITy[Idx];
552 if (IsGlobalDI)
553 collectOtherInstr(MI, MAI, SPIRV::MB_NonSemanticGlobalDI, IS);
554 } else if (OpCode == SPIRV::OpName || OpCode == SPIRV::OpMemberName) {
555 collectOtherInstr(MI, MAI, SPIRV::MB_DebugNames, IS);
556 } else if (OpCode == SPIRV::OpEntryPoint) {
557 collectOtherInstr(MI, MAI, SPIRV::MB_EntryPoints, IS);
558 } else if (TII->isDecorationInstr(MI)) {
559 collectOtherInstr(MI, MAI, SPIRV::MB_Annotations, IS);
560 collectFuncNames(MI, &*F);
561 } else if (TII->isConstantInstr(MI)) {
562 // Now OpSpecConstant*s are not in DT,
563 // but they need to be collected anyway.
564 collectOtherInstr(MI, MAI, SPIRV::MB_TypeConstVars, IS);
565 } else if (OpCode == SPIRV::OpFunction) {
566 collectFuncNames(MI, &*F);
567 } else if (OpCode == SPIRV::OpTypeForwardPointer) {
568 collectOtherInstr(MI, MAI, SPIRV::MB_TypeConstVars, IS, false);
569 }
570 }
571 }
572}
573
574// Number registers in all functions globally from 0 onwards and store
575// the result in global register alias table. Some registers are already
576// numbered.
577void SPIRVModuleAnalysis::numberRegistersGlobally(const Module &M) {
578 for (auto F = M.begin(), E = M.end(); F != E; ++F) {
579 if ((*F).isDeclaration())
580      continue;
581    MachineFunction *MF = MMI->getMachineFunction(*F);
582    assert(MF);
583 for (MachineBasicBlock &MBB : *MF) {
584 for (MachineInstr &MI : MBB) {
585 for (MachineOperand &Op : MI.operands()) {
586 if (!Op.isReg())
587 continue;
588 Register Reg = Op.getReg();
589 if (MAI.hasRegisterAlias(MF, Reg))
590            continue;
591          Register NewReg = Register::index2VirtReg(MAI.getNextID());
592          MAI.setRegisterAlias(MF, Reg, NewReg);
593 }
594 if (MI.getOpcode() != SPIRV::OpExtInst)
595 continue;
596 auto Set = MI.getOperand(2).getImm();
597        if (!MAI.ExtInstSetMap.contains(Set))
598          MAI.ExtInstSetMap[Set] = Register::index2VirtReg(MAI.getNextID());
599      }
600 }
601 }
602}
603
604// RequirementHandler implementations.
605void SPIRV::RequirementHandler::getAndAddRequirements(
606    SPIRV::OperandCategory::OperandCategory Category, uint32_t i,
607 const SPIRVSubtarget &ST) {
608 addRequirements(getSymbolicOperandRequirements(Category, i, ST, *this));
609}
610
611void SPIRV::RequirementHandler::recursiveAddCapabilities(
612 const CapabilityList &ToPrune) {
613 for (const auto &Cap : ToPrune) {
614 AllCaps.insert(Cap);
615 CapabilityList ImplicitDecls =
616 getSymbolicOperandCapabilities(OperandCategory::CapabilityOperand, Cap);
617 recursiveAddCapabilities(ImplicitDecls);
618 }
619}
620
621void SPIRV::RequirementHandler::addCapabilities(const CapabilityList &ToAdd) {
622  for (const auto &Cap : ToAdd) {
623 bool IsNewlyInserted = AllCaps.insert(Cap).second;
624 if (!IsNewlyInserted) // Don't re-add if it's already been declared.
625 continue;
626 CapabilityList ImplicitDecls =
627 getSymbolicOperandCapabilities(OperandCategory::CapabilityOperand, Cap);
628 recursiveAddCapabilities(ImplicitDecls);
629 MinimalCaps.push_back(Cap);
630 }
631}
632
633void SPIRV::RequirementHandler::addRequirements(
634    const SPIRV::Requirements &Req) {
635 if (!Req.IsSatisfiable)
636 report_fatal_error("Adding SPIR-V requirements this target can't satisfy.");
637
638 if (Req.Cap.has_value())
639 addCapabilities({Req.Cap.value()});
640
641 addExtensions(Req.Exts);
642
643 if (!Req.MinVer.empty()) {
644 if (!MaxVersion.empty() && Req.MinVer > MaxVersion) {
645 LLVM_DEBUG(dbgs() << "Conflicting version requirements: >= " << Req.MinVer
646 << " and <= " << MaxVersion << "\n");
647 report_fatal_error("Adding SPIR-V requirements that can't be satisfied.");
648 }
649
650 if (MinVersion.empty() || Req.MinVer > MinVersion)
651 MinVersion = Req.MinVer;
652 }
653
654 if (!Req.MaxVer.empty()) {
655 if (!MinVersion.empty() && Req.MaxVer < MinVersion) {
656 LLVM_DEBUG(dbgs() << "Conflicting version requirements: <= " << Req.MaxVer
657 << " and >= " << MinVersion << "\n");
658 report_fatal_error("Adding SPIR-V requirements that can't be satisfied.");
659 }
660
661 if (MaxVersion.empty() || Req.MaxVer < MaxVersion)
662 MaxVersion = Req.MaxVer;
663 }
664}
665
666void SPIRV::RequirementHandler::checkSatisfiable(
667    const SPIRVSubtarget &ST) const {
668 // Report as many errors as possible before aborting the compilation.
669 bool IsSatisfiable = true;
670 auto TargetVer = ST.getSPIRVVersion();
671
672  if (!MaxVersion.empty() && !TargetVer.empty() && MaxVersion < TargetVer) {
673    LLVM_DEBUG(
674        dbgs() << "Target SPIR-V version too high for required features\n"
675 << "Required max version: " << MaxVersion << " target version "
676 << TargetVer << "\n");
677 IsSatisfiable = false;
678 }
679
680 if (!MinVersion.empty() && !TargetVer.empty() && MinVersion > TargetVer) {
681 LLVM_DEBUG(dbgs() << "Target SPIR-V version too low for required features\n"
682 << "Required min version: " << MinVersion
683 << " target version " << TargetVer << "\n");
684 IsSatisfiable = false;
685 }
686
687  if (!MinVersion.empty() && !MaxVersion.empty() && MinVersion > MaxVersion) {
688    LLVM_DEBUG(
689        dbgs()
690 << "Version is too low for some features and too high for others.\n"
691 << "Required SPIR-V min version: " << MinVersion
692 << " required SPIR-V max version " << MaxVersion << "\n");
693 IsSatisfiable = false;
694 }
695
696 for (auto Cap : MinimalCaps) {
697 if (AvailableCaps.contains(Cap))
698 continue;
699    LLVM_DEBUG(dbgs() << "Capability not supported: "
700                      << getSymbolicOperandMnemonic(
701                             OperandCategory::CapabilityOperand, Cap)
702 << "\n");
703 IsSatisfiable = false;
704 }
705
706 for (auto Ext : AllExtensions) {
707 if (ST.canUseExtension(Ext))
708 continue;
709    LLVM_DEBUG(dbgs() << "Extension not supported: "
710                      << getSymbolicOperandMnemonic(
711                             OperandCategory::ExtensionOperand, Ext)
712 << "\n");
713 IsSatisfiable = false;
714 }
715
716 if (!IsSatisfiable)
717 report_fatal_error("Unable to meet SPIR-V requirements for this target.");
718}
719
720// Add the given capabilities and all their implicitly defined capabilities too.
721void SPIRV::RequirementHandler::addAvailableCaps(const CapabilityList &ToAdd) {
722  for (const auto Cap : ToAdd)
723 if (AvailableCaps.insert(Cap).second)
724 addAvailableCaps(getSymbolicOperandCapabilities(
725 SPIRV::OperandCategory::CapabilityOperand, Cap));
726}
727
728void SPIRV::RequirementHandler::removeCapabilityIf(
729    const Capability::Capability ToRemove,
730 const Capability::Capability IfPresent) {
731 if (AllCaps.contains(IfPresent))
732 AllCaps.erase(ToRemove);
733}
734
735namespace llvm {
736namespace SPIRV {
737void RequirementHandler::initAvailableCapabilities(const SPIRVSubtarget &ST) {
738  // Provided by all supported Vulkan versions and by OpenCL.
739 addAvailableCaps({Capability::Shader, Capability::Linkage, Capability::Int8,
740 Capability::Int16});
741
742 if (ST.isAtLeastSPIRVVer(VersionTuple(1, 3)))
743 addAvailableCaps({Capability::GroupNonUniform,
744 Capability::GroupNonUniformVote,
745 Capability::GroupNonUniformArithmetic,
746 Capability::GroupNonUniformBallot,
747 Capability::GroupNonUniformClustered,
748 Capability::GroupNonUniformShuffle,
749 Capability::GroupNonUniformShuffleRelative});
750
751 if (ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
752 addAvailableCaps({Capability::DotProduct, Capability::DotProductInputAll,
753 Capability::DotProductInput4x8Bit,
754 Capability::DotProductInput4x8BitPacked,
755 Capability::DemoteToHelperInvocation});
756
757 // Add capabilities enabled by extensions.
758 for (auto Extension : ST.getAllAvailableExtensions()) {
759    CapabilityList EnabledCapabilities =
760        getSymbolicOperandCapabilities(OperandCategory::ExtensionOperand, Extension);
761    addAvailableCaps(EnabledCapabilities);
762 }
763
764 if (ST.isOpenCLEnv()) {
765 initAvailableCapabilitiesForOpenCL(ST);
766 return;
767 }
768
769 if (ST.isVulkanEnv()) {
770 initAvailableCapabilitiesForVulkan(ST);
771 return;
772 }
773
774 report_fatal_error("Unimplemented environment for SPIR-V generation.");
775}
776
777void RequirementHandler::initAvailableCapabilitiesForOpenCL(
778 const SPIRVSubtarget &ST) {
779 // Add the min requirements for different OpenCL and SPIR-V versions.
780 addAvailableCaps({Capability::Addresses, Capability::Float16Buffer,
781 Capability::Kernel, Capability::Vector16,
782 Capability::Groups, Capability::GenericPointer,
783 Capability::StorageImageWriteWithoutFormat,
784 Capability::StorageImageReadWithoutFormat});
785 if (ST.hasOpenCLFullProfile())
786 addAvailableCaps({Capability::Int64, Capability::Int64Atomics});
787 if (ST.hasOpenCLImageSupport()) {
788 addAvailableCaps({Capability::ImageBasic, Capability::LiteralSampler,
789 Capability::Image1D, Capability::SampledBuffer,
790 Capability::ImageBuffer});
791 if (ST.isAtLeastOpenCLVer(VersionTuple(2, 0)))
792 addAvailableCaps({Capability::ImageReadWrite});
793 }
794 if (ST.isAtLeastSPIRVVer(VersionTuple(1, 1)) &&
795 ST.isAtLeastOpenCLVer(VersionTuple(2, 2)))
796 addAvailableCaps({Capability::SubgroupDispatch, Capability::PipeStorage});
797 if (ST.isAtLeastSPIRVVer(VersionTuple(1, 4)))
798 addAvailableCaps({Capability::DenormPreserve, Capability::DenormFlushToZero,
799 Capability::SignedZeroInfNanPreserve,
800 Capability::RoundingModeRTE,
801 Capability::RoundingModeRTZ});
802 // TODO: verify if this needs some checks.
803 addAvailableCaps({Capability::Float16, Capability::Float64});
804
805 // TODO: add OpenCL extensions.
806}
807
808void RequirementHandler::initAvailableCapabilitiesForVulkan(
809 const SPIRVSubtarget &ST) {
810
811 // Core in Vulkan 1.1 and earlier.
812 addAvailableCaps({Capability::Int64, Capability::Float16, Capability::Float64,
813 Capability::GroupNonUniform, Capability::Image1D,
814 Capability::SampledBuffer, Capability::ImageBuffer,
815 Capability::UniformBufferArrayDynamicIndexing,
816 Capability::SampledImageArrayDynamicIndexing,
817 Capability::StorageBufferArrayDynamicIndexing,
818 Capability::StorageImageArrayDynamicIndexing});
819
820 // Became core in Vulkan 1.2
821 if (ST.isAtLeastSPIRVVer(VersionTuple(1, 5))) {
822 addAvailableCaps(
823 {Capability::ShaderNonUniformEXT, Capability::RuntimeDescriptorArrayEXT,
824 Capability::InputAttachmentArrayDynamicIndexingEXT,
825 Capability::UniformTexelBufferArrayDynamicIndexingEXT,
826 Capability::StorageTexelBufferArrayDynamicIndexingEXT,
827 Capability::UniformBufferArrayNonUniformIndexingEXT,
828 Capability::SampledImageArrayNonUniformIndexingEXT,
829 Capability::StorageBufferArrayNonUniformIndexingEXT,
830 Capability::StorageImageArrayNonUniformIndexingEXT,
831 Capability::InputAttachmentArrayNonUniformIndexingEXT,
832 Capability::UniformTexelBufferArrayNonUniformIndexingEXT,
833 Capability::StorageTexelBufferArrayNonUniformIndexingEXT});
834 }
835
836 // Became core in Vulkan 1.3
837 if (ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
838 addAvailableCaps({Capability::StorageImageWriteWithoutFormat,
839 Capability::StorageImageReadWithoutFormat});
840}
841
842} // namespace SPIRV
843} // namespace llvm
844
845// Add the required capabilities from a decoration instruction (including
846// BuiltIns).
847static void addOpDecorateReqs(const MachineInstr &MI, unsigned DecIndex,
848                              SPIRV::RequirementHandler &Reqs,
849                              const SPIRVSubtarget &ST) {
850 int64_t DecOp = MI.getOperand(DecIndex).getImm();
851 auto Dec = static_cast<SPIRV::Decoration::Decoration>(DecOp);
852 Reqs.addRequirements(getSymbolicOperandRequirements(
853 SPIRV::OperandCategory::DecorationOperand, Dec, ST, Reqs));
854
855 if (Dec == SPIRV::Decoration::BuiltIn) {
856 int64_t BuiltInOp = MI.getOperand(DecIndex + 1).getImm();
857 auto BuiltIn = static_cast<SPIRV::BuiltIn::BuiltIn>(BuiltInOp);
858 Reqs.addRequirements(getSymbolicOperandRequirements(
859 SPIRV::OperandCategory::BuiltInOperand, BuiltIn, ST, Reqs));
860 } else if (Dec == SPIRV::Decoration::LinkageAttributes) {
861 int64_t LinkageOp = MI.getOperand(MI.getNumOperands() - 1).getImm();
862 SPIRV::LinkageType::LinkageType LnkType =
863 static_cast<SPIRV::LinkageType::LinkageType>(LinkageOp);
864 if (LnkType == SPIRV::LinkageType::LinkOnceODR)
865 Reqs.addExtension(SPIRV::Extension::SPV_KHR_linkonce_odr);
866 } else if (Dec == SPIRV::Decoration::CacheControlLoadINTEL ||
867 Dec == SPIRV::Decoration::CacheControlStoreINTEL) {
868 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_cache_controls);
869 } else if (Dec == SPIRV::Decoration::HostAccessINTEL) {
870 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_global_variable_host_access);
871 } else if (Dec == SPIRV::Decoration::InitModeINTEL ||
872 Dec == SPIRV::Decoration::ImplementInRegisterMapINTEL) {
873 Reqs.addExtension(
874 SPIRV::Extension::SPV_INTEL_global_variable_fpga_decorations);
875 } else if (Dec == SPIRV::Decoration::NonUniformEXT) {
876 Reqs.addRequirements(SPIRV::Capability::ShaderNonUniformEXT);
877 }
878}
879
880// Add requirements for image handling.
881static void addOpTypeImageReqs(const MachineInstr &MI,
882                               SPIRV::RequirementHandler &Reqs,
883                               const SPIRVSubtarget &ST) {
884 assert(MI.getNumOperands() >= 8 && "Insufficient operands for OpTypeImage");
885 // The operand indices used here are based on the OpTypeImage layout, which
886 // the MachineInstr follows as well.
887 int64_t ImgFormatOp = MI.getOperand(7).getImm();
888 auto ImgFormat = static_cast<SPIRV::ImageFormat::ImageFormat>(ImgFormatOp);
889 Reqs.getAndAddRequirements(SPIRV::OperandCategory::ImageFormatOperand,
890 ImgFormat, ST);
891
892 bool IsArrayed = MI.getOperand(4).getImm() == 1;
893 bool IsMultisampled = MI.getOperand(5).getImm() == 1;
894 bool NoSampler = MI.getOperand(6).getImm() == 2;
895 // Add dimension requirements.
896 assert(MI.getOperand(2).isImm());
897 switch (MI.getOperand(2).getImm()) {
898 case SPIRV::Dim::DIM_1D:
899 Reqs.addRequirements(NoSampler ? SPIRV::Capability::Image1D
900 : SPIRV::Capability::Sampled1D);
901 break;
902 case SPIRV::Dim::DIM_2D:
903 if (IsMultisampled && NoSampler)
904 Reqs.addRequirements(SPIRV::Capability::ImageMSArray);
905 break;
906 case SPIRV::Dim::DIM_Cube:
907 Reqs.addRequirements(SPIRV::Capability::Shader);
908 if (IsArrayed)
909 Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageCubeArray
910 : SPIRV::Capability::SampledCubeArray);
911 break;
912 case SPIRV::Dim::DIM_Rect:
913 Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageRect
914 : SPIRV::Capability::SampledRect);
915 break;
916 case SPIRV::Dim::DIM_Buffer:
917 Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageBuffer
918 : SPIRV::Capability::SampledBuffer);
919 break;
920 case SPIRV::Dim::DIM_SubpassData:
921 Reqs.addRequirements(SPIRV::Capability::InputAttachment);
922 break;
923 }
924
925 // Has optional access qualifier.
926 if (ST.isOpenCLEnv()) {
927 if (MI.getNumOperands() > 8 &&
928 MI.getOperand(8).getImm() == SPIRV::AccessQualifier::ReadWrite)
929 Reqs.addRequirements(SPIRV::Capability::ImageReadWrite);
930 else
931 Reqs.addRequirements(SPIRV::Capability::ImageBasic);
932 }
933}
934
935// Add requirements for handling atomic float instructions
936#define ATOM_FLT_REQ_EXT_MSG(ExtName) \
937 "The atomic float instruction requires the following SPIR-V " \
938 "extension: SPV_EXT_shader_atomic_float" ExtName
939static void AddAtomicFloatRequirements(const MachineInstr &MI,
940                                       SPIRV::RequirementHandler &Reqs,
941                                       const SPIRVSubtarget &ST) {
942 assert(MI.getOperand(1).isReg() &&
943 "Expect register operand in atomic float instruction");
944 Register TypeReg = MI.getOperand(1).getReg();
945 SPIRVType *TypeDef = MI.getMF()->getRegInfo().getVRegDef(TypeReg);
946 if (TypeDef->getOpcode() != SPIRV::OpTypeFloat)
947 report_fatal_error("Result type of an atomic float instruction must be a "
948 "floating-point type scalar");
949
950 unsigned BitWidth = TypeDef->getOperand(1).getImm();
951 unsigned Op = MI.getOpcode();
952 if (Op == SPIRV::OpAtomicFAddEXT) {
953    if (!ST.canUseExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_add))
954      report_fatal_error(ATOM_FLT_REQ_EXT_MSG("_add"), false);
955    Reqs.addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_add);
956 switch (BitWidth) {
957 case 16:
958 if (!ST.canUseExtension(
959              SPIRV::Extension::SPV_EXT_shader_atomic_float16_add))
960        report_fatal_error(ATOM_FLT_REQ_EXT_MSG("16_add"), false);
961      Reqs.addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float16_add);
962 Reqs.addCapability(SPIRV::Capability::AtomicFloat16AddEXT);
963 break;
964 case 32:
965 Reqs.addCapability(SPIRV::Capability::AtomicFloat32AddEXT);
966 break;
967 case 64:
968 Reqs.addCapability(SPIRV::Capability::AtomicFloat64AddEXT);
969 break;
970    default:
971      report_fatal_error(
972          "Unexpected floating-point type width in atomic float instruction");
973 }
974 } else {
975 if (!ST.canUseExtension(
976 SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max))
977 report_fatal_error(ATOM_FLT_REQ_EXT_MSG("_min_max"), false);
978 Reqs.addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max);
979 switch (BitWidth) {
980 case 16:
981 Reqs.addCapability(SPIRV::Capability::AtomicFloat16MinMaxEXT);
982 break;
983 case 32:
984 Reqs.addCapability(SPIRV::Capability::AtomicFloat32MinMaxEXT);
985 break;
986 case 64:
987 Reqs.addCapability(SPIRV::Capability::AtomicFloat64MinMaxEXT);
988 break;
989    default:
990      report_fatal_error(
991          "Unexpected floating-point type width in atomic float instruction");
992 }
993 }
994}
995
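// Helpers classifying image-related type instructions by their Dim (operand 2)
// and Sampled (operand 6) fields; used below to select the matching descriptor
// indexing capabilities for OpAccessChain results.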
996bool isUniformTexelBuffer(MachineInstr *ImageInst) {
997 if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
998 return false;
999 uint32_t Dim = ImageInst->getOperand(2).getImm();
1000 uint32_t Sampled = ImageInst->getOperand(6).getImm();
1001 return Dim == SPIRV::Dim::DIM_Buffer && Sampled == 1;
1002}
1003
1004bool isStorageTexelBuffer(MachineInstr *ImageInst) {
1005 if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1006 return false;
1007 uint32_t Dim = ImageInst->getOperand(2).getImm();
1008 uint32_t Sampled = ImageInst->getOperand(6).getImm();
1009 return Dim == SPIRV::Dim::DIM_Buffer && Sampled == 2;
1010}
1011
1012bool isSampledImage(MachineInstr *ImageInst) {
1013 if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1014 return false;
1015 uint32_t Dim = ImageInst->getOperand(2).getImm();
1016 uint32_t Sampled = ImageInst->getOperand(6).getImm();
1017 return Dim != SPIRV::Dim::DIM_Buffer && Sampled == 1;
1018}
1019
1020bool isInputAttachment(MachineInstr *ImageInst) {
1021 if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1022 return false;
1023 uint32_t Dim = ImageInst->getOperand(2).getImm();
1024 uint32_t Sampled = ImageInst->getOperand(6).getImm();
1025 return Dim == SPIRV::Dim::DIM_SubpassData && Sampled == 2;
1026}
1027
1028bool isStorageImage(MachineInstr *ImageInst) {
1029 if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1030 return false;
1031 uint32_t Dim = ImageInst->getOperand(2).getImm();
1032 uint32_t Sampled = ImageInst->getOperand(6).getImm();
1033 return Dim != SPIRV::Dim::DIM_Buffer && Sampled == 2;
1034}
1035
1036bool isCombinedImageSampler(MachineInstr *SampledImageInst) {
1037 if (SampledImageInst->getOpcode() != SPIRV::OpTypeSampledImage)
1038 return false;
1039
1040 const MachineRegisterInfo &MRI = SampledImageInst->getMF()->getRegInfo();
1041 Register ImageReg = SampledImageInst->getOperand(1).getReg();
1042 auto *ImageInst = MRI.getUniqueVRegDef(ImageReg);
1043 return isSampledImage(ImageInst);
1044}
1045
1046bool hasNonUniformDecoration(Register Reg, const MachineRegisterInfo &MRI) {
1047 for (const auto &MI : MRI.reg_instructions(Reg)) {
1048 if (MI.getOpcode() != SPIRV::OpDecorate)
1049 continue;
1050
1051 uint32_t Dec = MI.getOperand(1).getImm();
1052 if (Dec == SPIRV::Decoration::NonUniformEXT)
1053 return true;
1054 }
1055 return false;
1056}
1057
1058void addOpAccessChainReqs(const MachineInstr &Instr,
1059                          SPIRV::RequirementHandler &Handler,
1060                          const SPIRVSubtarget &Subtarget) {
1061 const MachineRegisterInfo &MRI = Instr.getMF()->getRegInfo();
1062 // Get the result type. If it is an image type, then the shader uses
1063 // descriptor indexing. The appropriate capabilities will be added based
1064 // on the specifics of the image.
1065 Register ResTypeReg = Instr.getOperand(1).getReg();
1066 MachineInstr *ResTypeInst = MRI.getUniqueVRegDef(ResTypeReg);
1067
1068 assert(ResTypeInst->getOpcode() == SPIRV::OpTypePointer);
1069 uint32_t StorageClass = ResTypeInst->getOperand(1).getImm();
1070 if (StorageClass != SPIRV::StorageClass::StorageClass::UniformConstant &&
1071 StorageClass != SPIRV::StorageClass::StorageClass::Uniform &&
1072 StorageClass != SPIRV::StorageClass::StorageClass::StorageBuffer) {
1073 return;
1074 }
1075
1076 Register PointeeTypeReg = ResTypeInst->getOperand(2).getReg();
1077 MachineInstr *PointeeType = MRI.getUniqueVRegDef(PointeeTypeReg);
1078 if (PointeeType->getOpcode() != SPIRV::OpTypeImage &&
1079 PointeeType->getOpcode() != SPIRV::OpTypeSampledImage &&
1080 PointeeType->getOpcode() != SPIRV::OpTypeSampler) {
1081 return;
1082 }
1083
1084 bool IsNonUniform =
1085 hasNonUniformDecoration(Instr.getOperand(0).getReg(), MRI);
1086 if (isUniformTexelBuffer(PointeeType)) {
1087 if (IsNonUniform)
1088 Handler.addRequirements(
1089 SPIRV::Capability::UniformTexelBufferArrayNonUniformIndexingEXT);
1090 else
1091 Handler.addRequirements(
1092 SPIRV::Capability::UniformTexelBufferArrayDynamicIndexingEXT);
1093 } else if (isInputAttachment(PointeeType)) {
1094 if (IsNonUniform)
1095 Handler.addRequirements(
1096 SPIRV::Capability::InputAttachmentArrayNonUniformIndexingEXT);
1097 else
1098 Handler.addRequirements(
1099 SPIRV::Capability::InputAttachmentArrayDynamicIndexingEXT);
1100 } else if (isStorageTexelBuffer(PointeeType)) {
1101 if (IsNonUniform)
1102 Handler.addRequirements(
1103 SPIRV::Capability::StorageTexelBufferArrayNonUniformIndexingEXT);
1104 else
1105 Handler.addRequirements(
1106 SPIRV::Capability::StorageTexelBufferArrayDynamicIndexingEXT);
1107 } else if (isSampledImage(PointeeType) ||
1108 isCombinedImageSampler(PointeeType) ||
1109 PointeeType->getOpcode() == SPIRV::OpTypeSampler) {
1110 if (IsNonUniform)
1111 Handler.addRequirements(
1112 SPIRV::Capability::SampledImageArrayNonUniformIndexingEXT);
1113 else
1114 Handler.addRequirements(
1115 SPIRV::Capability::SampledImageArrayDynamicIndexing);
1116 } else if (isStorageImage(PointeeType)) {
1117 if (IsNonUniform)
1118 Handler.addRequirements(
1119 SPIRV::Capability::StorageImageArrayNonUniformIndexingEXT);
1120 else
1121 Handler.addRequirements(
1122 SPIRV::Capability::StorageImageArrayDynamicIndexing);
1123 }
1124}
1125
1126static bool isImageTypeWithUnknownFormat(SPIRVType *TypeInst) {
1127 if (TypeInst->getOpcode() != SPIRV::OpTypeImage)
1128 return false;
1129 assert(TypeInst->getOperand(7).isImm() && "The image format must be an imm.");
1130 return TypeInst->getOperand(7).getImm() == 0;
1131}
1132
1133static void AddDotProductRequirements(const MachineInstr &MI,
1134                                      SPIRV::RequirementHandler &Reqs,
1135                                      const SPIRVSubtarget &ST) {
1136 if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product))
1137 Reqs.addExtension(SPIRV::Extension::SPV_KHR_integer_dot_product);
1138 Reqs.addCapability(SPIRV::Capability::DotProduct);
1139
1140 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
1141 assert(MI.getOperand(2).isReg() && "Unexpected operand in dot");
1142 // We do not consider what the previous instruction is. This is just used
1143 // to get the input register and to check the type.
1144 const MachineInstr *Input = MRI.getVRegDef(MI.getOperand(2).getReg());
1145 assert(Input->getOperand(1).isReg() && "Unexpected operand in dot input");
1146 Register InputReg = Input->getOperand(1).getReg();
1147
1148 SPIRVType *TypeDef = MRI.getVRegDef(InputReg);
1149 if (TypeDef->getOpcode() == SPIRV::OpTypeInt) {
1150 assert(TypeDef->getOperand(1).getImm() == 32);
1151 Reqs.addCapability(SPIRV::Capability::DotProductInput4x8BitPacked);
1152 } else if (TypeDef->getOpcode() == SPIRV::OpTypeVector) {
1153 SPIRVType *ScalarTypeDef = MRI.getVRegDef(TypeDef->getOperand(1).getReg());
1154 assert(ScalarTypeDef->getOpcode() == SPIRV::OpTypeInt);
1155 if (ScalarTypeDef->getOperand(1).getImm() == 8) {
1156 assert(TypeDef->getOperand(2).getImm() == 4 &&
1157 "Dot operand of 8-bit integer type requires 4 components");
1158 Reqs.addCapability(SPIRV::Capability::DotProductInput4x8Bit);
1159 } else {
1160 Reqs.addCapability(SPIRV::Capability::DotProductInputAll);
1161 }
1162 }
1163}
1164
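// Add the capability, extension and version requirements implied by a single
// instruction to the requirement handler.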
1165void addInstrRequirements(const MachineInstr &MI,
1166                          SPIRV::RequirementHandler &Reqs,
1167                          const SPIRVSubtarget &ST) {
1168 switch (MI.getOpcode()) {
1169 case SPIRV::OpMemoryModel: {
1170 int64_t Addr = MI.getOperand(0).getImm();
1171 Reqs.getAndAddRequirements(SPIRV::OperandCategory::AddressingModelOperand,
1172 Addr, ST);
1173 int64_t Mem = MI.getOperand(1).getImm();
1174 Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand, Mem,
1175 ST);
1176 break;
1177 }
1178 case SPIRV::OpEntryPoint: {
1179 int64_t Exe = MI.getOperand(0).getImm();
1180 Reqs.getAndAddRequirements(SPIRV::OperandCategory::ExecutionModelOperand,
1181 Exe, ST);
1182 break;
1183 }
1184 case SPIRV::OpExecutionMode:
1185 case SPIRV::OpExecutionModeId: {
1186 int64_t Exe = MI.getOperand(1).getImm();
1187 Reqs.getAndAddRequirements(SPIRV::OperandCategory::ExecutionModeOperand,
1188 Exe, ST);
1189 break;
1190 }
1191 case SPIRV::OpTypeMatrix:
1192 Reqs.addCapability(SPIRV::Capability::Matrix);
1193 break;
1194 case SPIRV::OpTypeInt: {
1195 unsigned BitWidth = MI.getOperand(1).getImm();
1196 if (BitWidth == 64)
1197 Reqs.addCapability(SPIRV::Capability::Int64);
1198 else if (BitWidth == 16)
1199 Reqs.addCapability(SPIRV::Capability::Int16);
1200 else if (BitWidth == 8)
1201 Reqs.addCapability(SPIRV::Capability::Int8);
1202 break;
1203 }
1204 case SPIRV::OpTypeFloat: {
1205 unsigned BitWidth = MI.getOperand(1).getImm();
1206 if (BitWidth == 64)
1207 Reqs.addCapability(SPIRV::Capability::Float64);
1208 else if (BitWidth == 16)
1209 Reqs.addCapability(SPIRV::Capability::Float16);
1210 break;
1211 }
1212 case SPIRV::OpTypeVector: {
1213 unsigned NumComponents = MI.getOperand(2).getImm();
1214 if (NumComponents == 8 || NumComponents == 16)
1215 Reqs.addCapability(SPIRV::Capability::Vector16);
1216 break;
1217 }
1218 case SPIRV::OpTypePointer: {
1219 auto SC = MI.getOperand(1).getImm();
1220 Reqs.getAndAddRequirements(SPIRV::OperandCategory::StorageClassOperand, SC,
1221 ST);
1222    // If it is a pointer type to float16 and the target is OpenCL, add the
1223    // Float16Buffer capability.
1224 if (!ST.isOpenCLEnv())
1225 break;
1226 assert(MI.getOperand(2).isReg());
1227 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
1228 SPIRVType *TypeDef = MRI.getVRegDef(MI.getOperand(2).getReg());
1229 if (TypeDef->getOpcode() == SPIRV::OpTypeFloat &&
1230 TypeDef->getOperand(1).getImm() == 16)
1231 Reqs.addCapability(SPIRV::Capability::Float16Buffer);
1232 break;
1233 }
1234 case SPIRV::OpExtInst: {
1235 if (MI.getOperand(2).getImm() ==
1236 static_cast<int64_t>(
1237 SPIRV::InstructionSet::NonSemantic_Shader_DebugInfo_100)) {
1238 Reqs.addExtension(SPIRV::Extension::SPV_KHR_non_semantic_info);
1239 }
1240 break;
1241 }
1242 case SPIRV::OpBitReverse:
1243 case SPIRV::OpBitFieldInsert:
1244 case SPIRV::OpBitFieldSExtract:
1245 case SPIRV::OpBitFieldUExtract:
1246 if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
1247 Reqs.addCapability(SPIRV::Capability::Shader);
1248 break;
1249 }
1250 Reqs.addExtension(SPIRV::Extension::SPV_KHR_bit_instructions);
1251 Reqs.addCapability(SPIRV::Capability::BitInstructions);
1252 break;
1253 case SPIRV::OpTypeRuntimeArray:
1254 Reqs.addCapability(SPIRV::Capability::Shader);
1255 break;
1256 case SPIRV::OpTypeOpaque:
1257 case SPIRV::OpTypeEvent:
1258 Reqs.addCapability(SPIRV::Capability::Kernel);
1259 break;
1260 case SPIRV::OpTypePipe:
1261 case SPIRV::OpTypeReserveId:
1262 Reqs.addCapability(SPIRV::Capability::Pipes);
1263 break;
1264 case SPIRV::OpTypeDeviceEvent:
1265 case SPIRV::OpTypeQueue:
1266 case SPIRV::OpBuildNDRange:
1267 Reqs.addCapability(SPIRV::Capability::DeviceEnqueue);
1268 break;
1269 case SPIRV::OpDecorate:
1270 case SPIRV::OpDecorateId:
1271 case SPIRV::OpDecorateString:
1272 addOpDecorateReqs(MI, 1, Reqs, ST);
1273 break;
1274 case SPIRV::OpMemberDecorate:
1275 case SPIRV::OpMemberDecorateString:
1276 addOpDecorateReqs(MI, 2, Reqs, ST);
1277 break;
1278 case SPIRV::OpInBoundsPtrAccessChain:
1279 Reqs.addCapability(SPIRV::Capability::Addresses);
1280 break;
1281 case SPIRV::OpConstantSampler:
1282 Reqs.addCapability(SPIRV::Capability::LiteralSampler);
1283 break;
1284 case SPIRV::OpInBoundsAccessChain:
1285 case SPIRV::OpAccessChain:
1286 addOpAccessChainReqs(MI, Reqs, ST);
1287 break;
1288 case SPIRV::OpTypeImage:
1289 addOpTypeImageReqs(MI, Reqs, ST);
1290 break;
1291 case SPIRV::OpTypeSampler:
1292 if (!ST.isVulkanEnv()) {
1293 Reqs.addCapability(SPIRV::Capability::ImageBasic);
1294 }
1295 break;
1296 case SPIRV::OpTypeForwardPointer:
1297 // TODO: check if it's OpenCL's kernel.
1298 Reqs.addCapability(SPIRV::Capability::Addresses);
1299 break;
1300 case SPIRV::OpAtomicFlagTestAndSet:
1301 case SPIRV::OpAtomicLoad:
1302 case SPIRV::OpAtomicStore:
1303 case SPIRV::OpAtomicExchange:
1304 case SPIRV::OpAtomicCompareExchange:
1305 case SPIRV::OpAtomicIIncrement:
1306 case SPIRV::OpAtomicIDecrement:
1307 case SPIRV::OpAtomicIAdd:
1308 case SPIRV::OpAtomicISub:
1309 case SPIRV::OpAtomicUMin:
1310 case SPIRV::OpAtomicUMax:
1311 case SPIRV::OpAtomicSMin:
1312 case SPIRV::OpAtomicSMax:
1313 case SPIRV::OpAtomicAnd:
1314 case SPIRV::OpAtomicOr:
1315 case SPIRV::OpAtomicXor: {
1316 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
1317 const MachineInstr *InstrPtr = &MI;
1318 if (MI.getOpcode() == SPIRV::OpAtomicStore) {
1319 assert(MI.getOperand(3).isReg());
1320 InstrPtr = MRI.getVRegDef(MI.getOperand(3).getReg());
1321 assert(InstrPtr && "Unexpected type instruction for OpAtomicStore");
1322 }
1323 assert(InstrPtr->getOperand(1).isReg() && "Unexpected operand in atomic");
1324 Register TypeReg = InstrPtr->getOperand(1).getReg();
1325 SPIRVType *TypeDef = MRI.getVRegDef(TypeReg);
1326 if (TypeDef->getOpcode() == SPIRV::OpTypeInt) {
1327 unsigned BitWidth = TypeDef->getOperand(1).getImm();
1328 if (BitWidth == 64)
1329 Reqs.addCapability(SPIRV::Capability::Int64Atomics);
1330 }
1331 break;
1332 }
1333 case SPIRV::OpGroupNonUniformIAdd:
1334 case SPIRV::OpGroupNonUniformFAdd:
1335 case SPIRV::OpGroupNonUniformIMul:
1336 case SPIRV::OpGroupNonUniformFMul:
1337 case SPIRV::OpGroupNonUniformSMin:
1338 case SPIRV::OpGroupNonUniformUMin:
1339 case SPIRV::OpGroupNonUniformFMin:
1340 case SPIRV::OpGroupNonUniformSMax:
1341 case SPIRV::OpGroupNonUniformUMax:
1342 case SPIRV::OpGroupNonUniformFMax:
1343 case SPIRV::OpGroupNonUniformBitwiseAnd:
1344 case SPIRV::OpGroupNonUniformBitwiseOr:
1345 case SPIRV::OpGroupNonUniformBitwiseXor:
1346 case SPIRV::OpGroupNonUniformLogicalAnd:
1347 case SPIRV::OpGroupNonUniformLogicalOr:
1348 case SPIRV::OpGroupNonUniformLogicalXor: {
1349 assert(MI.getOperand(3).isImm());
1350 int64_t GroupOp = MI.getOperand(3).getImm();
1351 switch (GroupOp) {
1352 case SPIRV::GroupOperation::Reduce:
1353 case SPIRV::GroupOperation::InclusiveScan:
1354 case SPIRV::GroupOperation::ExclusiveScan:
1355 Reqs.addCapability(SPIRV::Capability::Kernel);
1356 Reqs.addCapability(SPIRV::Capability::GroupNonUniformArithmetic);
1357 Reqs.addCapability(SPIRV::Capability::GroupNonUniformBallot);
1358 break;
1359 case SPIRV::GroupOperation::ClusteredReduce:
1360 Reqs.addCapability(SPIRV::Capability::GroupNonUniformClustered);
1361 break;
1362 case SPIRV::GroupOperation::PartitionedReduceNV:
1363 case SPIRV::GroupOperation::PartitionedInclusiveScanNV:
1364 case SPIRV::GroupOperation::PartitionedExclusiveScanNV:
1365 Reqs.addCapability(SPIRV::Capability::GroupNonUniformPartitionedNV);
1366 break;
1367 }
1368 break;
1369 }
1370 case SPIRV::OpGroupNonUniformShuffle:
1371 case SPIRV::OpGroupNonUniformShuffleXor:
1372 Reqs.addCapability(SPIRV::Capability::GroupNonUniformShuffle);
1373 break;
1374 case SPIRV::OpGroupNonUniformShuffleUp:
1375 case SPIRV::OpGroupNonUniformShuffleDown:
1376 Reqs.addCapability(SPIRV::Capability::GroupNonUniformShuffleRelative);
1377 break;
1378 case SPIRV::OpGroupAll:
1379 case SPIRV::OpGroupAny:
1380 case SPIRV::OpGroupBroadcast:
1381 case SPIRV::OpGroupIAdd:
1382 case SPIRV::OpGroupFAdd:
1383 case SPIRV::OpGroupFMin:
1384 case SPIRV::OpGroupUMin:
1385 case SPIRV::OpGroupSMin:
1386 case SPIRV::OpGroupFMax:
1387 case SPIRV::OpGroupUMax:
1388 case SPIRV::OpGroupSMax:
1389 Reqs.addCapability(SPIRV::Capability::Groups);
1390 break;
1391 case SPIRV::OpGroupNonUniformElect:
1392 Reqs.addCapability(SPIRV::Capability::GroupNonUniform);
1393 break;
1394 case SPIRV::OpGroupNonUniformAll:
1395 case SPIRV::OpGroupNonUniformAny:
1396 case SPIRV::OpGroupNonUniformAllEqual:
1397 Reqs.addCapability(SPIRV::Capability::GroupNonUniformVote);
1398 break;
1399 case SPIRV::OpGroupNonUniformBroadcast:
1400 case SPIRV::OpGroupNonUniformBroadcastFirst:
1401 case SPIRV::OpGroupNonUniformBallot:
1402 case SPIRV::OpGroupNonUniformInverseBallot:
1403 case SPIRV::OpGroupNonUniformBallotBitExtract:
1404 case SPIRV::OpGroupNonUniformBallotBitCount:
1405 case SPIRV::OpGroupNonUniformBallotFindLSB:
1406 case SPIRV::OpGroupNonUniformBallotFindMSB:
1407 Reqs.addCapability(SPIRV::Capability::GroupNonUniformBallot);
1408 break;
1409 case SPIRV::OpSubgroupShuffleINTEL:
1410 case SPIRV::OpSubgroupShuffleDownINTEL:
1411 case SPIRV::OpSubgroupShuffleUpINTEL:
1412 case SPIRV::OpSubgroupShuffleXorINTEL:
1413 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1414 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1415 Reqs.addCapability(SPIRV::Capability::SubgroupShuffleINTEL);
1416 }
1417 break;
1418 case SPIRV::OpSubgroupBlockReadINTEL:
1419 case SPIRV::OpSubgroupBlockWriteINTEL:
1420 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1421 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1422 Reqs.addCapability(SPIRV::Capability::SubgroupBufferBlockIOINTEL);
1423 }
1424 break;
1425 case SPIRV::OpSubgroupImageBlockReadINTEL:
1426 case SPIRV::OpSubgroupImageBlockWriteINTEL:
1427 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1428 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1429 Reqs.addCapability(SPIRV::Capability::SubgroupImageBlockIOINTEL);
1430 }
1431 break;
1432 case SPIRV::OpSubgroupImageMediaBlockReadINTEL:
1433 case SPIRV::OpSubgroupImageMediaBlockWriteINTEL:
1434 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
1435 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_media_block_io);
1436 Reqs.addCapability(SPIRV::Capability::SubgroupImageMediaBlockIOINTEL);
1437 }
1438 break;
1439 case SPIRV::OpAssumeTrueKHR:
1440 case SPIRV::OpExpectKHR:
1441 if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume)) {
1442 Reqs.addExtension(SPIRV::Extension::SPV_KHR_expect_assume);
1443 Reqs.addCapability(SPIRV::Capability::ExpectAssumeKHR);
1444 }
1445 break;
1446 case SPIRV::OpPtrCastToCrossWorkgroupINTEL:
1447 case SPIRV::OpCrossWorkgroupCastToPtrINTEL:
1448 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)) {
1449 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes);
1450 Reqs.addCapability(SPIRV::Capability::USMStorageClassesINTEL);
1451 }
1452 break;
1453 case SPIRV::OpConstantFunctionPointerINTEL:
1454 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
1455 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
1456 Reqs.addCapability(SPIRV::Capability::FunctionPointersINTEL);
1457 }
1458 break;
1459 case SPIRV::OpGroupNonUniformRotateKHR:
1460 if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate))
1461 report_fatal_error("OpGroupNonUniformRotateKHR instruction requires the "
1462 "following SPIR-V extension: SPV_KHR_subgroup_rotate",
1463 false);
1464 Reqs.addExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate);
1465 Reqs.addCapability(SPIRV::Capability::GroupNonUniformRotateKHR);
1466 Reqs.addCapability(SPIRV::Capability::GroupNonUniform);
1467 break;
1468 case SPIRV::OpGroupIMulKHR:
1469 case SPIRV::OpGroupFMulKHR:
1470 case SPIRV::OpGroupBitwiseAndKHR:
1471 case SPIRV::OpGroupBitwiseOrKHR:
1472 case SPIRV::OpGroupBitwiseXorKHR:
1473 case SPIRV::OpGroupLogicalAndKHR:
1474 case SPIRV::OpGroupLogicalOrKHR:
1475 case SPIRV::OpGroupLogicalXorKHR:
1476 if (ST.canUseExtension(
1477 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1478 Reqs.addExtension(SPIRV::Extension::SPV_KHR_uniform_group_instructions);
1479 Reqs.addCapability(SPIRV::Capability::GroupUniformArithmeticKHR);
1480 }
1481 break;
1482 case SPIRV::OpReadClockKHR:
1483 if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock))
1484 report_fatal_error("OpReadClockKHR instruction requires the "
1485 "following SPIR-V extension: SPV_KHR_shader_clock",
1486 false);
1487 Reqs.addExtension(SPIRV::Extension::SPV_KHR_shader_clock);
1488 Reqs.addCapability(SPIRV::Capability::ShaderClockKHR);
1489 break;
1490 case SPIRV::OpFunctionPointerCallINTEL:
1491 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
1492 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
1493 Reqs.addCapability(SPIRV::Capability::FunctionPointersINTEL);
1494 }
1495 break;
1496 case SPIRV::OpAtomicFAddEXT:
1497 case SPIRV::OpAtomicFMinEXT:
1498 case SPIRV::OpAtomicFMaxEXT:
1499 AddAtomicFloatRequirements(MI, Reqs, ST);
1500 break;
1501 case SPIRV::OpConvertBF16ToFINTEL:
1502 case SPIRV::OpConvertFToBF16INTEL:
1503 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bfloat16_conversion)) {
1504 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_bfloat16_conversion);
1505 Reqs.addCapability(SPIRV::Capability::BFloat16ConversionINTEL);
1506 }
1507 break;
1508 case SPIRV::OpVariableLengthArrayINTEL:
1509 case SPIRV::OpSaveMemoryINTEL:
1510 case SPIRV::OpRestoreMemoryINTEL:
1511 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array)) {
1512 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_variable_length_array);
1513 Reqs.addCapability(SPIRV::Capability::VariableLengthArrayINTEL);
1514 }
1515 break;
1516 case SPIRV::OpAsmTargetINTEL:
1517 case SPIRV::OpAsmINTEL:
1518 case SPIRV::OpAsmCallINTEL:
1519 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_inline_assembly)) {
1520 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_inline_assembly);
1521 Reqs.addCapability(SPIRV::Capability::AsmINTEL);
1522 }
1523 break;
1524 case SPIRV::OpTypeCooperativeMatrixKHR:
1525 if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
1527 "OpTypeCooperativeMatrixKHR type requires the "
1528 "following SPIR-V extension: SPV_KHR_cooperative_matrix",
1529 false);
1530 Reqs.addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1531 Reqs.addCapability(SPIRV::Capability::CooperativeMatrixKHR);
1532 break;
1533 case SPIRV::OpArithmeticFenceEXT:
1534 if (!ST.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
1535 report_fatal_error("OpArithmeticFenceEXT requires the "
1536 "following SPIR-V extension: SPV_EXT_arithmetic_fence",
1537 false);
1538 Reqs.addExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence);
1539 Reqs.addCapability(SPIRV::Capability::ArithmeticFenceEXT);
1540 break;
1541 case SPIRV::OpControlBarrierArriveINTEL:
1542 case SPIRV::OpControlBarrierWaitINTEL:
1543 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
1544 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_split_barrier);
1545 Reqs.addCapability(SPIRV::Capability::SplitBarrierINTEL);
1546 }
1547 break;
1548 case SPIRV::OpCooperativeMatrixMulAddKHR: {
1549 if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
1550 report_fatal_error("Cooperative matrix instructions require the "
1551 "following SPIR-V extension: "
1552 "SPV_KHR_cooperative_matrix",
1553 false);
1554 Reqs.addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1555 Reqs.addCapability(SPIRV::Capability::CooperativeMatrixKHR);
1556 constexpr unsigned MulAddMaxSize = 6;
1557 if (MI.getNumOperands() != MulAddMaxSize)
1558 break;
1559 const int64_t CoopOperands = MI.getOperand(MulAddMaxSize - 1).getImm();
1560 if (CoopOperands &
1561 SPIRV::CooperativeMatrixOperands::MatrixAAndBTF32ComponentsINTEL) {
1562 if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1563 report_fatal_error("MatrixAAndBTF32ComponentsINTEL type interpretation "
1564 "require the following SPIR-V extension: "
1565 "SPV_INTEL_joint_matrix",
1566 false);
1567 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1568 Reqs.addCapability(
1569 SPIRV::Capability::CooperativeMatrixTF32ComponentTypeINTEL);
1570 }
1571 if (CoopOperands & SPIRV::CooperativeMatrixOperands::
1572 MatrixAAndBBFloat16ComponentsINTEL ||
1573 CoopOperands &
1574 SPIRV::CooperativeMatrixOperands::MatrixCBFloat16ComponentsINTEL ||
1575 CoopOperands & SPIRV::CooperativeMatrixOperands::
1576 MatrixResultBFloat16ComponentsINTEL) {
1577 if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1578 report_fatal_error("***BF16ComponentsINTEL type interpretations "
1579 "require the following SPIR-V extension: "
1580 "SPV_INTEL_joint_matrix",
1581 false);
1582 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1583 Reqs.addCapability(
1584 SPIRV::Capability::CooperativeMatrixBFloat16ComponentTypeINTEL);
1585 }
1586 break;
1587 }
1588 case SPIRV::OpCooperativeMatrixLoadKHR:
1589 case SPIRV::OpCooperativeMatrixStoreKHR:
1590 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
1591 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
1592 case SPIRV::OpCooperativeMatrixPrefetchINTEL: {
1593 if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
1594 report_fatal_error("Cooperative matrix instructions require the "
1595 "following SPIR-V extension: "
1596 "SPV_KHR_cooperative_matrix",
1597 false);
1598 Reqs.addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1599 Reqs.addCapability(SPIRV::Capability::CooperativeMatrixKHR);
1600
1601 // Check the Layout operand in case it's not a standard one and add the
1602 // appropriate capability.
1603 std::unordered_map<unsigned, unsigned> LayoutToInstMap = {
1604 {SPIRV::OpCooperativeMatrixLoadKHR, 3},
1605 {SPIRV::OpCooperativeMatrixStoreKHR, 2},
1606 {SPIRV::OpCooperativeMatrixLoadCheckedINTEL, 5},
1607 {SPIRV::OpCooperativeMatrixStoreCheckedINTEL, 4},
1608 {SPIRV::OpCooperativeMatrixPrefetchINTEL, 4}};
1609
1610 const auto OpCode = MI.getOpcode();
1611 const unsigned LayoutNum = LayoutToInstMap[OpCode];
1612 Register RegLayout = MI.getOperand(LayoutNum).getReg();
1613 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
1614 MachineInstr *MILayout = MRI.getUniqueVRegDef(RegLayout);
1615 if (MILayout->getOpcode() == SPIRV::OpConstantI) {
1616 const unsigned LayoutVal = MILayout->getOperand(2).getImm();
1617 if (LayoutVal ==
1618 static_cast<unsigned>(SPIRV::CooperativeMatrixLayout::PackedINTEL)) {
1619 if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1620 report_fatal_error("PackedINTEL layout require the following SPIR-V "
1621 "extension: SPV_INTEL_joint_matrix",
1622 false);
1623 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1624 Reqs.addCapability(SPIRV::Capability::PackedCooperativeMatrixINTEL);
1625 }
1626 }
1627
1628 // Nothing more to do for the standard KHR load/store forms.
1629 if (OpCode == SPIRV::OpCooperativeMatrixLoadKHR ||
1630 OpCode == SPIRV::OpCooperativeMatrixStoreKHR)
1631 break;
1632
1633 std::string InstName;
1634 switch (OpCode) {
1635 case SPIRV::OpCooperativeMatrixPrefetchINTEL:
1636 InstName = "OpCooperativeMatrixPrefetchINTEL";
1637 break;
1638 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
1639 InstName = "OpCooperativeMatrixLoadCheckedINTEL";
1640 break;
1641 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
1642 InstName = "OpCooperativeMatrixStoreCheckedINTEL";
1643 break;
1644 }
1645
1646 if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix)) {
1647 const std::string ErrorMsg =
1648 InstName + " instruction requires the "
1649 "following SPIR-V extension: SPV_INTEL_joint_matrix";
1650 report_fatal_error(ErrorMsg.c_str(), false);
1651 }
1652 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1653 if (OpCode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
1654 Reqs.addCapability(SPIRV::Capability::CooperativeMatrixPrefetchINTEL);
1655 break;
1656 }
1657 Reqs.addCapability(
1658 SPIRV::Capability::CooperativeMatrixCheckedInstructionsINTEL);
1659 break;
1660 }
1661 case SPIRV::OpCooperativeMatrixConstructCheckedINTEL:
1662 if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1663 report_fatal_error("OpCooperativeMatrixConstructCheckedINTEL "
1664 "instructions require the following SPIR-V extension: "
1665 "SPV_INTEL_joint_matrix",
1666 false);
1667 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1668 Reqs.addCapability(
1669 SPIRV::Capability::CooperativeMatrixCheckedInstructionsINTEL);
1670 break;
1671 case SPIRV::OpCooperativeMatrixGetElementCoordINTEL:
1672 if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1673 report_fatal_error("OpCooperativeMatrixGetElementCoordINTEL requires the "
1674 "following SPIR-V extension: SPV_INTEL_joint_matrix",
1675 false);
1676 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1677 Reqs.addCapability(
1678 SPIRV::Capability::CooperativeMatrixInvocationInstructionsINTEL);
1679 break;
1680 case SPIRV::OpKill: {
1681 Reqs.addCapability(SPIRV::Capability::Shader);
1682 } break;
1683 case SPIRV::OpDemoteToHelperInvocation:
1684 Reqs.addCapability(SPIRV::Capability::DemoteToHelperInvocation);
1685
1686 if (ST.canUseExtension(
1687 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation)) {
1688 if (!ST.isAtLeastSPIRVVer(llvm::VersionTuple(1, 6)))
1689 Reqs.addExtension(
1690 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation);
1691 }
1692 break;
1693 case SPIRV::OpSDot:
1694 case SPIRV::OpUDot:
1695 AddDotProductRequirements(MI, Reqs, ST);
1696 break;
1697 case SPIRV::OpImageRead: {
1698 Register ImageReg = MI.getOperand(2).getReg();
1699 SPIRVType *TypeDef = ST.getSPIRVGlobalRegistry()->getResultType(ImageReg);
1700 if (isImageTypeWithUnknownFormat(TypeDef))
1701 Reqs.addCapability(SPIRV::Capability::StorageImageReadWithoutFormat);
1702 break;
1703 }
1704 case SPIRV::OpImageWrite: {
1705 Register ImageReg = MI.getOperand(0).getReg();
1706 SPIRVType *TypeDef = ST.getSPIRVGlobalRegistry()->getResultType(ImageReg);
1707 if (isImageTypeWithUnknownFormat(TypeDef))
1708 Reqs.addCapability(SPIRV::Capability::StorageImageWriteWithoutFormat);
1709 break;
1710 }
1711
1712 default:
1713 break;
1714 }
1715
1716 // If we require capability Shader, then we can remove the requirement for
1717 // the BitInstructions capability, since Shader is a superset capability
1718 // of BitInstructions.
1719 Reqs.removeCapabilityIf(SPIRV::Capability::BitInstructions,
1720 SPIRV::Capability::Shader);
1721}
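// A recurring shape in the switch above: when the subtarget can use an
// extension, record the extension together with the capability it enables.
// The helper below is a hypothetical sketch of that pattern (it is not
// defined in this file); it reuses only calls already present above.
static void addOptionalExtAndCap(SPIRV::RequirementHandler &Reqs,
                                 const SPIRVSubtarget &ST,
                                 SPIRV::Extension::Extension Ext,
                                 SPIRV::Capability::Capability Cap) {
  if (ST.canUseExtension(Ext)) {
    Reqs.addExtension(Ext);
    Reqs.addCapability(Cap);
  }
}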
1722
1723static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI,
1724 MachineModuleInfo *MMI, const SPIRVSubtarget &ST) {
1725 // Collect requirements for existing instructions.
1726 for (auto F = M.begin(), E = M.end(); F != E; ++F) {
1727 MachineFunction *MF = MMI->getMachineFunction(*F);
1728 if (!MF)
1729 continue;
1730 for (const MachineBasicBlock &MBB : *MF)
1731 for (const MachineInstr &MI : MBB)
1732 addInstrRequirements(MI, MAI.Reqs, ST);
1733 }
1734 // Collect requirements for OpExecutionMode instructions.
1735 auto Node = M.getNamedMetadata("spirv.ExecutionMode");
1736 if (Node) {
1737 bool RequireFloatControls = false, RequireFloatControls2 = false,
1738 VerLower14 = !ST.isAtLeastSPIRVVer(VersionTuple(1, 4));
1739 bool HasFloatControls2 =
1740 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_float_controls2);
1741 for (unsigned i = 0; i < Node->getNumOperands(); i++) {
1742 MDNode *MDN = cast<MDNode>(Node->getOperand(i));
1743 const MDOperand &MDOp = MDN->getOperand(1);
1744 if (auto *CMeta = dyn_cast<ConstantAsMetadata>(MDOp)) {
1745 Constant *C = CMeta->getValue();
1746 if (ConstantInt *Const = dyn_cast<ConstantInt>(C)) {
1747 auto EM = Const->getZExtValue();
1748 // The float-controls execution modes are core only from SPIR-V v1.4:
1749 // for older versions, require the SPV_KHR_float_controls extension instead.
1750 switch (EM) {
1751 case SPIRV::ExecutionMode::DenormPreserve:
1752 case SPIRV::ExecutionMode::DenormFlushToZero:
1753 case SPIRV::ExecutionMode::SignedZeroInfNanPreserve:
1754 case SPIRV::ExecutionMode::RoundingModeRTE:
1755 case SPIRV::ExecutionMode::RoundingModeRTZ:
1756 RequireFloatControls = VerLower14;
1757 MAI.Reqs.getAndAddRequirements(
1758 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
1759 break;
1760 case SPIRV::ExecutionMode::RoundingModeRTPINTEL:
1761 case SPIRV::ExecutionMode::RoundingModeRTNINTEL:
1762 case SPIRV::ExecutionMode::FloatingPointModeALTINTEL:
1763 case SPIRV::ExecutionMode::FloatingPointModeIEEEINTEL:
1764 if (HasFloatControls2) {
1765 RequireFloatControls2 = true;
1766 MAI.Reqs.getAndAddRequirements(
1767 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
1768 }
1769 break;
1770 default:
1771 MAI.Reqs.getAndAddRequirements(
1772 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
1773 }
1774 }
1775 }
1776 }
1777 if (RequireFloatControls &&
1778 ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls))
1779 MAI.Reqs.addExtension(SPIRV::Extension::SPV_KHR_float_controls);
1780 if (RequireFloatControls2)
1781 MAI.Reqs.addExtension(SPIRV::Extension::SPV_INTEL_float_controls2);
1782 }
1783 for (auto FI = M.begin(), E = M.end(); FI != E; ++FI) {
1784 const Function &F = *FI;
1785 if (F.isDeclaration())
1786 continue;
1787 if (F.getMetadata("reqd_work_group_size"))
1788 MAI.Reqs.getAndAddRequirements(
1789 SPIRV::OperandCategory::ExecutionModeOperand,
1790 SPIRV::ExecutionMode::LocalSize, ST);
1791 if (F.getFnAttribute("hlsl.numthreads").isValid()) {
1792 MAI.Reqs.getAndAddRequirements(
1793 SPIRV::OperandCategory::ExecutionModeOperand,
1794 SPIRV::ExecutionMode::LocalSize, ST);
1795 }
1796 if (F.getMetadata("work_group_size_hint"))
1797 MAI.Reqs.getAndAddRequirements(
1798 SPIRV::OperandCategory::ExecutionModeOperand,
1799 SPIRV::ExecutionMode::LocalSizeHint, ST);
1800 if (F.getMetadata("intel_reqd_sub_group_size"))
1801 MAI.Reqs.getAndAddRequirements(
1802 SPIRV::OperandCategory::ExecutionModeOperand,
1803 SPIRV::ExecutionMode::SubgroupSize, ST);
1804 if (F.getMetadata("vec_type_hint"))
1805 MAI.Reqs.getAndAddRequirements(
1806 SPIRV::OperandCategory::ExecutionModeOperand,
1807 SPIRV::ExecutionMode::VecTypeHint, ST);
1808
1809 if (F.hasOptNone()) {
1810 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_optnone)) {
1811 MAI.Reqs.addExtension(SPIRV::Extension::SPV_INTEL_optnone);
1812 MAI.Reqs.addCapability(SPIRV::Capability::OptNoneINTEL);
1813 } else if (ST.canUseExtension(SPIRV::Extension::SPV_EXT_optnone)) {
1814 MAI.Reqs.addExtension(SPIRV::Extension::SPV_EXT_optnone);
1815 MAI.Reqs.addCapability(SPIRV::Capability::OptNoneEXT);
1816 }
1817 }
1818 }
1819}
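// For reference, the "spirv.ExecutionMode" named metadata walked above is
// expected to look roughly like this in LLVM IR (a sketch; the trailing
// operands depend on the particular execution mode):
//
//   !spirv.ExecutionMode = !{!0}
//   !0 = !{ptr @kernel, i32 <ExecutionMode value>, ...}
//
// Operand 1 of each node is read back as a ConstantInt and matched against
// the SPIRV::ExecutionMode enumerators in the switch above.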
1820
1821static unsigned getFastMathFlags(const MachineInstr &I) {
1822 unsigned Flags = SPIRV::FPFastMathMode::None;
1823 if (I.getFlag(MachineInstr::MIFlag::FmNoNans))
1824 Flags |= SPIRV::FPFastMathMode::NotNaN;
1825 if (I.getFlag(MachineInstr::MIFlag::FmNoInfs))
1826 Flags |= SPIRV::FPFastMathMode::NotInf;
1827 if (I.getFlag(MachineInstr::MIFlag::FmNsz))
1828 Flags |= SPIRV::FPFastMathMode::NSZ;
1829 if (I.getFlag(MachineInstr::MIFlag::FmArcp))
1830 Flags |= SPIRV::FPFastMathMode::AllowRecip;
1831 if (I.getFlag(MachineInstr::MIFlag::FmReassoc))
1832 Flags |= SPIRV::FPFastMathMode::Fast;
1833 return Flags;
1834}
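// Illustration of the mapping above (not from the original file): an FP
// instruction carrying the FmNoNans and FmNoInfs MI flags yields
//   getFastMathFlags(MI) == (SPIRV::FPFastMathMode::NotNaN |
//                            SPIRV::FPFastMathMode::NotInf)
// and handleMIFlagDecoration below turns that into an FPFastMathMode
// decoration on the instruction's result id.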
1835
1836static void handleMIFlagDecoration(MachineInstr &I, const SPIRVSubtarget &ST,
1837 const SPIRVInstrInfo &TII,
1838 SPIRV::RequirementHandler &Reqs) {
1839 if (I.getFlag(MachineInstr::MIFlag::NoSWrap) && TII.canUseNSW(I) &&
1840 getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
1841 SPIRV::Decoration::NoSignedWrap, ST, Reqs)
1842 .IsSatisfiable) {
1843 buildOpDecorate(I.getOperand(0).getReg(), I, TII,
1844 SPIRV::Decoration::NoSignedWrap, {});
1845 }
1846 if (I.getFlag(MachineInstr::MIFlag::NoUWrap) && TII.canUseNUW(I) &&
1847 getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
1848 SPIRV::Decoration::NoUnsignedWrap, ST,
1849 Reqs)
1850 .IsSatisfiable) {
1851 buildOpDecorate(I.getOperand(0).getReg(), I, TII,
1852 SPIRV::Decoration::NoUnsignedWrap, {});
1853 }
1854 if (!TII.canUseFastMathFlags(I))
1855 return;
1856 unsigned FMFlags = getFastMathFlags(I);
1857 if (FMFlags == SPIRV::FPFastMathMode::None)
1858 return;
1859 Register DstReg = I.getOperand(0).getReg();
1860 buildOpDecorate(DstReg, I, TII, SPIRV::Decoration::FPFastMathMode, {FMFlags});
1861}
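// In SPIR-V assembly terms the decorations built here come out roughly as
// (illustrative; which one applies depends on the instruction's flags):
//   OpDecorate %res NoSignedWrap
//   OpDecorate %res FPFastMathMode NotNaN|NotInf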
1862
1863// Walk all functions and add decorations related to MI flags.
1864static void addDecorations(const Module &M, const SPIRVInstrInfo &TII,
1865 MachineModuleInfo *MMI, const SPIRVSubtarget &ST,
1866 SPIRV::ModuleAnalysisInfo &MAI) {
1867 for (auto F = M.begin(), E = M.end(); F != E; ++F) {
1868 MachineFunction *MF = MMI->getMachineFunction(*F);
1869 if (!MF)
1870 continue;
1871 for (auto &MBB : *MF)
1872 for (auto &MI : MBB)
1873 handleMIFlagDecoration(MI, ST, TII, MAI.Reqs);
1874 }
1875}
1876
1877static void addMBBNames(const Module &M, const SPIRVInstrInfo &TII,
1878 MachineModuleInfo *MMI, const SPIRVSubtarget &ST,
1879 SPIRV::ModuleAnalysisInfo &MAI) {
1880 for (auto F = M.begin(), E = M.end(); F != E; ++F) {
1881 MachineFunction *MF = MMI->getMachineFunction(*F);
1882 if (!MF)
1883 continue;
1884 MachineRegisterInfo &MRI = MF->getRegInfo();
1885 for (auto &MBB : *MF) {
1886 if (!MBB.hasName() || MBB.empty())
1887 continue;
1888 // Emit basic block names.
1889 Register Reg = MRI.createGenericVirtualRegister(LLT::scalar(64));
1890 MRI.setRegClass(Reg, &SPIRV::IDRegClass);
1891 buildOpName(Reg, MBB.getName(), *std::prev(MBB.end()), TII);
1892 Register GlobalReg = MAI.getOrCreateMBBRegister(MBB);
1893 MAI.setRegisterAlias(MF, Reg, GlobalReg);
1894 }
1895 }
1896}
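// The net effect is one OpName per named, non-empty basic block, e.g.
// (illustrative):
//   OpName %for_body "for.body"
// using the module-level register reserved for the block via
// getOrCreateMBBRegister above.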
1897
1898 // Patch generic PHI instructions (TargetOpcode::PHI) into SPIRV::OpPhi.
1899static void patchPhis(const Module &M, SPIRVGlobalRegistry *GR,
1900 const SPIRVInstrInfo &TII, MachineModuleInfo *MMI) {
1901 for (auto F = M.begin(), E = M.end(); F != E; ++F) {
1902 MachineFunction *MF = MMI->getMachineFunction(*F);
1903 if (!MF)
1904 continue;
1905 for (auto &MBB : *MF) {
1906 for (MachineInstr &MI : MBB) {
1907 if (MI.getOpcode() != TargetOpcode::PHI)
1908 continue;
1909 MI.setDesc(TII.get(SPIRV::OpPhi));
1910 Register ResTypeReg = GR->getSPIRVTypeID(
1911 GR->getSPIRVTypeForVReg(MI.getOperand(0).getReg(), MF));
1912 MI.insert(MI.operands_begin() + 1,
1913 {MachineOperand::CreateReg(ResTypeReg, false)});
1914 }
1915 }
1916 }
1917}
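// After this rewrite a generic PHI such as (illustrative MIR)
//   %dst = PHI %a, %bb.1, %b, %bb.2
// carries the operand layout expected for OpPhi, with the result-type id
// inserted right after the destination:
//   %dst = OpPhi %type %a %bb.1 %b %bb.2
// The operands are still per-function vregs at this point.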
1918
1919 struct SPIRV::ModuleAnalysisInfo SPIRVModuleAnalysis::MAI;
1920
1921 void SPIRVModuleAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
1922 AU.addRequired<TargetPassConfig>();
1923 AU.addRequired<MachineModuleInfoWrapperPass>();
1924}
1925
1926 bool SPIRVModuleAnalysis::runOnModule(Module &M) {
1927 SPIRVTargetMachine &TM =
1928 getAnalysis<TargetPassConfig>().getTM<SPIRVTargetMachine>();
1929 ST = TM.getSubtargetImpl();
1930 GR = ST->getSPIRVGlobalRegistry();
1931 TII = ST->getInstrInfo();
1932
1933 MMI = &getAnalysis<MachineModuleInfoWrapperPass>().getMMI();
1934
1935 setBaseInfo(M);
1936
1937 patchPhis(M, GR, *TII, MMI);
1938
1939 addMBBNames(M, *TII, MMI, *ST, MAI);
1940 addDecorations(M, *TII, MMI, *ST, MAI);
1941
1942 collectReqs(M, MAI, MMI, *ST);
1943
1944 // Process type/const/global var/func decl instructions, number their
1945 // destination registers from 0 to N, collect Extensions and Capabilities.
1946 collectDeclarations(M);
1947
1948 // Number rest of registers from N+1 onwards.
1949 numberRegistersGlobally(M);
1950
1951 // Collect OpName, OpEntryPoint, OpDecorate etc, process other instructions.
1952 processOtherInstrs(M);
1953
1954 // If there are no entry points, we need the Linkage capability.
1955 if (MAI.MS[SPIRV::MB_EntryPoints].empty())
1956 MAI.Reqs.addCapability(SPIRV::Capability::Linkage);
1957
1958 // Set maximum ID used.
1959 GR->setBound(MAI.MaxID);
1960
1961 return false;
1962}