1//===- SPIRVModuleAnalysis.cpp - analysis of global instrs & regs - C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// The analysis collects instructions that should be output at the module level
10// and performs the global register numbering.
11//
12// The results of this analysis are used in AsmPrinter to rename registers
13// globally and to output required instructions at the module level.
14//
15//===----------------------------------------------------------------------===//
16
17#include "SPIRVModuleAnalysis.h"
20#include "SPIRV.h"
21#include "SPIRVSubtarget.h"
22#include "SPIRVTargetMachine.h"
23#include "SPIRVUtils.h"
24#include "llvm/ADT/STLExtras.h"
27
28using namespace llvm;
29
30#define DEBUG_TYPE "spirv-module-analysis"
31
32static cl::opt<bool>
33 SPVDumpDeps("spv-dump-deps",
34 cl::desc("Dump MIR with SPIR-V dependencies info"),
35 cl::Optional, cl::init(false));
36
static cl::list<SPIRV::Capability::Capability>
    AvoidCapabilities("avoid-spirv-capabilities",
                      cl::desc("SPIR-V capabilities to avoid if there are "
                               "other options enabling a feature"),
                      cl::values(clEnumValN(SPIRV::Capability::Shader, "Shader",
                                            "SPIR-V Shader capability")));
// Use sets instead of cl::list to check "if contains" condition
struct AvoidCapabilitiesSet {
  SmallSet<SPIRV::Capability::Capability, 4> S;
  AvoidCapabilitiesSet() {
    for (auto Cap : AvoidCapabilities)
      S.insert(Cap);
  }
};
52
char llvm::SPIRVModuleAnalysis::ID = 0;

namespace llvm {
void initializeSPIRVModuleAnalysisPass(PassRegistry &);
} // namespace llvm
58
59INITIALIZE_PASS(SPIRVModuleAnalysis, DEBUG_TYPE, "SPIRV module analysis", true,
60 true)
61
62// Retrieve an unsigned from an MDNode with a list of them as operands.
63static unsigned getMetadataUInt(MDNode *MdNode, unsigned OpIndex,
64 unsigned DefaultVal = 0) {
65 if (MdNode && OpIndex < MdNode->getNumOperands()) {
66 const auto &Op = MdNode->getOperand(OpIndex);
67 return mdconst::extract<ConstantInt>(Op)->getZExtValue();
68 }
69 return DefaultVal;
70}
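// For example, for an MDNode holding !{i32 2, i32 0}, getMetadataUInt(MD, 0)
// returns 2, and getMetadataUInt(MD, 2, /*DefaultVal=*/7) returns 7 because
// there is no operand at index 2.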
71
static SPIRV::Requirements
getSymbolicOperandRequirements(SPIRV::OperandCategory::OperandCategory Category,
                               unsigned i, const SPIRVSubtarget &ST,
                               SPIRV::RequirementHandler &Reqs) {
  // A set of capabilities to avoid if there is another option.
77 AvoidCapabilitiesSet AvoidCaps;
78 if (ST.isOpenCLEnv())
79 AvoidCaps.S.insert(SPIRV::Capability::Shader);
80
81 VersionTuple ReqMinVer = getSymbolicOperandMinVersion(Category, i);
82 VersionTuple ReqMaxVer = getSymbolicOperandMaxVersion(Category, i);
83 VersionTuple SPIRVVersion = ST.getSPIRVVersion();
84 bool MinVerOK = SPIRVVersion.empty() || SPIRVVersion >= ReqMinVer;
85 bool MaxVerOK =
86 ReqMaxVer.empty() || SPIRVVersion.empty() || SPIRVVersion <= ReqMaxVer;
  CapabilityList ReqCaps = getSymbolicOperandCapabilities(Category, i);
  ExtensionList ReqExts = getSymbolicOperandExtensions(Category, i);
89 if (ReqCaps.empty()) {
90 if (ReqExts.empty()) {
91 if (MinVerOK && MaxVerOK)
92 return {true, {}, {}, ReqMinVer, ReqMaxVer};
93 return {false, {}, {}, VersionTuple(), VersionTuple()};
94 }
95 } else if (MinVerOK && MaxVerOK) {
96 if (ReqCaps.size() == 1) {
97 auto Cap = ReqCaps[0];
98 if (Reqs.isCapabilityAvailable(Cap))
99 return {true, {Cap}, ReqExts, ReqMinVer, ReqMaxVer};
100 } else {
101 // By SPIR-V specification: "If an instruction, enumerant, or other
102 // feature specifies multiple enabling capabilities, only one such
103 // capability needs to be declared to use the feature." However, one
104 // capability may be preferred over another. We use command line
105 // argument(s) and AvoidCapabilities to avoid selection of certain
106 // capabilities if there are other options.
107 CapabilityList UseCaps;
108 for (auto Cap : ReqCaps)
109 if (Reqs.isCapabilityAvailable(Cap))
110 UseCaps.push_back(Cap);
111 for (size_t i = 0, Sz = UseCaps.size(); i < Sz; ++i) {
112 auto Cap = UseCaps[i];
113 if (i == Sz - 1 || !AvoidCaps.S.contains(Cap))
114 return {true, {Cap}, ReqExts, ReqMinVer, ReqMaxVer};
115 }
116 }
117 }
118 // If there are no capabilities, or we can't satisfy the version or
119 // capability requirements, use the list of extensions (if the subtarget
120 // can handle them all).
121 if (llvm::all_of(ReqExts, [&ST](const SPIRV::Extension::Extension &Ext) {
122 return ST.canUseExtension(Ext);
123 })) {
124 return {true,
125 {},
126 ReqExts,
127 VersionTuple(),
128 VersionTuple()}; // TODO: add versions to extensions.
129 }
130 return {false, {}, {}, VersionTuple(), VersionTuple()};
131}
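// In summary, the resolution order implemented above is:
//  1. No required capabilities and no required extensions: OK if the module's
//     SPIR-V version falls within [ReqMinVer, ReqMaxVer].
//  2. Capabilities required and the version fits: pick one available
//     capability, preferring one that is not in the avoid-set.
//  3. Otherwise, fall back to the required extensions, provided the subtarget
//     supports all of them.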
132
133void SPIRVModuleAnalysis::setBaseInfo(const Module &M) {
134 MAI.MaxID = 0;
135 for (int i = 0; i < SPIRV::NUM_MODULE_SECTIONS; i++)
136 MAI.MS[i].clear();
137 MAI.RegisterAliasTable.clear();
138 MAI.InstrsToDelete.clear();
139 MAI.FuncMap.clear();
140 MAI.GlobalVarList.clear();
141 MAI.ExtInstSetMap.clear();
142 MAI.Reqs.clear();
144
  // TODO: determine memory model and source language from the configuration.
146 if (auto MemModel = M.getNamedMetadata("spirv.MemoryModel")) {
147 auto MemMD = MemModel->getOperand(0);
148 MAI.Addr = static_cast<SPIRV::AddressingModel::AddressingModel>(
149 getMetadataUInt(MemMD, 0));
150 MAI.Mem =
151 static_cast<SPIRV::MemoryModel::MemoryModel>(getMetadataUInt(MemMD, 1));
152 } else {
153 // TODO: Add support for VulkanMemoryModel.
154 MAI.Mem = ST->isOpenCLEnv() ? SPIRV::MemoryModel::OpenCL
155 : SPIRV::MemoryModel::GLSL450;
156 if (MAI.Mem == SPIRV::MemoryModel::OpenCL) {
157 unsigned PtrSize = ST->getPointerSize();
158 MAI.Addr = PtrSize == 32 ? SPIRV::AddressingModel::Physical32
159 : PtrSize == 64 ? SPIRV::AddressingModel::Physical64
160 : SPIRV::AddressingModel::Logical;
161 } else {
162 // TODO: Add support for PhysicalStorageBufferAddress.
163 MAI.Addr = SPIRV::AddressingModel::Logical;
164 }
165 }
166 // Get the OpenCL version number from metadata.
167 // TODO: support other source languages.
168 if (auto VerNode = M.getNamedMetadata("opencl.ocl.version")) {
169 MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_C;
170 // Construct version literal in accordance with SPIRV-LLVM-Translator.
171 // TODO: support multiple OCL version metadata.
172 assert(VerNode->getNumOperands() > 0 && "Invalid SPIR");
173 auto VersionMD = VerNode->getOperand(0);
174 unsigned MajorNum = getMetadataUInt(VersionMD, 0, 2);
175 unsigned MinorNum = getMetadataUInt(VersionMD, 1);
176 unsigned RevNum = getMetadataUInt(VersionMD, 2);
    // Prevent the major part of the OpenCL version from being 0.
    MAI.SrcLangVersion =
        (std::max(1U, MajorNum) * 100 + MinorNum) * 1000 + RevNum;
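    // For example, for an OpenCL 2.0 module the frontend typically emits
    //   !opencl.ocl.version = !{!0}
    //   !0 = !{i32 2, i32 0}
    // which yields SrcLangVersion = (2 * 100 + 0) * 1000 + 0 = 200000.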
180 } else {
    // If there is no information about the OpenCL version, we default to
    // OpenCL 1.0 for the OpenCL environment to avoid puzzling run-times with
    // an Unknown/0.0 version output. For reference, the LLVM-SPIRV Translator
    // avoids potential issues with run-times in a similar manner.
185 if (ST->isOpenCLEnv()) {
186 MAI.SrcLang = SPIRV::SourceLanguage::OpenCL_CPP;
187 MAI.SrcLangVersion = 100000;
188 } else {
      MAI.SrcLang = SPIRV::SourceLanguage::Unknown;
      MAI.SrcLangVersion = 0;
    }
192 }
193
194 if (auto ExtNode = M.getNamedMetadata("opencl.used.extensions")) {
195 for (unsigned I = 0, E = ExtNode->getNumOperands(); I != E; ++I) {
196 MDNode *MD = ExtNode->getOperand(I);
197 if (!MD || MD->getNumOperands() == 0)
198 continue;
199 for (unsigned J = 0, N = MD->getNumOperands(); J != N; ++J)
200 MAI.SrcExt.insert(cast<MDString>(MD->getOperand(J))->getString());
201 }
202 }
203
204 // Update required capabilities for this memory model, addressing model and
205 // source language.
206 MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand,
207 MAI.Mem, *ST);
208 MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::SourceLanguageOperand,
209 MAI.SrcLang, *ST);
210 MAI.Reqs.getAndAddRequirements(SPIRV::OperandCategory::AddressingModelOperand,
211 MAI.Addr, *ST);
212
213 if (ST->isOpenCLEnv()) {
214 // TODO: check if it's required by default.
215 MAI.ExtInstSetMap[static_cast<unsigned>(
216 SPIRV::InstructionSet::OpenCL_std)] =
218 }
219}
220
221// Returns a representation of an instruction as a vector of MachineOperand
222// hash values, see llvm::hash_value(const MachineOperand &MO) for details.
223// This creates a signature of the instruction with the same content
224// that MachineOperand::isIdenticalTo uses for comparison.
static InstrSignature instrToSignature(const MachineInstr &MI,
                                       SPIRV::ModuleAnalysisInfo &MAI,
                                       bool UseDefReg) {
228 InstrSignature Signature{MI.getOpcode()};
229 for (unsigned i = 0; i < MI.getNumOperands(); ++i) {
230 const MachineOperand &MO = MI.getOperand(i);
231 size_t h;
232 if (MO.isReg()) {
233 if (!UseDefReg && MO.isDef())
234 continue;
235 Register RegAlias = MAI.getRegisterAlias(MI.getMF(), MO.getReg());
236 if (!RegAlias.isValid()) {
237 LLVM_DEBUG({
238 dbgs() << "Unexpectedly, no global id found for the operand ";
239 MO.print(dbgs());
240 dbgs() << "\nInstruction: ";
241 MI.print(dbgs());
242 dbgs() << "\n";
243 });
244 report_fatal_error("All v-regs must have been mapped to global id's");
245 }
246 // mimic llvm::hash_value(const MachineOperand &MO)
247 h = hash_combine(MO.getType(), (unsigned)RegAlias, MO.getSubReg(),
248 MO.isDef());
249 } else {
250 h = hash_value(MO);
251 }
252 Signature.push_back(h);
253 }
254 return Signature;
255}
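// Note that with UseDefReg == false two instructions that differ only in their
// def registers produce the same signature; handleTypeDeclOrConstant() below
// relies on this to map duplicate type/constant declarations to a single
// global register.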
256
257bool SPIRVModuleAnalysis::isDeclSection(const MachineRegisterInfo &MRI,
258 const MachineInstr &MI) {
259 unsigned Opcode = MI.getOpcode();
260 switch (Opcode) {
261 case SPIRV::OpTypeForwardPointer:
262 // omit now, collect later
263 return false;
264 case SPIRV::OpVariable:
265 return static_cast<SPIRV::StorageClass::StorageClass>(
266 MI.getOperand(2).getImm()) != SPIRV::StorageClass::Function;
267 case SPIRV::OpFunction:
268 case SPIRV::OpFunctionParameter:
269 return true;
270 }
271 if (GR->hasConstFunPtr() && Opcode == SPIRV::OpUndef) {
272 Register DefReg = MI.getOperand(0).getReg();
273 for (MachineInstr &UseMI : MRI.use_instructions(DefReg)) {
274 if (UseMI.getOpcode() != SPIRV::OpConstantFunctionPointerINTEL)
275 continue;
      // This is a dummy definition: the FP constant refers to a function,
      // which is resolved in another way, so skip this definition.
278 assert(UseMI.getOperand(2).isReg() &&
279 UseMI.getOperand(2).getReg() == DefReg);
281 return false;
282 }
283 }
  return TII->isTypeDeclInstr(MI) || TII->isConstantInstr(MI) ||
         TII->isInlineAsmDefInstr(MI);
}
287
// This is a special case of a function pointer referring to a possibly
// forward function declaration. The operand is a dummy OpUndef that
// requires special treatment.
291void SPIRVModuleAnalysis::visitFunPtrUse(
292 Register OpReg, InstrGRegsMap &SignatureToGReg,
293 std::map<const Value *, unsigned> &GlobalToGReg, const MachineFunction *MF,
294 const MachineInstr &MI) {
295 const MachineOperand *OpFunDef =
296 GR->getFunctionDefinitionByUse(&MI.getOperand(2));
297 assert(OpFunDef && OpFunDef->isReg());
298 // find the actual function definition and number it globally in advance
299 const MachineInstr *OpDefMI = OpFunDef->getParent();
300 assert(OpDefMI && OpDefMI->getOpcode() == SPIRV::OpFunction);
301 const MachineFunction *FunDefMF = OpDefMI->getParent()->getParent();
302 const MachineRegisterInfo &FunDefMRI = FunDefMF->getRegInfo();
303 do {
304 visitDecl(FunDefMRI, SignatureToGReg, GlobalToGReg, FunDefMF, *OpDefMI);
305 OpDefMI = OpDefMI->getNextNode();
306 } while (OpDefMI && (OpDefMI->getOpcode() == SPIRV::OpFunction ||
307 OpDefMI->getOpcode() == SPIRV::OpFunctionParameter));
308 // associate the function pointer with the newly assigned global number
309 Register GlobalFunDefReg = MAI.getRegisterAlias(FunDefMF, OpFunDef->getReg());
310 assert(GlobalFunDefReg.isValid() &&
311 "Function definition must refer to a global register");
312 MAI.setRegisterAlias(MF, OpReg, GlobalFunDefReg);
313}
314
315// Depth first recursive traversal of dependencies. Repeated visits are guarded
316// by MAI.hasRegisterAlias().
317void SPIRVModuleAnalysis::visitDecl(
318 const MachineRegisterInfo &MRI, InstrGRegsMap &SignatureToGReg,
319 std::map<const Value *, unsigned> &GlobalToGReg, const MachineFunction *MF,
320 const MachineInstr &MI) {
321 unsigned Opcode = MI.getOpcode();
323
324 // Process each operand of the instruction to resolve dependencies
325 for (const MachineOperand &MO : MI.operands()) {
326 if (!MO.isReg() || MO.isDef())
327 continue;
328 Register OpReg = MO.getReg();
329 // Handle function pointers special case
330 if (Opcode == SPIRV::OpConstantFunctionPointerINTEL &&
331 MRI.getRegClass(OpReg) == &SPIRV::pIDRegClass) {
332 visitFunPtrUse(OpReg, SignatureToGReg, GlobalToGReg, MF, MI);
333 continue;
334 }
335 // Skip already processed instructions
336 if (MAI.hasRegisterAlias(MF, MO.getReg()))
337 continue;
338 // Recursively visit dependencies
339 if (const MachineInstr *OpDefMI = MRI.getUniqueVRegDef(OpReg)) {
340 if (isDeclSection(MRI, *OpDefMI))
341 visitDecl(MRI, SignatureToGReg, GlobalToGReg, MF, *OpDefMI);
342 continue;
343 }
344 // Handle the unexpected case of no unique definition for the SPIR-V
345 // instruction
346 LLVM_DEBUG({
347 dbgs() << "Unexpectedly, no unique definition for the operand ";
348 MO.print(dbgs());
349 dbgs() << "\nInstruction: ";
350 MI.print(dbgs());
351 dbgs() << "\n";
    });
    report_fatal_error(
        "No unique definition is found for the virtual register");
355 }
356
357 Register GReg;
358 bool IsFunDef = false;
359 if (TII->isSpecConstantInstr(MI)) {
362 } else if (Opcode == SPIRV::OpFunction ||
363 Opcode == SPIRV::OpFunctionParameter) {
364 GReg = handleFunctionOrParameter(MF, MI, GlobalToGReg, IsFunDef);
365 } else if (TII->isTypeDeclInstr(MI) || TII->isConstantInstr(MI) ||
366 TII->isInlineAsmDefInstr(MI)) {
367 GReg = handleTypeDeclOrConstant(MI, SignatureToGReg);
368 } else if (Opcode == SPIRV::OpVariable) {
369 GReg = handleVariable(MF, MI, GlobalToGReg);
370 } else {
371 LLVM_DEBUG({
372 dbgs() << "\nInstruction: ";
373 MI.print(dbgs());
374 dbgs() << "\n";
375 });
376 llvm_unreachable("Unexpected instruction is visited");
377 }
378 MAI.setRegisterAlias(MF, MI.getOperand(0).getReg(), GReg);
379 if (!IsFunDef)
381}
382
383Register SPIRVModuleAnalysis::handleFunctionOrParameter(
384 const MachineFunction *MF, const MachineInstr &MI,
385 std::map<const Value *, unsigned> &GlobalToGReg, bool &IsFunDef) {
386 const Value *GObj = GR->getGlobalObject(MF, MI.getOperand(0).getReg());
387 assert(GObj && "Unregistered global definition");
388 const Function *F = dyn_cast<Function>(GObj);
389 if (!F)
390 F = dyn_cast<Argument>(GObj)->getParent();
391 assert(F && "Expected a reference to a function or an argument");
392 IsFunDef = !F->isDeclaration();
393 auto It = GlobalToGReg.find(GObj);
394 if (It != GlobalToGReg.end())
395 return It->second;
397 GlobalToGReg[GObj] = GReg;
398 if (!IsFunDef)
400 return GReg;
401}
402
Register
SPIRVModuleAnalysis::handleTypeDeclOrConstant(const MachineInstr &MI,
405 InstrGRegsMap &SignatureToGReg) {
406 InstrSignature MISign = instrToSignature(MI, MAI, false);
407 auto It = SignatureToGReg.find(MISign);
408 if (It != SignatureToGReg.end())
409 return It->second;
411 SignatureToGReg[MISign] = GReg;
413 return GReg;
414}
415
416Register SPIRVModuleAnalysis::handleVariable(
417 const MachineFunction *MF, const MachineInstr &MI,
418 std::map<const Value *, unsigned> &GlobalToGReg) {
419 MAI.GlobalVarList.push_back(&MI);
420 const Value *GObj = GR->getGlobalObject(MF, MI.getOperand(0).getReg());
421 assert(GObj && "Unregistered global definition");
422 auto It = GlobalToGReg.find(GObj);
423 if (It != GlobalToGReg.end())
424 return It->second;
426 GlobalToGReg[GObj] = GReg;
428 return GReg;
429}
430
431void SPIRVModuleAnalysis::collectDeclarations(const Module &M) {
432 InstrGRegsMap SignatureToGReg;
433 std::map<const Value *, unsigned> GlobalToGReg;
434 for (auto F = M.begin(), E = M.end(); F != E; ++F) {
    MachineFunction *MF = MMI->getMachineFunction(*F);
    if (!MF)
437 continue;
438 const MachineRegisterInfo &MRI = MF->getRegInfo();
439 unsigned PastHeader = 0;
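    // PastHeader is a small state machine: 0 = the function's OpFunction has
    // not been seen yet, 1 = still within the OpFunction/OpFunctionParameter
    // header, 2 = past the header. The function's own header instructions are
    // not visited as declarations by this loop.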
440 for (MachineBasicBlock &MBB : *MF) {
441 for (MachineInstr &MI : MBB) {
442 if (MI.getNumOperands() == 0)
443 continue;
444 unsigned Opcode = MI.getOpcode();
445 if (Opcode == SPIRV::OpFunction) {
446 if (PastHeader == 0) {
447 PastHeader = 1;
448 continue;
449 }
450 } else if (Opcode == SPIRV::OpFunctionParameter) {
451 if (PastHeader < 2)
452 continue;
453 } else if (PastHeader > 0) {
454 PastHeader = 2;
455 }
456
457 const MachineOperand &DefMO = MI.getOperand(0);
458 switch (Opcode) {
459 case SPIRV::OpExtension:
460 MAI.Reqs.addExtension(SPIRV::Extension::Extension(DefMO.getImm()));
462 break;
463 case SPIRV::OpCapability:
464 MAI.Reqs.addCapability(SPIRV::Capability::Capability(DefMO.getImm()));
466 if (PastHeader > 0)
467 PastHeader = 2;
468 break;
469 default:
470 if (DefMO.isReg() && isDeclSection(MRI, MI) &&
471 !MAI.hasRegisterAlias(MF, DefMO.getReg()))
472 visitDecl(MRI, SignatureToGReg, GlobalToGReg, MF, MI);
473 }
474 }
475 }
476 }
477}
478
479// Look for IDs declared with Import linkage, and map the corresponding function
480// to the register defining that variable (which will usually be the result of
481// an OpFunction). This lets us call externally imported functions using
482// the correct ID registers.
483void SPIRVModuleAnalysis::collectFuncNames(MachineInstr &MI,
484 const Function *F) {
485 if (MI.getOpcode() == SPIRV::OpDecorate) {
486 // If it's got Import linkage.
487 auto Dec = MI.getOperand(1).getImm();
488 if (Dec == static_cast<unsigned>(SPIRV::Decoration::LinkageAttributes)) {
489 auto Lnk = MI.getOperand(MI.getNumOperands() - 1).getImm();
490 if (Lnk == static_cast<unsigned>(SPIRV::LinkageType::Import)) {
491 // Map imported function name to function ID register.
492 const Function *ImportedFunc =
493 F->getParent()->getFunction(getStringImm(MI, 2));
494 Register Target = MI.getOperand(0).getReg();
495 MAI.FuncMap[ImportedFunc] = MAI.getRegisterAlias(MI.getMF(), Target);
496 }
497 }
498 } else if (MI.getOpcode() == SPIRV::OpFunction) {
499 // Record all internal OpFunction declarations.
500 Register Reg = MI.defs().begin()->getReg();
501 Register GlobalReg = MAI.getRegisterAlias(MI.getMF(), Reg);
502 assert(GlobalReg.isValid());
503 MAI.FuncMap[F] = GlobalReg;
504 }
505}
506
507// Collect the given instruction in the specified MS. We assume global register
508// numbering has already occurred by this point. We can directly compare reg
509// arguments when detecting duplicates.
static void collectOtherInstr(MachineInstr &MI, SPIRV::ModuleAnalysisInfo &MAI,
                              SPIRV::ModuleSectionType MSType, InstrTraces &IS,
                              bool Append = true) {
513 MAI.setSkipEmission(&MI);
514 InstrSignature MISign = instrToSignature(MI, MAI, true);
515 auto FoundMI = IS.insert(MISign);
516 if (!FoundMI.second)
517 return; // insert failed, so we found a duplicate; don't add it to MAI.MS
518 // No duplicates, so add it.
519 if (Append)
520 MAI.MS[MSType].push_back(&MI);
521 else
522 MAI.MS[MSType].insert(MAI.MS[MSType].begin(), &MI);
523}
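// Passing Append = false prepends the instruction to its module section; this
// is used below to place OpTypeForwardPointer ahead of the other entries in
// the types/constants section.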
524
// Some global instructions make reference to function-local ID registers, so
// they cannot be collected correctly until these registers are globally
// numbered.
527void SPIRVModuleAnalysis::processOtherInstrs(const Module &M) {
528 InstrTraces IS;
529 for (auto F = M.begin(), E = M.end(); F != E; ++F) {
530 if ((*F).isDeclaration())
531 continue;
    MachineFunction *MF = MMI->getMachineFunction(*F);
    assert(MF);
534
535 for (MachineBasicBlock &MBB : *MF)
536 for (MachineInstr &MI : MBB) {
537 if (MAI.getSkipEmission(&MI))
538 continue;
539 const unsigned OpCode = MI.getOpcode();
540 if (OpCode == SPIRV::OpString) {
541 collectOtherInstr(MI, MAI, SPIRV::MB_DebugStrings, IS);
542 } else if (OpCode == SPIRV::OpExtInst && MI.getOperand(2).isImm() &&
543 MI.getOperand(2).getImm() ==
544 SPIRV::InstructionSet::
545 NonSemantic_Shader_DebugInfo_100) {
546 MachineOperand Ins = MI.getOperand(3);
547 namespace NS = SPIRV::NonSemanticExtInst;
548 static constexpr int64_t GlobalNonSemanticDITy[] = {
549 NS::DebugSource, NS::DebugCompilationUnit, NS::DebugInfoNone,
550 NS::DebugTypeBasic, NS::DebugTypePointer};
551 bool IsGlobalDI = false;
552 for (unsigned Idx = 0; Idx < std::size(GlobalNonSemanticDITy); ++Idx)
553 IsGlobalDI |= Ins.getImm() == GlobalNonSemanticDITy[Idx];
554 if (IsGlobalDI)
555 collectOtherInstr(MI, MAI, SPIRV::MB_NonSemanticGlobalDI, IS);
556 } else if (OpCode == SPIRV::OpName || OpCode == SPIRV::OpMemberName) {
557 collectOtherInstr(MI, MAI, SPIRV::MB_DebugNames, IS);
558 } else if (OpCode == SPIRV::OpEntryPoint) {
559 collectOtherInstr(MI, MAI, SPIRV::MB_EntryPoints, IS);
560 } else if (TII->isDecorationInstr(MI)) {
561 collectOtherInstr(MI, MAI, SPIRV::MB_Annotations, IS);
562 collectFuncNames(MI, &*F);
563 } else if (TII->isConstantInstr(MI)) {
          // OpSpecConstant* instructions are not tracked in the duplicates
          // tracker (DT), but they still need to be collected.
566 collectOtherInstr(MI, MAI, SPIRV::MB_TypeConstVars, IS);
567 } else if (OpCode == SPIRV::OpFunction) {
568 collectFuncNames(MI, &*F);
569 } else if (OpCode == SPIRV::OpTypeForwardPointer) {
570 collectOtherInstr(MI, MAI, SPIRV::MB_TypeConstVars, IS, false);
571 }
572 }
573 }
574}
575
// Number registers in all functions globally from 0 onwards and store
// the result in the global register alias table. Some registers are already
// numbered.
579void SPIRVModuleAnalysis::numberRegistersGlobally(const Module &M) {
580 for (auto F = M.begin(), E = M.end(); F != E; ++F) {
581 if ((*F).isDeclaration())
582 continue;
    MachineFunction *MF = MMI->getMachineFunction(*F);
    assert(MF);
585 for (MachineBasicBlock &MBB : *MF) {
586 for (MachineInstr &MI : MBB) {
587 for (MachineOperand &Op : MI.operands()) {
588 if (!Op.isReg())
589 continue;
590 Register Reg = Op.getReg();
591 if (MAI.hasRegisterAlias(MF, Reg))
592 continue;
594 MAI.setRegisterAlias(MF, Reg, NewReg);
595 }
596 if (MI.getOpcode() != SPIRV::OpExtInst)
597 continue;
598 auto Set = MI.getOperand(2).getImm();
599 if (!MAI.ExtInstSetMap.contains(Set))
601 }
602 }
603 }
604}
605
// RequirementHandler implementations.
void SPIRV::RequirementHandler::getAndAddRequirements(
    SPIRV::OperandCategory::OperandCategory Category, uint32_t i,
    const SPIRVSubtarget &ST) {
610 addRequirements(getSymbolicOperandRequirements(Category, i, ST, *this));
611}
612
613void SPIRV::RequirementHandler::recursiveAddCapabilities(
614 const CapabilityList &ToPrune) {
615 for (const auto &Cap : ToPrune) {
616 AllCaps.insert(Cap);
617 CapabilityList ImplicitDecls =
618 getSymbolicOperandCapabilities(OperandCategory::CapabilityOperand, Cap);
619 recursiveAddCapabilities(ImplicitDecls);
620 }
621}
622

void SPIRV::RequirementHandler::addCapabilities(const CapabilityList &ToAdd) {
  for (const auto &Cap : ToAdd) {
625 bool IsNewlyInserted = AllCaps.insert(Cap).second;
626 if (!IsNewlyInserted) // Don't re-add if it's already been declared.
627 continue;
628 CapabilityList ImplicitDecls =
629 getSymbolicOperandCapabilities(OperandCategory::CapabilityOperand, Cap);
630 recursiveAddCapabilities(ImplicitDecls);
631 MinimalCaps.push_back(Cap);
632 }
633}
634
void SPIRV::RequirementHandler::addRequirements(
    const SPIRV::Requirements &Req) {
637 if (!Req.IsSatisfiable)
638 report_fatal_error("Adding SPIR-V requirements this target can't satisfy.");
639
640 if (Req.Cap.has_value())
641 addCapabilities({Req.Cap.value()});
642
643 addExtensions(Req.Exts);
644
645 if (!Req.MinVer.empty()) {
646 if (!MaxVersion.empty() && Req.MinVer > MaxVersion) {
647 LLVM_DEBUG(dbgs() << "Conflicting version requirements: >= " << Req.MinVer
648 << " and <= " << MaxVersion << "\n");
649 report_fatal_error("Adding SPIR-V requirements that can't be satisfied.");
650 }
651
652 if (MinVersion.empty() || Req.MinVer > MinVersion)
653 MinVersion = Req.MinVer;
654 }
655
656 if (!Req.MaxVer.empty()) {
657 if (!MinVersion.empty() && Req.MaxVer < MinVersion) {
658 LLVM_DEBUG(dbgs() << "Conflicting version requirements: <= " << Req.MaxVer
659 << " and >= " << MinVersion << "\n");
660 report_fatal_error("Adding SPIR-V requirements that can't be satisfied.");
661 }
662
663 if (MaxVersion.empty() || Req.MaxVer < MaxVersion)
664 MaxVersion = Req.MaxVer;
665 }
666}
667
void SPIRV::RequirementHandler::checkSatisfiable(
    const SPIRVSubtarget &ST) const {
670 // Report as many errors as possible before aborting the compilation.
671 bool IsSatisfiable = true;
672 auto TargetVer = ST.getSPIRVVersion();
673
674 if (!MaxVersion.empty() && !TargetVer.empty() && MaxVersion < TargetVer) {
    LLVM_DEBUG(
        dbgs() << "Target SPIR-V version too high for required features\n"
677 << "Required max version: " << MaxVersion << " target version "
678 << TargetVer << "\n");
679 IsSatisfiable = false;
680 }
681
682 if (!MinVersion.empty() && !TargetVer.empty() && MinVersion > TargetVer) {
683 LLVM_DEBUG(dbgs() << "Target SPIR-V version too low for required features\n"
684 << "Required min version: " << MinVersion
685 << " target version " << TargetVer << "\n");
686 IsSatisfiable = false;
687 }
688
689 if (!MinVersion.empty() && !MaxVersion.empty() && MinVersion > MaxVersion) {
    LLVM_DEBUG(
        dbgs()
692 << "Version is too low for some features and too high for others.\n"
693 << "Required SPIR-V min version: " << MinVersion
694 << " required SPIR-V max version " << MaxVersion << "\n");
695 IsSatisfiable = false;
696 }
697
698 for (auto Cap : MinimalCaps) {
699 if (AvailableCaps.contains(Cap))
700 continue;
701 LLVM_DEBUG(dbgs() << "Capability not supported: "
                      << getSymbolicOperandMnemonic(
                             OperandCategory::CapabilityOperand, Cap)
704 << "\n");
705 IsSatisfiable = false;
706 }
707
708 for (auto Ext : AllExtensions) {
709 if (ST.canUseExtension(Ext))
710 continue;
711 LLVM_DEBUG(dbgs() << "Extension not supported: "
                      << getSymbolicOperandMnemonic(
                             OperandCategory::ExtensionOperand, Ext)
714 << "\n");
715 IsSatisfiable = false;
716 }
717
718 if (!IsSatisfiable)
719 report_fatal_error("Unable to meet SPIR-V requirements for this target.");
720}
721
// Add the given capabilities and all their implicitly defined capabilities too.
void SPIRV::RequirementHandler::addAvailableCaps(const CapabilityList &ToAdd) {
  for (const auto Cap : ToAdd)
725 if (AvailableCaps.insert(Cap).second)
726 addAvailableCaps(getSymbolicOperandCapabilities(
727 SPIRV::OperandCategory::CapabilityOperand, Cap));
728}
729
void SPIRV::RequirementHandler::removeCapabilityIf(
    const Capability::Capability ToRemove,
732 const Capability::Capability IfPresent) {
733 if (AllCaps.contains(IfPresent))
734 AllCaps.erase(ToRemove);
735}
736
737namespace llvm {
738namespace SPIRV {
739void RequirementHandler::initAvailableCapabilities(const SPIRVSubtarget &ST) {
  // Provided by all supported Vulkan versions and by OpenCL.
741 addAvailableCaps({Capability::Shader, Capability::Linkage, Capability::Int8,
742 Capability::Int16});
743
744 if (ST.isAtLeastSPIRVVer(VersionTuple(1, 3)))
745 addAvailableCaps({Capability::GroupNonUniform,
746 Capability::GroupNonUniformVote,
747 Capability::GroupNonUniformArithmetic,
748 Capability::GroupNonUniformBallot,
749 Capability::GroupNonUniformClustered,
750 Capability::GroupNonUniformShuffle,
751 Capability::GroupNonUniformShuffleRelative});
752
753 if (ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
754 addAvailableCaps({Capability::DotProduct, Capability::DotProductInputAll,
755 Capability::DotProductInput4x8Bit,
756 Capability::DotProductInput4x8BitPacked,
757 Capability::DemoteToHelperInvocation});
758
759 // Add capabilities enabled by extensions.
760 for (auto Extension : ST.getAllAvailableExtensions()) {
    CapabilityList EnabledCapabilities =
        getCapabilitiesEnabledByExtension(Extension);
    addAvailableCaps(EnabledCapabilities);
764 }
765
766 if (ST.isOpenCLEnv()) {
767 initAvailableCapabilitiesForOpenCL(ST);
768 return;
769 }
770
771 if (ST.isVulkanEnv()) {
772 initAvailableCapabilitiesForVulkan(ST);
773 return;
774 }
775
776 report_fatal_error("Unimplemented environment for SPIR-V generation.");
777}
778
779void RequirementHandler::initAvailableCapabilitiesForOpenCL(
780 const SPIRVSubtarget &ST) {
781 // Add the min requirements for different OpenCL and SPIR-V versions.
782 addAvailableCaps({Capability::Addresses, Capability::Float16Buffer,
783 Capability::Kernel, Capability::Vector16,
784 Capability::Groups, Capability::GenericPointer,
785 Capability::StorageImageWriteWithoutFormat,
786 Capability::StorageImageReadWithoutFormat});
787 if (ST.hasOpenCLFullProfile())
788 addAvailableCaps({Capability::Int64, Capability::Int64Atomics});
789 if (ST.hasOpenCLImageSupport()) {
790 addAvailableCaps({Capability::ImageBasic, Capability::LiteralSampler,
791 Capability::Image1D, Capability::SampledBuffer,
792 Capability::ImageBuffer});
793 if (ST.isAtLeastOpenCLVer(VersionTuple(2, 0)))
794 addAvailableCaps({Capability::ImageReadWrite});
795 }
796 if (ST.isAtLeastSPIRVVer(VersionTuple(1, 1)) &&
797 ST.isAtLeastOpenCLVer(VersionTuple(2, 2)))
798 addAvailableCaps({Capability::SubgroupDispatch, Capability::PipeStorage});
799 if (ST.isAtLeastSPIRVVer(VersionTuple(1, 4)))
800 addAvailableCaps({Capability::DenormPreserve, Capability::DenormFlushToZero,
801 Capability::SignedZeroInfNanPreserve,
802 Capability::RoundingModeRTE,
803 Capability::RoundingModeRTZ});
804 // TODO: verify if this needs some checks.
805 addAvailableCaps({Capability::Float16, Capability::Float64});
806
807 // TODO: add OpenCL extensions.
808}
809
810void RequirementHandler::initAvailableCapabilitiesForVulkan(
811 const SPIRVSubtarget &ST) {
812
813 // Core in Vulkan 1.1 and earlier.
814 addAvailableCaps({Capability::Int64, Capability::Float16, Capability::Float64,
815 Capability::GroupNonUniform, Capability::Image1D,
816 Capability::SampledBuffer, Capability::ImageBuffer,
817 Capability::UniformBufferArrayDynamicIndexing,
818 Capability::SampledImageArrayDynamicIndexing,
819 Capability::StorageBufferArrayDynamicIndexing,
820 Capability::StorageImageArrayDynamicIndexing});
821
822 // Became core in Vulkan 1.2
823 if (ST.isAtLeastSPIRVVer(VersionTuple(1, 5))) {
824 addAvailableCaps(
825 {Capability::ShaderNonUniformEXT, Capability::RuntimeDescriptorArrayEXT,
826 Capability::InputAttachmentArrayDynamicIndexingEXT,
827 Capability::UniformTexelBufferArrayDynamicIndexingEXT,
828 Capability::StorageTexelBufferArrayDynamicIndexingEXT,
829 Capability::UniformBufferArrayNonUniformIndexingEXT,
830 Capability::SampledImageArrayNonUniformIndexingEXT,
831 Capability::StorageBufferArrayNonUniformIndexingEXT,
832 Capability::StorageImageArrayNonUniformIndexingEXT,
833 Capability::InputAttachmentArrayNonUniformIndexingEXT,
834 Capability::UniformTexelBufferArrayNonUniformIndexingEXT,
835 Capability::StorageTexelBufferArrayNonUniformIndexingEXT});
836 }
837
838 // Became core in Vulkan 1.3
839 if (ST.isAtLeastSPIRVVer(VersionTuple(1, 6)))
840 addAvailableCaps({Capability::StorageImageWriteWithoutFormat,
841 Capability::StorageImageReadWithoutFormat});
842}
843
844} // namespace SPIRV
845} // namespace llvm
846
847// Add the required capabilities from a decoration instruction (including
848// BuiltIns).
static void addOpDecorateReqs(const MachineInstr &MI, unsigned DecIndex,
                              SPIRV::RequirementHandler &Reqs,
                              const SPIRVSubtarget &ST) {
852 int64_t DecOp = MI.getOperand(DecIndex).getImm();
853 auto Dec = static_cast<SPIRV::Decoration::Decoration>(DecOp);
854 Reqs.addRequirements(getSymbolicOperandRequirements(
855 SPIRV::OperandCategory::DecorationOperand, Dec, ST, Reqs));
856
857 if (Dec == SPIRV::Decoration::BuiltIn) {
858 int64_t BuiltInOp = MI.getOperand(DecIndex + 1).getImm();
859 auto BuiltIn = static_cast<SPIRV::BuiltIn::BuiltIn>(BuiltInOp);
860 Reqs.addRequirements(getSymbolicOperandRequirements(
861 SPIRV::OperandCategory::BuiltInOperand, BuiltIn, ST, Reqs));
862 } else if (Dec == SPIRV::Decoration::LinkageAttributes) {
863 int64_t LinkageOp = MI.getOperand(MI.getNumOperands() - 1).getImm();
864 SPIRV::LinkageType::LinkageType LnkType =
865 static_cast<SPIRV::LinkageType::LinkageType>(LinkageOp);
866 if (LnkType == SPIRV::LinkageType::LinkOnceODR)
867 Reqs.addExtension(SPIRV::Extension::SPV_KHR_linkonce_odr);
868 } else if (Dec == SPIRV::Decoration::CacheControlLoadINTEL ||
869 Dec == SPIRV::Decoration::CacheControlStoreINTEL) {
870 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_cache_controls);
871 } else if (Dec == SPIRV::Decoration::HostAccessINTEL) {
872 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_global_variable_host_access);
873 } else if (Dec == SPIRV::Decoration::InitModeINTEL ||
874 Dec == SPIRV::Decoration::ImplementInRegisterMapINTEL) {
875 Reqs.addExtension(
876 SPIRV::Extension::SPV_INTEL_global_variable_fpga_decorations);
877 } else if (Dec == SPIRV::Decoration::NonUniformEXT) {
878 Reqs.addRequirements(SPIRV::Capability::ShaderNonUniformEXT);
879 }
880}
881
882// Add requirements for image handling.
static void addOpTypeImageReqs(const MachineInstr &MI,
                               SPIRV::RequirementHandler &Reqs,
                               const SPIRVSubtarget &ST) {
886 assert(MI.getNumOperands() >= 8 && "Insufficient operands for OpTypeImage");
887 // The operand indices used here are based on the OpTypeImage layout, which
888 // the MachineInstr follows as well.
889 int64_t ImgFormatOp = MI.getOperand(7).getImm();
890 auto ImgFormat = static_cast<SPIRV::ImageFormat::ImageFormat>(ImgFormatOp);
891 Reqs.getAndAddRequirements(SPIRV::OperandCategory::ImageFormatOperand,
892 ImgFormat, ST);
893
894 bool IsArrayed = MI.getOperand(4).getImm() == 1;
895 bool IsMultisampled = MI.getOperand(5).getImm() == 1;
896 bool NoSampler = MI.getOperand(6).getImm() == 2;
897 // Add dimension requirements.
898 assert(MI.getOperand(2).isImm());
899 switch (MI.getOperand(2).getImm()) {
900 case SPIRV::Dim::DIM_1D:
901 Reqs.addRequirements(NoSampler ? SPIRV::Capability::Image1D
902 : SPIRV::Capability::Sampled1D);
903 break;
904 case SPIRV::Dim::DIM_2D:
905 if (IsMultisampled && NoSampler)
906 Reqs.addRequirements(SPIRV::Capability::ImageMSArray);
907 break;
908 case SPIRV::Dim::DIM_Cube:
909 Reqs.addRequirements(SPIRV::Capability::Shader);
910 if (IsArrayed)
911 Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageCubeArray
912 : SPIRV::Capability::SampledCubeArray);
913 break;
914 case SPIRV::Dim::DIM_Rect:
915 Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageRect
916 : SPIRV::Capability::SampledRect);
917 break;
918 case SPIRV::Dim::DIM_Buffer:
919 Reqs.addRequirements(NoSampler ? SPIRV::Capability::ImageBuffer
920 : SPIRV::Capability::SampledBuffer);
921 break;
922 case SPIRV::Dim::DIM_SubpassData:
923 Reqs.addRequirements(SPIRV::Capability::InputAttachment);
924 break;
925 }
926
927 // Has optional access qualifier.
928 if (ST.isOpenCLEnv()) {
929 if (MI.getNumOperands() > 8 &&
930 MI.getOperand(8).getImm() == SPIRV::AccessQualifier::ReadWrite)
931 Reqs.addRequirements(SPIRV::Capability::ImageReadWrite);
932 else
933 Reqs.addRequirements(SPIRV::Capability::ImageBasic);
934 }
935}
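// For example, an OpTypeImage with Dim = Buffer and Sampled = 1 adds the
// SampledBuffer capability, Dim = SubpassData adds InputAttachment, and a
// ReadWrite access qualifier in an OpenCL environment adds ImageReadWrite.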
936
937// Add requirements for handling atomic float instructions
938#define ATOM_FLT_REQ_EXT_MSG(ExtName) \
939 "The atomic float instruction requires the following SPIR-V " \
940 "extension: SPV_EXT_shader_atomic_float" ExtName
static void AddAtomicFloatRequirements(const MachineInstr &MI,
                                       SPIRV::RequirementHandler &Reqs,
                                       const SPIRVSubtarget &ST) {
944 assert(MI.getOperand(1).isReg() &&
945 "Expect register operand in atomic float instruction");
946 Register TypeReg = MI.getOperand(1).getReg();
947 SPIRVType *TypeDef = MI.getMF()->getRegInfo().getVRegDef(TypeReg);
948 if (TypeDef->getOpcode() != SPIRV::OpTypeFloat)
949 report_fatal_error("Result type of an atomic float instruction must be a "
950 "floating-point type scalar");
951
952 unsigned BitWidth = TypeDef->getOperand(1).getImm();
953 unsigned Op = MI.getOpcode();
954 if (Op == SPIRV::OpAtomicFAddEXT) {
    if (!ST.canUseExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_add))
      report_fatal_error(ATOM_FLT_REQ_EXT_MSG("_add"), false);
    Reqs.addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_add);
958 switch (BitWidth) {
959 case 16:
960 if (!ST.canUseExtension(
              SPIRV::Extension::SPV_EXT_shader_atomic_float16_add))
        report_fatal_error(ATOM_FLT_REQ_EXT_MSG("16_add"), false);
      Reqs.addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float16_add);
964 Reqs.addCapability(SPIRV::Capability::AtomicFloat16AddEXT);
965 break;
966 case 32:
967 Reqs.addCapability(SPIRV::Capability::AtomicFloat32AddEXT);
968 break;
969 case 64:
970 Reqs.addCapability(SPIRV::Capability::AtomicFloat64AddEXT);
971 break;
    default:
      report_fatal_error(
          "Unexpected floating-point type width in atomic float instruction");
975 }
976 } else {
977 if (!ST.canUseExtension(
978 SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max))
979 report_fatal_error(ATOM_FLT_REQ_EXT_MSG("_min_max"), false);
980 Reqs.addExtension(SPIRV::Extension::SPV_EXT_shader_atomic_float_min_max);
981 switch (BitWidth) {
982 case 16:
983 Reqs.addCapability(SPIRV::Capability::AtomicFloat16MinMaxEXT);
984 break;
985 case 32:
986 Reqs.addCapability(SPIRV::Capability::AtomicFloat32MinMaxEXT);
987 break;
988 case 64:
989 Reqs.addCapability(SPIRV::Capability::AtomicFloat64MinMaxEXT);
990 break;
    default:
      report_fatal_error(
          "Unexpected floating-point type width in atomic float instruction");
994 }
995 }
996}
997
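// Helpers classifying image-related type declarations. Operand 2 of
// OpTypeImage is the Dim operand and operand 6 is the Sampled operand; per the
// SPIR-V specification, Sampled == 1 means the image is used with a sampler
// and Sampled == 2 means it is read/written without one (a storage image).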
998bool isUniformTexelBuffer(MachineInstr *ImageInst) {
999 if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1000 return false;
1001 uint32_t Dim = ImageInst->getOperand(2).getImm();
1002 uint32_t Sampled = ImageInst->getOperand(6).getImm();
1003 return Dim == SPIRV::Dim::DIM_Buffer && Sampled == 1;
1004}
1005
1006bool isStorageTexelBuffer(MachineInstr *ImageInst) {
1007 if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1008 return false;
1009 uint32_t Dim = ImageInst->getOperand(2).getImm();
1010 uint32_t Sampled = ImageInst->getOperand(6).getImm();
1011 return Dim == SPIRV::Dim::DIM_Buffer && Sampled == 2;
1012}
1013
1014bool isSampledImage(MachineInstr *ImageInst) {
1015 if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1016 return false;
1017 uint32_t Dim = ImageInst->getOperand(2).getImm();
1018 uint32_t Sampled = ImageInst->getOperand(6).getImm();
1019 return Dim != SPIRV::Dim::DIM_Buffer && Sampled == 1;
1020}
1021
1022bool isInputAttachment(MachineInstr *ImageInst) {
1023 if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1024 return false;
1025 uint32_t Dim = ImageInst->getOperand(2).getImm();
1026 uint32_t Sampled = ImageInst->getOperand(6).getImm();
1027 return Dim == SPIRV::Dim::DIM_SubpassData && Sampled == 2;
1028}
1029
1030bool isStorageImage(MachineInstr *ImageInst) {
1031 if (ImageInst->getOpcode() != SPIRV::OpTypeImage)
1032 return false;
1033 uint32_t Dim = ImageInst->getOperand(2).getImm();
1034 uint32_t Sampled = ImageInst->getOperand(6).getImm();
1035 return Dim != SPIRV::Dim::DIM_Buffer && Sampled == 2;
1036}
1037
1038bool isCombinedImageSampler(MachineInstr *SampledImageInst) {
1039 if (SampledImageInst->getOpcode() != SPIRV::OpTypeSampledImage)
1040 return false;
1041
1042 const MachineRegisterInfo &MRI = SampledImageInst->getMF()->getRegInfo();
1043 Register ImageReg = SampledImageInst->getOperand(1).getReg();
1044 auto *ImageInst = MRI.getUniqueVRegDef(ImageReg);
1045 return isSampledImage(ImageInst);
1046}
1047
1048bool hasNonUniformDecoration(Register Reg, const MachineRegisterInfo &MRI) {
1049 for (const auto &MI : MRI.reg_instructions(Reg)) {
1050 if (MI.getOpcode() != SPIRV::OpDecorate)
1051 continue;
1052
1053 uint32_t Dec = MI.getOperand(1).getImm();
1054 if (Dec == SPIRV::Decoration::NonUniformEXT)
1055 return true;
1056 }
1057 return false;
1058}
1059
void addOpAccessChainReqs(const MachineInstr &Instr,
                          SPIRV::RequirementHandler &Handler,
                          const SPIRVSubtarget &Subtarget) {
1063 const MachineRegisterInfo &MRI = Instr.getMF()->getRegInfo();
1064 // Get the result type. If it is an image type, then the shader uses
1065 // descriptor indexing. The appropriate capabilities will be added based
1066 // on the specifics of the image.
1067 Register ResTypeReg = Instr.getOperand(1).getReg();
1068 MachineInstr *ResTypeInst = MRI.getUniqueVRegDef(ResTypeReg);
1069
1070 assert(ResTypeInst->getOpcode() == SPIRV::OpTypePointer);
1071 uint32_t StorageClass = ResTypeInst->getOperand(1).getImm();
1072 if (StorageClass != SPIRV::StorageClass::StorageClass::UniformConstant &&
1073 StorageClass != SPIRV::StorageClass::StorageClass::Uniform &&
1074 StorageClass != SPIRV::StorageClass::StorageClass::StorageBuffer) {
1075 return;
1076 }
1077
1078 Register PointeeTypeReg = ResTypeInst->getOperand(2).getReg();
1079 MachineInstr *PointeeType = MRI.getUniqueVRegDef(PointeeTypeReg);
1080 if (PointeeType->getOpcode() != SPIRV::OpTypeImage &&
1081 PointeeType->getOpcode() != SPIRV::OpTypeSampledImage &&
1082 PointeeType->getOpcode() != SPIRV::OpTypeSampler) {
1083 return;
1084 }
1085
1086 bool IsNonUniform =
1087 hasNonUniformDecoration(Instr.getOperand(0).getReg(), MRI);
1088 if (isUniformTexelBuffer(PointeeType)) {
1089 if (IsNonUniform)
1090 Handler.addRequirements(
1091 SPIRV::Capability::UniformTexelBufferArrayNonUniformIndexingEXT);
1092 else
1093 Handler.addRequirements(
1094 SPIRV::Capability::UniformTexelBufferArrayDynamicIndexingEXT);
1095 } else if (isInputAttachment(PointeeType)) {
1096 if (IsNonUniform)
1097 Handler.addRequirements(
1098 SPIRV::Capability::InputAttachmentArrayNonUniformIndexingEXT);
1099 else
1100 Handler.addRequirements(
1101 SPIRV::Capability::InputAttachmentArrayDynamicIndexingEXT);
1102 } else if (isStorageTexelBuffer(PointeeType)) {
1103 if (IsNonUniform)
1104 Handler.addRequirements(
1105 SPIRV::Capability::StorageTexelBufferArrayNonUniformIndexingEXT);
1106 else
1107 Handler.addRequirements(
1108 SPIRV::Capability::StorageTexelBufferArrayDynamicIndexingEXT);
1109 } else if (isSampledImage(PointeeType) ||
1110 isCombinedImageSampler(PointeeType) ||
1111 PointeeType->getOpcode() == SPIRV::OpTypeSampler) {
1112 if (IsNonUniform)
1113 Handler.addRequirements(
1114 SPIRV::Capability::SampledImageArrayNonUniformIndexingEXT);
1115 else
1116 Handler.addRequirements(
1117 SPIRV::Capability::SampledImageArrayDynamicIndexing);
1118 } else if (isStorageImage(PointeeType)) {
1119 if (IsNonUniform)
1120 Handler.addRequirements(
1121 SPIRV::Capability::StorageImageArrayNonUniformIndexingEXT);
1122 else
1123 Handler.addRequirements(
1124 SPIRV::Capability::StorageImageArrayDynamicIndexing);
1125 }
1126}
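// In short, an access chain that selects from an array of descriptors needs
// the *ArrayDynamicIndexing capability matching the descriptor class, or the
// *ArrayNonUniformIndexing[EXT] variant when the index is decorated as
// NonUniform.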
1127
1128static bool isImageTypeWithUnknownFormat(SPIRVType *TypeInst) {
1129 if (TypeInst->getOpcode() != SPIRV::OpTypeImage)
1130 return false;
1131 assert(TypeInst->getOperand(7).isImm() && "The image format must be an imm.");
1132 return TypeInst->getOperand(7).getImm() == 0;
1133}
1134
static void AddDotProductRequirements(const MachineInstr &MI,
                                      SPIRV::RequirementHandler &Reqs,
                                      const SPIRVSubtarget &ST) {
1138 if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product))
1139 Reqs.addExtension(SPIRV::Extension::SPV_KHR_integer_dot_product);
1140 Reqs.addCapability(SPIRV::Capability::DotProduct);
1141
1142 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
1143 assert(MI.getOperand(2).isReg() && "Unexpected operand in dot");
1144 // We do not consider what the previous instruction is. This is just used
1145 // to get the input register and to check the type.
1146 const MachineInstr *Input = MRI.getVRegDef(MI.getOperand(2).getReg());
1147 assert(Input->getOperand(1).isReg() && "Unexpected operand in dot input");
1148 Register InputReg = Input->getOperand(1).getReg();
1149
1150 SPIRVType *TypeDef = MRI.getVRegDef(InputReg);
1151 if (TypeDef->getOpcode() == SPIRV::OpTypeInt) {
1152 assert(TypeDef->getOperand(1).getImm() == 32);
1153 Reqs.addCapability(SPIRV::Capability::DotProductInput4x8BitPacked);
1154 } else if (TypeDef->getOpcode() == SPIRV::OpTypeVector) {
1155 SPIRVType *ScalarTypeDef = MRI.getVRegDef(TypeDef->getOperand(1).getReg());
1156 assert(ScalarTypeDef->getOpcode() == SPIRV::OpTypeInt);
1157 if (ScalarTypeDef->getOperand(1).getImm() == 8) {
1158 assert(TypeDef->getOperand(2).getImm() == 4 &&
1159 "Dot operand of 8-bit integer type requires 4 components");
1160 Reqs.addCapability(SPIRV::Capability::DotProductInput4x8Bit);
1161 } else {
1162 Reqs.addCapability(SPIRV::Capability::DotProductInputAll);
1163 }
1164 }
1165}
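// For example, i32 dot-product operands (the 4x8-bit packed form) add
// DotProductInput4x8BitPacked, <4 x i8> vector operands add
// DotProductInput4x8Bit, and other integer vector operands add
// DotProductInputAll.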
1166
void addInstrRequirements(const MachineInstr &MI,
                          SPIRV::RequirementHandler &Reqs,
                          const SPIRVSubtarget &ST) {
1170 switch (MI.getOpcode()) {
1171 case SPIRV::OpMemoryModel: {
1172 int64_t Addr = MI.getOperand(0).getImm();
1173 Reqs.getAndAddRequirements(SPIRV::OperandCategory::AddressingModelOperand,
1174 Addr, ST);
1175 int64_t Mem = MI.getOperand(1).getImm();
1176 Reqs.getAndAddRequirements(SPIRV::OperandCategory::MemoryModelOperand, Mem,
1177 ST);
1178 break;
1179 }
1180 case SPIRV::OpEntryPoint: {
1181 int64_t Exe = MI.getOperand(0).getImm();
1182 Reqs.getAndAddRequirements(SPIRV::OperandCategory::ExecutionModelOperand,
1183 Exe, ST);
1184 break;
1185 }
1186 case SPIRV::OpExecutionMode:
1187 case SPIRV::OpExecutionModeId: {
1188 int64_t Exe = MI.getOperand(1).getImm();
1189 Reqs.getAndAddRequirements(SPIRV::OperandCategory::ExecutionModeOperand,
1190 Exe, ST);
1191 break;
1192 }
1193 case SPIRV::OpTypeMatrix:
1194 Reqs.addCapability(SPIRV::Capability::Matrix);
1195 break;
1196 case SPIRV::OpTypeInt: {
1197 unsigned BitWidth = MI.getOperand(1).getImm();
1198 if (BitWidth == 64)
1199 Reqs.addCapability(SPIRV::Capability::Int64);
1200 else if (BitWidth == 16)
1201 Reqs.addCapability(SPIRV::Capability::Int16);
1202 else if (BitWidth == 8)
1203 Reqs.addCapability(SPIRV::Capability::Int8);
1204 break;
1205 }
1206 case SPIRV::OpTypeFloat: {
1207 unsigned BitWidth = MI.getOperand(1).getImm();
1208 if (BitWidth == 64)
1209 Reqs.addCapability(SPIRV::Capability::Float64);
1210 else if (BitWidth == 16)
1211 Reqs.addCapability(SPIRV::Capability::Float16);
1212 break;
1213 }
1214 case SPIRV::OpTypeVector: {
1215 unsigned NumComponents = MI.getOperand(2).getImm();
1216 if (NumComponents == 8 || NumComponents == 16)
1217 Reqs.addCapability(SPIRV::Capability::Vector16);
1218 break;
1219 }
1220 case SPIRV::OpTypePointer: {
1221 auto SC = MI.getOperand(1).getImm();
1222 Reqs.getAndAddRequirements(SPIRV::OperandCategory::StorageClassOperand, SC,
1223 ST);
    // If it's a pointer to a float16 type and the target is OpenCL, add the
    // Float16Buffer capability.
1226 if (!ST.isOpenCLEnv())
1227 break;
1228 assert(MI.getOperand(2).isReg());
1229 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
1230 SPIRVType *TypeDef = MRI.getVRegDef(MI.getOperand(2).getReg());
1231 if (TypeDef->getOpcode() == SPIRV::OpTypeFloat &&
1232 TypeDef->getOperand(1).getImm() == 16)
1233 Reqs.addCapability(SPIRV::Capability::Float16Buffer);
1234 break;
1235 }
1236 case SPIRV::OpExtInst: {
1237 if (MI.getOperand(2).getImm() ==
1238 static_cast<int64_t>(
1239 SPIRV::InstructionSet::NonSemantic_Shader_DebugInfo_100)) {
1240 Reqs.addExtension(SPIRV::Extension::SPV_KHR_non_semantic_info);
1241 }
1242 break;
1243 }
1244 case SPIRV::OpBitReverse:
1245 case SPIRV::OpBitFieldInsert:
1246 case SPIRV::OpBitFieldSExtract:
1247 case SPIRV::OpBitFieldUExtract:
1248 if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
1249 Reqs.addCapability(SPIRV::Capability::Shader);
1250 break;
1251 }
1252 Reqs.addExtension(SPIRV::Extension::SPV_KHR_bit_instructions);
1253 Reqs.addCapability(SPIRV::Capability::BitInstructions);
1254 break;
1255 case SPIRV::OpTypeRuntimeArray:
1256 Reqs.addCapability(SPIRV::Capability::Shader);
1257 break;
1258 case SPIRV::OpTypeOpaque:
1259 case SPIRV::OpTypeEvent:
1260 Reqs.addCapability(SPIRV::Capability::Kernel);
1261 break;
1262 case SPIRV::OpTypePipe:
1263 case SPIRV::OpTypeReserveId:
1264 Reqs.addCapability(SPIRV::Capability::Pipes);
1265 break;
1266 case SPIRV::OpTypeDeviceEvent:
1267 case SPIRV::OpTypeQueue:
1268 case SPIRV::OpBuildNDRange:
1269 Reqs.addCapability(SPIRV::Capability::DeviceEnqueue);
1270 break;
1271 case SPIRV::OpDecorate:
1272 case SPIRV::OpDecorateId:
1273 case SPIRV::OpDecorateString:
1274 addOpDecorateReqs(MI, 1, Reqs, ST);
1275 break;
1276 case SPIRV::OpMemberDecorate:
1277 case SPIRV::OpMemberDecorateString:
1278 addOpDecorateReqs(MI, 2, Reqs, ST);
1279 break;
1280 case SPIRV::OpInBoundsPtrAccessChain:
1281 Reqs.addCapability(SPIRV::Capability::Addresses);
1282 break;
1283 case SPIRV::OpConstantSampler:
1284 Reqs.addCapability(SPIRV::Capability::LiteralSampler);
1285 break;
1286 case SPIRV::OpInBoundsAccessChain:
1287 case SPIRV::OpAccessChain:
1288 addOpAccessChainReqs(MI, Reqs, ST);
1289 break;
1290 case SPIRV::OpTypeImage:
1291 addOpTypeImageReqs(MI, Reqs, ST);
1292 break;
1293 case SPIRV::OpTypeSampler:
1294 if (!ST.isVulkanEnv()) {
1295 Reqs.addCapability(SPIRV::Capability::ImageBasic);
1296 }
1297 break;
1298 case SPIRV::OpTypeForwardPointer:
1299 // TODO: check if it's OpenCL's kernel.
1300 Reqs.addCapability(SPIRV::Capability::Addresses);
1301 break;
1302 case SPIRV::OpAtomicFlagTestAndSet:
1303 case SPIRV::OpAtomicLoad:
1304 case SPIRV::OpAtomicStore:
1305 case SPIRV::OpAtomicExchange:
1306 case SPIRV::OpAtomicCompareExchange:
1307 case SPIRV::OpAtomicIIncrement:
1308 case SPIRV::OpAtomicIDecrement:
1309 case SPIRV::OpAtomicIAdd:
1310 case SPIRV::OpAtomicISub:
1311 case SPIRV::OpAtomicUMin:
1312 case SPIRV::OpAtomicUMax:
1313 case SPIRV::OpAtomicSMin:
1314 case SPIRV::OpAtomicSMax:
1315 case SPIRV::OpAtomicAnd:
1316 case SPIRV::OpAtomicOr:
1317 case SPIRV::OpAtomicXor: {
1318 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
1319 const MachineInstr *InstrPtr = &MI;
1320 if (MI.getOpcode() == SPIRV::OpAtomicStore) {
1321 assert(MI.getOperand(3).isReg());
1322 InstrPtr = MRI.getVRegDef(MI.getOperand(3).getReg());
1323 assert(InstrPtr && "Unexpected type instruction for OpAtomicStore");
1324 }
1325 assert(InstrPtr->getOperand(1).isReg() && "Unexpected operand in atomic");
1326 Register TypeReg = InstrPtr->getOperand(1).getReg();
1327 SPIRVType *TypeDef = MRI.getVRegDef(TypeReg);
1328 if (TypeDef->getOpcode() == SPIRV::OpTypeInt) {
1329 unsigned BitWidth = TypeDef->getOperand(1).getImm();
1330 if (BitWidth == 64)
1331 Reqs.addCapability(SPIRV::Capability::Int64Atomics);
1332 }
1333 break;
1334 }
1335 case SPIRV::OpGroupNonUniformIAdd:
1336 case SPIRV::OpGroupNonUniformFAdd:
1337 case SPIRV::OpGroupNonUniformIMul:
1338 case SPIRV::OpGroupNonUniformFMul:
1339 case SPIRV::OpGroupNonUniformSMin:
1340 case SPIRV::OpGroupNonUniformUMin:
1341 case SPIRV::OpGroupNonUniformFMin:
1342 case SPIRV::OpGroupNonUniformSMax:
1343 case SPIRV::OpGroupNonUniformUMax:
1344 case SPIRV::OpGroupNonUniformFMax:
1345 case SPIRV::OpGroupNonUniformBitwiseAnd:
1346 case SPIRV::OpGroupNonUniformBitwiseOr:
1347 case SPIRV::OpGroupNonUniformBitwiseXor:
1348 case SPIRV::OpGroupNonUniformLogicalAnd:
1349 case SPIRV::OpGroupNonUniformLogicalOr:
1350 case SPIRV::OpGroupNonUniformLogicalXor: {
1351 assert(MI.getOperand(3).isImm());
1352 int64_t GroupOp = MI.getOperand(3).getImm();
1353 switch (GroupOp) {
1354 case SPIRV::GroupOperation::Reduce:
1355 case SPIRV::GroupOperation::InclusiveScan:
1356 case SPIRV::GroupOperation::ExclusiveScan:
1357 Reqs.addCapability(SPIRV::Capability::GroupNonUniformArithmetic);
1358 break;
1359 case SPIRV::GroupOperation::ClusteredReduce:
1360 Reqs.addCapability(SPIRV::Capability::GroupNonUniformClustered);
1361 break;
1362 case SPIRV::GroupOperation::PartitionedReduceNV:
1363 case SPIRV::GroupOperation::PartitionedInclusiveScanNV:
1364 case SPIRV::GroupOperation::PartitionedExclusiveScanNV:
1365 Reqs.addCapability(SPIRV::Capability::GroupNonUniformPartitionedNV);
1366 break;
1367 }
1368 break;
1369 }
1370 case SPIRV::OpGroupNonUniformShuffle:
1371 case SPIRV::OpGroupNonUniformShuffleXor:
1372 Reqs.addCapability(SPIRV::Capability::GroupNonUniformShuffle);
1373 break;
1374 case SPIRV::OpGroupNonUniformShuffleUp:
1375 case SPIRV::OpGroupNonUniformShuffleDown:
1376 Reqs.addCapability(SPIRV::Capability::GroupNonUniformShuffleRelative);
1377 break;
1378 case SPIRV::OpGroupAll:
1379 case SPIRV::OpGroupAny:
1380 case SPIRV::OpGroupBroadcast:
1381 case SPIRV::OpGroupIAdd:
1382 case SPIRV::OpGroupFAdd:
1383 case SPIRV::OpGroupFMin:
1384 case SPIRV::OpGroupUMin:
1385 case SPIRV::OpGroupSMin:
1386 case SPIRV::OpGroupFMax:
1387 case SPIRV::OpGroupUMax:
1388 case SPIRV::OpGroupSMax:
1389 Reqs.addCapability(SPIRV::Capability::Groups);
1390 break;
1391 case SPIRV::OpGroupNonUniformElect:
1392 Reqs.addCapability(SPIRV::Capability::GroupNonUniform);
1393 break;
1394 case SPIRV::OpGroupNonUniformAll:
1395 case SPIRV::OpGroupNonUniformAny:
1396 case SPIRV::OpGroupNonUniformAllEqual:
1397 Reqs.addCapability(SPIRV::Capability::GroupNonUniformVote);
1398 break;
1399 case SPIRV::OpGroupNonUniformBroadcast:
1400 case SPIRV::OpGroupNonUniformBroadcastFirst:
1401 case SPIRV::OpGroupNonUniformBallot:
1402 case SPIRV::OpGroupNonUniformInverseBallot:
1403 case SPIRV::OpGroupNonUniformBallotBitExtract:
1404 case SPIRV::OpGroupNonUniformBallotBitCount:
1405 case SPIRV::OpGroupNonUniformBallotFindLSB:
1406 case SPIRV::OpGroupNonUniformBallotFindMSB:
1407 Reqs.addCapability(SPIRV::Capability::GroupNonUniformBallot);
1408 break;
1409 case SPIRV::OpSubgroupShuffleINTEL:
1410 case SPIRV::OpSubgroupShuffleDownINTEL:
1411 case SPIRV::OpSubgroupShuffleUpINTEL:
1412 case SPIRV::OpSubgroupShuffleXorINTEL:
1413 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1414 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1415 Reqs.addCapability(SPIRV::Capability::SubgroupShuffleINTEL);
1416 }
1417 break;
1418 case SPIRV::OpSubgroupBlockReadINTEL:
1419 case SPIRV::OpSubgroupBlockWriteINTEL:
1420 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1421 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1422 Reqs.addCapability(SPIRV::Capability::SubgroupBufferBlockIOINTEL);
1423 }
1424 break;
1425 case SPIRV::OpSubgroupImageBlockReadINTEL:
1426 case SPIRV::OpSubgroupImageBlockWriteINTEL:
1427 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1428 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_subgroups);
1429 Reqs.addCapability(SPIRV::Capability::SubgroupImageBlockIOINTEL);
1430 }
1431 break;
1432 case SPIRV::OpSubgroupImageMediaBlockReadINTEL:
1433 case SPIRV::OpSubgroupImageMediaBlockWriteINTEL:
1434 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
1435 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_media_block_io);
1436 Reqs.addCapability(SPIRV::Capability::SubgroupImageMediaBlockIOINTEL);
1437 }
1438 break;
1439 case SPIRV::OpAssumeTrueKHR:
1440 case SPIRV::OpExpectKHR:
1441 if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume)) {
1442 Reqs.addExtension(SPIRV::Extension::SPV_KHR_expect_assume);
1443 Reqs.addCapability(SPIRV::Capability::ExpectAssumeKHR);
1444 }
1445 break;
1446 case SPIRV::OpPtrCastToCrossWorkgroupINTEL:
1447 case SPIRV::OpCrossWorkgroupCastToPtrINTEL:
1448 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes)) {
1449 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_usm_storage_classes);
1450 Reqs.addCapability(SPIRV::Capability::USMStorageClassesINTEL);
1451 }
1452 break;
1453 case SPIRV::OpConstantFunctionPointerINTEL:
1454 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
1455 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
1456 Reqs.addCapability(SPIRV::Capability::FunctionPointersINTEL);
1457 }
1458 break;
1459 case SPIRV::OpGroupNonUniformRotateKHR:
1460 if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate))
1461 report_fatal_error("OpGroupNonUniformRotateKHR instruction requires the "
1462 "following SPIR-V extension: SPV_KHR_subgroup_rotate",
1463 false);
1464 Reqs.addExtension(SPIRV::Extension::SPV_KHR_subgroup_rotate);
1465 Reqs.addCapability(SPIRV::Capability::GroupNonUniformRotateKHR);
1466 Reqs.addCapability(SPIRV::Capability::GroupNonUniform);
1467 break;
1468 case SPIRV::OpGroupIMulKHR:
1469 case SPIRV::OpGroupFMulKHR:
1470 case SPIRV::OpGroupBitwiseAndKHR:
1471 case SPIRV::OpGroupBitwiseOrKHR:
1472 case SPIRV::OpGroupBitwiseXorKHR:
1473 case SPIRV::OpGroupLogicalAndKHR:
1474 case SPIRV::OpGroupLogicalOrKHR:
1475 case SPIRV::OpGroupLogicalXorKHR:
1476 if (ST.canUseExtension(
1477 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1478 Reqs.addExtension(SPIRV::Extension::SPV_KHR_uniform_group_instructions);
1479 Reqs.addCapability(SPIRV::Capability::GroupUniformArithmeticKHR);
1480 }
1481 break;
1482 case SPIRV::OpReadClockKHR:
1483 if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock))
1484 report_fatal_error("OpReadClockKHR instruction requires the "
1485 "following SPIR-V extension: SPV_KHR_shader_clock",
1486 false);
1487 Reqs.addExtension(SPIRV::Extension::SPV_KHR_shader_clock);
1488 Reqs.addCapability(SPIRV::Capability::ShaderClockKHR);
1489 break;
1490 case SPIRV::OpFunctionPointerCallINTEL:
1491 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)) {
1492 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_function_pointers);
1493 Reqs.addCapability(SPIRV::Capability::FunctionPointersINTEL);
1494 }
1495 break;
1496 case SPIRV::OpAtomicFAddEXT:
1497 case SPIRV::OpAtomicFMinEXT:
1498 case SPIRV::OpAtomicFMaxEXT:
1499 AddAtomicFloatRequirements(MI, Reqs, ST);
1500 break;
1501 case SPIRV::OpConvertBF16ToFINTEL:
1502 case SPIRV::OpConvertFToBF16INTEL:
1503 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_bfloat16_conversion)) {
1504 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_bfloat16_conversion);
1505 Reqs.addCapability(SPIRV::Capability::BFloat16ConversionINTEL);
1506 }
1507 break;
1508 case SPIRV::OpVariableLengthArrayINTEL:
1509 case SPIRV::OpSaveMemoryINTEL:
1510 case SPIRV::OpRestoreMemoryINTEL:
1511 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array)) {
1512 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_variable_length_array);
1513 Reqs.addCapability(SPIRV::Capability::VariableLengthArrayINTEL);
1514 }
1515 break;
1516 case SPIRV::OpAsmTargetINTEL:
1517 case SPIRV::OpAsmINTEL:
1518 case SPIRV::OpAsmCallINTEL:
1519 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_inline_assembly)) {
1520 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_inline_assembly);
1521 Reqs.addCapability(SPIRV::Capability::AsmINTEL);
1522 }
1523 break;
1524 case SPIRV::OpTypeCooperativeMatrixKHR:
1525 if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
1526 report_fatal_error(
1527 "OpTypeCooperativeMatrixKHR type requires the "
1528 "following SPIR-V extension: SPV_KHR_cooperative_matrix",
1529 false);
1530 Reqs.addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1531 Reqs.addCapability(SPIRV::Capability::CooperativeMatrixKHR);
1532 break;
1533 case SPIRV::OpArithmeticFenceEXT:
1534 if (!ST.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
1535 report_fatal_error("OpArithmeticFenceEXT requires the "
1536 "following SPIR-V extension: SPV_EXT_arithmetic_fence",
1537 false);
1538 Reqs.addExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence);
1539 Reqs.addCapability(SPIRV::Capability::ArithmeticFenceEXT);
1540 break;
1541 case SPIRV::OpControlBarrierArriveINTEL:
1542 case SPIRV::OpControlBarrierWaitINTEL:
1543 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
1544 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_split_barrier);
1545 Reqs.addCapability(SPIRV::Capability::SplitBarrierINTEL);
1546 }
1547 break;
1548 case SPIRV::OpCooperativeMatrixMulAddKHR: {
1549 if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
1550 report_fatal_error("Cooperative matrix instructions require the "
1551 "following SPIR-V extension: "
1552 "SPV_KHR_cooperative_matrix",
1553 false);
1554 Reqs.addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1555 Reqs.addCapability(SPIRV::Capability::CooperativeMatrixKHR);
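// Note: the optional CooperativeMatrixOperands mask is the last of the six
// operands; when it is absent there are no INTEL type interpretations to
// check, so the operand-count test below exits the case early.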
1556 constexpr unsigned MulAddMaxSize = 6;
1557 if (MI.getNumOperands() != MulAddMaxSize)
1558 break;
1559 const int64_t CoopOperands = MI.getOperand(MulAddMaxSize - 1).getImm();
1560 if (CoopOperands &
1561 SPIRV::CooperativeMatrixOperands::MatrixAAndBTF32ComponentsINTEL) {
1562 if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1563 report_fatal_error("MatrixAAndBTF32ComponentsINTEL type interpretation "
1564 "require the following SPIR-V extension: "
1565 "SPV_INTEL_joint_matrix",
1566 false);
1567 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1568 Reqs.addCapability(
1569 SPIRV::Capability::CooperativeMatrixTF32ComponentTypeINTEL);
1570 }
1571 if (CoopOperands & SPIRV::CooperativeMatrixOperands::
1572 MatrixAAndBBFloat16ComponentsINTEL ||
1573 CoopOperands &
1574 SPIRV::CooperativeMatrixOperands::MatrixCBFloat16ComponentsINTEL ||
1575 CoopOperands & SPIRV::CooperativeMatrixOperands::
1576 MatrixResultBFloat16ComponentsINTEL) {
1577 if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1578 report_fatal_error("***BF16ComponentsINTEL type interpretations "
1579 "require the following SPIR-V extension: "
1580 "SPV_INTEL_joint_matrix",
1581 false);
1582 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1583 Reqs.addCapability(
1584 SPIRV::Capability::CooperativeMatrixBFloat16ComponentTypeINTEL);
1585 }
1586 break;
1587 }
1588 case SPIRV::OpCooperativeMatrixLoadKHR:
1589 case SPIRV::OpCooperativeMatrixStoreKHR:
1590 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
1591 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
1592 case SPIRV::OpCooperativeMatrixPrefetchINTEL: {
1593 if (!ST.canUseExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix))
1594 report_fatal_error("Cooperative matrix instructions require the "
1595 "following SPIR-V extension: "
1596 "SPV_KHR_cooperative_matrix",
1597 false);
1598 Reqs.addExtension(SPIRV::Extension::SPV_KHR_cooperative_matrix);
1599 Reqs.addCapability(SPIRV::Capability::CooperativeMatrixKHR);
1600
1601 // Check the Layout operand; if it is not one of the standard layouts, add
1602 // the appropriate capability.
1603 std::unordered_map<unsigned, unsigned> LayoutToInstMap = {
1604 {SPIRV::OpCooperativeMatrixLoadKHR, 3},
1605 {SPIRV::OpCooperativeMatrixStoreKHR, 2},
1606 {SPIRV::OpCooperativeMatrixLoadCheckedINTEL, 5},
1607 {SPIRV::OpCooperativeMatrixStoreCheckedINTEL, 4},
1608 {SPIRV::OpCooperativeMatrixPrefetchINTEL, 4}};
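// The mapped value is the operand index holding the memory layout for the
// corresponding opcode.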
1609
1610 const auto OpCode = MI.getOpcode();
1611 const unsigned LayoutNum = LayoutToInstMap[OpCode];
1612 Register RegLayout = MI.getOperand(LayoutNum).getReg();
1613 const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
1614 MachineInstr *MILayout = MRI.getUniqueVRegDef(RegLayout);
1615 if (MILayout->getOpcode() == SPIRV::OpConstantI) {
1616 const unsigned LayoutVal = MILayout->getOperand(2).getImm();
1617 if (LayoutVal ==
1618 static_cast<unsigned>(SPIRV::CooperativeMatrixLayout::PackedINTEL)) {
1619 if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1620 report_fatal_error("PackedINTEL layout require the following SPIR-V "
1621 "extension: SPV_INTEL_joint_matrix",
1622 false);
1623 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1624 Reqs.addCapability(SPIRV::Capability::PackedCooperativeMatrixINTEL);
1625 }
1626 }
1627
1628 // Nothing to do.
1629 if (OpCode == SPIRV::OpCooperativeMatrixLoadKHR ||
1630 OpCode == SPIRV::OpCooperativeMatrixStoreKHR)
1631 break;
1632
1633 std::string InstName;
1634 switch (OpCode) {
1635 case SPIRV::OpCooperativeMatrixPrefetchINTEL:
1636 InstName = "OpCooperativeMatrixPrefetchINTEL";
1637 break;
1638 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
1639 InstName = "OpCooperativeMatrixLoadCheckedINTEL";
1640 break;
1641 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
1642 InstName = "OpCooperativeMatrixStoreCheckedINTEL";
1643 break;
1644 }
1645
1646 if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix)) {
1647 const std::string ErrorMsg =
1648 InstName + " instruction requires the "
1649 "following SPIR-V extension: SPV_INTEL_joint_matrix";
1650 report_fatal_error(ErrorMsg.c_str(), false);
1651 }
1652 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1653 if (OpCode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
1654 Reqs.addCapability(SPIRV::Capability::CooperativeMatrixPrefetchINTEL);
1655 break;
1656 }
1657 Reqs.addCapability(
1658 SPIRV::Capability::CooperativeMatrixCheckedInstructionsINTEL);
1659 break;
1660 }
1661 case SPIRV::OpCooperativeMatrixConstructCheckedINTEL:
1662 if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1663 report_fatal_error("OpCooperativeMatrixConstructCheckedINTEL "
1664 "instructions require the following SPIR-V extension: "
1665 "SPV_INTEL_joint_matrix",
1666 false);
1667 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1668 Reqs.addCapability(
1669 SPIRV::Capability::CooperativeMatrixCheckedInstructionsINTEL);
1670 break;
1671 case SPIRV::OpCooperativeMatrixGetElementCoordINTEL:
1672 if (!ST.canUseExtension(SPIRV::Extension::SPV_INTEL_joint_matrix))
1673 report_fatal_error("OpCooperativeMatrixGetElementCoordINTEL requires the "
1674 "following SPIR-V extension: SPV_INTEL_joint_matrix",
1675 false);
1676 Reqs.addExtension(SPIRV::Extension::SPV_INTEL_joint_matrix);
1677 Reqs.addCapability(
1678 SPIRV::Capability::CooperativeMatrixInvocationInstructionsINTEL);
1679 break;
1680 case SPIRV::OpKill: {
1681 Reqs.addCapability(SPIRV::Capability::Shader);
1682 } break;
1683 case SPIRV::OpDemoteToHelperInvocation:
1684 Reqs.addCapability(SPIRV::Capability::DemoteToHelperInvocation);
1685
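// The capability is core from SPIR-V 1.6, so the extension is only recorded
// for earlier versions (hence the version check below).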
1686 if (ST.canUseExtension(
1687 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation)) {
1688 if (!ST.isAtLeastSPIRVVer(llvm::VersionTuple(1, 6)))
1689 Reqs.addExtension(
1690 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation);
1691 }
1692 break;
1693 case SPIRV::OpSDot:
1694 case SPIRV::OpUDot:
1695 AddDotProductRequirements(MI, Reqs, ST);
1696 break;
1697 case SPIRV::OpImageRead: {
1698 Register ImageReg = MI.getOperand(2).getReg();
1699 SPIRVType *TypeDef = ST.getSPIRVGlobalRegistry()->getResultType(
1700 ImageReg, const_cast<MachineFunction *>(MI.getMF()));
1701 if (isImageTypeWithUnknownFormat(TypeDef))
1702 Reqs.addCapability(SPIRV::Capability::StorageImageReadWithoutFormat);
1703 break;
1704 }
1705 case SPIRV::OpImageWrite: {
1706 Register ImageReg = MI.getOperand(0).getReg();
1707 SPIRVType *TypeDef = ST.getSPIRVGlobalRegistry()->getResultType(
1708 ImageReg, const_cast<MachineFunction *>(MI.getMF()));
1709 if (isImageTypeWithUnknownFormat(TypeDef))
1710 Reqs.addCapability(SPIRV::Capability::StorageImageWriteWithoutFormat);
1711 break;
1712 }
1713
1714 default:
1715 break;
1716 }
1717
1718 // If we require capability Shader, then we can remove the requirement for
1719 // the BitInstructions capability, since Shader is a superset capability
1720 // of BitInstructions.
1721 Reqs.removeCapabilityIf(SPIRV::Capability::BitInstructions,
1722 SPIRV::Capability::Shader);
1723}
1724
1725static void collectReqs(const Module &M, SPIRV::ModuleAnalysisInfo &MAI,
1726 MachineModuleInfo *MMI, const SPIRVSubtarget &ST) {
1727 // Collect requirements for existing instructions.
1728 for (auto F = M.begin(), E = M.end(); F != E; ++F) {
1729 MachineFunction *MF = MMI->getMachineFunction(*F);
1730 if (!MF)
1731 continue;
1732 for (const MachineBasicBlock &MBB : *MF)
1733 for (const MachineInstr &MI : MBB)
1734 addInstrRequirements(MI, MAI.Reqs, ST);
1735 }
1736 // Collect requirements for OpExecutionMode instructions.
1737 auto Node = M.getNamedMetadata("spirv.ExecutionMode");
1738 if (Node) {
1739 bool RequireFloatControls = false, RequireFloatControls2 = false,
1740 VerLower14 = !ST.isAtLeastSPIRVVer(VersionTuple(1, 4));
1741 bool HasFloatControls2 =
1742 ST.canUseExtension(SPIRV::Extension::SPV_INTEL_float_controls2);
1743 for (unsigned i = 0; i < Node->getNumOperands(); i++) {
1744 MDNode *MDN = cast<MDNode>(Node->getOperand(i));
1745 const MDOperand &MDOp = MDN->getOperand(1);
1746 if (auto *CMeta = dyn_cast<ConstantAsMetadata>(MDOp)) {
1747 Constant *C = CMeta->getValue();
1748 if (ConstantInt *Const = dyn_cast<ConstantInt>(C)) {
1749 auto EM = Const->getZExtValue();
1750 // The float-controls execution modes are core only from SPIR-V v1.4;
1751 // for lower versions they additionally require SPV_KHR_float_controls.
1752 switch (EM) {
1753 case SPIRV::ExecutionMode::DenormPreserve:
1754 case SPIRV::ExecutionMode::DenormFlushToZero:
1755 case SPIRV::ExecutionMode::SignedZeroInfNanPreserve:
1756 case SPIRV::ExecutionMode::RoundingModeRTE:
1757 case SPIRV::ExecutionMode::RoundingModeRTZ:
1758 RequireFloatControls = VerLower14;
1759 MAI.Reqs.getAndAddRequirements(
1760 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
1761 break;
1762 case SPIRV::ExecutionMode::RoundingModeRTPINTEL:
1763 case SPIRV::ExecutionMode::RoundingModeRTNINTEL:
1764 case SPIRV::ExecutionMode::FloatingPointModeALTINTEL:
1765 case SPIRV::ExecutionMode::FloatingPointModeIEEEINTEL:
1766 if (HasFloatControls2) {
1767 RequireFloatControls2 = true;
1768 MAI.Reqs.getAndAddRequirements(
1769 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
1770 }
1771 break;
1772 default:
1773 MAI.Reqs.getAndAddRequirements(
1774 SPIRV::OperandCategory::ExecutionModeOperand, EM, ST);
1775 }
1776 }
1777 }
1778 }
1779 if (RequireFloatControls &&
1780 ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls))
1781 MAI.Reqs.addExtension(SPIRV::Extension::SPV_KHR_float_controls);
1782 if (RequireFloatControls2)
1783 MAI.Reqs.addExtension(SPIRV::Extension::SPV_INTEL_float_controls2);
1784 }
1785 for (auto FI = M.begin(), E = M.end(); FI != E; ++FI) {
1786 const Function &F = *FI;
1787 if (F.isDeclaration())
1788 continue;
1789 if (F.getMetadata("reqd_work_group_size"))
1790 MAI.Reqs.getAndAddRequirements(
1791 SPIRV::OperandCategory::ExecutionModeOperand,
1792 SPIRV::ExecutionMode::LocalSize, ST);
1793 if (F.getFnAttribute("hlsl.numthreads").isValid()) {
1794 MAI.Reqs.getAndAddRequirements(
1795 SPIRV::OperandCategory::ExecutionModeOperand,
1796 SPIRV::ExecutionMode::LocalSize, ST);
1797 }
1798 if (F.getMetadata("work_group_size_hint"))
1799 MAI.Reqs.getAndAddRequirements(
1800 SPIRV::OperandCategory::ExecutionModeOperand,
1801 SPIRV::ExecutionMode::LocalSizeHint, ST);
1802 if (F.getMetadata("intel_reqd_sub_group_size"))
1803 MAI.Reqs.getAndAddRequirements(
1804 SPIRV::OperandCategory::ExecutionModeOperand,
1805 SPIRV::ExecutionMode::SubgroupSize, ST);
1806 if (F.getMetadata("vec_type_hint"))
1807 MAI.Reqs.getAndAddRequirements(
1808 SPIRV::OperandCategory::ExecutionModeOperand,
1809 SPIRV::ExecutionMode::VecTypeHint, ST);
1810
1811 if (F.hasOptNone()) {
1812 if (ST.canUseExtension(SPIRV::Extension::SPV_INTEL_optnone)) {
1813 MAI.Reqs.addExtension(SPIRV::Extension::SPV_INTEL_optnone);
1814 MAI.Reqs.addCapability(SPIRV::Capability::OptNoneINTEL);
1815 } else if (ST.canUseExtension(SPIRV::Extension::SPV_EXT_optnone)) {
1816 MAI.Reqs.addExtension(SPIRV::Extension::SPV_EXT_optnone);
1817 MAI.Reqs.addCapability(SPIRV::Capability::OptNoneEXT);
1818 }
1819 }
1820 }
1821}
1822
1823static unsigned getFastMathFlags(const MachineInstr &I) {
1824 unsigned Flags = SPIRV::FPFastMathMode::None;
1825 if (I.getFlag(MachineInstr::MIFlag::FmNoNans))
1826 Flags |= SPIRV::FPFastMathMode::NotNaN;
1827 if (I.getFlag(MachineInstr::MIFlag::FmNoInfs))
1828 Flags |= SPIRV::FPFastMathMode::NotInf;
1829 if (I.getFlag(MachineInstr::MIFlag::FmNsz))
1830 Flags |= SPIRV::FPFastMathMode::NSZ;
1831 if (I.getFlag(MachineInstr::MIFlag::FmArcp))
1832 Flags |= SPIRV::FPFastMathMode::AllowRecip;
1833 if (I.getFlag(MachineInstr::MIFlag::FmReassoc))
1834 Flags |= SPIRV::FPFastMathMode::Fast;
1835 return Flags;
1836}
1837
1838static void handleMIFlagDecoration(MachineInstr &I, const SPIRVSubtarget &ST,
1839 const SPIRVInstrInfo &TII,
1840 SPIRV::RequirementHandler &Reqs) {
1841 if (I.getFlag(MachineInstr::MIFlag::NoSWrap) && TII.canUseNSW(I) &&
1842 getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
1843 SPIRV::Decoration::NoSignedWrap, ST, Reqs)
1844 .IsSatisfiable) {
1845 buildOpDecorate(I.getOperand(0).getReg(), I, TII,
1846 SPIRV::Decoration::NoSignedWrap, {});
1847 }
1848 if (I.getFlag(MachineInstr::MIFlag::NoUWrap) && TII.canUseNUW(I) &&
1849 getSymbolicOperandRequirements(SPIRV::OperandCategory::DecorationOperand,
1850 SPIRV::Decoration::NoUnsignedWrap, ST,
1851 Reqs)
1852 .IsSatisfiable) {
1853 buildOpDecorate(I.getOperand(0).getReg(), I, TII,
1854 SPIRV::Decoration::NoUnsignedWrap, {});
1855 }
1856 if (!TII.canUseFastMathFlags(I))
1857 return;
1858 unsigned FMFlags = getFastMathFlags(I);
1859 if (FMFlags == SPIRV::FPFastMathMode::None)
1860 return;
1861 Register DstReg = I.getOperand(0).getReg();
1862 buildOpDecorate(DstReg, I, TII, SPIRV::Decoration::FPFastMathMode, {FMFlags});
1863}
1864
1865// Walk all functions and add decorations related to MI flags.
1866static void addDecorations(const Module &M, const SPIRVInstrInfo &TII,
1867 MachineModuleInfo *MMI, const SPIRVSubtarget &ST,
1868 SPIRV::ModuleAnalysisInfo &MAI) {
1869 for (auto F = M.begin(), E = M.end(); F != E; ++F) {
1870 MachineFunction *MF = MMI->getMachineFunction(*F);
1871 if (!MF)
1872 continue;
1873 for (auto &MBB : *MF)
1874 for (auto &MI : MBB)
1875 handleMIFlagDecoration(MI, ST, TII, MAI.Reqs);
1876 }
1877}
1878
1879static void addMBBNames(const Module &M, const SPIRVInstrInfo &TII,
1880 MachineModuleInfo *MMI, const SPIRVSubtarget &ST,
1881 SPIRV::ModuleAnalysisInfo &MAI) {
1882 for (auto F = M.begin(), E = M.end(); F != E; ++F) {
1883 MachineFunction *MF = MMI->getMachineFunction(*F);
1884 if (!MF)
1885 continue;
1886 MachineRegisterInfo &MRI = MF->getRegInfo();
1887 for (auto &MBB : *MF) {
1888 if (!MBB.hasName() || MBB.empty())
1889 continue;
1890 // Emit basic block names.
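// The scratch register only anchors the OpName in this function; it is then
// aliased to the module-global register assigned to this basic block.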
1891 Register Reg = MRI.createGenericVirtualRegister(LLT::scalar(64));
1892 MRI.setRegClass(Reg, &SPIRV::IDRegClass);
1893 buildOpName(Reg, MBB.getName(), *std::prev(MBB.end()), TII);
1894 Register GlobalReg = MAI.getOrCreateMBBRegister(MBB);
1895 MAI.setRegisterAlias(MF, Reg, GlobalReg);
1896 }
1897 }
1898}
1899
1900 // Rewrite TargetOpcode::PHI instructions to SPIRV::OpPhi, adding the result-type operand.
1901static void patchPhis(const Module &M, SPIRVGlobalRegistry *GR,
1902 const SPIRVInstrInfo &TII, MachineModuleInfo *MMI) {
1903 for (auto F = M.begin(), E = M.end(); F != E; ++F) {
1904 MachineFunction *MF = MMI->getMachineFunction(*F);
1905 if (!MF)
1906 continue;
1907 for (auto &MBB : *MF) {
1908 for (MachineInstr &MI : MBB) {
1909 if (MI.getOpcode() != TargetOpcode::PHI)
1910 continue;
1911 MI.setDesc(TII.get(SPIRV::OpPhi));
1912 Register ResTypeReg = GR->getSPIRVTypeID(
1913 GR->getSPIRVTypeForVReg(MI.getOperand(0).getReg(), MF));
1914 MI.insert(MI.operands_begin() + 1,
1915 {MachineOperand::CreateReg(ResTypeReg, false)});
1916 }
1917 }
1918 }
1919}
1920
1921 struct SPIRV::ModuleAnalysisInfo SPIRVModuleAnalysis::MAI;
1922
1923 void SPIRVModuleAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
1924 AU.addRequired<TargetPassConfig>();
1925 AU.addRequired<MachineModuleInfoWrapperPass>();
1926}
1927
1928 bool SPIRVModuleAnalysis::runOnModule(Module &M) {
1929 SPIRVTargetMachine &TM =
1930 getAnalysis<TargetPassConfig>().getTM<SPIRVTargetMachine>();
1931 ST = TM.getSubtargetImpl();
1932 GR = ST->getSPIRVGlobalRegistry();
1933 TII = ST->getInstrInfo();
1934
1935 MMI = &getAnalysis<MachineModuleInfoWrapperPass>().getMMI();
1936
1937 setBaseInfo(M);
1938
1939 patchPhis(M, GR, *TII, MMI);
1940
1941 addMBBNames(M, *TII, MMI, *ST, MAI);
1942 addDecorations(M, *TII, MMI, *ST, MAI);
1943
1944 collectReqs(M, MAI, MMI, *ST);
1945
1946 // Process type/const/global var/func decl instructions, number their
1947 // destination registers from 0 to N, collect Extensions and Capabilities.
1948 collectDeclarations(M);
1949
1950 // Number rest of registers from N+1 onwards.
1951 numberRegistersGlobally(M);
1952
1953 // Collect OpName, OpEntryPoint, OpDecorate etc, process other instructions.
1954 processOtherInstrs(M);
1955
1956 // If there are no entry points, we need the Linkage capability.
1957 if (MAI.MS[SPIRV::MB_EntryPoints].empty())
1958 MAI.Reqs.addCapability(SPIRV::Capability::Linkage);
1959
1960 // Set maximum ID used.
1961 GR->setBound(MAI.MaxID);
1962
1963 return false;
1964}