//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering builtin function calls and types using their
// demangled names and TableGen records.
//
//===----------------------------------------------------------------------===//

#include "SPIRVBuiltins.h"
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include <string>
#include <tuple>

#define DEBUG_TYPE "spirv-builtins"

namespace llvm {
namespace SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"

struct DemangledBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltinGroup Group;
  uint8_t MinNumArgs;
  uint8_t MaxNumArgs;
};

#define GET_DemangledBuiltins_DECL
#define GET_DemangledBuiltins_IMPL

struct IncomingCall {
  const std::string BuiltinName;
  const DemangledBuiltin *Builtin;

  Register ReturnRegister;
  const SPIRVType *ReturnType;
  const SmallVectorImpl<Register> &Arguments;

  IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin,
               const Register ReturnRegister, const SPIRVType *ReturnType,
               const SmallVectorImpl<Register> &Arguments)
      : BuiltinName(BuiltinName), Builtin(Builtin),
        ReturnRegister(ReturnRegister), ReturnType(ReturnType),
        Arguments(Arguments) {}

  bool isSpirvOp() const { return BuiltinName.rfind("__spirv_", 0) == 0; }
};

struct NativeBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Opcode;
};

#define GET_NativeBuiltins_DECL
#define GET_NativeBuiltins_IMPL

struct GroupBuiltin {
  StringRef Name;
  uint32_t Opcode;
  uint32_t GroupOperation;
  bool IsElect;
  bool IsAllOrAny;
  bool IsAllEqual;
  bool IsBallot;
  bool IsInverseBallot;
  bool IsBallotBitExtract;
  bool IsBallotFindBit;
  bool IsLogical;
  bool NoGroupOperation;
  bool HasBoolArg;
};

#define GET_GroupBuiltins_DECL
#define GET_GroupBuiltins_IMPL

struct IntelSubgroupsBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsBlock;
  bool IsWrite;
};

#define GET_IntelSubgroupsBuiltins_DECL
#define GET_IntelSubgroupsBuiltins_IMPL

struct AtomicFloatingBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_AtomicFloatingBuiltins_DECL
#define GET_AtomicFloatingBuiltins_IMPL
struct GroupUniformBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsLogical;
};

#define GET_GroupUniformBuiltins_DECL
#define GET_GroupUniformBuiltins_IMPL

struct GetBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltIn::BuiltIn Value;
};

using namespace BuiltIn;
#define GET_GetBuiltins_DECL
#define GET_GetBuiltins_IMPL

struct ImageQueryBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Component;
};

#define GET_ImageQueryBuiltins_DECL
#define GET_ImageQueryBuiltins_IMPL

struct ConvertBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  bool IsDestinationSigned;
  bool IsSaturated;
  bool IsRounded;
  bool IsBfloat16;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

struct VectorLoadStoreBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Number;
  uint32_t ElementCount;
  bool IsRounded;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

using namespace FPRoundingMode;
#define GET_ConvertBuiltins_DECL
#define GET_ConvertBuiltins_IMPL

using namespace InstructionSet;
#define GET_VectorLoadStoreBuiltins_DECL
#define GET_VectorLoadStoreBuiltins_IMPL

#define GET_CLMemoryScope_DECL
#define GET_CLSamplerAddressingMode_DECL
#define GET_CLMemoryFenceFlags_DECL
#define GET_ExtendedBuiltins_DECL
#include "SPIRVGenTables.inc"
} // namespace SPIRV

//===----------------------------------------------------------------------===//
// Misc functions for looking up builtins and verifying requirements using
// TableGen records
//===----------------------------------------------------------------------===//

/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
/// the provided \p DemangledCall and specified \p Set.
///
/// The lookup follows the following algorithm, returning the first successful
/// match:
/// 1. Search with the plain demangled name (expecting a 1:1 match).
/// 2. Search with the prefix before or suffix after the demangled name
///    signifying the type of the first argument.
///
/// \returns Wrapper around the demangled call and found builtin definition.
static std::unique_ptr<const SPIRV::IncomingCall>
lookupBuiltin(StringRef DemangledCall,
              SPIRV::InstructionSet::InstructionSet Set,
              Register ReturnRegister, const SPIRVType *ReturnType,
              const SmallVectorImpl<Register> &Arguments) {
  // Extract the builtin function name and types of arguments from the call
  // skeleton.
  std::string BuiltinName =
      DemangledCall.substr(0, DemangledCall.find('(')).str();

  // Account for possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR.
  if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
    BuiltinName = BuiltinName.substr(12);

  // Check if the extracted name contains type information between angle
  // brackets. If so, the builtin is an instantiated template and needs to have
  // the information after the angle brackets and the return type removed.
  if (BuiltinName.find('<') != std::string::npos &&
      BuiltinName.back() == '>') {
    BuiltinName = BuiltinName.substr(0, BuiltinName.find('<'));
    BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
  }
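  // (Illustrative, hypothetical instance: a demangled name such as
  //  "int __spirv_GroupBroadcast<int>" first loses "<int>" and then the
  //  "int " return type, leaving "__spirv_GroupBroadcast".)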

  // Check if the extracted name begins with "__spirv_ImageSampleExplicitLod"
  // and contains return type information at the end "_R<type>". If so, extract
  // the plain builtin name without the type information.
  if (StringRef(BuiltinName).contains("__spirv_ImageSampleExplicitLod") &&
      StringRef(BuiltinName).contains("_R")) {
    BuiltinName = BuiltinName.substr(0, BuiltinName.find("_R"));
  }
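  // For example, "__spirv_ImageSampleExplicitLod_Rfloat4" is reduced to
  // "__spirv_ImageSampleExplicitLod" here; the "_R<type>" suffix is re-parsed
  // where the return type is needed (see generateSampleImageInst below).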

  SmallVector<StringRef, 10> BuiltinArgumentTypes;
  StringRef BuiltinArgs =
      DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
  BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);

  // Look up the builtin in the defined set. Start with the plain demangled
  // name, expecting a 1:1 match in the defined builtin set.
  const SPIRV::DemangledBuiltin *Builtin;
  if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
    return std::make_unique<SPIRV::IncomingCall>(
        BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

  // If the initial lookup was unsuccessful and the demangled call takes at
  // least 1 argument, add a prefix or suffix signifying the type of the first
  // argument and repeat the search.
  if (BuiltinArgumentTypes.size() >= 1) {
    char FirstArgumentType = BuiltinArgumentTypes[0][0];
    // Prefix to be added to the builtin's name for lookup.
    // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
    std::string Prefix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "u_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "s_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      if (Set == SPIRV::InstructionSet::OpenCL_std ||
          Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "f";
      break;
    }

    // If argument-type name prefix was added, look up the builtin again.
    if (!Prefix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

    // If lookup with a prefix failed, find a suffix to be added to the
    // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
    // an unsigned value has a suffix "u".
    std::string Suffix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      Suffix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      Suffix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      Suffix = "f";
      break;
    }

    // If argument-type name suffix was added, look up the builtin again.
    if (!Suffix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
  }

  // No builtin with such name was found in the set.
  return nullptr;
}
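
// A worked example of the lookup above, assuming the OpenCL_std records: for
// a demangled call "max(uint, uint)" the plain name "max" has no 1:1 match,
// so the 'u' first-argument class adds the "u_" prefix and "u_max" is found.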

//===----------------------------------------------------------------------===//
// Helper functions for building misc instructions
//===----------------------------------------------------------------------===//

/// Helper function building either a resulting scalar or vector bool register
/// depending on the expected \p ResultType.
///
/// \returns Tuple of the resulting register and its type.
static std::tuple<Register, SPIRVType *>
buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
                  SPIRVGlobalRegistry *GR) {
  LLT Type;
  SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);

  if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned VectorElements = ResultType->getOperand(2).getImm();
    BoolType =
        GR->getOrCreateSPIRVVectorType(BoolType, VectorElements, MIRBuilder);
    const FixedVectorType *LLVMVectorType =
        cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
    Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
  } else {
    Type = LLT::scalar(1);
  }

  Register ResultRegister =
      MIRBuilder.getMRI()->createGenericVirtualRegister(Type);
  MIRBuilder.getMRI()->setRegClass(ResultRegister, &SPIRV::IDRegClass);
  GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
  return std::make_tuple(ResultRegister, BoolType);
}

/// Helper function for building either a vector or scalar select instruction
/// depending on the expected \p ResultType.
static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
                            Register ReturnRegister, Register SourceRegister,
                            const SPIRVType *ReturnType,
                            SPIRVGlobalRegistry *GR) {
  Register TrueConst, FalseConst;

  if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
    uint64_t AllOnes = APInt::getAllOnes(Bits).getZExtValue();
    TrueConst = GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType);
    FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType);
  } else {
    TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType);
    FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType);
  }
  return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
                                FalseConst);
}

/// Helper function for building a load instruction loading into the
/// \p DestinationReg.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR, LLT LowLevelType,
                              Register DestinationReg = Register(0)) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (!DestinationReg.isValid()) {
    DestinationReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    MRI->setType(DestinationReg, LLT::scalar(32));
    GR->assignSPIRVTypeToVReg(BaseType, DestinationReg, MIRBuilder.getMF());
  }
  // TODO: consider using correct address space and alignment (p0 is canonical
  // type for selection though).
  MachinePointerInfo PtrInfo = MachinePointerInfo();
  MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
  return DestinationReg;
}

/// Helper function for building a load instruction for loading a builtin
/// global variable of \p BuiltinValue value.
static Register buildBuiltinVariableLoad(
    MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
    SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
    Register Reg = Register(0), bool isConst = true, bool hasLinkageTy = true) {
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
  MIRBuilder.getMRI()->setType(NewRegister,
                               LLT::pointer(0, GR->getPointerSize()));
  SPIRVType *PtrType = GR->getOrCreateSPIRVPointerType(
      VariableType, MIRBuilder, SPIRV::StorageClass::Input);
  GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Set up the global OpVariable with the necessary builtin decorations.
  Register Variable = GR->buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst,
      /* HasLinkageTy */ hasLinkageTy, SPIRV::LinkageType::Import, MIRBuilder,
      false);

  // Load the value from the global variable.
  Register LoadedRegister =
      buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
  return LoadedRegister;
}

/// Helper external function for inserting an ASSIGN_TYPE instruction between
/// \p Reg and its definition, setting the new register as a destination of the
/// definition and assigning a SPIRVType to both registers. If \p SpirvTy is
/// provided, use it as the SPIRVType in ASSIGN_TYPE, otherwise create it from
/// \p Ty. Defined in SPIRVPreLegalizer.cpp.
extern Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
                                  SPIRVGlobalRegistry *GR,
                                  MachineIRBuilder &MIB,
                                  MachineRegisterInfo &MRI);

// TODO: Move to TableGen.
static SPIRV::MemorySemantics::MemorySemantics
getSPIRVMemSemantics(std::memory_order MemOrder) {
  switch (MemOrder) {
  case std::memory_order::memory_order_relaxed:
    return SPIRV::MemorySemantics::None;
  case std::memory_order::memory_order_acquire:
    return SPIRV::MemorySemantics::Acquire;
  case std::memory_order::memory_order_release:
    return SPIRV::MemorySemantics::Release;
  case std::memory_order::memory_order_acq_rel:
    return SPIRV::MemorySemantics::AcquireRelease;
  case std::memory_order::memory_order_seq_cst:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  default:
    report_fatal_error("Unknown CL memory order");
  }
}
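
/// Maps an OpenCL memory scope enumerator to the corresponding SPIR-V scope.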
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
  switch (ClScope) {
  case SPIRV::CLMemoryScope::memory_scope_work_item:
    return SPIRV::Scope::Invocation;
  case SPIRV::CLMemoryScope::memory_scope_work_group:
    return SPIRV::Scope::Workgroup;
  case SPIRV::CLMemoryScope::memory_scope_device:
    return SPIRV::Scope::Device;
  case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
    return SPIRV::Scope::CrossDevice;
  case SPIRV::CLMemoryScope::memory_scope_sub_group:
    return SPIRV::Scope::Subgroup;
  }
  report_fatal_error("Unknown CL memory scope");
}

static Register buildConstantIntReg(uint64_t Val, MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR,
                                    unsigned BitWidth = 32) {
  SPIRVType *IntType = GR->getOrCreateSPIRVIntegerType(BitWidth, MIRBuilder);
  return GR->buildConstantInt(Val, MIRBuilder, IntType);
}

static Register buildScopeReg(Register CLScopeRegister,
                              SPIRV::Scope::Scope Scope,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              MachineRegisterInfo *MRI) {
  if (CLScopeRegister.isValid()) {
    auto CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
    Scope = getSPIRVScope(CLScope);

    if (CLScope == static_cast<unsigned>(Scope)) {
      MRI->setRegClass(CLScopeRegister, &SPIRV::IDRegClass);
      return CLScopeRegister;
    }
  }
  return buildConstantIntReg(Scope, MIRBuilder, GR);
}

static Register buildMemSemanticsReg(Register SemanticsRegister,
                                     Register PtrRegister, unsigned &Semantics,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  if (SemanticsRegister.isValid()) {
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    std::memory_order Order =
        static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
    Semantics =
        getSPIRVMemSemantics(Order) |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));

    if (Order == Semantics) {
      MRI->setRegClass(SemanticsRegister, &SPIRV::IDRegClass);
      return SemanticsRegister;
    }
  }
  return buildConstantIntReg(Semantics, MIRBuilder, GR);
}
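
/// Helper function for building a SPIR-V instruction directly from a
/// "__spirv_*" wrapper call: an optional result type and all call arguments
/// are forwarded verbatim as instruction operands.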
static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                               const SPIRV::IncomingCall *Call,
                               Register TypeReg = Register(0)) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (TypeReg.isValid())
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
  for (Register ArgReg : Call->Arguments) {
    if (!MRI->getRegClassOrNull(ArgReg))
      MRI->setRegClass(ArgReg, &SPIRV::IDRegClass);
    MIB.addUse(ArgReg);
  }
  return true;
}

/// Helper function for translating atomic init to OpStore.
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call);

  assert(Call->Arguments.size() == 2 &&
         "Need 2 arguments for atomic init translation");
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
  MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic load instruction.
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);

  Register PtrRegister = Call->Arguments[0];
  MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass);
  // TODO: if true insert call to __translate_ocl_memory_scope before
  // OpAtomicLoad and the function implementation. We can use Translator's
  // output for transcoding/atomic_explicit_arguments.cl as an example.
  Register ScopeRegister;
  if (Call->Arguments.size() > 1) {
    ScopeRegister = Call->Arguments[1];
    MIRBuilder.getMRI()->setRegClass(ScopeRegister, &SPIRV::IDRegClass);
  } else
    ScopeRegister = buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);

  Register MemSemanticsReg;
  if (Call->Arguments.size() > 2) {
    // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
    MemSemanticsReg = Call->Arguments[2];
    MIRBuilder.getMRI()->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
  } else {
    int Semantics =
        SPIRV::MemorySemantics::SequentiallyConsistent |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GR);
  }

  MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
      .addDef(Call->ReturnRegister)
      .addUse(TypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building an atomic store instruction.
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call);

  Register ScopeRegister =
      buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);
  Register PtrRegister = Call->Arguments[0];
  MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass);
  int Semantics =
      SPIRV::MemorySemantics::SequentiallyConsistent |
      getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
  Register MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GR);
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
  MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic compare-exchange instruction.
static bool buildAtomicCompareExchangeInst(
    const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
    unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register ObjectPtr = Call->Arguments[0];   // Pointer (volatile A *object).
  Register ExpectedArg = Call->Arguments[1]; // Comparator (C *expected).
  Register Desired = Call->Arguments[2];     // Value (C desired).
  MRI->setRegClass(ObjectPtr, &SPIRV::IDRegClass);
  MRI->setRegClass(ExpectedArg, &SPIRV::IDRegClass);
  MRI->setRegClass(Desired, &SPIRV::IDRegClass);
  SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
  LLT DesiredLLT = MRI->getType(Desired);

  assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
         SPIRV::OpTypePointer);
  unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
  (void)ExpectedType;
  assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
                   : ExpectedType == SPIRV::OpTypePointer);
  assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));

  SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
  assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
      SpvObjectPtrTy->getOperand(1).getImm());
  auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);

  Register MemSemEqualReg;
  Register MemSemUnequalReg;
  uint64_t MemSemEqual =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  uint64_t MemSemUnequal =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  if (Call->Arguments.size() >= 4) {
    assert(Call->Arguments.size() >= 5 &&
           "Need 5+ args for explicit atomic cmpxchg");
    auto MemOrdEq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
    auto MemOrdNeq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
    MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
    MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
    if (MemOrdEq == MemSemEqual)
      MemSemEqualReg = Call->Arguments[3];
    if (MemOrdNeq == MemSemUnequal)
      MemSemUnequalReg = Call->Arguments[4];
    MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass);
    MRI->setRegClass(Call->Arguments[4], &SPIRV::IDRegClass);
  }
  if (!MemSemEqualReg.isValid())
    MemSemEqualReg = buildConstantIntReg(MemSemEqual, MIRBuilder, GR);
  if (!MemSemUnequalReg.isValid())
    MemSemUnequalReg = buildConstantIntReg(MemSemUnequal, MIRBuilder, GR);

  Register ScopeReg;
  auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
  if (Call->Arguments.size() >= 6) {
    assert(Call->Arguments.size() == 6 &&
           "Extra args for explicit atomic cmpxchg");
    auto ClScope = static_cast<SPIRV::CLMemoryScope>(
        getIConstVal(Call->Arguments[5], MRI));
    Scope = getSPIRVScope(ClScope);
    if (ClScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[5];
    MRI->setRegClass(Call->Arguments[5], &SPIRV::IDRegClass);
  }
  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GR);

  Register Expected = IsCmpxchg
                          ? ExpectedArg
                          : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
                                          GR, LLT::scalar(32));
  MRI->setType(Expected, DesiredLLT);
  Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
                            : Call->ReturnRegister;
  if (!MRI->getRegClassOrNull(Tmp))
    MRI->setRegClass(Tmp, &SPIRV::IDRegClass);
  GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());

  SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
  MIRBuilder.buildInstr(Opcode)
      .addDef(Tmp)
      .addUse(GR->getSPIRVTypeID(IntTy))
      .addUse(ObjectPtr)
      .addUse(ScopeReg)
      .addUse(MemSemEqualReg)
      .addUse(MemSemUnequalReg)
      .addUse(Desired)
      .addUse(Expected);
  if (!IsCmpxchg) {
    MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
  }
  return true;
}
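
// Note on the lowering above: OpenCL atomic_compare_exchange_* passes the
// comparator through a pointer ("expected"), so the non-cmpxchg path loads
// *expected, emits the compare-exchange, stores the original value back to
// *expected, and derives the bool result from an equality comparison.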

/// Helper function for building an atomic read-modify-write instruction.
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register ScopeRegister =
      Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();

  assert(Call->Arguments.size() <= 4 &&
         "Too many args for explicit atomic RMW");
  ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
                                MIRBuilder, GR, MRI);

  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::None;
  MRI->setRegClass(PtrRegister, &SPIRV::IDRegClass);
  Register MemSemanticsReg =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);
  MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic floating-type instruction.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
                                       unsigned Opcode,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  assert(Call->Arguments.size() == 4 &&
         "Wrong number of atomic floating-type builtin arguments");

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register PtrReg = Call->Arguments[0];
  MRI->setRegClass(PtrReg, &SPIRV::IDRegClass);

  Register ScopeReg = Call->Arguments[1];
  MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);

  Register MemSemanticsReg = Call->Arguments[2];
  MRI->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);

  Register ValueReg = Call->Arguments[3];
  MRI->setRegClass(ValueReg, &SPIRV::IDRegClass);

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrReg)
      .addUse(ScopeReg)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building atomic flag instructions (e.g.
/// OpAtomicFlagTestAndSet).
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
                                unsigned Opcode, MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              IsSet ? TypeReg : Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);

  assert((Opcode != SPIRV::OpAtomicFlagClear ||
          (Semantics != SPIRV::MemorySemantics::Acquire &&
           Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
         "Invalid memory order argument!");

  Register ScopeRegister =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  ScopeRegister =
      buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);

  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (IsSet)
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);

  MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building barriers, i.e., memory/control ordering
/// operations.
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                             MachineIRBuilder &MIRBuilder,
                             SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call);

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
  unsigned MemSemantics = SPIRV::MemorySemantics::None;

  if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;

  if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;

  if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::ImageMemory;

  if (Opcode == SPIRV::OpMemoryBarrier) {
    std::memory_order MemOrder =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[1], MRI));
    MemSemantics = getSPIRVMemSemantics(MemOrder) | MemSemantics;
  } else {
    MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
  }

  Register MemSemanticsReg;
  if (MemFlags == MemSemantics) {
    MemSemanticsReg = Call->Arguments[0];
    MRI->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
  } else
    MemSemanticsReg = buildConstantIntReg(MemSemantics, MIRBuilder, GR);

  Register ScopeReg;
  SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
  SPIRV::Scope::Scope MemScope = Scope;
  if (Call->Arguments.size() >= 2) {
    assert(
        ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
         (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
        "Extra args for explicitly scoped barrier");
    Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
                                                           : Call->Arguments[1];
    SPIRV::CLMemoryScope CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
    MemScope = getSPIRVScope(CLScope);
    if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
        (Opcode == SPIRV::OpMemoryBarrier))
      Scope = MemScope;

    if (CLScope == static_cast<unsigned>(Scope)) {
      ScopeReg = Call->Arguments[1];
      MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
    }
  }

  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GR);

  auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
  if (Opcode != SPIRV::OpMemoryBarrier)
    MIB.addUse(buildConstantIntReg(MemScope, MIRBuilder, GR));
  MIB.addUse(MemSemanticsReg);
  return true;
}

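/// Helper function mapping an image dimensionality to the number of
/// coordinate components needed to address it.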
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
  switch (dim) {
  case SPIRV::Dim::DIM_1D:
  case SPIRV::Dim::DIM_Buffer:
    return 1;
  case SPIRV::Dim::DIM_2D:
  case SPIRV::Dim::DIM_Cube:
  case SPIRV::Dim::DIM_Rect:
    return 2;
  case SPIRV::Dim::DIM_3D:
    return 3;
  default:
    report_fatal_error("Cannot get num components for given Dim");
  }
}

/// Helper function for obtaining the number of size components.
static unsigned getNumSizeComponents(SPIRVType *imgType) {
  assert(imgType->getOpcode() == SPIRV::OpTypeImage);
  auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
  unsigned numComps = getNumComponentsForDim(dim);
  bool arrayed = imgType->getOperand(4).getImm() == 1;
  return arrayed ? numComps + 1 : numComps;
}

//===----------------------------------------------------------------------===//
// Implementation functions for each builtin group
//===----------------------------------------------------------------------===//
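
/// Lowers a builtin that maps to an extended-instruction-set operation by
/// emitting OpExtInst with the OpenCL std set and the instruction number from
/// the TableGen records.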
static bool generateExtInst(const SPIRV::IncomingCall *Call,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR) {
  // Lookup the extended instruction number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Number =
      SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;

  // Build extended instruction.
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Number);

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);
  return true;
}
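
/// Lowers a relational builtin (one producing a scalar or vector bool) to the
/// corresponding SPIR-V relational instruction plus a select materializing the
/// expected integer result.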
static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register CompareRegister;
  SPIRVType *RelationType;
  std::tie(CompareRegister, RelationType) =
      buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  // Build relational instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(CompareRegister)
                 .addUse(GR->getSPIRVTypeID(RelationType));

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);

  // Build select instruction.
  return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
                         Call->ReturnType, GR);
}

static bool generateGroupInst(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const SPIRV::GroupBuiltin *GroupBuiltin =
      SPIRV::lookupGroupBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register Arg0;
  if (GroupBuiltin->HasBoolArg) {
    Register ConstRegister = Call->Arguments[0];
    auto ArgInstruction = getDefInstrMaybeConstant(ConstRegister, MRI);
    (void)ArgInstruction;
    // TODO: support non-constant bool values.
    assert(ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT &&
           "Only constant bool value args are supported");
    if (GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() !=
        SPIRV::OpTypeBool)
      Arg0 = GR->buildConstantInt(getIConstVal(ConstRegister, MRI), MIRBuilder,
                                  GR->getOrCreateSPIRVBoolType(MIRBuilder));
  }

  Register GroupResultRegister = Call->ReturnRegister;
  SPIRVType *GroupResultType = Call->ReturnType;

  // TODO: maybe we need to check whether the result type is already boolean
  // and in this case do not insert select instruction.
  const bool HasBoolReturnTy =
      GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
      GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
      GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;

  if (HasBoolReturnTy)
    std::tie(GroupResultRegister, GroupResultType) =
        buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
                                                      : SPIRV::Scope::Workgroup;
  Register ScopeRegister = buildConstantIntReg(Scope, MIRBuilder, GR);

  // Build work/sub group instruction.
  auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                 .addDef(GroupResultRegister)
                 .addUse(GR->getSPIRVTypeID(GroupResultType))
                 .addUse(ScopeRegister);

  if (!GroupBuiltin->NoGroupOperation)
    MIB.addImm(GroupBuiltin->GroupOperation);
  if (Call->Arguments.size() > 0) {
    MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
    MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
    for (unsigned i = 1; i < Call->Arguments.size(); i++) {
      MIB.addUse(Call->Arguments[i]);
      MRI->setRegClass(Call->Arguments[i], &SPIRV::IDRegClass);
    }
  }

  // Build select instruction.
  if (HasBoolReturnTy)
    buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
                    Call->ReturnType, GR);
  return true;
}

static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_subgroups";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
      SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  uint32_t OpCode = IntelSubgroups->Opcode;
  if (IntelSubgroups->IsBlock) {
    // Minimal number of arguments set in TableGen records is 1.
    if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
      if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
        // TODO: add required validation from the specification:
        // "'Image' must be an object whose type is OpTypeImage with a 'Sampled'
        // operand of 0 or 2. If the 'Sampled' operand is 2, then some
        // dimensions require a capability."
        switch (OpCode) {
        case SPIRV::OpSubgroupBlockReadINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
          break;
        case SPIRV::OpSubgroupBlockWriteINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
          break;
        }
      }
    }
  }

  // TODO: opaque pointers types should be eventually resolved in such a way
  // that validation of block read is enabled with respect to the following
  // specification requirement:
  // "'Result Type' may be a scalar or vector type, and its component type must
  // be equal to the type pointed to by 'Ptr'."
  // For example, function parameter type should not be default i8 pointer, but
  // depend on the result type of the instruction where it is used as a pointer
  // argument of OpSubgroupBlockReadINTEL.

  // Build Intel subgroups instruction.
  MachineInstrBuilder MIB =
      IntelSubgroups->IsWrite
          ? MIRBuilder.buildInstr(OpCode)
          : MIRBuilder.buildInstr(OpCode)
                .addDef(Call->ReturnRegister)
                .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (size_t i = 0; i < Call->Arguments.size(); ++i) {
    MIB.addUse(Call->Arguments[i]);
    MRI->setRegClass(Call->Arguments[i], &SPIRV::IDRegClass);
  }

  return true;
}

static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(
          SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_uniform_group_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  const SPIRV::GroupUniformBuiltin *GroupUniform =
      SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register GroupResultReg = Call->ReturnRegister;
  MRI->setRegClass(GroupResultReg, &SPIRV::IDRegClass);

  // Scope
  Register ScopeReg = Call->Arguments[0];
  MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);

  // Group Operation
  Register ConstGroupOpReg = Call->Arguments[1];
  const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
  if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
    report_fatal_error(
        "expect a constant group operation for a uniform group instruction",
        false);
  const MachineOperand &ConstOperand = Const->getOperand(1);
  if (!ConstOperand.isCImm())
    report_fatal_error("uniform group instructions: group operation must be an "
                       "integer constant",
                       false);

  // Value
  Register ValueReg = Call->Arguments[2];
  MRI->setRegClass(ValueReg, &SPIRV::IDRegClass);

  auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
                 .addDef(GroupResultReg)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                 .addUse(ScopeReg);
  addNumImm(ConstOperand.getCImm()->getValue(), MIB);
  MIB.addUse(ValueReg);

  return true;
}

// These queries ask for a single size_t result for a given dimension index,
// e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
// corresponding to these values are all vec3 types, so we need to extract the
// correct index or return defaultVal (0 or 1 depending on the query). We also
// handle extending or truncating in case size_t does not match the expected
// result type's bitwidth.
//
// For a constant index >= 3 we generate:
//   %res = OpConstant %SizeT 0
//
// For other indices we generate:
//   %g = OpVariable %ptr_V3_SizeT Input
//   OpDecorate %g BuiltIn XXX
//   OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
//   OpDecorate %g Constant
//   %loadedVec = OpLoad %V3_SizeT %g
//
// Then, if the index is constant < 3, we generate:
//   %res = OpCompositeExtract %SizeT %loadedVec idx
// If the index is dynamic, we generate:
//   %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
//   %cmp = OpULessThan %bool %idx %const_3
//   %res = OpSelect %SizeT %cmp %tmp %const_0
//
// If the bitwidth of %res does not match the expected return type, we add an
// extend or truncate.
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              SPIRV::BuiltIn::BuiltIn BuiltinValue,
                              uint64_t DefaultValue) {
  Register IndexRegister = Call->Arguments[0];
  const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
  const unsigned PointerSize = GR->getPointerSize();
  const SPIRVType *PointerSizeType =
      GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);

  // Set up the final register to do truncation or extension on at the end.
  Register ToTruncate = Call->ReturnRegister;

  // If the index is constant, we can statically determine if it is in range.
  bool IsConstantIndex =
      IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;

  // If it's out of range (max dimension is 3), we can just return the constant
  // default value (0 or 1 depending on which query function).
  if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
    Register DefaultReg = Call->ReturnRegister;
    if (PointerSize != ResultWidth) {
      DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
      MRI->setRegClass(DefaultReg, &SPIRV::IDRegClass);
      GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
                                MIRBuilder.getMF());
      ToTruncate = DefaultReg;
    }
    auto NewRegister =
        GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);
    MIRBuilder.buildCopy(DefaultReg, NewRegister);
  } else { // If it could be in range, we need to load from the given builtin.
    auto Vec3Ty =
        GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder);
    Register LoadedVector =
        buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
                                 LLT::fixed_vector(3, PointerSize));
    // Set up the vreg to extract the result to (possibly a new temporary one).
    Register Extracted = Call->ReturnRegister;
    if (!IsConstantIndex || PointerSize != ResultWidth) {
      Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
      MRI->setRegClass(Extracted, &SPIRV::IDRegClass);
      GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
    }
    // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
    // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
    MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
        Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
    ExtractInst.addUse(LoadedVector).addUse(IndexRegister);

    // If the index is dynamic, we need to check if it's < 3, and then use a
    // select.
    if (!IsConstantIndex) {
      insertAssignInstr(Extracted, nullptr, PointerSizeType, GR, MIRBuilder,
                        *MRI);

      auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
      auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);

      Register CompareRegister =
          MRI->createGenericVirtualRegister(LLT::scalar(1));
      MRI->setRegClass(CompareRegister, &SPIRV::IDRegClass);
      GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());

      // Use G_ICMP to check if idxVReg < 3.
      MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
                           GR->buildConstantInt(3, MIRBuilder, IndexType));

      // Get constant for the default value (0 or 1 depending on which
      // function).
      Register DefaultRegister =
          GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);

      // Get a register for the selection result (possibly a new temporary one).
      Register SelectionResult = Call->ReturnRegister;
      if (PointerSize != ResultWidth) {
        SelectionResult =
            MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
        MRI->setRegClass(SelectionResult, &SPIRV::IDRegClass);
        GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
                                  MIRBuilder.getMF());
      }
      // Create the final G_SELECT to return the extracted value or the default.
      MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
                             DefaultRegister);
      ToTruncate = SelectionResult;
    } else {
      ToTruncate = Extracted;
    }
  }
  // Alter the result's bitwidth if it does not match the SizeT value extracted.
  if (PointerSize != ResultWidth)
    MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
  return true;
}
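
/// Lowers builtins that read a SPIR-V builtin variable; vec3 workgroup-style
/// queries are routed through genWorkgroupQuery above.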
static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  // Lookup the builtin variable record.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  SPIRV::BuiltIn::BuiltIn Value =
      SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;

  if (Value == SPIRV::BuiltIn::GlobalInvocationId)
    return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);

  // Build a load instruction for the builtin variable.
  unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
  LLT LLType;
  if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
    LLType =
        LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
  else
    LLType = LLT::scalar(BitWidth);

  return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
                                  LLType, Call->ReturnRegister);
}

static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  switch (Opcode) {
  case SPIRV::OpStore:
    return buildAtomicInitInst(Call, MIRBuilder);
  case SPIRV::OpAtomicLoad:
    return buildAtomicLoadInst(Call, MIRBuilder, GR);
  case SPIRV::OpAtomicStore:
    return buildAtomicStoreInst(Call, MIRBuilder, GR);
  case SPIRV::OpAtomicCompareExchange:
  case SPIRV::OpAtomicCompareExchangeWeak:
    return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
                                          GR);
  case SPIRV::OpAtomicIAdd:
  case SPIRV::OpAtomicISub:
  case SPIRV::OpAtomicOr:
  case SPIRV::OpAtomicXor:
  case SPIRV::OpAtomicAnd:
  case SPIRV::OpAtomicExchange:
    return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
  case SPIRV::OpMemoryBarrier:
    return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
  case SPIRV::OpAtomicFlagTestAndSet:
  case SPIRV::OpAtomicFlagClear:
    return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
  default:
    return false;
  }
}

static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;

  switch (Opcode) {
  case SPIRV::OpAtomicFAddEXT:
  case SPIRV::OpAtomicFMinEXT:
  case SPIRV::OpAtomicFMaxEXT:
    return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
  default:
    return false;
  }
}

static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
}

static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call,
                                  MachineIRBuilder &MIRBuilder,
                                  SPIRVGlobalRegistry *GR) {
  unsigned Opcode = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode();
  bool IsVec = Opcode == SPIRV::OpTypeVector;
  // Use OpDot only in case of vector args and OpFMul in case of scalar args.
  MIRBuilder.buildInstr(IsVec ? SPIRV::OpDot : SPIRV::OpFMulS)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
}
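
/// Lowers a wave builtin by loading the corresponding SPIR-V builtin variable;
/// unlike the variables above, it is emitted as non-constant and without
/// Import linkage.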
static bool generateWaveInst(const SPIRV::IncomingCall *Call,
                             MachineIRBuilder &MIRBuilder,
                             SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  SPIRV::BuiltIn::BuiltIn Value =
      SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;

  // For now, we only support a single Wave intrinsic with a single return
  // type.
  assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
  LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));

  return buildBuiltinVariableLoad(
      MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
      /* isConst= */ false, /* hasLinkageTy= */ false);
}

static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  // Lookup the builtin record.
  SPIRV::BuiltIn::BuiltIn Value =
      SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
  uint64_t IsDefault = (Value == SPIRV::BuiltIn::GlobalSize ||
                        Value == SPIRV::BuiltIn::WorkgroupSize ||
                        Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
  return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefault ? 1 : 0);
}

static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  // Lookup the image size query component number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Component =
      SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
  // Query result may either be a vector or a scalar. If return type is not a
  // vector, expect only a single size component. Otherwise get the number of
  // expected components.
  SPIRVType *RetTy = Call->ReturnType;
  unsigned NumExpectedRetComponents = RetTy->getOpcode() == SPIRV::OpTypeVector
                                          ? RetTy->getOperand(2).getImm()
                                          : 1;
  // Get the actual number of query result/size components.
  SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
  unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
  Register QueryResult = Call->ReturnRegister;
  SPIRVType *QueryResultType = Call->ReturnType;
  if (NumExpectedRetComponents != NumActualRetComponents) {
    QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
        LLT::fixed_vector(NumActualRetComponents, 32));
    MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::IDRegClass);
    SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
    QueryResultType = GR->getOrCreateSPIRVVectorType(
        IntTy, NumActualRetComponents, MIRBuilder);
    GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
  }
  bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
  unsigned Opcode =
      IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(QueryResult)
                 .addUse(GR->getSPIRVTypeID(QueryResultType))
                 .addUse(Call->Arguments[0]);
  if (!IsDimBuf)
    MIB.addUse(buildConstantIntReg(0, MIRBuilder, GR)); // Lod id.
  if (NumExpectedRetComponents == NumActualRetComponents)
    return true;
  if (NumExpectedRetComponents == 1) {
    // Only 1 component is expected, build OpCompositeExtract instruction.
    unsigned ExtractedComposite =
        Component == 3 ? NumActualRetComponents - 1 : Component;
    assert(ExtractedComposite < NumActualRetComponents &&
           "Invalid composite index!");
    MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(QueryResult)
        .addImm(ExtractedComposite);
  } else {
    // More than 1 component is expected, fill a new vector.
    auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                   .addUse(QueryResult)
                   .addUse(QueryResult);
    for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
      MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
  }
  return true;
}

static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
         "Image samples query result must be of int type!");

  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register Image = Call->Arguments[0];
  MIRBuilder.getMRI()->setRegClass(Image, &SPIRV::IDRegClass);
  SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
      GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
  (void)ImageDimensionality;

  switch (Opcode) {
  case SPIRV::OpImageQuerySamples:
    assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
           "Image must be of 2D dimensionality");
    break;
  case SPIRV::OpImageQueryLevels:
    assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
            ImageDimensionality == SPIRV::Dim::DIM_2D ||
            ImageDimensionality == SPIRV::Dim::DIM_3D ||
            ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
           "Image must be of 1D/2D/3D/Cube dimensionality");
    break;
  }

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Image);
  return true;
}

// TODO: Move to TableGen.
static SPIRV::SamplerAddressingMode::SamplerAddressingMode
getSamplerAddressingModeFromBitmask(unsigned Bitmask) {
  switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
  case SPIRV::CLK_ADDRESS_CLAMP:
    return SPIRV::SamplerAddressingMode::Clamp;
  case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
    return SPIRV::SamplerAddressingMode::ClampToEdge;
  case SPIRV::CLK_ADDRESS_REPEAT:
    return SPIRV::SamplerAddressingMode::Repeat;
  case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
    return SPIRV::SamplerAddressingMode::RepeatMirrored;
  case SPIRV::CLK_ADDRESS_NONE:
    return SPIRV::SamplerAddressingMode::None;
  default:
    report_fatal_error("Unknown CL address mode");
  }
}
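
/// Helper function extracting the "normalized coordinates" sampler parameter
/// from an OpenCL sampler bitmask.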
static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
  return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
}
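
/// Helper function mapping OpenCL sampler filter bits to the SPIR-V filter
/// mode; Nearest is the default when no filter bit is set.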
static SPIRV::SamplerFilterMode::SamplerFilterMode
getSamplerFilterModeFromBitmask(unsigned Bitmask) {
  if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
    return SPIRV::SamplerFilterMode::Linear;
  if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
    return SPIRV::SamplerFilterMode::Nearest;
  return SPIRV::SamplerFilterMode::Nearest;
}
1508
1509static bool generateReadImageInst(const StringRef DemangledCall,
1510 const SPIRV::IncomingCall *Call,
1511 MachineIRBuilder &MIRBuilder,
1512 SPIRVGlobalRegistry *GR) {
1513 Register Image = Call->Arguments[0];
1514 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1515 MRI->setRegClass(Image, &SPIRV::IDRegClass);
1516 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1517 bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
1518 bool HasMsaa = DemangledCall.contains_insensitive("msaa");
1519 if (HasOclSampler || HasMsaa)
1520 MRI->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
1521 if (HasOclSampler) {
1522 Register Sampler = Call->Arguments[1];
1523
1524 if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
1525 getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
1526 uint64_t SamplerMask = getIConstVal(Sampler, MRI);
1527 Sampler = GR->buildConstantSampler(
1529 getSamplerParamFromBitmask(SamplerMask),
1530 getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder,
1531 GR->getSPIRVTypeForVReg(Sampler));
1532 }
1533 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
1534 SPIRVType *SampledImageType =
1535 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
1536 Register SampledImage = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1537
1538 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
1539 .addDef(SampledImage)
1540 .addUse(GR->getSPIRVTypeID(SampledImageType))
1541 .addUse(Image)
1542 .addUse(Sampler);
1543
1545 MIRBuilder);
1546 SPIRVType *TempType = Call->ReturnType;
1547 bool NeedsExtraction = false;
1548 if (TempType->getOpcode() != SPIRV::OpTypeVector) {
1549 TempType =
1550 GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder);
1551 NeedsExtraction = true;
1552 }
1553 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(TempType));
1554 Register TempRegister = MRI->createGenericVirtualRegister(LLType);
1555 MRI->setRegClass(TempRegister, &SPIRV::IDRegClass);
1556 GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
1557
1558 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1559 .addDef(NeedsExtraction ? TempRegister : Call->ReturnRegister)
1560 .addUse(GR->getSPIRVTypeID(TempType))
1561 .addUse(SampledImage)
1562 .addUse(Call->Arguments[2]) // Coordinate.
1563 .addImm(SPIRV::ImageOperand::Lod)
1564 .addUse(Lod);
1565
1566 if (NeedsExtraction)
1567 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1568 .addDef(Call->ReturnRegister)
1569 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1570 .addUse(TempRegister)
1571 .addImm(0);
1572 } else if (HasMsaa) {
1573 MIRBuilder.buildInstr(SPIRV::OpImageRead)
1574 .addDef(Call->ReturnRegister)
1575 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1576 .addUse(Image)
1577 .addUse(Call->Arguments[1]) // Coordinate.
1578 .addImm(SPIRV::ImageOperand::Sample)
1579 .addUse(Call->Arguments[2]);
1580 } else {
1581 MIRBuilder.buildInstr(SPIRV::OpImageRead)
1582 .addDef(Call->ReturnRegister)
1583 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1584 .addUse(Image)
1585 .addUse(Call->Arguments[1]); // Coordinate.
1586 }
1587 return true;
1588}
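// Illustrative mapping (a sketch of the paths above): a demangled call such
// as read_imagef(image, sampler, coord) takes the "ocl_sampler" path and
// becomes OpSampledImage + OpImageSampleExplicitLod with a 0.0 Lod, plus an
// OpCompositeExtract when a scalar result must be pulled out of the forced
// 4-element vector; an "msaa" call becomes OpImageRead with the Sample image
// operand; every other read becomes a plain OpImageRead.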
1589
1590static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
1591                                   MachineIRBuilder &MIRBuilder,
1592 SPIRVGlobalRegistry *GR) {
1593 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1594 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1595 MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
1596 MIRBuilder.buildInstr(SPIRV::OpImageWrite)
1597 .addUse(Call->Arguments[0]) // Image.
1598 .addUse(Call->Arguments[1]) // Coordinate.
1599 .addUse(Call->Arguments[2]); // Texel.
1600 return true;
1601}
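// E.g. an OpenCL write_imagef(img, coord, texel) call maps one-to-one onto
// the three operands above; OpImageWrite defines no result register.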
1602
1603static bool generateSampleImageInst(const StringRef DemangledCall,
1604 const SPIRV::IncomingCall *Call,
1605 MachineIRBuilder &MIRBuilder,
1606 SPIRVGlobalRegistry *GR) {
1607 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1608 if (Call->Builtin->Name.contains_insensitive(
1609 "__translate_sampler_initializer")) {
1610 // Build sampler literal.
1611 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
1612 Register Sampler = GR->buildConstantSampler(
1613 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
1614        getSamplerParamFromBitmask(Bitmask),
1615        getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder, Call->ReturnType);
1616 return Sampler.isValid();
1617 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
1618 // Create OpSampledImage.
1619 Register Image = Call->Arguments[0];
1620 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
1621 SPIRVType *SampledImageType =
1622 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
1623 Register SampledImage =
1624 Call->ReturnRegister.isValid()
1625 ? Call->ReturnRegister
1626 : MRI->createVirtualRegister(&SPIRV::IDRegClass);
1627 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
1628 .addDef(SampledImage)
1629 .addUse(GR->getSPIRVTypeID(SampledImageType))
1630 .addUse(Image)
1631 .addUse(Call->Arguments[1]); // Sampler.
1632 return true;
1633 } else if (Call->Builtin->Name.contains_insensitive(
1634 "__spirv_ImageSampleExplicitLod")) {
1635 // Sample an image using an explicit level of detail.
1636 std::string ReturnType = DemangledCall.str();
1637 if (DemangledCall.contains("_R")) {
1638 ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
1639 ReturnType = ReturnType.substr(0, ReturnType.find('('));
1640 }
1641 SPIRVType *Type =
1642 Call->ReturnType
1643 ? Call->ReturnType
1644 : GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder);
1645 if (!Type) {
1646 std::string DiagMsg =
1647 "Unable to recognize SPIRV type name: " + ReturnType;
1648 report_fatal_error(DiagMsg.c_str());
1649 }
1650 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1651 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1652 MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass);
1653
1654 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1655 .addDef(Call->ReturnRegister)
1656        .addUse(GR->getSPIRVTypeID(Type))
1657        .addUse(Call->Arguments[0]) // Image.
1658 .addUse(Call->Arguments[1]) // Coordinate.
1659 .addImm(SPIRV::ImageOperand::Lod)
1660 .addUse(Call->Arguments[3]);
1661 return true;
1662 }
1663 return false;
1664}
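// Note on the "_R" branch above (illustrative): SPIR-V friendly builtins can
// encode the sampled result type in the demangled name, e.g.
//   __spirv_ImageSampleExplicitLod_Rfloat4(sampledImg, coord, lod)
// from which the substring between "_R" and '(' ("float4") is parsed into a
// SPIRVType whenever no return type was deduced from the call itself.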
1665
1666static bool generateSelectInst(const SPIRV::IncomingCall *Call,
1667                               MachineIRBuilder &MIRBuilder) {
1668 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
1669 Call->Arguments[1], Call->Arguments[2]);
1670 return true;
1671}
1672
1673static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
1674                                     MachineIRBuilder &MIRBuilder,
1675 SPIRVGlobalRegistry *GR) {
1676 // Lookup the instruction opcode in the TableGen records.
1677 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1678 unsigned Opcode =
1679 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1680 const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1681
1682 switch (Opcode) {
1683 case SPIRV::OpSpecConstant: {
1684 // Build the SpecID decoration.
1685 unsigned SpecId =
1686 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
1687 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
1688 {SpecId});
1689 // Determine the constant MI.
1690 Register ConstRegister = Call->Arguments[1];
1691 const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
1692 assert(Const &&
1693 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
1694 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
1695 "Argument should be either an int or floating-point constant");
1696    // Determine the opcode and build the OpSpec MI.
1697 const MachineOperand &ConstOperand = Const->getOperand(1);
1698 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
1699 assert(ConstOperand.isCImm() && "Int constant operand is expected");
1700 Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
1701 ? SPIRV::OpSpecConstantTrue
1702 : SPIRV::OpSpecConstantFalse;
1703 }
1704 auto MIB = MIRBuilder.buildInstr(Opcode)
1705 .addDef(Call->ReturnRegister)
1706 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1707
1708 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
1709 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
1710 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
1711 else
1712 addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
1713 }
1714 return true;
1715 }
1716 case SPIRV::OpSpecConstantComposite: {
1717 auto MIB = MIRBuilder.buildInstr(Opcode)
1718 .addDef(Call->ReturnRegister)
1719 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1720 for (unsigned i = 0; i < Call->Arguments.size(); i++)
1721 MIB.addUse(Call->Arguments[i]);
1722 return true;
1723 }
1724 default:
1725 return false;
1726 }
1727}
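// Example (a sketch): __spirv_SpecConstant(5, 3.0f) decorates the result
// with SpecId 5 and emits OpSpecConstant carrying the float's bit pattern
// as its literal, while a boolean return type instead selects
// OpSpecConstantTrue or OpSpecConstantFalse from the default value.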
1728
1729static bool buildNDRange(const SPIRV::IncomingCall *Call,
1730 MachineIRBuilder &MIRBuilder,
1731 SPIRVGlobalRegistry *GR) {
1732 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1733 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1734 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1735 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
1736 PtrType->getOperand(2).isReg());
1737 Register TypeReg = PtrType->getOperand(2).getReg();
1738  SPIRVType *StructType = GR->getSPIRVTypeForVReg(TypeReg);
1739  MachineFunction &MF = MIRBuilder.getMF();
1740 Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1741 GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
1742  // Skip the first arg, it's the destination pointer. OpBuildNDRange takes
1743  // three other arguments, so pass a zero constant when one is absent.
1744 unsigned NumArgs = Call->Arguments.size();
1745 assert(NumArgs >= 2);
1746 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
1747 MRI->setRegClass(GlobalWorkSize, &SPIRV::IDRegClass);
1748 Register LocalWorkSize =
1749 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
1750 if (LocalWorkSize.isValid())
1751 MRI->setRegClass(LocalWorkSize, &SPIRV::IDRegClass);
1752 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
1753 if (GlobalWorkOffset.isValid())
1754 MRI->setRegClass(GlobalWorkOffset, &SPIRV::IDRegClass);
1755 if (NumArgs < 4) {
1756 Register Const;
1757 SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
1758 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
1759 MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
1760 assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
1761 DefInstr->getOperand(3).isReg());
1762 Register GWSPtr = DefInstr->getOperand(3).getReg();
1763 if (!MRI->getRegClassOrNull(GWSPtr))
1764 MRI->setRegClass(GWSPtr, &SPIRV::IDRegClass);
1765 // TODO: Maybe simplify generation of the type of the fields.
1766 unsigned Size = Call->Builtin->Name.equals("ndrange_3D") ? 3 : 2;
1767 unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
1768      Type *BaseTy = IntegerType::get(MF.getFunction().getContext(), BitWidth);
1769      Type *FieldTy = ArrayType::get(BaseTy, Size);
1770 SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(FieldTy, MIRBuilder);
1771 GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1772 GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
1773 MIRBuilder.buildInstr(SPIRV::OpLoad)
1774 .addDef(GlobalWorkSize)
1775 .addUse(GR->getSPIRVTypeID(SpvFieldTy))
1776 .addUse(GWSPtr);
1777 Const = GR->getOrCreateConsIntArray(0, MIRBuilder, SpvFieldTy);
1778 } else {
1779 Const = GR->buildConstantInt(0, MIRBuilder, SpvTy);
1780 }
1781 if (!LocalWorkSize.isValid())
1782 LocalWorkSize = Const;
1783 if (!GlobalWorkOffset.isValid())
1784 GlobalWorkOffset = Const;
1785 }
1786 assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
1787 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
1788 .addDef(TmpReg)
1789 .addUse(TypeReg)
1790 .addUse(GlobalWorkSize)
1791 .addUse(LocalWorkSize)
1792 .addUse(GlobalWorkOffset);
1793 return MIRBuilder.buildInstr(SPIRV::OpStore)
1794 .addUse(Call->Arguments[0])
1795 .addUse(TmpReg);
1796}
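// Example (a sketch): ndrange_2D(gws) arrives here with NumArgs == 2, so
// both LocalWorkSize and GlobalWorkOffset are filled with the zero constant
// built above, and the result is an OpBuildNDRange whose value is stored
// through the destination pointer in the first argument.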
1797
1798static MachineInstr *getBlockStructInstr(Register ParamReg,
1799                                         MachineRegisterInfo *MRI) {
1800  // We expect the following sequence of instructions:
1801 // %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
1802 // or = G_GLOBAL_VALUE @block_literal_global
1803 // %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
1804 // %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
1805 MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
1806 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
1807 MI->getOperand(1).isReg());
1808 Register BitcastReg = MI->getOperand(1).getReg();
1809 MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
1810 assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
1811 BitcastMI->getOperand(2).isReg());
1812 Register ValueReg = BitcastMI->getOperand(2).getReg();
1813 MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
1814 return ValueMI;
1815}
1816
1817// Return an integer constant corresponding to the given register and
1818// defined in spv_track_constant.
1819// TODO: maybe unify with prelegalizer pass.
1820static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI) {
1821  MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
1822 assert(isSpvIntrinsic(*DefMI, Intrinsic::spv_track_constant) &&
1823 DefMI->getOperand(2).isReg());
1824 MachineInstr *DefMI2 = MRI->getUniqueVRegDef(DefMI->getOperand(2).getReg());
1825 assert(DefMI2->getOpcode() == TargetOpcode::G_CONSTANT &&
1826 DefMI2->getOperand(1).isCImm());
1827 return DefMI2->getOperand(1).getCImm()->getValue().getZExtValue();
1828}
1829
1830// Return type of the instruction result from spv_assign_type intrinsic.
1831// TODO: maybe unify with prelegalizer pass.
1832static Type *getMachineInstrType(MachineInstr *MI) {
1833  MachineInstr *NextMI = MI->getNextNode();
1834 if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
1835 NextMI = NextMI->getNextNode();
1836 Register ValueReg = MI->getOperand(0).getReg();
1837 if (!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) ||
1838 NextMI->getOperand(1).getReg() != ValueReg)
1839 return nullptr;
1840 Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
1841 assert(Ty && "Type is expected");
1842 return Ty;
1843}
1844
1845static const Type *getBlockStructType(Register ParamReg,
1846                                      MachineRegisterInfo *MRI) {
1847  // In principle, this information should be passed to us from Clang via
1848 // an elementtype attribute. However, said attribute requires that
1849  // the function call be an intrinsic, which it is not. Instead, we rely on being
1850 // able to trace this to the declaration of a variable: OpenCL C specification
1851 // section 6.12.5 should guarantee that we can do this.
1852 MachineInstr *MI = getBlockStructInstr(ParamReg, MRI);
1853 if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
1854 return MI->getOperand(1).getGlobal()->getType();
1855 assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
1856 "Blocks in OpenCL C must be traceable to allocation site");
1857 return getMachineInstrType(MI);
1858}
1859
1860// TODO: maybe move to the global register.
1861static SPIRVType *
1862getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder,
1863                                   SPIRVGlobalRegistry *GR) {
1864 LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
1865 Type *OpaqueType = StructType::getTypeByName(Context, "spirv.DeviceEvent");
1866 if (!OpaqueType)
1867 OpaqueType = StructType::getTypeByName(Context, "opencl.clk_event_t");
1868 if (!OpaqueType)
1869 OpaqueType = StructType::create(Context, "spirv.DeviceEvent");
1870 unsigned SC0 = storageClassToAddressSpace(SPIRV::StorageClass::Function);
1871 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
1872 Type *PtrType = PointerType::get(PointerType::get(OpaqueType, SC0), SC1);
1873 return GR->getOrCreateSPIRVType(PtrType, MIRBuilder);
1874}
1875
1876static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
1877                               MachineIRBuilder &MIRBuilder,
1878 SPIRVGlobalRegistry *GR) {
1879 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1880 const DataLayout &DL = MIRBuilder.getDataLayout();
1881 bool IsSpirvOp = Call->isSpirvOp();
1882 bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
1883 const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
1884
1885 // Make vararg instructions before OpEnqueueKernel.
1886 // Local sizes arguments: Sizes of block invoke arguments. Clang generates
1887 // local size operands as an array, so we need to unpack them.
1888 SmallVector<Register, 16> LocalSizes;
1889 if (Call->Builtin->Name.find("_varargs") != StringRef::npos || IsSpirvOp) {
1890 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
1891 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
1892 MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
1893 assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
1894 GepMI->getOperand(3).isReg());
1895 Register ArrayReg = GepMI->getOperand(3).getReg();
1896 MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
1897 const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
1898 assert(LocalSizeTy && "Local size type is expected");
1899 const uint64_t LocalSizeNum =
1900 cast<ArrayType>(LocalSizeTy)->getNumElements();
1901 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
1902 const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
1903 const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
1904 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
1905 for (unsigned I = 0; I < LocalSizeNum; ++I) {
1906 Register Reg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1907 MRI->setType(Reg, LLType);
1908 GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
1909 auto GEPInst = MIRBuilder.buildIntrinsic(
1910 Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
1911 GEPInst
1912          .addImm(GepMI->getOperand(2).getImm()) // In bounds.
1913 .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
1914 .addUse(buildConstantIntReg(0, MIRBuilder, GR)) // Indices.
1915 .addUse(buildConstantIntReg(I, MIRBuilder, GR));
1916 LocalSizes.push_back(Reg);
1917 }
1918 }
1919
1920 // SPIRV OpEnqueueKernel instruction has 10+ arguments.
1921 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
1922 .addDef(Call->ReturnRegister)
1923                 .addUse(GR->getSPIRVTypeID(Int32Ty));
1924
1925 // Copy all arguments before block invoke function pointer.
1926 const unsigned BlockFIdx = HasEvents ? 6 : 3;
1927 for (unsigned i = 0; i < BlockFIdx; i++)
1928 MIB.addUse(Call->Arguments[i]);
1929
1930 // If there are no event arguments in the original call, add dummy ones.
1931 if (!HasEvents) {
1932 MIB.addUse(buildConstantIntReg(0, MIRBuilder, GR)); // Dummy num events.
1933 Register NullPtr = GR->getOrCreateConstNullPtr(
1934 MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
1935 MIB.addUse(NullPtr); // Dummy wait events.
1936 MIB.addUse(NullPtr); // Dummy ret event.
1937 }
1938
1939 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
1940 assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
1941 // Invoke: Pointer to invoke function.
1942 MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
1943
1944 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
1945 // Param: Pointer to block literal.
1946 MIB.addUse(BlockLiteralReg);
1947
1948 Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
1949 // TODO: these numbers should be obtained from block literal structure.
1950 // Param Size: Size of block literal structure.
1951 MIB.addUse(buildConstantIntReg(DL.getTypeStoreSize(PType), MIRBuilder, GR));
1952  // Param Alignment: Alignment of block literal structure.
1953 MIB.addUse(
1954 buildConstantIntReg(DL.getPrefTypeAlign(PType).value(), MIRBuilder, GR));
1955
1956 for (unsigned i = 0; i < LocalSizes.size(); i++)
1957 MIB.addUse(LocalSizes[i]);
1958 return true;
1959}
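// Illustrative operand layout (a sketch): for enqueue_kernel(queue, flags,
// ndrange, block) with no event arguments, the OpEnqueueKernel built above
// receives queue/flags/ndrange, a zero event count and two null event
// pointers, the block invoke function address, the block literal pointer,
// and the literal's size and alignment constants.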
1960
1961static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
1962                                MachineIRBuilder &MIRBuilder,
1963 SPIRVGlobalRegistry *GR) {
1964 // Lookup the instruction opcode in the TableGen records.
1965 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1966 unsigned Opcode =
1967 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1968
1969 switch (Opcode) {
1970 case SPIRV::OpRetainEvent:
1971 case SPIRV::OpReleaseEvent:
1972 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1973 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
1974 case SPIRV::OpCreateUserEvent:
1975 case SPIRV::OpGetDefaultQueue:
1976 return MIRBuilder.buildInstr(Opcode)
1977 .addDef(Call->ReturnRegister)
1978 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1979 case SPIRV::OpIsValidEvent:
1980 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1981 return MIRBuilder.buildInstr(Opcode)
1982 .addDef(Call->ReturnRegister)
1983 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1984 .addUse(Call->Arguments[0]);
1985 case SPIRV::OpSetUserEventStatus:
1986 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1987 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1988 return MIRBuilder.buildInstr(Opcode)
1989 .addUse(Call->Arguments[0])
1990 .addUse(Call->Arguments[1]);
1991 case SPIRV::OpCaptureEventProfilingInfo:
1992 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1993 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1994 MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
1995 return MIRBuilder.buildInstr(Opcode)
1996 .addUse(Call->Arguments[0])
1997 .addUse(Call->Arguments[1])
1998 .addUse(Call->Arguments[2]);
1999 case SPIRV::OpBuildNDRange:
2000 return buildNDRange(Call, MIRBuilder, GR);
2001 case SPIRV::OpEnqueueKernel:
2002 return buildEnqueueKernel(Call, MIRBuilder, GR);
2003 default:
2004 return false;
2005 }
2006}
2007
2008static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
2009                              MachineIRBuilder &MIRBuilder,
2010 SPIRVGlobalRegistry *GR) {
2011 // Lookup the instruction opcode in the TableGen records.
2012 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2013 unsigned Opcode =
2014 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2015
2016 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2017 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2018 if (Call->isSpirvOp())
2019 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2020 IsSet ? TypeReg : Register(0));
2021
2022 auto Scope = buildConstantIntReg(SPIRV::Scope::Workgroup, MIRBuilder, GR);
2023
2024 switch (Opcode) {
2025 case SPIRV::OpGroupAsyncCopy:
2026 return MIRBuilder.buildInstr(Opcode)
2027 .addDef(Call->ReturnRegister)
2028 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2029 .addUse(Scope)
2030 .addUse(Call->Arguments[0])
2031 .addUse(Call->Arguments[1])
2032 .addUse(Call->Arguments[2])
2033 .addUse(buildConstantIntReg(1, MIRBuilder, GR))
2034 .addUse(Call->Arguments[3]);
2035 case SPIRV::OpGroupWaitEvents:
2036 return MIRBuilder.buildInstr(Opcode)
2037 .addUse(Scope)
2038 .addUse(Call->Arguments[0])
2039 .addUse(Call->Arguments[1]);
2040 default:
2041 return false;
2042 }
2043}
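// E.g. async_work_group_copy(dst, src, numElements, event) becomes
// OpGroupAsyncCopy at Workgroup scope with a constant stride of 1 inserted
// before the event operand, as built above.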
2044
2045static bool generateConvertInst(const StringRef DemangledCall,
2046 const SPIRV::IncomingCall *Call,
2047 MachineIRBuilder &MIRBuilder,
2048 SPIRVGlobalRegistry *GR) {
2049 // Lookup the conversion builtin in the TableGen records.
2050 const SPIRV::ConvertBuiltin *Builtin =
2051 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2052
2053 if (Builtin->IsSaturated)
2054 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2055 SPIRV::Decoration::SaturatedConversion, {});
2056 if (Builtin->IsRounded)
2057 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2058 SPIRV::Decoration::FPRoundingMode,
2059 {(unsigned)Builtin->RoundingMode});
2060
2061 std::string NeedExtMsg; // no errors if empty
2062 bool IsRightComponentsNumber = true; // check if input/output accepts vectors
2063 unsigned Opcode = SPIRV::OpNop;
2064 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2065 // Int -> ...
2066 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2067 // Int -> Int
2068 if (Builtin->IsSaturated)
2069 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2070 : SPIRV::OpSatConvertSToU;
2071 else
2072 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2073 : SPIRV::OpSConvert;
2074 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2075 SPIRV::OpTypeFloat)) {
2076 // Int -> Float
2077 if (Builtin->IsBfloat16) {
2078 const auto *ST = static_cast<const SPIRVSubtarget *>(
2079 &MIRBuilder.getMF().getSubtarget());
2080 if (!ST->canUseExtension(
2081 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2082 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2083 IsRightComponentsNumber =
2084 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2085 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2086 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2087 } else {
2088 bool IsSourceSigned =
2089 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
2090 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2091 }
2092 }
2093 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2094 SPIRV::OpTypeFloat)) {
2095 // Float -> ...
2096 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2097 // Float -> Int
2098 if (Builtin->IsBfloat16) {
2099 const auto *ST = static_cast<const SPIRVSubtarget *>(
2100 &MIRBuilder.getMF().getSubtarget());
2101 if (!ST->canUseExtension(
2102 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2103 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2104 IsRightComponentsNumber =
2105 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2106 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2107 Opcode = SPIRV::OpConvertFToBF16INTEL;
2108 } else {
2109 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2110 : SPIRV::OpConvertFToU;
2111 }
2112 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2113 SPIRV::OpTypeFloat)) {
2114 // Float -> Float
2115 Opcode = SPIRV::OpFConvert;
2116 }
2117 }
2118
2119 if (!NeedExtMsg.empty()) {
2120 std::string DiagMsg = std::string(Builtin->Name) +
2121 ": the builtin requires the following SPIR-V "
2122 "extension: " +
2123 NeedExtMsg;
2124 report_fatal_error(DiagMsg.c_str(), false);
2125 }
2126 if (!IsRightComponentsNumber) {
2127 std::string DiagMsg =
2128 std::string(Builtin->Name) +
2129 ": result and argument must have the same number of components";
2130 report_fatal_error(DiagMsg.c_str(), false);
2131 }
2132 assert(Opcode != SPIRV::OpNop &&
2133 "Conversion between the types not implemented!");
2134
2135 MIRBuilder.buildInstr(Opcode)
2136 .addDef(Call->ReturnRegister)
2137 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2138 .addUse(Call->Arguments[0]);
2139 return true;
2140}
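// Example mappings (a sketch): convert_int_sat(float) selects OpConvertFToS
// and carries the SaturatedConversion decoration added above, while
// convert_float_rtn(int) selects OpConvertSToF with an FPRoundingMode
// decoration; the bfloat16 cases additionally require the
// SPV_INTEL_bfloat16_conversion extension.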
2141
2142static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
2143                                        MachineIRBuilder &MIRBuilder,
2144 SPIRVGlobalRegistry *GR) {
2145 // Lookup the vector load/store builtin in the TableGen records.
2146 const SPIRV::VectorLoadStoreBuiltin *Builtin =
2147 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2148 Call->Builtin->Set);
2149 // Build extended instruction.
2150 auto MIB =
2151 MIRBuilder.buildInstr(SPIRV::OpExtInst)
2152 .addDef(Call->ReturnRegister)
2153 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2154 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2155 .addImm(Builtin->Number);
2156 for (auto Argument : Call->Arguments)
2157 MIB.addUse(Argument);
2158 if (Builtin->Name.contains("load") && Builtin->ElementCount > 1)
2159 MIB.addImm(Builtin->ElementCount);
2160
2161  // The rounding mode should be passed as the last argument in the MI for
2162  // builtins like "vstorea_halfn_r".
2163 if (Builtin->IsRounded)
2164 MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
2165 return true;
2166}
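// Example (a sketch): vload4 appends its element count (4) as a trailing
// immediate after the call arguments, whereas a rounded store such as
// vstorea_half4_rtp ends with the rounding-mode immediate instead.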
2167
2168static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
2169                                  MachineIRBuilder &MIRBuilder,
2170 SPIRVGlobalRegistry *GR) {
2171 // Lookup the instruction opcode in the TableGen records.
2172 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2173 unsigned Opcode =
2174 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2175 bool IsLoad = Opcode == SPIRV::OpLoad;
2176 // Build the instruction.
2177 auto MIB = MIRBuilder.buildInstr(Opcode);
2178 if (IsLoad) {
2179 MIB.addDef(Call->ReturnRegister);
2180 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
2181 }
2182 // Add a pointer to the value to load/store.
2183 MIB.addUse(Call->Arguments[0]);
2184 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2185 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2186 // Add a value to store.
2187 if (!IsLoad) {
2188 MIB.addUse(Call->Arguments[1]);
2189 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
2190 }
2191 // Add optional memory attributes and an alignment.
2192 unsigned NumArgs = Call->Arguments.size();
2193 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3) {
2194 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
2195 MRI->setRegClass(Call->Arguments[IsLoad ? 1 : 2], &SPIRV::IDRegClass);
2196 }
2197 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4) {
2198 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
2199 MRI->setRegClass(Call->Arguments[IsLoad ? 2 : 3], &SPIRV::IDRegClass);
2200 }
2201 return true;
2202}
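// Example (a sketch): a two-argument store builtin lowers to a bare
// "OpStore ptr, value", while extra arguments on either form are folded in
// as the memory-operand and alignment immediates handled above.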
2203
2204/// Lowers a builtin function call using the provided \p DemangledCall skeleton
2205/// and external instruction \p Set.
2206namespace SPIRV {
2207std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
2208 SPIRV::InstructionSet::InstructionSet Set,
2209 MachineIRBuilder &MIRBuilder,
2210 const Register OrigRet, const Type *OrigRetTy,
2211 const SmallVectorImpl<Register> &Args,
2212 SPIRVGlobalRegistry *GR) {
2213 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
2214
2215 // SPIR-V type and return register.
2216 Register ReturnRegister = OrigRet;
2217 SPIRVType *ReturnType = nullptr;
2218 if (OrigRetTy && !OrigRetTy->isVoidTy()) {
2219 ReturnType = GR->assignTypeToVReg(OrigRetTy, OrigRet, MIRBuilder);
2220 if (!MIRBuilder.getMRI()->getRegClassOrNull(ReturnRegister))
2221 MIRBuilder.getMRI()->setRegClass(ReturnRegister, &SPIRV::IDRegClass);
2222 } else if (OrigRetTy && OrigRetTy->isVoidTy()) {
2223 ReturnRegister = MIRBuilder.getMRI()->createVirtualRegister(&IDRegClass);
2224 MIRBuilder.getMRI()->setType(ReturnRegister, LLT::scalar(32));
2225 ReturnType = GR->assignTypeToVReg(OrigRetTy, ReturnRegister, MIRBuilder);
2226 }
2227
2228 // Lookup the builtin in the TableGen records.
2229 std::unique_ptr<const IncomingCall> Call =
2230 lookupBuiltin(DemangledCall, Set, ReturnRegister, ReturnType, Args);
2231
2232 if (!Call) {
2233 LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
2234 return std::nullopt;
2235 }
2236
2237  // TODO: check if the provided args meet the builtin requirements.
2238 assert(Args.size() >= Call->Builtin->MinNumArgs &&
2239 "Too few arguments to generate the builtin");
2240 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
2241 LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");
2242
2243 // Match the builtin with implementation based on the grouping.
2244 switch (Call->Builtin->Group) {
2245 case SPIRV::Extended:
2246 return generateExtInst(Call.get(), MIRBuilder, GR);
2247 case SPIRV::Relational:
2248 return generateRelationalInst(Call.get(), MIRBuilder, GR);
2249 case SPIRV::Group:
2250 return generateGroupInst(Call.get(), MIRBuilder, GR);
2251 case SPIRV::Variable:
2252 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
2253 case SPIRV::Atomic:
2254 return generateAtomicInst(Call.get(), MIRBuilder, GR);
2255 case SPIRV::AtomicFloating:
2256 return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
2257 case SPIRV::Barrier:
2258 return generateBarrierInst(Call.get(), MIRBuilder, GR);
2259 case SPIRV::Dot:
2260 return generateDotOrFMulInst(Call.get(), MIRBuilder, GR);
2261 case SPIRV::Wave:
2262 return generateWaveInst(Call.get(), MIRBuilder, GR);
2263 case SPIRV::GetQuery:
2264 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
2265 case SPIRV::ImageSizeQuery:
2266 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
2267 case SPIRV::ImageMiscQuery:
2268 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
2269 case SPIRV::ReadImage:
2270 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
2271 case SPIRV::WriteImage:
2272 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
2273 case SPIRV::SampleImage:
2274 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
2275 case SPIRV::Select:
2276 return generateSelectInst(Call.get(), MIRBuilder);
2277 case SPIRV::SpecConstant:
2278 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
2279 case SPIRV::Enqueue:
2280 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
2281 case SPIRV::AsyncCopy:
2282 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
2283 case SPIRV::Convert:
2284 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
2285 case SPIRV::VectorLoadStore:
2286 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
2287 case SPIRV::LoadStore:
2288 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
2289 case SPIRV::IntelSubgroups:
2290 return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
2291 case SPIRV::GroupUniform:
2292 return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
2293 }
2294 return false;
2295}
2296
2297Type *parseBuiltinCallArgumentBaseType(const StringRef DemangledCall,
2298                                       unsigned ArgIdx, LLVMContext &Ctx) {
2299 SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
2300 StringRef BuiltinArgs =
2301 DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
2302 BuiltinArgs.split(BuiltinArgsTypeStrs, ',', -1, false);
2303 if (ArgIdx >= BuiltinArgsTypeStrs.size())
2304 return nullptr;
2305 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
2306
2307 // Parse strings representing OpenCL builtin types.
2308 if (hasBuiltinTypePrefix(TypeStr)) {
2309 // OpenCL builtin types in demangled call strings have the following format:
2310 // e.g. ocl_image2d_ro
2311 bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
2312 assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");
2313
2314    // Check if this is a pointer to a builtin type rather than a pointer that
2315    // merely represents a builtin type. If it is a pointer to a builtin type,
2316    // additional handling is required in the method calling
2317    // parseBuiltinCallArgumentBaseType(...), as this function only retrieves
2318    // the base types.
2319 if (TypeStr.ends_with("*"))
2320 TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));
2321
2322 return parseBuiltinTypeNameToTargetExtType("opencl." + TypeStr.str() + "_t",
2323 Ctx);
2324 }
2325
2326 // Parse type name in either "typeN" or "type vector[N]" format, where
2327 // N is the number of elements of the vector.
2328 Type *BaseType;
2329 unsigned VecElts = 0;
2330
2331 BaseType = parseBasicTypeName(TypeStr, Ctx);
2332 if (!BaseType)
2333 // Unable to recognize SPIRV type name.
2334 return nullptr;
2335
2336 if (BaseType->isVoidTy())
2337    BaseType = Type::getInt8Ty(Ctx);
2338
2339 // Handle "typeN*" or "type vector[N]*".
2340 TypeStr.consume_back("*");
2341
2342 if (TypeStr.consume_front(" vector["))
2343 TypeStr = TypeStr.substr(0, TypeStr.find(']'));
2344
2345 TypeStr.getAsInteger(10, VecElts);
2346 if (VecElts > 0)
2347 BaseType = VectorType::get(BaseType, VecElts, false);
2348
2349 return BaseType;
2350}
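// Example (a sketch): given a demangled argument list such as
// "ocl_image2d_ro, float2", ArgIdx 0 resolves through the "ocl_" prefix to
// the target extension type for "opencl.image2d_ro_t", while ArgIdx 1
// parses the basic type name plus trailing element count into <2 x float>.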
2351
2352struct BuiltinType {
2353  StringRef Name;
2354  uint32_t Opcode;
2355};
2356
2357#define GET_BuiltinTypes_DECL
2358#define GET_BuiltinTypes_IMPL
2359
2360struct OpenCLType {
2361  StringRef Name;
2362  StringRef SpirvTypeLiteral;
2363};
2364
2365#define GET_OpenCLTypes_DECL
2366#define GET_OpenCLTypes_IMPL
2367
2368#include "SPIRVGenTables.inc"
2369} // namespace SPIRV
2370
2371//===----------------------------------------------------------------------===//
2372// Misc functions for parsing builtin types.
2373//===----------------------------------------------------------------------===//
2374
2375static Type *parseTypeString(const StringRef Name, LLVMContext &Context) {
2376  if (Name.starts_with("void"))
2377 return Type::getVoidTy(Context);
2378 else if (Name.starts_with("int") || Name.starts_with("uint"))
2379 return Type::getInt32Ty(Context);
2380 else if (Name.starts_with("float"))
2381 return Type::getFloatTy(Context);
2382 else if (Name.starts_with("half"))
2383 return Type::getHalfTy(Context);
2384 report_fatal_error("Unable to recognize type!");
2385}
2386
2387//===----------------------------------------------------------------------===//
2388// Implementation functions for builtin types.
2389//===----------------------------------------------------------------------===//
2390
2391static SPIRVType *getNonParameterizedType(const TargetExtType *ExtensionType,
2392                                          const SPIRV::BuiltinType *TypeRecord,
2393 MachineIRBuilder &MIRBuilder,
2394 SPIRVGlobalRegistry *GR) {
2395 unsigned Opcode = TypeRecord->Opcode;
2396 // Create or get an existing type from GlobalRegistry.
2397 return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
2398}
2399
2400static SPIRVType *getSamplerType(MachineIRBuilder &MIRBuilder,
2401                                 SPIRVGlobalRegistry *GR) {
2402 // Create or get an existing type from GlobalRegistry.
2403 return GR->getOrCreateOpTypeSampler(MIRBuilder);
2404}
2405
2406static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
2407 MachineIRBuilder &MIRBuilder,
2408 SPIRVGlobalRegistry *GR) {
2409 assert(ExtensionType->getNumIntParameters() == 1 &&
2410 "Invalid number of parameters for SPIR-V pipe builtin!");
2411 // Create or get an existing type from GlobalRegistry.
2412 return GR->getOrCreateOpTypePipe(MIRBuilder,
2413 SPIRV::AccessQualifier::AccessQualifier(
2414 ExtensionType->getIntParameter(0)));
2415}
2416
2417static SPIRVType *
2418getImageType(const TargetExtType *ExtensionType,
2419 const SPIRV::AccessQualifier::AccessQualifier Qualifier,
2420 MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
2421 assert(ExtensionType->getNumTypeParameters() == 1 &&
2422 "SPIR-V image builtin type must have sampled type parameter!");
2423 const SPIRVType *SampledType =
2424 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder);
2425 assert(ExtensionType->getNumIntParameters() == 7 &&
2426 "Invalid number of parameters for SPIR-V image builtin!");
2427 // Create or get an existing type from GlobalRegistry.
2428 return GR->getOrCreateOpTypeImage(
2429 MIRBuilder, SampledType,
2430 SPIRV::Dim::Dim(ExtensionType->getIntParameter(0)),
2431 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
2432 ExtensionType->getIntParameter(3), ExtensionType->getIntParameter(4),
2433 SPIRV::ImageFormat::ImageFormat(ExtensionType->getIntParameter(5)),
2434 Qualifier == SPIRV::AccessQualifier::WriteOnly
2435 ? SPIRV::AccessQualifier::WriteOnly
2436 : SPIRV::AccessQualifier::AccessQualifier(
2437 ExtensionType->getIntParameter(6)));
2438}
2439
2440static SPIRVType *getSampledImageType(const TargetExtType *OpaqueType,
2441                                      MachineIRBuilder &MIRBuilder,
2442 SPIRVGlobalRegistry *GR) {
2443 SPIRVType *OpaqueImageType = getImageType(
2444 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder, GR);
2445 // Create or get an existing type from GlobalRegistry.
2446 return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
2447}
2448
2449namespace SPIRV {
2450TargetExtType *parseBuiltinTypeNameToTargetExtType(std::string TypeName,
2451                                                   LLVMContext &Context) {
2452 StringRef NameWithParameters = TypeName;
2453
2454 // Pointers-to-opaque-structs representing OpenCL types are first translated
2455 // to equivalent SPIR-V types. OpenCL builtin type names should have the
2456 // following format: e.g. %opencl.event_t
2457 if (NameWithParameters.starts_with("opencl.")) {
2458 const SPIRV::OpenCLType *OCLTypeRecord =
2459 SPIRV::lookupOpenCLType(NameWithParameters);
2460 if (!OCLTypeRecord)
2461 report_fatal_error("Missing TableGen record for OpenCL type: " +
2462 NameWithParameters);
2463 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
2464 // Continue with the SPIR-V builtin type...
2465 }
2466
2467  // Names of the opaque structs representing SPIR-V builtins without
2468 // parameters should have the following format: e.g. %spirv.Event
2469 assert(NameWithParameters.starts_with("spirv.") &&
2470 "Unknown builtin opaque type!");
2471
2472  // Parameterized SPIR-V builtin names follow this format:
2473 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
2474 if (!NameWithParameters.contains('_'))
2475 return TargetExtType::get(Context, NameWithParameters);
2476
2477 SmallVector<StringRef> Parameters;
2478 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
2479 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
2480
2481 SmallVector<Type *, 1> TypeParameters;
2482 bool HasTypeParameter = !isDigit(Parameters[0][0]);
2483 if (HasTypeParameter)
2484 TypeParameters.push_back(parseTypeString(Parameters[0], Context));
2485 SmallVector<unsigned> IntParameters;
2486 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
2487 unsigned IntParameter = 0;
2488 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
2489 (void)ValidLiteral;
2490 assert(ValidLiteral &&
2491 "Invalid format of SPIR-V builtin parameter literal!");
2492 IntParameters.push_back(IntParameter);
2493 }
2494  return TargetExtType::get(Context,
2495                            NameWithParameters.substr(0, BaseNameLength),
2496 TypeParameters, IntParameters);
2497}
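// Example (a sketch): "opencl.event_t" is first mapped through the TableGen
// OpenCL table to its "spirv." equivalent, while a parameterized name such
// as "spirv.Image._void_1_0_0_0_0_0_0" splits into the base name
// "spirv.Image", one type parameter parsed from "void", and seven integer
// parameters.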
2498
2499SPIRVType *lowerBuiltinType(const Type *OpaqueType,
2500                            SPIRV::AccessQualifier::AccessQualifier AccessQual,
2501 MachineIRBuilder &MIRBuilder,
2502 SPIRVGlobalRegistry *GR) {
2503 // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
2504 // target(...) target extension types or pointers-to-opaque-structs. The
2505 // approach relying on structs is deprecated and works only in the non-opaque
2506 // pointer mode (-opaque-pointers=0).
2507 // In order to maintain compatibility with LLVM IR generated by older versions
2508 // of Clang and LLVM/SPIR-V Translator, the pointers-to-opaque-structs are
2509 // "translated" to target extension types. This translation is temporary and
2510  // will be removed in a future release of LLVM.
2511 const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
2512 if (!BuiltinType)
2513    BuiltinType = parseBuiltinTypeNameToTargetExtType(
2514        OpaqueType->getStructName().str(), MIRBuilder.getContext());
2515
2516 unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
2517
2518 const StringRef Name = BuiltinType->getName();
2519 LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
2520
2521 // Lookup the demangled builtin type in the TableGen records.
2522 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
2523 if (!TypeRecord)
2524 report_fatal_error("Missing TableGen record for builtin type: " + Name);
2525
2526 // "Lower" the BuiltinType into TargetType. The following get<...>Type methods
2527 // use the implementation details from TableGen records or TargetExtType
2528 // parameters to either create a new OpType<...> machine instruction or get an
2529 // existing equivalent SPIRVType from GlobalRegistry.
2530 SPIRVType *TargetType;
2531 switch (TypeRecord->Opcode) {
2532 case SPIRV::OpTypeImage:
2533 TargetType = getImageType(BuiltinType, AccessQual, MIRBuilder, GR);
2534 break;
2535 case SPIRV::OpTypePipe:
2536 TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
2537 break;
2538 case SPIRV::OpTypeDeviceEvent:
2539 TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
2540 break;
2541 case SPIRV::OpTypeSampler:
2542 TargetType = getSamplerType(MIRBuilder, GR);
2543 break;
2544 case SPIRV::OpTypeSampledImage:
2545 TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
2546 break;
2547 default:
2548 TargetType =
2549 getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
2550 break;
2551 }
2552
2553 // Emit OpName instruction if a new OpType<...> instruction was added
2554 // (equivalent type was not found in GlobalRegistry).
2555 if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
2556 buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
2557
2558 return TargetType;
2559}
2560} // namespace SPIRV
2561} // namespace llvm
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
AMDGPU Lower Kernel Arguments
return RetTy
#define LLVM_DEBUG(X)
Definition: Debug.h:101
std::string Name
uint64_t Size
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned Reg
IntegerType * Int32Ty
LLVMContext & Context
static bool isDigit(const char C)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file contains some functions that are useful when dealing with strings.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
APInt bitcastToAPInt() const
Definition: APFloat.h:1210
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Definition: APFloat.h:957
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:212
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1491
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Definition: Type.cpp:647
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:1018
@ ICMP_EQ
equal
Definition: InstrTypes.h:1014
const APFloat & getValueAPF() const
Definition: Constants.h:311
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:145
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
Tagged union holding either a T or a Error.
Definition: Error.h:474
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:539
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:356
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelType.h:64
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
Definition: LowLevelType.h:57
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelType.h:100
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
LLVMContext & getContext() const
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
const DataLayout & getDataLayout() const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:558
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:568
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
const MDNode * getMetadata() const
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
const TargetRegisterClass * getRegClassOrNull(Register Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet.
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isValid() const
Definition: Register.h:116
SPIRVType * getOrCreateOpTypePipe(MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder)
Register getOrCreateConsIntVector(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR=true)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
Register buildConstantSampler(Register Res, unsigned AddrMode, unsigned Param, unsigned FilerMode, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorComponentCount(Register VReg) const
SPIRVType * getOrCreateOpTypeImage(MachineIRBuilder &MIRBuilder, SPIRVType *SampledType, SPIRV::Dim::Dim Dim, uint32_t Depth, uint32_t Arrayed, uint32_t Multisampled, uint32_t Sampled, SPIRV::ImageFormat::ImageFormat ImageFormat, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVType * getOrCreateOpTypeByOpcode(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite, bool EmitIR=true)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, MachineFunction &MF)
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, bool HasLinkageTy, SPIRV::LinkageType::LinkageType LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
SPIRVType * getOrCreateOpTypeSampledImage(SPIRVType *ImageType, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVTypeByName(StringRef TypeStr, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC=SPIRV::StorageClass::Function, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite)
SPIRVType * assignTypeToVReg(const Type *Type, Register VReg, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite, bool EmitIR=true)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
SPIRVType * getOrCreateOpTypeDeviceEvent(MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVPointerType(SPIRVType *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SClass=SPIRV::StorageClass::Function)
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
Register getOrCreateConsIntArray(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
SPIRVType * getOrCreateOpTypeSampler(MachineIRBuilder &MIRBuilder)
Register buildConstantInt(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr, bool EmitIR=true)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:686
bool consume_back(StringRef Suffix)
Returns true if this StringRef has the given suffix and removes that suffix.
Definition: StringRef.h:641
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:456
std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:222
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition: StringRef.h:557
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:257
bool contains_insensitive(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:422
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Definition: StringRef.h:670
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:410
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition: StringRef.h:621
size_t find_first_of(char C, size_t From=0) const
Find the first character in the string that is C, or npos if not found.
Definition: StringRef.h:363
size_t find(char C, size_t From=0) const
Search for the first character C in the string.
Definition: StringRef.h:283
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition: StringRef.h:266
static constexpr size_t npos
Definition: StringRef.h:52
Class to represent struct types.
Definition: DerivedTypes.h:216
static StructType * getTypeByName(LLVMContext &C, StringRef Name)
Return the type with the specified name, or null if there is none by that name.
Definition: Type.cpp:632
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition: Type.cpp:513
Class to represent target extensions types, which are generally unintrospectable from target-independ...
Definition: DerivedTypes.h:720
unsigned getNumIntParameters() const
Definition: DerivedTypes.h:765
static TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types=std::nullopt, ArrayRef< unsigned > Ints=std::nullopt)
Return a target extension type having the specified name and optional type and integer parameters.
Definition: Type.cpp:796
Type * getTypeParameter(unsigned i) const
Definition: DerivedTypes.h:755
unsigned getNumTypeParameters() const
Definition: DerivedTypes.h:756
unsigned getIntParameter(unsigned i) const
Definition: DerivedTypes.h:764
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static Type * getHalfTy(LLVMContext &C)
StringRef getStructName() const
static Type * getVoidTy(LLVMContext &C)
static IntegerType * getInt8Ty(LLVMContext &C)
static IntegerType * getInt32Ty(LLVMContext &C)
static Type * getFloatTy(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
LLVM Value Representation.
Definition: Value.h:74
Value(Type *Ty, unsigned scid)
Definition: Value.cpp:53
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Definition: Type.cpp:676
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:316
LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount)
Create a vector type that contains a defined type and has a specific number of elements.
Definition: Core.cpp:864
Type * parseBuiltinCallArgumentBaseType(const StringRef DemangledCall, unsigned ArgIdx, LLVMContext &Ctx)
Parses the provided ArgIdx argument base type in the DemangledCall skeleton.
TargetExtType * parseBuiltinTypeNameToTargetExtType(std::string TypeName, LLVMContext &Context)
Translates a string representing a SPIR-V or OpenCL builtin type to a TargetExtType that can be furth...
std::optional< bool > lowerBuiltin(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, MachineIRBuilder &MIRBuilder, const Register OrigRet, const Type *OrigRetTy, const SmallVectorImpl< Register > &Args, SPIRVGlobalRegistry *GR)
SPIRVType * lowerBuiltinType(const Type *OpaqueType, SPIRV::AccessQualifier::AccessQualifier AccessQual, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
StorageClass
Definition: XCOFF.h:170
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
Definition: SPIRVUtils.cpp:100
unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
Definition: SPIRVUtils.cpp:138
static bool generateGetQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildConstantIntReg(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, unsigned BitWidth=32)
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic flag instructions (e.g.
static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder, SPIRVType *VariableType, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType, Register Reg=Register(0), bool isConst=true, bool hasLinkageTy=true)
Helper function for building a load instruction for loading a builtin global variable of the given BuiltinValue.
static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::SamplerFilterMode::SamplerFilterMode getSamplerFilterModeFromBitmask(unsigned Bitmask)
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic store instruction.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
Definition: SPIRVUtils.cpp:80
static const Type * getBlockStructType(Register ParamReg, MachineRegisterInfo *MRI)
static bool generateGroupInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim)
Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB, MachineRegisterInfo &MRI)
Helper external function for inserting an ASSIGN_TYPE instruction between Reg and its definition,...
static Register buildScopeReg(Register CLScopeRegister, SPIRV::Scope::Scope Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI)
static std::tuple< Register, SPIRVType * > buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType, SPIRVGlobalRegistry *GR)
Helper function building either a resulting scalar or vector bool register depending on the expected ResultType.
static unsigned getNumSizeComponents(SPIRVType *imgType)
Helper function for obtaining the number of size components.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
Definition: SPIRVUtils.cpp:241
static SPIRVType * getSampledImageType(const TargetExtType *OpaqueType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
Definition: SPIRVUtils.cpp:190
static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSampleImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBarrierInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateWaveInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getImageType(const TargetExtType *ExtensionType, const SPIRV::AccessQualifier::AccessQualifier Qualifier, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
Definition: SPIRVUtils.cpp:117
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building barriers, i.e., memory/control ordering operations.
static bool generateAsyncCopy(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope)
static SPIRVType * getSamplerType(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
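dbgs() is normally used through the LLVM_DEBUG macro so output is gated on the file's DEBUG_TYPE; a sketch using this file's "spirv-builtins" debug type:

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#define DEBUG_TYPE "spirv-builtins"

static void debugPrintSketch(llvm::StringRef DemangledCall) {
  // Printed only in asserts builds when run with -debug-only=spirv-builtins.
  LLVM_DEBUG(llvm::dbgs() << "lowering builtin call: " << DemangledCall << "\n");
}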
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, LLT LowLevelType, Register DestinationReg=Register(0))
Helper function for building a load instruction loading into the DestinationReg.
static bool generateEnqueueInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
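report_fatal_error is the pattern this lowering uses for unrecoverable conditions; a minimal sketch using the string overload (the condition and message are illustrative):

#include "llvm/Support/ErrorHandling.h"

static void requireSketch(bool LoweredOk) {
  if (!LoweredOk)
    llvm::report_fatal_error("unable to lower SPIR-V builtin call");
}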
static bool buildSelectInst(MachineIRBuilder &MIRBuilder, Register ReturnRegister, Register SourceRegister, const SPIRVType *ReturnType, SPIRVGlobalRegistry *GR)
Helper function for building either a vector or scalar select instruction depending on the expected ReturnType.
static const Type * getMachineInstrType(MachineInstr *MI)
static SPIRV::SamplerAddressingMode::SamplerAddressingMode getSamplerAddressingModeFromBitmask(unsigned Bitmask)
static bool generateAtomicInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConvertInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildMemSemanticsReg(Register SemanticsRegister, Register PtrRegister, unsigned &Semantics, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI)
static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSelectInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic load instruction.
static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx)
Definition: SPIRVUtils.cpp:372
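parseBasicTypeName takes the name by non-const reference, so it can consume the parsed prefix; a sketch where "uint" is a hypothetical OpenCL-style spelling:

#include "SPIRVUtils.h"
#include "llvm/IR/LLVMContext.h"

llvm::Type *basicTypeSketch(llvm::LLVMContext &Ctx) {
  llvm::StringRef Name = "uint"; // assumption: maps to a 32-bit integer
  return llvm::parseBasicTypeName(Name, Ctx);
}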
static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, uint64_t DefaultValue)
static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode, const SPIRV::IncomingCall *Call, Register TypeReg=Register(0))
static std::unique_ptr< const SPIRV::IncomingCall > lookupBuiltin(StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
Looks up the demangled builtin call in the SPIRVBuiltins.td records using the provided DemangledCall and specified Set.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic floating-type instruction.
MachineInstr * getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI)
Definition: SPIRVUtils.cpp:226
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
const MachineInstr SPIRVType
static bool generateReadImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
bool hasBuiltinTypePrefix(StringRef Name)
Definition: SPIRVUtils.cpp:344
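hasBuiltinTypePrefix is a cheap string-prefix check; the "opencl." spelling below is one plausible prefix, not an exhaustive list of what the function recognizes:

#include "SPIRVUtils.h"

bool builtinPrefixSketch() {
  return llvm::hasBuiltinTypePrefix("opencl.image2d_ro_t");
}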
static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * getMDOperandAsType(const MDNode *N, unsigned I)
Definition: SPIRVUtils.cpp:253
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic read-modify-write (RMW) instruction.
static SPIRV::MemorySemantics::MemorySemantics getSPIRVMemSemantics(std::memory_order MemOrder)
static bool generateRelationalInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
Helper function for translating atomic init to OpStore.
static bool generateWriteImageInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getPipeType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Type * parseTypeString(const StringRef Name, LLVMContext &Context)
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
Definition: SPIRVUtils.cpp:247
static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildNDRange(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getNonParameterizedType(const TargetExtType *ExtensionType, const SPIRV::BuiltinType *TypeRecord, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static MachineInstr * getBlockStructInstr(Register ParamReg, MachineRegisterInfo *MRI)
static unsigned getSamplerParamFromBitmask(unsigned Bitmask)
static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic compare-exchange instruction.
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
static bool generateBuiltinVar(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:249
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
This class contains a discriminated union of information about pointers in memory operands, relating them back to LLVM IR or to virtual locations (such as frame indices) that are exposed during codegen.
FPRoundingMode::FPRoundingMode RoundingMode
InstructionSet::InstructionSet Set
BuiltIn::BuiltIn Value
const SmallVectorImpl< Register > & Arguments
const std::string BuiltinName
const SPIRVType * ReturnType
const Register ReturnRegister
const DemangledBuiltin * Builtin
IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin, const Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)