//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering builtin function calls and types using their
// demangled names and TableGen records.
//
//===----------------------------------------------------------------------===//

#include "SPIRVBuiltins.h"
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include <regex>
#include <string>
#include <tuple>

#define DEBUG_TYPE "spirv-builtins"

namespace llvm {
namespace SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"

struct DemangledBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltinGroup Group;
  uint8_t MinNumArgs;
  uint8_t MaxNumArgs;
};

#define GET_DemangledBuiltins_DECL
#define GET_DemangledBuiltins_IMPL

struct IncomingCall {
  const std::string BuiltinName;
  const DemangledBuiltin *Builtin;
  const Register ReturnRegister;
  const SPIRVType *ReturnType;
  const SmallVectorImpl<Register> &Arguments;

  IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin,
               const Register ReturnRegister, const SPIRVType *ReturnType,
               const SmallVectorImpl<Register> &Arguments)
      : BuiltinName(BuiltinName), Builtin(Builtin),
        ReturnRegister(ReturnRegister), ReturnType(ReturnType),
        Arguments(Arguments) {}

  bool isSpirvOp() const { return BuiltinName.rfind("__spirv_", 0) == 0; }
};

struct NativeBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Opcode;
};

#define GET_NativeBuiltins_DECL
#define GET_NativeBuiltins_IMPL

struct GroupBuiltin {
  StringRef Name;
  uint32_t Opcode;
  uint32_t GroupOperation;
  bool IsElect;
  bool IsAllOrAny;
  bool IsAllEqual;
  bool IsBallot;
  bool IsInverseBallot;
  bool IsBallotBitExtract;
  bool IsBallotFindBit;
  bool IsLogical;
  bool NoGroupOperation;
  bool HasBoolArg;
};

#define GET_GroupBuiltins_DECL
#define GET_GroupBuiltins_IMPL

struct IntelSubgroupsBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsBlock;
  bool IsWrite;
  bool IsMedia;
};

#define GET_IntelSubgroupsBuiltins_DECL
#define GET_IntelSubgroupsBuiltins_IMPL

struct AtomicFloatingBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_AtomicFloatingBuiltins_DECL
#define GET_AtomicFloatingBuiltins_IMPL

struct GroupUniformBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsLogical;
};

#define GET_GroupUniformBuiltins_DECL
#define GET_GroupUniformBuiltins_IMPL

struct GetBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltIn::BuiltIn Value;
};

using namespace BuiltIn;
#define GET_GetBuiltins_DECL
#define GET_GetBuiltins_IMPL

struct ImageQueryBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Component;
};

#define GET_ImageQueryBuiltins_DECL
#define GET_ImageQueryBuiltins_IMPL

struct IntegerDotProductBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_IntegerDotProductBuiltins_DECL
#define GET_IntegerDotProductBuiltins_IMPL

struct ConvertBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  bool IsDestinationSigned;
  bool IsSaturated;
  bool IsRounded;
  bool IsBfloat16;
  bool IsTF32;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

struct VectorLoadStoreBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Number;
  uint32_t ElementCount;
  bool IsRounded;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

using namespace FPRoundingMode;
#define GET_ConvertBuiltins_DECL
#define GET_ConvertBuiltins_IMPL

using namespace InstructionSet;
#define GET_VectorLoadStoreBuiltins_DECL
#define GET_VectorLoadStoreBuiltins_IMPL

#define GET_CLMemoryScope_DECL
#define GET_CLSamplerAddressingMode_DECL
#define GET_CLMemoryFenceFlags_DECL
#define GET_ExtendedBuiltins_DECL
#include "SPIRVGenTables.inc"
} // namespace SPIRV

//===----------------------------------------------------------------------===//
// Misc functions for looking up builtins and verifying requirements using
// TableGen records
//===----------------------------------------------------------------------===//

namespace SPIRV {
/// Parses the name part of the demangled builtin call.
std::string lookupBuiltinNameHelper(StringRef DemangledCall,
                                    FPDecorationId *DecorationId) {
  StringRef PassPrefix = "(anonymous namespace)::";
  StringRef SpvPrefix = "__spv::";
  std::string BuiltinName = DemangledCall.str();

  // Check if the extracted name contains type information between angle
  // brackets. If so, the builtin is an instantiated template, and the
  // information after the angle brackets as well as the return type must be
  // removed.
  std::size_t Pos = BuiltinName.find(">(");
  if (Pos != std::string::npos) {
    BuiltinName = BuiltinName.substr(0, BuiltinName.rfind('<', Pos));
  } else {
    Pos = BuiltinName.find('(');
    if (Pos != std::string::npos)
      BuiltinName = BuiltinName.substr(0, Pos);
  }
  BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);

  // The Itanium demangler result may have an "(anonymous namespace)::" or
  // "__spv::" prefix.
  if (BuiltinName.find(PassPrefix) == 0)
    BuiltinName = BuiltinName.substr(PassPrefix.size());
  else if (BuiltinName.find(SpvPrefix) == 0)
    BuiltinName = BuiltinName.substr(SpvPrefix.size());

  // Account for the possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR.
  if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
    BuiltinName = BuiltinName.substr(12);

  // Check if the extracted name begins with:
  // - "__spirv_ImageSampleExplicitLod"
  // - "__spirv_ImageRead"
  // - "__spirv_ImageWrite"
  // - "__spirv_ImageQuerySizeLod"
  // - "__spirv_UDotKHR"
  // - "__spirv_SDotKHR"
  // - "__spirv_SUDotKHR"
  // - "__spirv_SDotAccSatKHR"
  // - "__spirv_UDotAccSatKHR"
  // - "__spirv_SUDotAccSatKHR"
  // - "__spirv_ReadClockKHR"
  // - "__spirv_SubgroupBlockReadINTEL"
  // - "__spirv_SubgroupImageBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockWriteINTEL"
  // - "__spirv_Convert"
  // - "__spirv_Round"
  // - "__spirv_UConvert"
  // - "__spirv_SConvert"
  // - "__spirv_FConvert"
  // - "__spirv_SatConvert"
  // and may contain return type information at the end "_R<type>".
  // If so, extract the plain builtin name without the type information.
  static const std::regex SpvWithR(
      "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageWrite|ImageQuerySizeLod|"
      "UDotKHR|"
      "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
      "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
      "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
      "Convert|Round|"
      "UConvert|SConvert|FConvert|SatConvert)[^_]*)(_R[^_]*_?(\\w+)?.*)?");
  std::smatch Match;
  if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
    std::ssub_match SubMatch;
    if (DecorationId && Match.size() > 3) {
      SubMatch = Match[4];
      *DecorationId = demangledPostfixToDecorationId(SubMatch.str());
    }
    SubMatch = Match[1];
    BuiltinName = SubMatch.str();
  }

  return BuiltinName;
}
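
// A few illustrative examples of the normalization above (assumed inputs,
// shown purely for documentation; derived from the rules implemented in this
// helper):
//   "clamp(float, float, float)"        -> "clamp"
//   "__spirv_ocl_s_max(int, int)"       -> "s_max"
//   "__spirv_FConvert_Rhalf_rtz(float)" -> "__spirv_FConvert"
//     (when requested, *DecorationId is derived from the "rtz" postfix)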
} // namespace SPIRV

/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
/// the provided \p DemangledCall and specified \p Set.
///
/// The lookup is performed in the following order, returning the first
/// successful match:
/// 1. Search with the plain demangled name (expecting a 1:1 match).
/// 2. Search with a prefix before or a suffix after the demangled name,
///    signifying the type of the first argument.
///
/// \returns Wrapper around the demangled call and found builtin definition.
static std::unique_ptr<const SPIRV::IncomingCall>
lookupBuiltin(StringRef DemangledCall,
              SPIRV::InstructionSet::InstructionSet Set,
              Register ReturnRegister, const SPIRVType *ReturnType,
              const SmallVectorImpl<Register> &Arguments) {
  std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);

  SmallVector<StringRef, 10> BuiltinArgumentTypes;
  StringRef BuiltinArgs =
      DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
  BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);

  // Look up the builtin in the defined set. Start with the plain demangled
  // name, expecting a 1:1 match in the defined builtin set.
  const SPIRV::DemangledBuiltin *Builtin;
  if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
    return std::make_unique<SPIRV::IncomingCall>(
        BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

  // If the initial lookup was unsuccessful and the demangled call takes at
  // least 1 argument, add a prefix or suffix signifying the type of the first
  // argument and repeat the search.
  if (BuiltinArgumentTypes.size() >= 1) {
    char FirstArgumentType = BuiltinArgumentTypes[0][0];
    // Prefix to be added to the builtin's name for lookup.
    // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
    std::string Prefix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "u_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "s_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      if (Set == SPIRV::InstructionSet::OpenCL_std ||
          Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "f";
      break;
    }

    // If an argument-type name prefix was added, look up the builtin again.
    if (!Prefix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

    // If the lookup with a prefix failed, find a suffix to be added to the
    // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
    // an unsigned value has a suffix "u".
    std::string Suffix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      Suffix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      Suffix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      Suffix = "f";
      break;
    }

    // If an argument-type name suffix was added, look up the builtin again.
    if (!Suffix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
  }

  // No builtin with such a name was found in the set.
  return nullptr;
}
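
// For example, assuming OpenCL_std records: a call demangled as
// "abs(unsigned int)" misses on the plain name "abs", then matches the
// record "u_abs" via the "u_" prefix, while "group_reduce_max(unsigned int)"
// instead matches "group_reduce_maxu" via the "u" suffix.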

static MachineInstr *getBlockStructInstr(Register ParamReg,
                                         MachineRegisterInfo *MRI) {
  // We expect the following sequence of instructions:
  //   %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
  //   or       = G_GLOBAL_VALUE @block_literal_global
  //   %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
  //   %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
  MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
  assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
         MI->getOperand(1).isReg());
  Register BitcastReg = MI->getOperand(1).getReg();
  MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
  assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
         BitcastMI->getOperand(2).isReg());
  Register ValueReg = BitcastMI->getOperand(2).getReg();
  MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
  return ValueMI;
}

// Returns the integer constant that corresponds to the given register and
// is defined in spv_track_constant.
// TODO: maybe unify with the prelegalizer pass.
static uint64_t getIConstVal(Register Reg, const MachineRegisterInfo *MRI) {
  MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
  assert(DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
         DefMI->getOperand(1).isCImm());
  return DefMI->getOperand(1).getCImm()->getValue().getZExtValue();
}

// Returns the type of the instruction result from a spv_assign_type intrinsic.
// TODO: maybe unify with the prelegalizer pass.
static Type *getMachineInstrType(MachineInstr *MI) {
  MachineInstr *NextMI = MI->getNextNode();
  if (!NextMI)
    return nullptr;
  if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
    if ((NextMI = NextMI->getNextNode()) == nullptr)
      return nullptr;
  Register ValueReg = MI->getOperand(0).getReg();
  if ((!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) &&
       !isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_ptr_type)) ||
      NextMI->getOperand(1).getReg() != ValueReg)
    return nullptr;
  Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
  assert(Ty && "Type is expected");
  return Ty;
}

static const Type *getBlockStructType(Register ParamReg,
                                      MachineRegisterInfo *MRI) {
  // In principle, this information should be passed to us from Clang via
  // an elementtype attribute. However, said attribute requires that
  // the function call be an intrinsic, which it is not. Instead, we rely on
  // being able to trace this to the declaration of a variable: OpenCL C
  // specification section 6.12.5 should guarantee that we can do this.
  MachineInstr *MI = getBlockStructInstr(ParamReg, MRI);
  if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
    return MI->getOperand(1).getGlobal()->getType();
  assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
         "Blocks in OpenCL C must be traceable to allocation site");
  return getMachineInstrType(MI);
}

//===----------------------------------------------------------------------===//
// Helper functions for building misc instructions
//===----------------------------------------------------------------------===//

/// Helper function building either a resulting scalar or vector bool register
/// depending on the expected \p ResultType.
///
/// \returns Tuple of the resulting register and its type.
static std::tuple<Register, SPIRVType *>
buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
                  SPIRVGlobalRegistry *GR) {
  LLT Type;
  SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);

  if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned VectorElements = ResultType->getOperand(2).getImm();
    BoolType = GR->getOrCreateSPIRVVectorType(BoolType, VectorElements,
                                              MIRBuilder, true);
    const FixedVectorType *LLVMVectorType =
        cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
    Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
  } else {
    Type = LLT::scalar(1);
  }

  Register ResultRegister =
      MIRBuilder.getMRI()->createGenericVirtualRegister(Type);
  MIRBuilder.getMRI()->setRegClass(ResultRegister,
                                   GR->getRegClass(ResultType));
  GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
  return std::make_tuple(ResultRegister, BoolType);
}

/// Helper function for building either a vector or scalar select instruction
/// depending on the expected \p ReturnType.
static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
                            Register ReturnRegister, Register SourceRegister,
                            const SPIRVType *ReturnType,
                            SPIRVGlobalRegistry *GR) {
  Register TrueConst, FalseConst;

  if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
    uint64_t AllOnes = APInt::getAllOnes(Bits).getZExtValue();
    TrueConst =
        GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType, true);
    FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType, true);
  } else {
    TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType, true);
    FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType, true);
  }

  return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
                                FalseConst);
}
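
// Note: for vector results the "true" constant is all-ones (i.e. -1) while
// for scalar results it is 1, which matches the OpenCL C convention of
// relational builtins returning -1/0 per vector element and 1/0 for scalars.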

/// Helper function for building a load instruction loading into the
/// \p DestinationReg.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR, LLT LowLevelType,
                              Register DestinationReg = Register(0)) {
  if (!DestinationReg.isValid())
    DestinationReg = createVirtualRegister(BaseType, GR, MIRBuilder);
  // TODO: consider using the correct address space and alignment (p0 is the
  // canonical type for selection though).
  MachinePointerInfo PtrInfo = MachinePointerInfo();
  MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
  return DestinationReg;
}

/// Helper function for building a load instruction for loading the builtin
/// global variable associated with \p BuiltinValue.
static Register buildBuiltinVariableLoad(
    MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
    SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
    Register Reg = Register(0), bool isConst = true,
    const std::optional<SPIRV::LinkageType::LinkageType> &LinkageTy = {
        SPIRV::LinkageType::Import}) {
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::pIDRegClass);
  MIRBuilder.getMRI()->setType(
      NewRegister,
      LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
                   GR->getPointerSize()));
  SPIRVType *PtrType = GR->getOrCreateSPIRVPointerType(
      VariableType, MIRBuilder, SPIRV::StorageClass::Input);
  GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Set up the global OpVariable with the necessary builtin decorations.
  Register Variable = GR->buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst, LinkageTy,
      MIRBuilder, false);

  // Load the value from the global variable.
  Register LoadedRegister =
      buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
  return LoadedRegister;
}

/// External helper function for assigning a SPIRVType to a register, ensuring
/// that the register class and type are set in MRI. Defined in
/// SPIRVPreLegalizer.cpp.
extern void updateRegType(Register Reg, Type *Ty, SPIRVType *SpirvTy,
                          SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
                          MachineRegisterInfo &MRI);

// TODO: Move to TableGen.
static SPIRV::MemorySemantics::MemorySemantics
getSPIRVMemSemantics(std::memory_order MemOrder) {
  switch (MemOrder) {
  case std::memory_order_relaxed:
    return SPIRV::MemorySemantics::None;
  case std::memory_order_acquire:
    return SPIRV::MemorySemantics::Acquire;
  case std::memory_order_release:
    return SPIRV::MemorySemantics::Release;
  case std::memory_order_acq_rel:
    return SPIRV::MemorySemantics::AcquireRelease;
  case std::memory_order_seq_cst:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  default:
    report_fatal_error("Unknown memory order");
  }
}

static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
  switch (ClScope) {
  case SPIRV::CLMemoryScope::memory_scope_work_item:
    return SPIRV::Scope::Invocation;
  case SPIRV::CLMemoryScope::memory_scope_work_group:
    return SPIRV::Scope::Workgroup;
  case SPIRV::CLMemoryScope::memory_scope_device:
    return SPIRV::Scope::Device;
  case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
    return SPIRV::Scope::CrossDevice;
  case SPIRV::CLMemoryScope::memory_scope_sub_group:
    return SPIRV::Scope::Subgroup;
  }
  report_fatal_error("Unknown CL memory scope");
}

static Register buildConstantIntReg32(uint64_t Val,
                                      MachineIRBuilder &MIRBuilder,
                                      SPIRVGlobalRegistry *GR) {
  return GR->buildConstantInt(
      Val, MIRBuilder, GR->getOrCreateSPIRVIntegerType(32, MIRBuilder), true);
}

static Register buildScopeReg(Register CLScopeRegister,
                              SPIRV::Scope::Scope Scope,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              MachineRegisterInfo *MRI) {
  if (CLScopeRegister.isValid()) {
    auto CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
    Scope = getSPIRVScope(CLScope);

    if (CLScope == static_cast<unsigned>(Scope)) {
      MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
      return CLScopeRegister;
    }
  }
  return buildConstantIntReg32(Scope, MIRBuilder, GR);
}

static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI,
                              SPIRVGlobalRegistry *GR) {
  if (MRI->getRegClassOrNull(Reg))
    return;
  SPIRVType *SpvType = GR->getSPIRVTypeForVReg(Reg);
  MRI->setRegClass(Reg,
                   SpvType ? GR->getRegClass(SpvType) : &SPIRV::iIDRegClass);
}

static Register buildMemSemanticsReg(Register SemanticsRegister,
                                     Register PtrRegister, unsigned &Semantics,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  if (SemanticsRegister.isValid()) {
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    std::memory_order Order =
        static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
    Semantics =
        getSPIRVMemSemantics(Order) |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    if (static_cast<unsigned>(Order) == Semantics) {
      MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
      return SemanticsRegister;
    }
  }
  return buildConstantIntReg32(Semantics, MIRBuilder, GR);
}

static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                               const SPIRV::IncomingCall *Call,
                               Register TypeReg,
                               ArrayRef<uint32_t> ImmArgs = {}) {
  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (TypeReg.isValid())
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
  unsigned Sz = Call->Arguments.size() - ImmArgs.size();
  for (unsigned i = 0; i < Sz; ++i)
    MIB.addUse(Call->Arguments[i]);
  for (uint32_t ImmArg : ImmArgs)
    MIB.addImm(ImmArg);
  return true;
}

/// Helper function for translating an atomic init to OpStore.
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));

  assert(Call->Arguments.size() == 2 &&
         "Need 2 arguments for atomic init translation");
  MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic load instruction.
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);

  Register PtrRegister = Call->Arguments[0];
  // TODO: if true, insert a call to __translate_ocl_memory_scope before
  // OpAtomicLoad and the function implementation. We can use the Translator's
  // output for transcoding/atomic_explicit_arguments.cl as an example.
  Register ScopeRegister =
      Call->Arguments.size() > 1
          ? Call->Arguments[1]
          : buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register MemSemanticsReg;
  if (Call->Arguments.size() > 2) {
    // TODO: Insert a call to __translate_ocl_memory_order before OpAtomicLoad.
    MemSemanticsReg = Call->Arguments[2];
  } else {
    int Semantics =
        SPIRV::MemorySemantics::SequentiallyConsistent |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  }

  MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
      .addDef(Call->ReturnRegister)
      .addUse(TypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building an atomic store instruction.
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call,
                              Register(0));

  Register ScopeRegister =
      buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register PtrRegister = Call->Arguments[0];
  int Semantics =
      SPIRV::MemorySemantics::SequentiallyConsistent |
      getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
  Register MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic compare-exchange instruction.
static bool buildAtomicCompareExchangeInst(
    const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
    unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register ObjectPtr = Call->Arguments[0];   // Pointer (volatile A *object).
  Register ExpectedArg = Call->Arguments[1]; // Comparator (C *expected).
  Register Desired = Call->Arguments[2];     // Value (C desired).
  SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
  LLT DesiredLLT = MRI->getType(Desired);

  assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
         SPIRV::OpTypePointer);
  unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
  (void)ExpectedType;
  assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
                   : ExpectedType == SPIRV::OpTypePointer);
  assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));

  SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
  assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
      SpvObjectPtrTy->getOperand(1).getImm());
  auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);

  Register MemSemEqualReg;
  Register MemSemUnequalReg;
  uint64_t MemSemEqual =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  uint64_t MemSemUnequal =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  if (Call->Arguments.size() >= 4) {
    assert(Call->Arguments.size() >= 5 &&
           "Need 5+ args for explicit atomic cmpxchg");
    auto MemOrdEq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
    auto MemOrdNeq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
    MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
    MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
    if (static_cast<unsigned>(MemOrdEq) == MemSemEqual)
      MemSemEqualReg = Call->Arguments[3];
    if (static_cast<unsigned>(MemOrdNeq) == MemSemUnequal)
      MemSemUnequalReg = Call->Arguments[4];
  }
  if (!MemSemEqualReg.isValid())
    MemSemEqualReg = buildConstantIntReg32(MemSemEqual, MIRBuilder, GR);
  if (!MemSemUnequalReg.isValid())
    MemSemUnequalReg = buildConstantIntReg32(MemSemUnequal, MIRBuilder, GR);

  Register ScopeReg;
  auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
  if (Call->Arguments.size() >= 6) {
    assert(Call->Arguments.size() == 6 &&
           "Extra args for explicit atomic cmpxchg");
    auto ClScope = static_cast<SPIRV::CLMemoryScope>(
        getIConstVal(Call->Arguments[5], MRI));
    Scope = getSPIRVScope(ClScope);
    if (ClScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[5];
  }
  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register Expected = IsCmpxchg
                          ? ExpectedArg
                          : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
                                          GR, LLT::scalar(64));
  MRI->setType(Expected, DesiredLLT);
  Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
                            : Call->ReturnRegister;
  if (!MRI->getRegClassOrNull(Tmp))
    MRI->setRegClass(Tmp, GR->getRegClass(SpvDesiredTy));
  GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());

  MIRBuilder.buildInstr(Opcode)
      .addDef(Tmp)
      .addUse(GR->getSPIRVTypeID(SpvDesiredTy))
      .addUse(ObjectPtr)
      .addUse(ScopeReg)
      .addUse(MemSemEqualReg)
      .addUse(MemSemUnequalReg)
      .addUse(Desired)
      .addUse(Expected);
  if (!IsCmpxchg) {
    MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
  }
  return true;
}
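
// For the OpenCL C atomic_compare_exchange_* flavour (i.e. when IsCmpxchg is
// false), the sequence built above is roughly:
//   %tmp = OpAtomicCompareExchange %T %object %scope %semEqual %semUnequal
//                                  %desired %expected
//          OpStore %expected_ptr %tmp   ; write the old value back
//   %res = (%tmp == %expected)          ; emitted as a G_ICMP eq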

/// Helper function for building atomic read-modify-write instructions.
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register ScopeRegister =
      Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();

  assert(Call->Arguments.size() <= 4 &&
         "Too many args for explicit atomic RMW");
  ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
                                MIRBuilder, GR, MRI);

  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::None;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);
  Register ValueReg = Call->Arguments[1];
  Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  // Support cl_ext_float_atomics.
  if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
    if (Opcode == SPIRV::OpAtomicIAdd) {
      Opcode = SPIRV::OpAtomicFAddEXT;
    } else if (Opcode == SPIRV::OpAtomicISub) {
      // Translate OpAtomicISub applied to a floating-point argument to
      // OpAtomicFAddEXT with a negated value operand.
      Opcode = SPIRV::OpAtomicFAddEXT;
      Register NegValueReg =
          MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
      MRI->setRegClass(NegValueReg, GR->getRegClass(Call->ReturnType));
      GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
                                MIRBuilder.getMF());
      MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
          .addDef(NegValueReg)
          .addUse(ValueReg);
      updateRegType(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
                    MIRBuilder.getMF().getRegInfo());
      ValueReg = NegValueReg;
    }
  }
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(ValueTypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building an atomic floating-type instruction.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
                                       unsigned Opcode,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  assert(Call->Arguments.size() == 4 &&
         "Wrong number of arguments for an atomic floating-type builtin");
  Register PtrReg = Call->Arguments[0];
  Register ScopeReg = Call->Arguments[1];
  Register MemSemanticsReg = Call->Arguments[2];
  Register ValueReg = Call->Arguments[3];
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrReg)
      .addUse(ScopeReg)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building atomic flag instructions (e.g.
/// OpAtomicFlagTestAndSet).
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
                                unsigned Opcode, MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              IsSet ? TypeReg : Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);

  assert((Opcode != SPIRV::OpAtomicFlagClear ||
          (Semantics != SPIRV::MemorySemantics::Acquire &&
           Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
         "Invalid memory order argument!");

  Register ScopeRegister =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  ScopeRegister =
      buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);

  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (IsSet)
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);

  MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building barriers, i.e., memory/control ordering
/// operations.
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                             MachineIRBuilder &MIRBuilder,
                             SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
       Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_split_barrier";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
  unsigned MemSemantics = SPIRV::MemorySemantics::None;

  if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;

  if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;

  if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::ImageMemory;

  if (Opcode == SPIRV::OpMemoryBarrier)
    MemSemantics = getSPIRVMemSemantics(static_cast<std::memory_order>(
                       getIConstVal(Call->Arguments[1], MRI))) |
                   MemSemantics;
  else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Release;
  else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Acquire;
  else
    MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;

  Register MemSemanticsReg =
      MemFlags == MemSemantics
          ? Call->Arguments[0]
          : buildConstantIntReg32(MemSemantics, MIRBuilder, GR);
  Register ScopeReg;
  SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
  SPIRV::Scope::Scope MemScope = Scope;
  if (Call->Arguments.size() >= 2) {
    assert(
        ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
         (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
        "Extra args for explicitly scoped barrier");
    Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
                                                           : Call->Arguments[1];
    SPIRV::CLMemoryScope CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
    MemScope = getSPIRVScope(CLScope);
    if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
        (Opcode == SPIRV::OpMemoryBarrier))
      Scope = MemScope;
    if (CLScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[1];
  }

  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
  if (Opcode != SPIRV::OpMemoryBarrier)
    MIB.addUse(buildConstantIntReg32(MemScope, MIRBuilder, GR));
  MIB.addUse(MemSemanticsReg);
  return true;
}
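
// As an illustration of the mapping above, an OpenCL barrier(CLK_LOCAL_MEM_FENCE)
// call is expected to lower to an OpControlBarrier with Workgroup execution and
// memory scopes and (WorkgroupMemory | SequentiallyConsistent) semantics.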

/// Helper function for building extended bit operations.
static bool buildExtendedBitOpsInst(const SPIRV::IncomingCall *Call,
                                    unsigned Opcode,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpBitFieldInsert ||
       Opcode == SPIRV::OpBitFieldSExtract ||
       Opcode == SPIRV::OpBitFieldUExtract || Opcode == SPIRV::OpBitReverse) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_bit_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

/// Helper function for building Intel's bindless image instructions.
static bool buildBindlessImageINTELInst(const SPIRV::IncomingCall *Call,
                                        unsigned Opcode,
                                        MachineIRBuilder &MIRBuilder,
                                        SPIRVGlobalRegistry *GR) {
  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Call->Arguments[0]);

  return true;
}

/// Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
static bool buildTernaryBitwiseFunctionINTELInst(
    const SPIRV::IncomingCall *Call, unsigned Opcode,
    MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

                                   unsigned Opcode,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

/// Helper function for building Intel's 2D block IO instructions.
                                 unsigned Opcode,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                          unsigned Scope, MachineIRBuilder &MIRBuilder,
                          SPIRVGlobalRegistry *GR) {
  switch (Opcode) {
  case SPIRV::OpCommitReadPipe:
  case SPIRV::OpCommitWritePipe:
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
  case SPIRV::OpGroupCommitReadPipe:
  case SPIRV::OpGroupCommitWritePipe:
  case SPIRV::OpGroupReserveReadPipePackets:
  case SPIRV::OpGroupReserveWritePipePackets: {
    Register ScopeConstReg =
        MIRBuilder.buildConstant(LLT::scalar(32), Scope).getReg(0);
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    MRI->setRegClass(ScopeConstReg, &SPIRV::iIDRegClass);
    MachineInstrBuilder MIB;
    MIB = MIRBuilder.buildInstr(Opcode);
    // Add the return register and type.
    if (Opcode == SPIRV::OpGroupReserveReadPipePackets ||
        Opcode == SPIRV::OpGroupReserveWritePipePackets)
      MIB.addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType));

    MIB.addUse(ScopeConstReg);
    for (unsigned int i = 0; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);

    return true;
  }
  default:
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));
  }
}

static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
  switch (dim) {
  case SPIRV::Dim::DIM_1D:
  case SPIRV::Dim::DIM_Buffer:
    return 1;
  case SPIRV::Dim::DIM_2D:
  case SPIRV::Dim::DIM_Cube:
  case SPIRV::Dim::DIM_Rect:
    return 2;
  case SPIRV::Dim::DIM_3D:
    return 3;
  default:
    report_fatal_error("Cannot get num components for given Dim");
  }
}

/// Helper function for obtaining the number of size components.
static unsigned getNumSizeComponents(SPIRVType *imgType) {
  assert(imgType->getOpcode() == SPIRV::OpTypeImage);
  auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
  unsigned numComps = getNumComponentsForDim(dim);
  bool arrayed = imgType->getOperand(4).getImm() == 1;
  return arrayed ? numComps + 1 : numComps;
}
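
// For example, a 2D arrayed image (DIM_2D with the 'Arrayed' operand equal
// to 1) reports 2 + 1 = 3 size components.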

static bool builtinMayNeedPromotionToVec(uint32_t BuiltinNumber) {
  switch (BuiltinNumber) {
  case SPIRV::OpenCLExtInst::s_min:
  case SPIRV::OpenCLExtInst::u_min:
  case SPIRV::OpenCLExtInst::s_max:
  case SPIRV::OpenCLExtInst::u_max:
  case SPIRV::OpenCLExtInst::fmax:
  case SPIRV::OpenCLExtInst::fmin:
  case SPIRV::OpenCLExtInst::fmax_common:
  case SPIRV::OpenCLExtInst::fmin_common:
  case SPIRV::OpenCLExtInst::s_clamp:
  case SPIRV::OpenCLExtInst::fclamp:
  case SPIRV::OpenCLExtInst::u_clamp:
  case SPIRV::OpenCLExtInst::mix:
  case SPIRV::OpenCLExtInst::step:
  case SPIRV::OpenCLExtInst::smoothstep:
    return true;
  default:
    break;
  }
  return false;
}

//===----------------------------------------------------------------------===//
// Implementation functions for each builtin group
//===----------------------------------------------------------------------===//

static SmallVector<Register, 4>
getBuiltinCallArguments(const SPIRV::IncomingCall *Call,
                        uint32_t BuiltinNumber,
                        MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {

  Register ReturnTypeId = GR->getSPIRVTypeID(Call->ReturnType);
  unsigned ResultElementCount =
      GR->getScalarOrVectorComponentCount(ReturnTypeId);
  bool MayNeedPromotionToVec =
      builtinMayNeedPromotionToVec(BuiltinNumber) && ResultElementCount > 1;

  if (!MayNeedPromotionToVec)
    return {Call->Arguments.begin(), Call->Arguments.end()};

  SmallVector<Register, 4> Arguments;
  for (Register Argument : Call->Arguments) {
    Register VecArg = Argument;
    SPIRVType *ArgumentType = GR->getSPIRVTypeForVReg(Argument);
    if (ArgumentType != Call->ReturnType) {
      VecArg = createVirtualRegister(Call->ReturnType, GR, MIRBuilder);
      auto VecSplat = MIRBuilder.buildInstr(SPIRV::OpCompositeConstruct)
                          .addDef(VecArg)
                          .addUse(ReturnTypeId);
      for (unsigned I = 0; I != ResultElementCount; ++I)
        VecSplat.addUse(Argument);
    }
    Arguments.push_back(VecArg);
  }
  return Arguments;
}
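
// For example, for a call such as fmax(float4, float), the scalar argument is
// splatted: an OpCompositeConstruct with four uses of the scalar produces a
// float4, so both operands of the extended instruction share the result type.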

static bool generateExtInst(const SPIRV::IncomingCall *Call,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR, const CallBase &CB) {
  // Lookup the extended instruction number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Number =
      SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
  // fmin_common and fmax_common are now deprecated; fmin and fmax with the
  // NoNans and NoInfs flags should be used instead. Keep the original number
  // so the flags can be added later.
  uint32_t OrigNumber = Number;
  const SPIRVSubtarget &ST =
      cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
  if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2) &&
      (Number == SPIRV::OpenCLExtInst::fmin_common ||
       Number == SPIRV::OpenCLExtInst::fmax_common)) {
    Number = (Number == SPIRV::OpenCLExtInst::fmin_common)
                 ? SPIRV::OpenCLExtInst::fmin
                 : SPIRV::OpenCLExtInst::fmax;
  }

  Register ReturnTypeId = GR->getSPIRVTypeID(Call->ReturnType);
  SmallVector<Register, 4> Arguments =
      getBuiltinCallArguments(Call, Number, MIRBuilder, GR);

  // Build the extended instruction.
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
          .addUse(ReturnTypeId)
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Number);

  for (Register Argument : Arguments)
    MIB.addUse(Argument);

  MIB.getInstr()->copyIRFlags(CB);
  if (OrigNumber == SPIRV::OpenCLExtInst::fmin_common ||
      OrigNumber == SPIRV::OpenCLExtInst::fmax_common) {
    // Add the NoNans and NoInfs flags to the fmin/fmax instruction.
    MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoNans);
    MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoInfs);
  }
  return true;
}

static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register CompareRegister;
  SPIRVType *RelationType;
  std::tie(CompareRegister, RelationType) =
      buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  // Build the relational instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(CompareRegister)
                 .addUse(GR->getSPIRVTypeID(RelationType));

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);

  // Build the select instruction.
  return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
                         Call->ReturnType, GR);
}

static bool generateGroupInst(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const SPIRV::GroupBuiltin *GroupBuiltin =
      SPIRV::lookupGroupBuiltin(Builtin->Name);

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (Call->isSpirvOp()) {
    if (GroupBuiltin->NoGroupOperation) {
      SmallVector<uint32_t, 1> ImmArgs;
      if (GroupBuiltin->Opcode ==
              SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL &&
          Call->Arguments.size() > 4)
        ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[4], MRI));
      return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
                                GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
    }

    // The group operation is a literal.
    Register GroupOpReg = Call->Arguments[1];
    const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
    if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
      report_fatal_error(
          "Group Operation parameter must be an integer constant");
    uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
    Register ScopeReg = Call->Arguments[0];
    auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                   .addUse(ScopeReg)
                   .addImm(GrpOp);
    for (unsigned i = 2; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);
    return true;
  }

  Register Arg0;
  if (GroupBuiltin->HasBoolArg) {
    SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
    Register BoolReg = Call->Arguments[0];
    SPIRVType *BoolRegType = GR->getSPIRVTypeForVReg(BoolReg);
    if (!BoolRegType)
      report_fatal_error("Can't find a register's type definition");
    MachineInstr *ArgInstruction = getDefInstrMaybeConstant(BoolReg, MRI);
    if (ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)
        Arg0 = GR->buildConstantInt(getIConstVal(BoolReg, MRI), MIRBuilder,
                                    BoolType, true);
    } else {
      if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
        Arg0 = MRI->createGenericVirtualRegister(LLT::scalar(1));
        MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
        GR->assignSPIRVTypeToVReg(BoolType, Arg0, MIRBuilder.getMF());
        MIRBuilder.buildICmp(
            CmpInst::ICMP_NE, Arg0, BoolReg,
            GR->buildConstantInt(0, MIRBuilder, BoolRegType, true));
        updateRegType(Arg0, nullptr, BoolType, GR, MIRBuilder,
                      MIRBuilder.getMF().getRegInfo());
      } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
        report_fatal_error("Expect a boolean argument");
      }
      // If BoolReg is already a boolean register, nothing needs to be done.
    }
  }

  Register GroupResultRegister = Call->ReturnRegister;
  SPIRVType *GroupResultType = Call->ReturnType;

  // TODO: maybe we need to check whether the result type is already boolean
  // and, in that case, not insert the select instruction.
  const bool HasBoolReturnTy =
      GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
      GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
      GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;

  if (HasBoolReturnTy)
    std::tie(GroupResultRegister, GroupResultType) =
        buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
                                                      : SPIRV::Scope::Workgroup;
  Register ScopeRegister = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register VecReg;
  if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
      Call->Arguments.size() > 2) {
    // For OpGroupBroadcast "LocalId must be an integer datatype. It must be a
    // scalar, a vector with 2 components, or a vector with 3 components.",
    // meaning that we must create a vector from the function arguments if
    // it's a work_group_broadcast(val, local_id_x, local_id_y) or
    // work_group_broadcast(val, local_id_x, local_id_y, local_id_z) call.
    Register ElemReg = Call->Arguments[1];
    SPIRVType *ElemType = GR->getSPIRVTypeForVReg(ElemReg);
    if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)
      report_fatal_error("Expect an integer <LocalId> argument");
    unsigned VecLen = Call->Arguments.size() - 1;
    VecReg = MRI->createGenericVirtualRegister(
        LLT::fixed_vector(VecLen, MRI->getType(ElemReg)));
    MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
    SPIRVType *VecType =
        GR->getOrCreateSPIRVVectorType(ElemType, VecLen, MIRBuilder, true);
    GR->assignSPIRVTypeToVReg(VecType, VecReg, MIRBuilder.getMF());
    auto MIB =
        MIRBuilder.buildInstr(TargetOpcode::G_BUILD_VECTOR).addDef(VecReg);
    for (unsigned i = 1; i < Call->Arguments.size(); i++) {
      MIB.addUse(Call->Arguments[i]);
      setRegClassIfNull(Call->Arguments[i], MRI, GR);
    }
    updateRegType(VecReg, nullptr, VecType, GR, MIRBuilder,
                  MIRBuilder.getMF().getRegInfo());
  }

  // Build the work/sub group instruction.
  auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                 .addDef(GroupResultRegister)
                 .addUse(GR->getSPIRVTypeID(GroupResultType))
                 .addUse(ScopeRegister);

  if (!GroupBuiltin->NoGroupOperation)
    MIB.addImm(GroupBuiltin->GroupOperation);
  if (Call->Arguments.size() > 0) {
    MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
    setRegClassIfNull(Call->Arguments[0], MRI, GR);
    if (VecReg.isValid())
      MIB.addUse(VecReg);
    else
      for (unsigned i = 1; i < Call->Arguments.size(); i++)
        MIB.addUse(Call->Arguments[i]);
  }

  // Build the select instruction.
  if (HasBoolReturnTy)
    buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
                    Call->ReturnType, GR);
  return true;
}

static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
      SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);

  if (IntelSubgroups->IsMedia &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_media_block_io";
    report_fatal_error(DiagMsg.c_str(), false);
  } else if (!IntelSubgroups->IsMedia &&
             !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_subgroups";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  uint32_t OpCode = IntelSubgroups->Opcode;
  if (Call->isSpirvOp()) {
    bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
    return buildOpFromWrapper(MIRBuilder, OpCode, Call,
                              IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
                                    : Register(0));
  }

  if (IntelSubgroups->IsBlock) {
    // The minimal number of arguments set in the TableGen records is 1.
    if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
      if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
        // TODO: add the required validation from the specification:
        // "'Image' must be an object whose type is OpTypeImage with a
        // 'Sampled' operand of 0 or 2. If the 'Sampled' operand is 2, then
        // some dimensions require a capability."
        switch (OpCode) {
        case SPIRV::OpSubgroupBlockReadINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
          break;
        case SPIRV::OpSubgroupBlockWriteINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
          break;
        }
      }
    }
  }

  // TODO: opaque pointer types should eventually be resolved in such a way
  // that validation of block reads is enabled with respect to the following
  // specification requirement:
  // "'Result Type' may be a scalar or vector type, and its component type
  // must be equal to the type pointed to by 'Ptr'."
  // For example, the function parameter type should not be the default i8
  // pointer, but should depend on the result type of the instruction where it
  // is used as a pointer argument of OpSubgroupBlockReadINTEL.

  // Build the Intel subgroups instruction.
  MachineInstrBuilder MIB =
      IntelSubgroups->IsWrite
          ? MIRBuilder.buildInstr(OpCode)
          : MIRBuilder.buildInstr(OpCode)
                .addDef(Call->ReturnRegister)
                .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (size_t i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);
  return true;
}

static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(
          SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_uniform_group_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  const SPIRV::GroupUniformBuiltin *GroupUniform =
      SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register GroupResultReg = Call->ReturnRegister;
  Register ScopeReg = Call->Arguments[0];
  Register ValueReg = Call->Arguments[2];

  // Group Operation
  Register ConstGroupOpReg = Call->Arguments[1];
  const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
  if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
    report_fatal_error(
        "expect a constant group operation for a uniform group instruction",
        false);
  const MachineOperand &ConstOperand = Const->getOperand(1);
  if (!ConstOperand.isCImm())
    report_fatal_error("uniform group instructions: group operation must be an "
                       "integer constant",
                       false);

  auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
                 .addDef(GroupResultReg)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                 .addUse(ScopeReg);
  addNumImm(ConstOperand.getCImm()->getValue(), MIB);
  MIB.addUse(ValueReg);

  return true;
}

static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_shader_clock";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  Register ResultReg = Call->ReturnRegister;

  if (Builtin->Name == "__spirv_ReadClockKHR") {
    MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
        .addDef(ResultReg)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Call->Arguments[0]);
  } else {
    // Deduce the `Scope` operand from the builtin function name.
    SPIRV::Scope::Scope ScopeArg =
        StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
            .EndsWith("device", SPIRV::Scope::Scope::Device)
            .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
            .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
    Register ScopeReg = buildConstantIntReg32(ScopeArg, MIRBuilder, GR);

    MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
        .addDef(ResultReg)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(ScopeReg);
  }

  return true;
}
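
// For example, a clock_read_device() call (with no explicit scope argument)
// is expected to lower to OpReadClockKHR with a constant Device scope operand,
// while the __spirv_ReadClockKHR wrapper passes its scope argument through
// unchanged.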
1595
1596// These queries ask for a single size_t result for a given dimension index,
1597// e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
1598// corresponding to these values are all vec3 types, so we need to extract the
1599// correct index or return DefaultValue (0 or 1 depending on the query). We also
1600// handle extending or truncating in case size_t does not match the expected
1601// result type's bitwidth.
1602//
1603// For a constant index >= 3 we generate:
1604// %res = OpConstant %SizeT DefaultValue
1605//
1606// For other indices we generate:
1607// %g = OpVariable %ptr_V3_SizeT Input
1608// OpDecorate %g BuiltIn XXX
1609// OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
1610// OpDecorate %g Constant
1611// %loadedVec = OpLoad %V3_SizeT %g
1612//
1613// Then, if the index is constant < 3, we generate:
1614// %res = OpCompositeExtract %SizeT %loadedVec idx
1615// If the index is dynamic, we generate:
1616// %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
1617// %cmp = OpULessThan %bool %idx %const_3
1618// %res = OpSelect %SizeT %cmp %tmp %const_<DefaultValue>
1619//
1620// If the bitwidth of %res does not match the expected return type, we add an
1621// extend or truncate.
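//
// For example (an illustrative sketch, assuming a 64-bit size_t target):
//   size_t x = get_global_id(0);
// loads the vec3 GlobalInvocationId variable and, since the index is a
// constant < 3, extracts component 0:
//   %loadedVec = OpLoad %v3ulong %g
//   %x = OpCompositeExtract %ulong %loadedVec 0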
1622 static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
1623 MachineIRBuilder &MIRBuilder,
1624 SPIRVGlobalRegistry *GR,
1625 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1626 uint64_t DefaultValue) {
1627 Register IndexRegister = Call->Arguments[0];
1628 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
1629 const unsigned PointerSize = GR->getPointerSize();
1630 const SPIRVType *PointerSizeType =
1631 GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
1632 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1633 auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);
1634
1635 // Set up the final register to do truncation or extension on at the end.
1636 Register ToTruncate = Call->ReturnRegister;
1637
1638 // If the index is constant, we can statically determine if it is in range.
1639 bool IsConstantIndex =
1640 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1641
1642 // If it's out of range (max dimension is 3), we can just return the constant
1643 // default value (0 or 1 depending on which query function).
1644 if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
1645 Register DefaultReg = Call->ReturnRegister;
1646 if (PointerSize != ResultWidth) {
1647 DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1648 MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1649 GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
1650 MIRBuilder.getMF());
1651 ToTruncate = DefaultReg;
1652 }
1653 auto NewRegister =
1654 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1655 MIRBuilder.buildCopy(DefaultReg, NewRegister);
1656 } else { // If it could be in range, we need to load from the given builtin.
1657 auto Vec3Ty =
1658 GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder, true);
1659 Register LoadedVector =
1660 buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
1661 LLT::fixed_vector(3, PointerSize));
1662 // Set up the vreg to extract the result to (possibly a new temporary one).
1663 Register Extracted = Call->ReturnRegister;
1664 if (!IsConstantIndex || PointerSize != ResultWidth) {
1665 Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1666 MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
1667 GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
1668 }
1669 // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
1670 // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
1671 MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
1672 Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
1673 ExtractInst.addUse(LoadedVector).addUse(IndexRegister);
1674
1675 // If the index is dynamic, we need to check whether it is < 3, and then use a select.
1676 if (!IsConstantIndex) {
1677 updateRegType(Extracted, nullptr, PointerSizeType, GR, MIRBuilder, *MRI);
1678
1679 auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
1680 auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
1681
1682 Register CompareRegister =
1683 MRI->createGenericVirtualRegister(LLT::scalar(1));
1684 MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1685 GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());
1686
1687 // Use G_ICMP to check if idxVReg < 3.
1688 MIRBuilder.buildICmp(
1689 CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
1690 GR->buildConstantInt(3, MIRBuilder, IndexType, true));
1691
1692 // Get constant for the default value (0 or 1 depending on which
1693 // function).
1694 Register DefaultRegister =
1695 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1696
1697 // Get a register for the selection result (possibly a new temporary one).
1698 Register SelectionResult = Call->ReturnRegister;
1699 if (PointerSize != ResultWidth) {
1700 SelectionResult =
1701 MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1702 MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1703 GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
1704 MIRBuilder.getMF());
1705 }
1706 // Create the final G_SELECT to return the extracted value or the default.
1707 MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
1708 DefaultRegister);
1709 ToTruncate = SelectionResult;
1710 } else {
1711 ToTruncate = Extracted;
1712 }
1713 }
1714 // Alter the result's bitwidth if it does not match the SizeT value extracted.
1715 if (PointerSize != ResultWidth)
1716 MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
1717 return true;
1718}
1719
1720 static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
1721 MachineIRBuilder &MIRBuilder,
1722 SPIRVGlobalRegistry *GR) {
1723 // Lookup the builtin variable record.
1724 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1725 SPIRV::BuiltIn::BuiltIn Value =
1726 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1727
1728 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1729 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1730
1731 // Build a load instruction for the builtin variable.
1732 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1733 LLT LLType;
1734 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1735 LLType =
1736 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
1737 else
1738 LLType = LLT::scalar(BitWidth);
1739
1740 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1741 LLType, Call->ReturnRegister);
1742}
1743
1744 static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
1745 MachineIRBuilder &MIRBuilder,
1746 SPIRVGlobalRegistry *GR) {
1747 // Lookup the instruction opcode in the TableGen records.
1748 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1749 unsigned Opcode =
1750 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1751
1752 switch (Opcode) {
1753 case SPIRV::OpStore:
1754 return buildAtomicInitInst(Call, MIRBuilder);
1755 case SPIRV::OpAtomicLoad:
1756 return buildAtomicLoadInst(Call, MIRBuilder, GR);
1757 case SPIRV::OpAtomicStore:
1758 return buildAtomicStoreInst(Call, MIRBuilder, GR);
1759 case SPIRV::OpAtomicCompareExchange:
1760 case SPIRV::OpAtomicCompareExchangeWeak:
1761 return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
1762 GR);
1763 case SPIRV::OpAtomicIAdd:
1764 case SPIRV::OpAtomicISub:
1765 case SPIRV::OpAtomicOr:
1766 case SPIRV::OpAtomicXor:
1767 case SPIRV::OpAtomicAnd:
1768 case SPIRV::OpAtomicExchange:
1769 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1770 case SPIRV::OpMemoryBarrier:
1771 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1772 case SPIRV::OpAtomicFlagTestAndSet:
1773 case SPIRV::OpAtomicFlagClear:
1774 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1775 default:
1776 if (Call->isSpirvOp())
1777 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1778 GR->getSPIRVTypeID(Call->ReturnType));
1779 return false;
1780 }
1781}
1782
1783 static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call,
1784 MachineIRBuilder &MIRBuilder,
1785 SPIRVGlobalRegistry *GR) {
1786 // Lookup the instruction opcode in the TableGen records.
1787 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1788 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
1789
1790 switch (Opcode) {
1791 case SPIRV::OpAtomicFAddEXT:
1792 case SPIRV::OpAtomicFMinEXT:
1793 case SPIRV::OpAtomicFMaxEXT:
1794 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1795 default:
1796 return false;
1797 }
1798}
1799
1800 static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
1801 MachineIRBuilder &MIRBuilder,
1802 SPIRVGlobalRegistry *GR) {
1803 // Lookup the instruction opcode in the TableGen records.
1804 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1805 unsigned Opcode =
1806 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1807
1808 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1809}
1810
1811 static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call,
1812 MachineIRBuilder &MIRBuilder,
1813 SPIRVGlobalRegistry *GR) {
1814 // Lookup the instruction opcode in the TableGen records.
1815 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1816 unsigned Opcode =
1817 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1818
1819 if (Opcode == SPIRV::OpGenericCastToPtrExplicit) {
1820 SPIRV::StorageClass::StorageClass ResSC =
1821 GR->getPointerStorageClass(Call->ReturnRegister);
1822 if (!isGenericCastablePtr(ResSC))
1823 return false;
1824
1825 MIRBuilder.buildInstr(Opcode)
1826 .addDef(Call->ReturnRegister)
1827 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1828 .addUse(Call->Arguments[0])
1829 .addImm(ResSC);
1830 } else {
1831 MIRBuilder.buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1832 .addDef(Call->ReturnRegister)
1833 .addUse(Call->Arguments[0]);
1834 }
1835 return true;
1836}
1837
1838static bool generateDotOrFMulInst(const StringRef DemangledCall,
1839 const SPIRV::IncomingCall *Call,
1840 MachineIRBuilder &MIRBuilder,
1841 SPIRVGlobalRegistry *GR) {
1842 if (Call->isSpirvOp())
1843 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1844 GR->getSPIRVTypeID(Call->ReturnType));
1845
1846 bool IsVec = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() ==
1847 SPIRV::OpTypeVector;
1848 // Use OpDot only in case of vector args and OpFMul in case of scalar args.
1849 uint32_t OC = IsVec ? SPIRV::OpDot : SPIRV::OpFMulS;
1850 bool IsSwapReq = false;
1851
1852 const auto *ST =
1853 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1854 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt) &&
1855 (ST->canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
1856 ST->isAtLeastSPIRVVer(VersionTuple(1, 6)))) {
1857 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1858 const SPIRV::IntegerDotProductBuiltin *IntDot =
1859 SPIRV::lookupIntegerDotProductBuiltin(Builtin->Name);
1860 if (IntDot) {
1861 OC = IntDot->Opcode;
1862 IsSwapReq = IntDot->IsSwapReq;
1863 } else if (IsVec) {
1864 // Handling "dot" and "dot_acc_sat" builtins which use vectors of
1865 // integers.
1866 LLVMContext &Ctx = MIRBuilder.getContext();
1867 SmallVector<StringRef, 10> TypeStrs;
1868 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledCall, Ctx);
1869 bool IsFirstSigned = TypeStrs[0].trim()[0] != 'u';
1870 bool IsSecondSigned = TypeStrs[1].trim()[0] != 'u';
1871
1872 if (Call->BuiltinName == "dot") {
1873 if (IsFirstSigned && IsSecondSigned)
1874 OC = SPIRV::OpSDot;
1875 else if (!IsFirstSigned && !IsSecondSigned)
1876 OC = SPIRV::OpUDot;
1877 else {
1878 OC = SPIRV::OpSUDot;
1879 if (!IsFirstSigned)
1880 IsSwapReq = true;
1881 }
1882 } else if (Call->BuiltinName == "dot_acc_sat") {
1883 if (IsFirstSigned && IsSecondSigned)
1884 OC = SPIRV::OpSDotAccSat;
1885 else if (!IsFirstSigned && !IsSecondSigned)
1886 OC = SPIRV::OpUDotAccSat;
1887 else {
1888 OC = SPIRV::OpSUDotAccSat;
1889 if (!IsFirstSigned)
1890 IsSwapReq = true;
1891 }
1892 }
1893 }
1894 }
1895
1896 MachineInstrBuilder MIB = MIRBuilder.buildInstr(OC)
1897 .addDef(Call->ReturnRegister)
1898 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1899
1900 if (IsSwapReq) {
1901 MIB.addUse(Call->Arguments[1]);
1902 MIB.addUse(Call->Arguments[0]);
1903 // Needed for the dot_acc_sat* builtins.
1904 for (size_t i = 2; i < Call->Arguments.size(); ++i)
1905 MIB.addUse(Call->Arguments[i]);
1906 } else {
1907 for (size_t i = 0; i < Call->Arguments.size(); ++i)
1908 MIB.addUse(Call->Arguments[i]);
1909 }
1910
1911 // Add Packed Vector Format for Integer dot product builtins if arguments are
1912 // scalar.
1913 if (!IsVec && OC != SPIRV::OpFMulS)
1914 MIB.addImm(SPIRV::PackedVectorFormat4x8Bit);
1915
1916 return true;
1917}
1918
1919 static bool generateWaveInst(const SPIRV::IncomingCall *Call,
1920 MachineIRBuilder &MIRBuilder,
1921 SPIRVGlobalRegistry *GR) {
1922 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1923 SPIRV::BuiltIn::BuiltIn Value =
1924 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1925
1926 // For now, we only support a single Wave intrinsic with a single return type.
1927 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1928 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1929
1930 return buildBuiltinVariableLoad(
1931 MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1932 /* isConst= */ false, /* LinkageType= */ std::nullopt);
1933}
1934
1935// We expect a builtin
1936 // Name(ptr sret([RetType]) %result, Type %operand1, Type %operand2)
1937 // where %result is a pointer to where the result of the builtin execution
1938 // is to be stored, and generate the following instructions:
1939 // Res = Opcode RetType Operand1 Operand2
1940// OpStore RetVariable Res
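// An illustrative sketch (assuming the __spirv_IAddCarry wrapper with 32-bit
// operands; the value names are invented for the example):
//   call void @__spirv_IAddCarry(ptr sret(%pair) %res, i32 %a, i32 %b)
// is lowered to:
//   %r = OpIAddCarry %pair %a %b
//   OpStore %res %r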
1941 static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call,
1942 MachineIRBuilder &MIRBuilder,
1943 SPIRVGlobalRegistry *GR) {
1944 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1945 unsigned Opcode =
1946 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1947
1948 Register SRetReg = Call->Arguments[0];
1949 SPIRVType *PtrRetType = GR->getSPIRVTypeForVReg(SRetReg);
1950 SPIRVType *RetType = GR->getPointeeType(PtrRetType);
1951 if (!RetType)
1952 report_fatal_error("The first parameter must be a pointer");
1953 if (RetType->getOpcode() != SPIRV::OpTypeStruct)
1954 report_fatal_error("Expected struct type result for the arithmetic with "
1955 "overflow builtins");
1956
1957 SPIRVType *OpType1 = GR->getSPIRVTypeForVReg(Call->Arguments[1]);
1958 SPIRVType *OpType2 = GR->getSPIRVTypeForVReg(Call->Arguments[2]);
1959 if (!OpType1 || !OpType2 || OpType1 != OpType2)
1960 report_fatal_error("Operands must have the same type");
1961 if (OpType1->getOpcode() == SPIRV::OpTypeVector)
1962 switch (Opcode) {
1963 case SPIRV::OpIAddCarryS:
1964 Opcode = SPIRV::OpIAddCarryV;
1965 break;
1966 case SPIRV::OpISubBorrowS:
1967 Opcode = SPIRV::OpISubBorrowV;
1968 break;
1969 }
1970
1971 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1972 Register ResReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1973 if (const TargetRegisterClass *DstRC =
1974 MRI->getRegClassOrNull(Call->Arguments[1])) {
1975 MRI->setRegClass(ResReg, DstRC);
1976 MRI->setType(ResReg, MRI->getType(Call->Arguments[1]));
1977 } else {
1978 MRI->setType(ResReg, LLT::scalar(64));
1979 }
1980 GR->assignSPIRVTypeToVReg(RetType, ResReg, MIRBuilder.getMF());
1981 MIRBuilder.buildInstr(Opcode)
1982 .addDef(ResReg)
1983 .addUse(GR->getSPIRVTypeID(RetType))
1984 .addUse(Call->Arguments[1])
1985 .addUse(Call->Arguments[2]);
1986 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(SRetReg).addUse(ResReg);
1987 return true;
1988}
1989
1990 static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
1991 MachineIRBuilder &MIRBuilder,
1992 SPIRVGlobalRegistry *GR) {
1993 // Lookup the builtin record.
1994 SPIRV::BuiltIn::BuiltIn Value =
1995 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1996 const bool IsDefaultOne = (Value == SPIRV::BuiltIn::GlobalSize ||
1997 Value == SPIRV::BuiltIn::NumWorkgroups ||
1998 Value == SPIRV::BuiltIn::WorkgroupSize ||
1999 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
2000 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefaultOne ? 1 : 0);
2001}
2002
2003 static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
2004 MachineIRBuilder &MIRBuilder,
2005 SPIRVGlobalRegistry *GR) {
2006 // Lookup the image size query component number in the TableGen records.
2007 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2008 uint32_t Component =
2009 SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
2010 // Query result may either be a vector or a scalar. If return type is not a
2011 // vector, expect only a single size component. Otherwise get the number of
2012 // expected components.
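// E.g. (sketch) get_image_width on an image2d_t expects one component while
// OpImageQuerySizeLod on a 2D image yields two, so the extra component is
// peeled off with OpCompositeExtract below.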
2013 unsigned NumExpectedRetComponents =
2014 Call->ReturnType->getOpcode() == SPIRV::OpTypeVector
2015 ? Call->ReturnType->getOperand(2).getImm()
2016 : 1;
2017 // Get the actual number of query result/size components.
2018 SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2019 unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
2020 Register QueryResult = Call->ReturnRegister;
2021 SPIRVType *QueryResultType = Call->ReturnType;
2022 if (NumExpectedRetComponents != NumActualRetComponents) {
2023 unsigned Bitwidth = Call->ReturnType->getOpcode() == SPIRV::OpTypeInt
2024 ? Call->ReturnType->getOperand(1).getImm()
2025 : 32;
2026 QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
2027 LLT::fixed_vector(NumActualRetComponents, Bitwidth));
2028 MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::vIDRegClass);
2029 SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(Bitwidth, MIRBuilder);
2030 QueryResultType = GR->getOrCreateSPIRVVectorType(
2031 IntTy, NumActualRetComponents, MIRBuilder, true);
2032 GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
2033 }
2034 bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
2035 unsigned Opcode =
2036 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
2037 auto MIB = MIRBuilder.buildInstr(Opcode)
2038 .addDef(QueryResult)
2039 .addUse(GR->getSPIRVTypeID(QueryResultType))
2040 .addUse(Call->Arguments[0]);
2041 if (!IsDimBuf)
2042 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Lod id.
2043 if (NumExpectedRetComponents == NumActualRetComponents)
2044 return true;
2045 if (NumExpectedRetComponents == 1) {
2046 // Only 1 component is expected, build OpCompositeExtract instruction.
2047 unsigned ExtractedComposite =
2048 Component == 3 ? NumActualRetComponents - 1 : Component;
2049 assert(ExtractedComposite < NumActualRetComponents &&
2050 "Invalid composite index!");
2051 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2052 SPIRVType *NewType = nullptr;
2053 if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
2054 Register NewTypeReg = QueryResultType->getOperand(1).getReg();
2055 if (TypeReg != NewTypeReg &&
2056 (NewType = GR->getSPIRVTypeForVReg(NewTypeReg)) != nullptr)
2057 TypeReg = NewTypeReg;
2058 }
2059 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
2060 .addDef(Call->ReturnRegister)
2061 .addUse(TypeReg)
2062 .addUse(QueryResult)
2063 .addImm(ExtractedComposite);
2064 if (NewType != nullptr)
2065 updateRegType(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2066 MIRBuilder.getMF().getRegInfo());
2067 } else {
2068 // More than 1 component is expected, fill a new vector.
2069 auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
2070 .addDef(Call->ReturnRegister)
2071 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2072 .addUse(QueryResult)
2073 .addUse(QueryResult);
2074 for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
2075 MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
2076 }
2077 return true;
2078}
2079
2080 static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
2081 MachineIRBuilder &MIRBuilder,
2082 SPIRVGlobalRegistry *GR) {
2083 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
2084 "Image samples query result must be of int type!");
2085
2086 // Lookup the instruction opcode in the TableGen records.
2087 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2088 unsigned Opcode =
2089 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2090
2091 Register Image = Call->Arguments[0];
2092 SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
2093 GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
2094 (void)ImageDimensionality;
2095
2096 switch (Opcode) {
2097 case SPIRV::OpImageQuerySamples:
2098 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
2099 "Image must be of 2D dimensionality");
2100 break;
2101 case SPIRV::OpImageQueryLevels:
2102 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
2103 ImageDimensionality == SPIRV::Dim::DIM_2D ||
2104 ImageDimensionality == SPIRV::Dim::DIM_3D ||
2105 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
2106 "Image must be of 1D/2D/3D/Cube dimensionality");
2107 break;
2108 }
2109
2110 MIRBuilder.buildInstr(Opcode)
2111 .addDef(Call->ReturnRegister)
2112 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2113 .addUse(Image);
2114 return true;
2115}
2116
2117// TODO: Move to TableGen.
2118static SPIRV::SamplerAddressingMode::SamplerAddressingMode
2119 getSamplerAddressingModeFromBitmask(unsigned Bitmask) {
2120 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
2121 case SPIRV::CLK_ADDRESS_CLAMP:
2122 return SPIRV::SamplerAddressingMode::Clamp;
2123 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
2124 return SPIRV::SamplerAddressingMode::ClampToEdge;
2125 case SPIRV::CLK_ADDRESS_REPEAT:
2126 return SPIRV::SamplerAddressingMode::Repeat;
2127 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
2128 return SPIRV::SamplerAddressingMode::RepeatMirrored;
2129 case SPIRV::CLK_ADDRESS_NONE:
2130 return SPIRV::SamplerAddressingMode::None;
2131 default:
2132 report_fatal_error("Unknown CL address mode");
2133 }
2134}
2135
2136static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
2137 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
2138}
2139
2140static SPIRV::SamplerFilterMode::SamplerFilterMode
2141 getSamplerFilterModeFromBitmask(unsigned Bitmask) {
2142 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
2143 return SPIRV::SamplerFilterMode::Linear;
2144 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
2145 return SPIRV::SamplerFilterMode::Nearest;
2146 return SPIRV::SamplerFilterMode::Nearest;
2147}
2148
2149static bool generateReadImageInst(const StringRef DemangledCall,
2150 const SPIRV::IncomingCall *Call,
2151 MachineIRBuilder &MIRBuilder,
2152 SPIRVGlobalRegistry *GR) {
2153 if (Call->isSpirvOp())
2154 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageRead, Call,
2155 GR->getSPIRVTypeID(Call->ReturnType));
2156 Register Image = Call->Arguments[0];
2157 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2158 bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
2159 bool HasMsaa = DemangledCall.contains_insensitive("msaa");
2160 if (HasOclSampler) {
2161 Register Sampler = Call->Arguments[1];
2162
2163 if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
2164 getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
2165 uint64_t SamplerMask = getIConstVal(Sampler, MRI);
2166 Sampler = GR->buildConstantSampler(
2167 Register(), getSamplerAddressingModeFromBitmask(SamplerMask),
2168 getSamplerParamFromBitmask(SamplerMask),
2169 getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder);
2170 }
2171 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2172 SPIRVType *SampledImageType =
2173 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2174 Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2175
2176 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2177 .addDef(SampledImage)
2178 .addUse(GR->getSPIRVTypeID(SampledImageType))
2179 .addUse(Image)
2180 .addUse(Sampler);
2181
2182 Register Lod = GR->buildConstantFP(APFloat::getZero(APFloat::IEEEsingle()),
2183 MIRBuilder);
2184
2185 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
2186 SPIRVType *TempType =
2187 GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder, true);
2188 Register TempRegister =
2189 MRI->createGenericVirtualRegister(GR->getRegType(TempType));
2190 MRI->setRegClass(TempRegister, GR->getRegClass(TempType));
2191 GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
2192 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2193 .addDef(TempRegister)
2194 .addUse(GR->getSPIRVTypeID(TempType))
2195 .addUse(SampledImage)
2196 .addUse(Call->Arguments[2]) // Coordinate.
2197 .addImm(SPIRV::ImageOperand::Lod)
2198 .addUse(Lod);
2199 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
2200 .addDef(Call->ReturnRegister)
2201 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2202 .addUse(TempRegister)
2203 .addImm(0);
2204 } else {
2205 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2206 .addDef(Call->ReturnRegister)
2207 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2208 .addUse(SampledImage)
2209 .addUse(Call->Arguments[2]) // Coordinate.
2210 .addImm(SPIRV::ImageOperand::Lod)
2211 .addUse(Lod);
2212 }
2213 } else if (HasMsaa) {
2214 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2215 .addDef(Call->ReturnRegister)
2216 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2217 .addUse(Image)
2218 .addUse(Call->Arguments[1]) // Coordinate.
2219 .addImm(SPIRV::ImageOperand::Sample)
2220 .addUse(Call->Arguments[2]);
2221 } else {
2222 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2223 .addDef(Call->ReturnRegister)
2224 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2225 .addUse(Image)
2226 .addUse(Call->Arguments[1]); // Coordinate.
2227 }
2228 return true;
2229}
2230
2231 static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
2232 MachineIRBuilder &MIRBuilder,
2233 SPIRVGlobalRegistry *GR) {
2234 if (Call->isSpirvOp())
2235 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageWrite, Call,
2236 Register(0));
2237 MIRBuilder.buildInstr(SPIRV::OpImageWrite)
2238 .addUse(Call->Arguments[0]) // Image.
2239 .addUse(Call->Arguments[1]) // Coordinate.
2240 .addUse(Call->Arguments[2]); // Texel.
2241 return true;
2242}
2243
2244static bool generateSampleImageInst(const StringRef DemangledCall,
2245 const SPIRV::IncomingCall *Call,
2246 MachineIRBuilder &MIRBuilder,
2247 SPIRVGlobalRegistry *GR) {
2248 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2249 if (Call->Builtin->Name.contains_insensitive(
2250 "__translate_sampler_initializer")) {
2251 // Build sampler literal.
2252 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
2253 Register Sampler = GR->buildConstantSampler(
2254 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
2255 getSamplerParamFromBitmask(Bitmask),
2256 getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder);
2257 return Sampler.isValid();
2258 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
2259 // Create OpSampledImage.
2260 Register Image = Call->Arguments[0];
2261 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2262 SPIRVType *SampledImageType =
2263 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2264 Register SampledImage =
2265 Call->ReturnRegister.isValid()
2266 ? Call->ReturnRegister
2267 : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2268 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2269 .addDef(SampledImage)
2270 .addUse(GR->getSPIRVTypeID(SampledImageType))
2271 .addUse(Image)
2272 .addUse(Call->Arguments[1]); // Sampler.
2273 return true;
2274 } else if (Call->Builtin->Name.contains_insensitive(
2275 "__spirv_ImageSampleExplicitLod")) {
2276 // Sample an image using an explicit level of detail.
2277 std::string ReturnType = DemangledCall.str();
2278 if (DemangledCall.contains("_R")) {
2279 ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
2280 ReturnType = ReturnType.substr(0, ReturnType.find('('));
2281 }
2282 SPIRVType *Type =
2283 Call->ReturnType
2284 ? Call->ReturnType
2285 : GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder, true);
2286 if (!Type) {
2287 std::string DiagMsg =
2288 "Unable to recognize SPIRV type name: " + ReturnType;
2289 report_fatal_error(DiagMsg.c_str());
2290 }
2291 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2292 .addDef(Call->ReturnRegister)
2293 .addUse(GR->getSPIRVTypeID(Type))
2294 .addUse(Call->Arguments[0]) // Image.
2295 .addUse(Call->Arguments[1]) // Coordinate.
2296 .addImm(SPIRV::ImageOperand::Lod)
2297 .addUse(Call->Arguments[3]);
2298 return true;
2299 }
2300 return false;
2301}
2302
2303 static bool generateSelectInst(const SPIRV::IncomingCall *Call,
2304 MachineIRBuilder &MIRBuilder) {
2305 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
2306 Call->Arguments[1], Call->Arguments[2]);
2307 return true;
2308}
2309
2310 static bool generateConstructInst(const SPIRV::IncomingCall *Call,
2311 MachineIRBuilder &MIRBuilder,
2312 SPIRVGlobalRegistry *GR) {
2313 createContinuedInstructions(MIRBuilder, SPIRV::OpCompositeConstruct, 3,
2314 SPIRV::OpCompositeConstructContinuedINTEL,
2315 Call->Arguments, Call->ReturnRegister,
2316 GR->getSPIRVTypeID(Call->ReturnType));
2317 return true;
2318}
2319
2320 static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call,
2321 MachineIRBuilder &MIRBuilder,
2322 SPIRVGlobalRegistry *GR) {
2323 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2324 unsigned Opcode =
2325 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2326 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
2327 Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
2328 Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
2329 unsigned ArgSz = Call->Arguments.size();
2330 unsigned LiteralIdx = 0;
2331 switch (Opcode) {
2332 // Memory operand is optional and is literal.
2333 case SPIRV::OpCooperativeMatrixLoadKHR:
2334 LiteralIdx = ArgSz > 3 ? 3 : 0;
2335 break;
2336 case SPIRV::OpCooperativeMatrixStoreKHR:
2337 LiteralIdx = ArgSz > 4 ? 4 : 0;
2338 break;
2339 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2340 LiteralIdx = ArgSz > 7 ? 7 : 0;
2341 break;
2342 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2343 LiteralIdx = ArgSz > 8 ? 8 : 0;
2344 break;
2345 // Cooperative Matrix Operands operand is optional and is literal.
2346 case SPIRV::OpCooperativeMatrixMulAddKHR:
2347 LiteralIdx = ArgSz > 3 ? 3 : 0;
2348 break;
2349 };
2350
2351 SmallVector<uint32_t, 1> ImmArgs;
2352 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2353 if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2354 const uint32_t CacheLevel = getConstFromIntrinsic(Call->Arguments[3], MRI);
2355 auto MIB = MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
2356 .addUse(Call->Arguments[0]) // pointer
2357 .addUse(Call->Arguments[1]) // rows
2358 .addUse(Call->Arguments[2]) // columns
2359 .addImm(CacheLevel) // cache level
2360 .addUse(Call->Arguments[4]); // memory layout
2361 if (ArgSz > 5)
2362 MIB.addUse(Call->Arguments[5]); // stride
2363 if (ArgSz > 6) {
2364 const uint32_t MemOp = getConstFromIntrinsic(Call->Arguments[6], MRI);
2365 MIB.addImm(MemOp); // memory operand
2366 }
2367 return true;
2368 }
2369 if (LiteralIdx > 0)
2370 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
2371 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2372 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2373 SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2374 if (!CoopMatrType)
2375 report_fatal_error("Can't find a register's type definition");
2376 MIRBuilder.buildInstr(Opcode)
2377 .addDef(Call->ReturnRegister)
2378 .addUse(TypeReg)
2379 .addUse(CoopMatrType->getOperand(0).getReg());
2380 return true;
2381 }
2382 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2383 IsSet ? TypeReg : Register(0), ImmArgs);
2384}
2385
2386 static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
2387 MachineIRBuilder &MIRBuilder,
2388 SPIRVGlobalRegistry *GR) {
2389 // Lookup the instruction opcode in the TableGen records.
2390 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2391 unsigned Opcode =
2392 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2393 const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2394
2395 switch (Opcode) {
2396 case SPIRV::OpSpecConstant: {
2397 // Build the SpecID decoration.
2398 unsigned SpecId =
2399 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
2400 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
2401 {SpecId});
2402 // Determine the constant MI.
2403 Register ConstRegister = Call->Arguments[1];
2404 const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
2405 assert(Const &&
2406 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2407 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2408 "Argument should be either an int or floating-point constant");
2409 // Determine the opcode and built the OpSpec MI.
2410 const MachineOperand &ConstOperand = Const->getOperand(1);
2411 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2412 assert(ConstOperand.isCImm() && "Int constant operand is expected");
2413 Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
2414 ? SPIRV::OpSpecConstantTrue
2415 : SPIRV::OpSpecConstantFalse;
2416 }
2417 auto MIB = MIRBuilder.buildInstr(Opcode)
2418 .addDef(Call->ReturnRegister)
2419 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2420
2421 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2422 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2423 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
2424 else
2425 addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
2426 }
2427 return true;
2428 }
2429 case SPIRV::OpSpecConstantComposite: {
2430 createContinuedInstructions(MIRBuilder, Opcode, 3,
2431 SPIRV::OpSpecConstantCompositeContinuedINTEL,
2432 Call->Arguments, Call->ReturnRegister,
2433 GR->getSPIRVTypeID(Call->ReturnType));
2434 return true;
2435 }
2436 default:
2437 return false;
2438 }
2439}
2440
2441 static bool generateExtendedBitOpsInst(const SPIRV::IncomingCall *Call,
2442 MachineIRBuilder &MIRBuilder,
2443 SPIRVGlobalRegistry *GR) {
2444 // Lookup the instruction opcode in the TableGen records.
2445 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2446 unsigned Opcode =
2447 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2448
2449 return buildExtendedBitOpsInst(Call, Opcode, MIRBuilder, GR);
2450}
2451
2452 static bool generateBindlessImageINTELInst(const SPIRV::IncomingCall *Call,
2453 MachineIRBuilder &MIRBuilder,
2454 SPIRVGlobalRegistry *GR) {
2455 // Lookup the instruction opcode in the TableGen records.
2456 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2457 unsigned Opcode =
2458 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2459
2460 return buildBindlessImageINTELInst(Call, Opcode, MIRBuilder, GR);
2461}
2462
2464 MachineIRBuilder &MIRBuilder,
2465 SPIRVGlobalRegistry *GR) {
2466 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2467 unsigned Opcode =
2468 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2469 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
2470}
2471
2472 static bool buildAPFixedPointInst(const SPIRV::IncomingCall *Call,
2473 unsigned Opcode, MachineIRBuilder &MIRBuilder,
2474 SPIRVGlobalRegistry *GR) {
2475 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2476 SmallVector<uint32_t, 5> ImmArgs;
2477 Register InputReg = Call->Arguments[0];
2478 const Type *RetTy = GR->getTypeForSPIRVType(Call->ReturnType);
2479 bool IsSRet = RetTy->isVoidTy();
2480
2481 if (IsSRet) {
2482 const LLT ValTy = MRI->getType(InputReg);
2483 Register ActualRetValReg = MRI->createGenericVirtualRegister(ValTy);
2484 SPIRVType *InstructionType =
2485 GR->getPointeeType(GR->getSPIRVTypeForVReg(InputReg));
2486 InputReg = Call->Arguments[1];
2487 auto InputType = GR->getTypeForSPIRVType(GR->getSPIRVTypeForVReg(InputReg));
2488 Register PtrInputReg;
2489 if (InputType->getTypeID() == llvm::Type::TypeID::TypedPointerTyID) {
2490 LLT InputLLT = MRI->getType(InputReg);
2491 PtrInputReg = MRI->createGenericVirtualRegister(InputLLT);
2492 SPIRVType *PtrType =
2493 GR->getPointeeType(GR->getSPIRVTypeForVReg(InputReg));
2494 MachineMemOperand *MMO1 = MIRBuilder.getMF().getMachineMemOperand(
2495 MachinePointerInfo(), MachineMemOperand::MOLoad,
2496 InputLLT.getSizeInBytes(), Align(4));
2497 MIRBuilder.buildLoad(PtrInputReg, InputReg, *MMO1);
2498 MRI->setRegClass(PtrInputReg, &SPIRV::iIDRegClass);
2499 GR->assignSPIRVTypeToVReg(PtrType, PtrInputReg, MIRBuilder.getMF());
2500 }
2501
2502 for (unsigned index = 2; index < 7; index++) {
2503 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[index], MRI));
2504 }
2505
2506 // Emit the instruction
2507 auto MIB = MIRBuilder.buildInstr(Opcode)
2508 .addDef(ActualRetValReg)
2509 .addUse(GR->getSPIRVTypeID(InstructionType));
2510 if (PtrInputReg)
2511 MIB.addUse(PtrInputReg);
2512 else
2513 MIB.addUse(InputReg);
2514
2515 for (uint32_t Imm : ImmArgs)
2516 MIB.addImm(Imm);
2517 unsigned Size = ValTy.getSizeInBytes();
2518 // Store result to the pointer passed in Arg[0]
2519 MachineMemOperand *MMO = MIRBuilder.getMF().getMachineMemOperand(
2520 MachinePointerInfo(), MachineMemOperand::MOStore, Size, Align(4));
2521 MRI->setRegClass(ActualRetValReg, &SPIRV::pIDRegClass);
2522 MIRBuilder.buildStore(ActualRetValReg, Call->Arguments[0], *MMO);
2523 return true;
2524 } else {
2525 for (unsigned index = 1; index < 6; index++)
2526 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[index], MRI));
2527
2528 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2529 GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
2530 }
2531}
2532
2533 static bool generateAPFixedPointInst(const SPIRV::IncomingCall *Call,
2534 MachineIRBuilder &MIRBuilder,
2535 SPIRVGlobalRegistry *GR) {
2536 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2537 unsigned Opcode =
2538 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2539
2540 return buildAPFixedPointInst(Call, Opcode, MIRBuilder, GR);
2541}
2542
2543static bool
2544 generateTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call,
2545 MachineIRBuilder &MIRBuilder,
2546 SPIRVGlobalRegistry *GR) {
2547 // Lookup the instruction opcode in the TableGen records.
2548 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2549 unsigned Opcode =
2550 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2551
2552 return buildTernaryBitwiseFunctionINTELInst(Call, Opcode, MIRBuilder, GR);
2553}
2554
2555 static bool generateImageChannelDataTypeInst(const SPIRV::IncomingCall *Call,
2556 MachineIRBuilder &MIRBuilder,
2557 SPIRVGlobalRegistry *GR) {
2558 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2559 unsigned Opcode =
2560 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2561
2562 return buildImageChannelDataTypeInst(Call, Opcode, MIRBuilder, GR);
2563}
2564
2565 static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
2566 MachineIRBuilder &MIRBuilder,
2567 SPIRVGlobalRegistry *GR) {
2568 // Lookup the instruction opcode in the TableGen records.
2569 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2570 unsigned Opcode =
2571 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2572
2573 return build2DBlockIOINTELInst(Call, Opcode, MIRBuilder, GR);
2574}
2575
2576 static bool generatePipeInst(const SPIRV::IncomingCall *Call,
2577 MachineIRBuilder &MIRBuilder,
2578 SPIRVGlobalRegistry *GR) {
2579 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2580 unsigned Opcode =
2581 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2582
2583 unsigned Scope = SPIRV::Scope::Workgroup;
2584 if (Builtin->Name.contains("sub_group"))
2585 Scope = SPIRV::Scope::Subgroup;
2586
2587 return buildPipeInst(Call, Opcode, Scope, MIRBuilder, GR);
2588}
2589
2590 static bool generatePredicatedLoadStoreInst(const SPIRV::IncomingCall *Call,
2591 MachineIRBuilder &MIRBuilder,
2592 SPIRVGlobalRegistry *GR) {
2593 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2594 unsigned Opcode =
2595 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2596
2597 bool IsSet = Opcode != SPIRV::OpPredicatedStoreINTEL;
2598 unsigned ArgSz = Call->Arguments.size();
2599 SmallVector<uint32_t, 1> ImmArgs;
2600 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2601 // Memory operand is optional and is literal.
2602 if (ArgSz > 3)
2603 ImmArgs.push_back(
2604 getConstFromIntrinsic(Call->Arguments[/*Literal index*/ 3], MRI));
2605
2606 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2607 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2608 IsSet ? TypeReg : Register(0), ImmArgs);
2609}
2610
2611 static bool buildNDRange(const SPIRV::IncomingCall *Call,
2612 MachineIRBuilder &MIRBuilder,
2613 SPIRVGlobalRegistry *GR) {
2614 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2615 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2616 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
2617 PtrType->getOperand(2).isReg());
2618 Register TypeReg = PtrType->getOperand(2).getReg();
2619 SPIRVType *StructType = GR->getSPIRVTypeForVReg(TypeReg);
2620 MachineFunction &MF = MIRBuilder.getMF();
2621 Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2622 GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
2623 // Skip the first arg, it's the destination pointer. OpBuildNDRange takes
2624 // three other arguments, so pass a zero constant for any that are absent.
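// E.g. (sketch) the OpenCL forms
//   ndrange_t ndrange_2D(const size_t[2]);
//   ndrange_t ndrange_2D(const size_t[2], const size_t[2]);
// reach this point as 2- and 3-argument calls (the sret pointer counts as the
// first argument); operands that are absent fall back to a zero constant.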
2625 unsigned NumArgs = Call->Arguments.size();
2626 assert(NumArgs >= 2);
2627 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
2628 Register LocalWorkSize =
2629 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
2630 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
2631 if (NumArgs < 4) {
2632 Register Const;
2633 SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
2634 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
2635 MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
2636 assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
2637 DefInstr->getOperand(3).isReg());
2638 Register GWSPtr = DefInstr->getOperand(3).getReg();
2639 // TODO: Maybe simplify generation of the type of the fields.
2640 unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
2641 unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
2642 Type *BaseTy = IntegerType::get(MF.getFunction().getContext(), BitWidth);
2643 Type *FieldTy = ArrayType::get(BaseTy, Size);
2644 SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(
2645 FieldTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
2646 GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2647 GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
2648 MIRBuilder.buildInstr(SPIRV::OpLoad)
2649 .addDef(GlobalWorkSize)
2650 .addUse(GR->getSPIRVTypeID(SpvFieldTy))
2651 .addUse(GWSPtr);
2652 const SPIRVSubtarget &ST =
2653 cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
2654 Const = GR->getOrCreateConstIntArray(0, Size, *MIRBuilder.getInsertPt(),
2655 SpvFieldTy, *ST.getInstrInfo());
2656 } else {
2657 Const = GR->buildConstantInt(0, MIRBuilder, SpvTy, true);
2658 }
2659 if (!LocalWorkSize.isValid())
2660 LocalWorkSize = Const;
2661 if (!GlobalWorkOffset.isValid())
2662 GlobalWorkOffset = Const;
2663 }
2664 assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
2665 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
2666 .addDef(TmpReg)
2667 .addUse(TypeReg)
2668 .addUse(GlobalWorkSize)
2669 .addUse(LocalWorkSize)
2670 .addUse(GlobalWorkOffset);
2671 return MIRBuilder.buildInstr(SPIRV::OpStore)
2672 .addUse(Call->Arguments[0])
2673 .addUse(TmpReg);
2674}
2675
2676 // TODO: maybe move to the global registry.
2677static SPIRVType *
2678 getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder,
2679 SPIRVGlobalRegistry *GR) {
2680 LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
2681 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2682 Type *PtrType = PointerType::get(Context, SC1);
2683 return GR->getOrCreateSPIRVType(PtrType, MIRBuilder,
2684 SPIRV::AccessQualifier::ReadWrite, true);
2685}
2686
2687 static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
2688 MachineIRBuilder &MIRBuilder,
2689 SPIRVGlobalRegistry *GR) {
2690 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2691 const DataLayout &DL = MIRBuilder.getDataLayout();
2692 bool IsSpirvOp = Call->isSpirvOp();
2693 bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
2694 const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
2695
2696 // Build the vararg instructions before OpEnqueueKernel.
2697 // Local sizes arguments: Sizes of block invoke arguments. Clang generates
2698 // local size operands as an array, so we need to unpack them.
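// E.g. (sketch) an enqueue_kernel call whose trailing varargs are two
// size_t local sizes stored in a size_t[2] array is unpacked below into two
// spv_gep operands, one pointer per local-size value.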
2699 SmallVector<Register, 16> LocalSizes;
2700 if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
2701 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2702 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
2703 MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
2704 assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
2705 GepMI->getOperand(3).isReg());
2706 Register ArrayReg = GepMI->getOperand(3).getReg();
2707 MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
2708 const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
2709 assert(LocalSizeTy && "Local size type is expected");
2710 const uint64_t LocalSizeNum =
2711 cast<ArrayType>(LocalSizeTy)->getNumElements();
2712 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2713 const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
2714 const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
2715 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2716 for (unsigned I = 0; I < LocalSizeNum; ++I) {
2717 Register Reg = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
2718 MRI->setType(Reg, LLType);
2719 GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
2720 auto GEPInst = MIRBuilder.buildIntrinsic(
2721 Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
2722 GEPInst
2723 .addImm(GepMI->getOperand(2).getImm()) // In bound.
2724 .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
2725 .addUse(buildConstantIntReg32(0, MIRBuilder, GR)) // Indices.
2726 .addUse(buildConstantIntReg32(I, MIRBuilder, GR));
2727 LocalSizes.push_back(Reg);
2728 }
2729 }
2730
2731 // SPIRV OpEnqueueKernel instruction has 10+ arguments.
2732 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
2733 .addDef(Call->ReturnRegister)
2734 .addUse(GR->getSPIRVTypeID(Int32Ty));
2735
2736 // Copy all arguments before block invoke function pointer.
2737 const unsigned BlockFIdx = HasEvents ? 6 : 3;
2738 for (unsigned i = 0; i < BlockFIdx; i++)
2739 MIB.addUse(Call->Arguments[i]);
2740
2741 // If there are no event arguments in the original call, add dummy ones.
2742 if (!HasEvents) {
2743 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Dummy num events.
2744 Register NullPtr = GR->getOrCreateConstNullPtr(
2745 MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
2746 MIB.addUse(NullPtr); // Dummy wait events.
2747 MIB.addUse(NullPtr); // Dummy ret event.
2748 }
2749
2750 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
2751 assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
2752 // Invoke: Pointer to invoke function.
2753 MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
2754
2755 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
2756 // Param: Pointer to block literal.
2757 MIB.addUse(BlockLiteralReg);
2758
2759 Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
2760 // TODO: these numbers should be obtained from block literal structure.
2761 // Param Size: Size of block literal structure.
2762 MIB.addUse(buildConstantIntReg32(DL.getTypeStoreSize(PType), MIRBuilder, GR));
2763 // Param Alignment: Alignment of block literal structure.
2764 MIB.addUse(buildConstantIntReg32(DL.getPrefTypeAlign(PType).value(),
2765 MIRBuilder, GR));
2766
2767 for (unsigned i = 0; i < LocalSizes.size(); i++)
2768 MIB.addUse(LocalSizes[i]);
2769 return true;
2770}
2771
2772 static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
2773 MachineIRBuilder &MIRBuilder,
2774 SPIRVGlobalRegistry *GR) {
2775 // Lookup the instruction opcode in the TableGen records.
2776 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2777 unsigned Opcode =
2778 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2779
2780 switch (Opcode) {
2781 case SPIRV::OpRetainEvent:
2782 case SPIRV::OpReleaseEvent:
2783 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
2784 case SPIRV::OpCreateUserEvent:
2785 case SPIRV::OpGetDefaultQueue:
2786 return MIRBuilder.buildInstr(Opcode)
2787 .addDef(Call->ReturnRegister)
2788 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2789 case SPIRV::OpIsValidEvent:
2790 return MIRBuilder.buildInstr(Opcode)
2791 .addDef(Call->ReturnRegister)
2792 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2793 .addUse(Call->Arguments[0]);
2794 case SPIRV::OpSetUserEventStatus:
2795 return MIRBuilder.buildInstr(Opcode)
2796 .addUse(Call->Arguments[0])
2797 .addUse(Call->Arguments[1]);
2798 case SPIRV::OpCaptureEventProfilingInfo:
2799 return MIRBuilder.buildInstr(Opcode)
2800 .addUse(Call->Arguments[0])
2801 .addUse(Call->Arguments[1])
2802 .addUse(Call->Arguments[2]);
2803 case SPIRV::OpBuildNDRange:
2804 return buildNDRange(Call, MIRBuilder, GR);
2805 case SPIRV::OpEnqueueKernel:
2806 return buildEnqueueKernel(Call, MIRBuilder, GR);
2807 default:
2808 return false;
2809 }
2810}
2811
2812 static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
2813 MachineIRBuilder &MIRBuilder,
2814 SPIRVGlobalRegistry *GR) {
2815 // Lookup the instruction opcode in the TableGen records.
2816 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2817 unsigned Opcode =
2818 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2819
2820 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2821 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2822 if (Call->isSpirvOp())
2823 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2824 IsSet ? TypeReg : Register(0));
2825
2826 auto Scope = buildConstantIntReg32(SPIRV::Scope::Workgroup, MIRBuilder, GR);
2827
2828 switch (Opcode) {
2829 case SPIRV::OpGroupAsyncCopy: {
2830 SPIRVType *NewType =
2831 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2832 ? nullptr
2833 : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder, true);
2834 Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
2835 unsigned NumArgs = Call->Arguments.size();
2836 Register EventReg = Call->Arguments[NumArgs - 1];
2837 bool Res = MIRBuilder.buildInstr(Opcode)
2838 .addDef(Call->ReturnRegister)
2839 .addUse(TypeReg)
2840 .addUse(Scope)
2841 .addUse(Call->Arguments[0])
2842 .addUse(Call->Arguments[1])
2843 .addUse(Call->Arguments[2])
2844 .addUse(Call->Arguments.size() > 4
2845 ? Call->Arguments[3]
2846 : buildConstantIntReg32(1, MIRBuilder, GR))
2847 .addUse(EventReg);
2848 if (NewType != nullptr)
2849 updateRegType(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2850 MIRBuilder.getMF().getRegInfo());
2851 return Res;
2852 }
2853 case SPIRV::OpGroupWaitEvents:
2854 return MIRBuilder.buildInstr(Opcode)
2855 .addUse(Scope)
2856 .addUse(Call->Arguments[0])
2857 .addUse(Call->Arguments[1]);
2858 default:
2859 return false;
2860 }
2861}
2862
2863static bool generateConvertInst(const StringRef DemangledCall,
2864 const SPIRV::IncomingCall *Call,
2865 MachineIRBuilder &MIRBuilder,
2866 SPIRVGlobalRegistry *GR) {
2867 // Lookup the conversion builtin in the TableGen records.
2868 const SPIRV::ConvertBuiltin *Builtin =
2869 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2870
2871 if (!Builtin && Call->isSpirvOp()) {
2872 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2873 unsigned Opcode =
2874 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2875 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2876 GR->getSPIRVTypeID(Call->ReturnType));
2877 }
2878
2879 assert(Builtin && "Conversion builtin not found.");
2880 if (Builtin->IsSaturated)
2881 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2882 SPIRV::Decoration::SaturatedConversion, {});
2883 if (Builtin->IsRounded)
2884 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2885 SPIRV::Decoration::FPRoundingMode,
2886 {(unsigned)Builtin->RoundingMode});
2887
2888 std::string NeedExtMsg; // no errors if empty
2889 bool IsRightComponentsNumber = true; // check if input/output accepts vectors
2890 unsigned Opcode = SPIRV::OpNop;
2891 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2892 // Int -> ...
2893 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2894 // Int -> Int
2895 if (Builtin->IsSaturated)
2896 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2897 : SPIRV::OpSatConvertSToU;
2898 else
2899 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2900 : SPIRV::OpSConvert;
2901 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2902 SPIRV::OpTypeFloat)) {
2903 // Int -> Float
2904 if (Builtin->IsBfloat16) {
2905 const auto *ST = static_cast<const SPIRVSubtarget *>(
2906 &MIRBuilder.getMF().getSubtarget());
2907 if (!ST->canUseExtension(
2908 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2909 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2910 IsRightComponentsNumber =
2911 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2912 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2913 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2914 } else {
2915 bool IsSourceSigned =
2916 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
2917 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2918 }
2919 }
2920 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2921 SPIRV::OpTypeFloat)) {
2922 // Float -> ...
2923 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2924 // Float -> Int
2925 if (Builtin->IsBfloat16) {
2926 const auto *ST = static_cast<const SPIRVSubtarget *>(
2927 &MIRBuilder.getMF().getSubtarget());
2928 if (!ST->canUseExtension(
2929 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2930 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2931 IsRightComponentsNumber =
2932 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2933 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2934 Opcode = SPIRV::OpConvertFToBF16INTEL;
2935 } else {
2936 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2937 : SPIRV::OpConvertFToU;
2938 }
2939 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2940 SPIRV::OpTypeFloat)) {
2941 if (Builtin->IsTF32) {
2942 const auto *ST = static_cast<const SPIRVSubtarget *>(
2943 &MIRBuilder.getMF().getSubtarget());
2944 if (!ST->canUseExtension(
2945 SPIRV::Extension::SPV_INTEL_tensor_float32_conversion))
2946 NeedExtMsg = "SPV_INTEL_tensor_float32_conversion";
2947 IsRightComponentsNumber =
2948 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2949 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2950 Opcode = SPIRV::OpRoundFToTF32INTEL;
2951 } else {
2952 // Float -> Float
2953 Opcode = SPIRV::OpFConvert;
2954 }
2955 }
2956 }
2957
2958 if (!NeedExtMsg.empty()) {
2959 std::string DiagMsg = std::string(Builtin->Name) +
2960 ": the builtin requires the following SPIR-V "
2961 "extension: " +
2962 NeedExtMsg;
2963 report_fatal_error(DiagMsg.c_str(), false);
2964 }
2965 if (!IsRightComponentsNumber) {
2966 std::string DiagMsg =
2967 std::string(Builtin->Name) +
2968 ": result and argument must have the same number of components";
2969 report_fatal_error(DiagMsg.c_str(), false);
2970 }
2971 assert(Opcode != SPIRV::OpNop &&
2972 "Conversion between the types not implemented!");
2973
2974 MIRBuilder.buildInstr(Opcode)
2975 .addDef(Call->ReturnRegister)
2976 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2977 .addUse(Call->Arguments[0]);
2978 return true;
2979}
2980
2981 static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
2982 MachineIRBuilder &MIRBuilder,
2983 SPIRVGlobalRegistry *GR) {
2984 // Lookup the vector load/store builtin in the TableGen records.
2985 const SPIRV::VectorLoadStoreBuiltin *Builtin =
2986 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2987 Call->Builtin->Set);
2988 // Build extended instruction.
2989 auto MIB =
2990 MIRBuilder.buildInstr(SPIRV::OpExtInst)
2991 .addDef(Call->ReturnRegister)
2992 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2993 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2994 .addImm(Builtin->Number);
2995 for (auto Argument : Call->Arguments)
2996 MIB.addUse(Argument);
2997 if (Builtin->Name.contains("load") && Builtin->ElementCount > 1)
2998 MIB.addImm(Builtin->ElementCount);
2999
3000 // The rounding mode should be passed as the last argument in the MI for builtins
3001 // like "vstorea_halfn_r".
3002 if (Builtin->IsRounded)
3003 MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
3004 return true;
3005}
3006
3008 MachineIRBuilder &MIRBuilder,
3009 SPIRVGlobalRegistry *GR) {
3010 const auto *Builtin = Call->Builtin;
3011 auto *MRI = MIRBuilder.getMRI();
3012 unsigned Opcode =
3013 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
3014 const Type *RetTy = GR->getTypeForSPIRVType(Call->ReturnType);
3015 bool IsVoid = RetTy->isVoidTy();
3016 auto MIB = MIRBuilder.buildInstr(Opcode);
3017 Register DestReg;
3018 if (IsVoid) {
3019 LLT PtrTy = MRI->getType(Call->Arguments[0]);
3020 DestReg = MRI->createGenericVirtualRegister(PtrTy);
3021 MRI->setRegClass(DestReg, &SPIRV::pIDRegClass);
3022 SPIRVType *PointeeTy =
3023 GR->getPointeeType(GR->getSPIRVTypeForVReg(Call->Arguments[0]));
3024 MIB.addDef(DestReg);
3025 MIB.addUse(GR->getSPIRVTypeID(PointeeTy));
3026 } else {
3027 MIB.addDef(Call->ReturnRegister);
3028 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
3029 }
3030 for (unsigned i = IsVoid ? 1 : 0; i < Call->Arguments.size(); ++i) {
3031 Register Arg = Call->Arguments[i];
3032 MachineInstr *DefMI = MRI->getUniqueVRegDef(Arg);
3033 if (DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
3034 DefMI->getOperand(1).isCImm()) {
3035 MIB.addImm(getConstFromIntrinsic(Arg, MRI));
3036 } else {
3037 MIB.addUse(Arg);
3038 }
3039 }
3040 if (IsVoid) {
3041 LLT PtrTy = MRI->getType(Call->Arguments[0]);
3042 MachineMemOperand *MMO = MIRBuilder.getMF().getMachineMemOperand(
3043 MachinePointerInfo(), MachineMemOperand::MOStore,
3044 PtrTy.getSizeInBytes(), Align(4));
3045 MIRBuilder.buildStore(DestReg, Call->Arguments[0], *MMO);
3046 }
3047 return true;
3048}
3049
3050 static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
3051 MachineIRBuilder &MIRBuilder,
3052 SPIRVGlobalRegistry *GR) {
3053 // Lookup the instruction opcode in the TableGen records.
3054 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
3055 unsigned Opcode =
3056 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
3057 bool IsLoad = Opcode == SPIRV::OpLoad;
3058 // Build the instruction.
3059 auto MIB = MIRBuilder.buildInstr(Opcode);
3060 if (IsLoad) {
3061 MIB.addDef(Call->ReturnRegister);
3062 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
3063 }
3064 // Add a pointer to the value to load/store.
3065 MIB.addUse(Call->Arguments[0]);
3066 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
3067 // Add a value to store.
3068 if (!IsLoad)
3069 MIB.addUse(Call->Arguments[1]);
3070 // Add optional memory attributes and an alignment.
3071 unsigned NumArgs = Call->Arguments.size();
3072 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
3073 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
3074 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
3075 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
3076 return true;
3077}
3078
3079namespace SPIRV {
3080 // Try to find builtin function attributes by a demangled function name and
3081// return a tuple <builtin group, op code, ext instruction number>, or a special
3082// tuple value <-1, 0, 0> if the builtin function is not found.
3083// Not all builtin functions are supported, only those with a ready-to-use op
3084// code or instruction number defined in TableGen.
3085 // TODO: consider a major rework of mapping demangled calls to builtin
3086 // functions to unify the search and decrease the number of individual cases.
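// Example use (illustrative only; the exact tuple depends on the TableGen
// records):
//   auto [Group, Opcode, ExtNo] =
//       mapBuiltinToOpcode("__spirv_AtomicLoad", InstructionSet::OpenCL_std);
//   // Expected: Group == SPIRV::Atomic, Opcode == SPIRV::OpAtomicLoad.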
3087std::tuple<int, unsigned, unsigned>
3088mapBuiltinToOpcode(const StringRef DemangledCall,
3089 SPIRV::InstructionSet::InstructionSet Set) {
3090 Register Reg;
3091 SmallVector<Register> Args;
3092 std::unique_ptr<const IncomingCall> Call =
3093 lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
3094 if (!Call)
3095 return std::make_tuple(-1, 0, 0);
3096
3097 switch (Call->Builtin->Group) {
3098 case SPIRV::Relational:
3099 case SPIRV::Atomic:
3100 case SPIRV::Barrier:
3101 case SPIRV::CastToPtr:
3102 case SPIRV::ImageMiscQuery:
3103 case SPIRV::SpecConstant:
3104 case SPIRV::Enqueue:
3105 case SPIRV::AsyncCopy:
3106 case SPIRV::LoadStore:
3107 case SPIRV::CoopMatr:
3108 if (const auto *R =
3109 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
3110 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3111 break;
3112 case SPIRV::Extended:
3113 if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
3114 Call->Builtin->Set))
3115 return std::make_tuple(Call->Builtin->Group, 0, R->Number);
3116 break;
3117 case SPIRV::VectorLoadStore:
3118 if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
3119 Call->Builtin->Set))
3120 return std::make_tuple(SPIRV::Extended, 0, R->Number);
3121 break;
3122 case SPIRV::Group:
3123 if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
3124 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3125 break;
3126 case SPIRV::AtomicFloating:
3127 if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
3128 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3129 break;
3130 case SPIRV::IntelSubgroups:
3131 if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
3132 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3133 break;
3134 case SPIRV::GroupUniform:
3135 if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
3136 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3137 break;
3138 case SPIRV::IntegerDot:
3139 if (const auto *R =
3140 SPIRV::lookupIntegerDotProductBuiltin(Call->Builtin->Name))
3141 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3142 break;
3143 case SPIRV::WriteImage:
3144 return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
3145 case SPIRV::Select:
3146 return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
3147 case SPIRV::Construct:
3148 return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
3149 0);
3150 case SPIRV::KernelClock:
3151 return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
3152 default:
3153 return std::make_tuple(-1, 0, 0);
3154 }
3155 return std::make_tuple(-1, 0, 0);
3156}
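// A minimal usage sketch (hypothetical call site, not part of this file):
//
//   auto [Group, Opcode, ExtNo] =
//       SPIRV::mapBuiltinToOpcode(DemangledName,
//                                 SPIRV::InstructionSet::OpenCL_std);
//   if (Group == -1) {
//     // Unsupported builtin: no ready-to-use opcode in TableGen.
//   } else if (Group == SPIRV::Extended) {
//     // ExtNo holds the extended instruction number; Opcode is 0.
//   } else {
//     // Opcode holds a native SPIR-V (or generic, e.g. G_SELECT) opcode.
//   }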
3157
3158std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
3159 SPIRV::InstructionSet::InstructionSet Set,
3160 MachineIRBuilder &MIRBuilder,
3161 const Register OrigRet, const Type *OrigRetTy,
3162 const SmallVectorImpl<Register> &Args,
3163 SPIRVGlobalRegistry *GR, const CallBase &CB) {
3164 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
3165
3166 // Lookup the builtin in the TableGen records.
3167 SPIRVType *SpvType = GR->getSPIRVTypeForVReg(OrigRet);
3168 assert(SpvType && "Inconsistent return register: expected valid type info");
3169 std::unique_ptr<const IncomingCall> Call =
3170 lookupBuiltin(DemangledCall, Set, OrigRet, SpvType, Args);
3171
3172 if (!Call) {
3173 LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
3174 return std::nullopt;
3175 }
3176
3177 // TODO: check if the provided args meet the builtin requirements.
3178 assert(Args.size() >= Call->Builtin->MinNumArgs &&
3179 "Too few arguments to generate the builtin");
3180 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
3181 LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");
3182
3183 // Match the builtin with implementation based on the grouping.
3184 switch (Call->Builtin->Group) {
3185 case SPIRV::Extended:
3186 return generateExtInst(Call.get(), MIRBuilder, GR, CB);
3187 case SPIRV::Relational:
3188 return generateRelationalInst(Call.get(), MIRBuilder, GR);
3189 case SPIRV::Group:
3190 return generateGroupInst(Call.get(), MIRBuilder, GR);
3191 case SPIRV::Variable:
3192 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
3193 case SPIRV::Atomic:
3194 return generateAtomicInst(Call.get(), MIRBuilder, GR);
3195 case SPIRV::AtomicFloating:
3196 return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
3197 case SPIRV::Barrier:
3198 return generateBarrierInst(Call.get(), MIRBuilder, GR);
3199 case SPIRV::CastToPtr:
3200 return generateCastToPtrInst(Call.get(), MIRBuilder, GR);
3201 case SPIRV::Dot:
3202 case SPIRV::IntegerDot:
3203 return generateDotOrFMulInst(DemangledCall, Call.get(), MIRBuilder, GR);
3204 case SPIRV::Wave:
3205 return generateWaveInst(Call.get(), MIRBuilder, GR);
3206 case SPIRV::ICarryBorrow:
3207 return generateICarryBorrowInst(Call.get(), MIRBuilder, GR);
3208 case SPIRV::GetQuery:
3209 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
3210 case SPIRV::ImageSizeQuery:
3211 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
3212 case SPIRV::ImageMiscQuery:
3213 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
3214 case SPIRV::ReadImage:
3215 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
3216 case SPIRV::WriteImage:
3217 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
3218 case SPIRV::SampleImage:
3219 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
3220 case SPIRV::Select:
3221 return generateSelectInst(Call.get(), MIRBuilder);
3222 case SPIRV::Construct:
3223 return generateConstructInst(Call.get(), MIRBuilder, GR);
3224 case SPIRV::SpecConstant:
3225 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
3226 case SPIRV::Enqueue:
3227 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
3228 case SPIRV::AsyncCopy:
3229 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
3230 case SPIRV::Convert:
3231 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
3232 case SPIRV::VectorLoadStore:
3233 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
3234 case SPIRV::LoadStore:
3235 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
3236 case SPIRV::IntelSubgroups:
3237 return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
3238 case SPIRV::GroupUniform:
3239 return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
3240 case SPIRV::KernelClock:
3241 return generateKernelClockInst(Call.get(), MIRBuilder, GR);
3242 case SPIRV::CoopMatr:
3243 return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
3244 case SPIRV::ExtendedBitOps:
3245 return generateExtendedBitOpsInst(Call.get(), MIRBuilder, GR);
3246 case SPIRV::BindlessINTEL:
3247 return generateBindlessImageINTELInst(Call.get(), MIRBuilder, GR);
3248 case SPIRV::TernaryBitwiseINTEL:
3249 return generateTernaryBitwiseFunctionINTELInst(Call.get(), MIRBuilder, GR);
3250 case SPIRV::Block2DLoadStore:
3251 return generate2DBlockIOINTELInst(Call.get(), MIRBuilder, GR);
3252 case SPIRV::Pipe:
3253 return generatePipeInst(Call.get(), MIRBuilder, GR);
3254 case SPIRV::PredicatedLoadStore:
3255 return generatePredicatedLoadStoreInst(Call.get(), MIRBuilder, GR);
3256 case SPIRV::BlockingPipes:
3257 return generateBlockingPipesInst(Call.get(), MIRBuilder, GR);
3258 case SPIRV::ArbitraryPrecisionFixedPoint:
3259 return generateAPFixedPointInst(Call.get(), MIRBuilder, GR);
3260 case SPIRV::ImageChannelDataTypes:
3261 return generateImageChannelDataTypeInst(Call.get(), MIRBuilder, GR);
3262 case SPIRV::ArbitraryFloatingPoint:
3263 return generateAFPInst(Call.get(), MIRBuilder, GR);
3264 }
3265 return false;
3266}
3267
3268Type *parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx) {
3269 // Parse strings representing OpenCL builtin types.
3270 if (hasBuiltinTypePrefix(TypeStr)) {
3271 // OpenCL builtin types in demangled call strings have the following format:
3272 // e.g. ocl_image2d_ro
3273 [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
3274 assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");
3275
3276 // Check if this is a pointer to a builtin type rather than just a pointer
3277 // representing a builtin type. If it is a pointer to a builtin type, the
3278 // caller of parseBuiltinCallArgumentBaseType(...) will need to do
3279 // additional handling, because this function only retrieves the base
3280 // types.
3281 if (TypeStr.ends_with("*"))
3282 TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));
3283
3284 return parseBuiltinTypeNameToTargetExtType("opencl." + TypeStr.str() + "_t",
3285 Ctx);
3286 }
3287
3288 // Parse type name in either "typeN" or "type vector[N]" format, where
3289 // N is the number of elements of the vector.
3290 Type *BaseType;
3291 unsigned VecElts = 0;
3292
3293 BaseType = parseBasicTypeName(TypeStr, Ctx);
3294 if (!BaseType)
3295 // Unable to recognize SPIRV type name.
3296 return nullptr;
3297
3298 // Handle "typeN*" or "type vector[N]*".
3299 TypeStr.consume_back("*");
3300
3301 if (TypeStr.consume_front(" vector["))
3302 TypeStr = TypeStr.substr(0, TypeStr.find(']'));
3303
3304 TypeStr.getAsInteger(10, VecElts);
3305 if (VecElts > 0)
3306 BaseType = VectorType::get(
3307 BaseType->isVoidTy() ? Type::getInt8Ty(Ctx) : BaseType, VecElts, false);
3308
3309 return BaseType;
3310}
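// Illustrative inputs and results for the parser above (spellings follow the
// formats documented in the comments; the exact examples are assumed):
//   "float4"          -> <4 x float>
//   "float vector[4]" -> <4 x float>
//   "float4*"         -> <4 x float> (only the base type is returned)
//   "ocl_image2d_ro"  -> the TargetExtType for "opencl.image2d_ro_t"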
3311
3312bool parseBuiltinTypeStr(SmallVector<StringRef, 10> &BuiltinArgsTypeStrs,
3313 const StringRef DemangledCall, LLVMContext &Ctx) {
3314 auto Pos1 = DemangledCall.find('(');
3315 if (Pos1 == StringRef::npos)
3316 return false;
3317 auto Pos2 = DemangledCall.find(')');
3318 if (Pos2 == StringRef::npos || Pos1 > Pos2)
3319 return false;
3320 DemangledCall.slice(Pos1 + 1, Pos2)
3321 .split(BuiltinArgsTypeStrs, ',', -1, false);
3322 return true;
3323}
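// Illustrative (hypothetical demangled signature): for
// "foo(float, uint, half*)" the output vector becomes
// {"float", " uint", " half*"}; callers trim the leading whitespace before
// parsing each entry.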
3324
3325Type *parseBuiltinCallArgumentBaseType(const StringRef DemangledCall,
3326 unsigned ArgIdx, LLVMContext &Ctx) {
3327 SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
3328 parseBuiltinTypeStr(BuiltinArgsTypeStrs, DemangledCall, Ctx);
3329 if (ArgIdx >= BuiltinArgsTypeStrs.size())
3330 return nullptr;
3331 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
3332 return parseBuiltinCallArgumentType(TypeStr, Ctx);
3333}
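// Example (hypothetical demangled call):
//   parseBuiltinCallArgumentBaseType(
//       "read_imagef(ocl_image2d_ro, ocl_sampler, float2)", 2, Ctx)
// selects the third type string, "float2", and returns <2 x float>.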
3334
3335struct BuiltinType {
3336 StringRef Name;
3337 unsigned Opcode;
3338};
3339
3340#define GET_BuiltinTypes_DECL
3341#define GET_BuiltinTypes_IMPL
3342
3343struct OpenCLType {
3344 StringRef Name;
3345 StringRef SpirvTypeLiteral;
3346};
3347
3348#define GET_OpenCLTypes_DECL
3349#define GET_OpenCLTypes_IMPL
3350
3351#include "SPIRVGenTables.inc"
3352} // namespace SPIRV
3353
3354//===----------------------------------------------------------------------===//
3355// Misc functions for parsing builtin types.
3356//===----------------------------------------------------------------------===//
3357
3358static Type *parseTypeString(const StringRef Name, LLVMContext &Context) {
3359 if (Name.starts_with("void"))
3360 return Type::getVoidTy(Context);
3361 else if (Name.starts_with("int") || Name.starts_with("uint"))
3362 return Type::getInt32Ty(Context);
3363 else if (Name.starts_with("float"))
3364 return Type::getFloatTy(Context);
3365 else if (Name.starts_with("half"))
3366 return Type::getHalfTy(Context);
3367 report_fatal_error("Unable to recognize type!");
3368}
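// This helper is only invoked on the leading type parameter of a
// parameterized builtin name such as "spirv.Image._void_1_0_0_0_0_0_0";
// e.g. parseTypeString("void", Ctx) yields the LLVM void type and
// parseTypeString("uint", Ctx) yields i32.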
3369
3370//===----------------------------------------------------------------------===//
3371// Implementation functions for builtin types.
3372//===----------------------------------------------------------------------===//
3373
3374static SPIRVType *getNonParameterizedType(const TargetExtType *ExtensionType,
3375 const SPIRV::BuiltinType *TypeRecord,
3376 MachineIRBuilder &MIRBuilder,
3377 SPIRVGlobalRegistry *GR) {
3378 unsigned Opcode = TypeRecord->Opcode;
3379 // Create or get an existing type from GlobalRegistry.
3380 return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
3381}
3382
3383static SPIRVType *getSamplerType(MachineIRBuilder &MIRBuilder,
3384 SPIRVGlobalRegistry *GR) {
3385 // Create or get an existing type from GlobalRegistry.
3386 return GR->getOrCreateOpTypeSampler(MIRBuilder);
3387}
3388
3389static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
3390 MachineIRBuilder &MIRBuilder,
3391 SPIRVGlobalRegistry *GR) {
3392 assert(ExtensionType->getNumIntParameters() == 1 &&
3393 "Invalid number of parameters for SPIR-V pipe builtin!");
3394 // Create or get an existing type from GlobalRegistry.
3395 return GR->getOrCreateOpTypePipe(MIRBuilder,
3396 SPIRV::AccessQualifier::AccessQualifier(
3397 ExtensionType->getIntParameter(0)));
3398}
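// Illustrative (assumed IR spelling): target("spirv.Pipe", 0) carries one
// integer parameter holding the SPIR-V access qualifier
// (0 = ReadOnly, 1 = WriteOnly, 2 = ReadWrite).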
3399
3400static SPIRVType *getCoopMatrType(const TargetExtType *ExtensionType,
3401 MachineIRBuilder &MIRBuilder,
3402 SPIRVGlobalRegistry *GR) {
3403 assert(ExtensionType->getNumIntParameters() == 4 &&
3404 "Invalid number of parameters for SPIR-V coop matrices builtin!");
3405 assert(ExtensionType->getNumTypeParameters() == 1 &&
3406 "SPIR-V coop matrices builtin type must have a type parameter!");
3407 const SPIRVType *ElemType =
3408 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder,
3409 SPIRV::AccessQualifier::ReadWrite, true);
3410 // Create or get an existing type from GlobalRegistry.
3411 return GR->getOrCreateOpTypeCoopMatr(
3412 MIRBuilder, ExtensionType, ElemType, ExtensionType->getIntParameter(0),
3413 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
3414 ExtensionType->getIntParameter(3), true);
3415}
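// Illustrative (assumed IR spelling):
//   target("spirv.CooperativeMatrixKHR", float, 3, 8, 16, 0)
// lowers to an OpTypeCooperativeMatrixKHR with an f32 component type; the
// four integer parameters are forwarded as Scope, Rows, Columns and Use, in
// that order (3 is the SPIR-V Subgroup scope).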
3416
3417static SPIRVType *getSampledImageType(const TargetExtType *OpaqueType,
3418 MachineIRBuilder &MIRBuilder,
3419 SPIRVGlobalRegistry *GR) {
3420 SPIRVType *OpaqueImageType = GR->getImageType(
3421 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder);
3422 // Create or get an existing type from GlobalRegistry.
3423 return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
3424}
3425
3426static SPIRVType *getInlineSpirvType(const TargetExtType *ExtensionType,
3427 MachineIRBuilder &MIRBuilder,
3428 SPIRVGlobalRegistry *GR) {
3429 assert(ExtensionType->getNumIntParameters() == 3 &&
3430 "Inline SPIR-V type builtin takes an opcode, size, and alignment "
3431 "parameter");
3432 auto Opcode = ExtensionType->getIntParameter(0);
3433
3434 SmallVector<MCOperand> Operands;
3435 for (Type *Param : ExtensionType->type_params()) {
3436 if (const TargetExtType *ParamEType = dyn_cast<TargetExtType>(Param)) {
3437 if (ParamEType->getName() == "spirv.IntegralConstant") {
3438 assert(ParamEType->getNumTypeParameters() == 1 &&
3439 "Inline SPIR-V integral constant builtin must have a type "
3440 "parameter");
3441 assert(ParamEType->getNumIntParameters() == 1 &&
3442 "Inline SPIR-V integral constant builtin must have a "
3443 "value parameter");
3444
3445 auto OperandValue = ParamEType->getIntParameter(0);
3446 auto *OperandType = ParamEType->getTypeParameter(0);
3447
3448 const SPIRVType *OperandSPIRVType = GR->getOrCreateSPIRVType(
3449 OperandType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3450
3451 Operands.push_back(MCOperand::createReg(GR->buildConstantInt(
3452 OperandValue, MIRBuilder, OperandSPIRVType, true)));
3453 continue;
3454 } else if (ParamEType->getName() == "spirv.Literal") {
3455 assert(ParamEType->getNumTypeParameters() == 0 &&
3456 "Inline SPIR-V literal builtin does not take type "
3457 "parameters");
3458 assert(ParamEType->getNumIntParameters() == 1 &&
3459 "Inline SPIR-V literal builtin must have an integer "
3460 "parameter");
3461
3462 auto OperandValue = ParamEType->getIntParameter(0);
3463
3464 Operands.push_back(MCOperand::createImm(OperandValue));
3465 continue;
3466 }
3467 }
3468 const SPIRVType *TypeOperand = GR->getOrCreateSPIRVType(
3469 Param, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3470 Operands.push_back(MCOperand::createReg(GR->getSPIRVTypeID(TypeOperand)));
3471 }
3472
3473 return GR->getOrCreateUnknownType(ExtensionType, MIRBuilder, Opcode,
3474 Operands);
3475}
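// Illustrative (assumed IR spelling): an inline SPIR-V type such as
//   target("spirv.Type", target("spirv.Literal", 32),
//          target("spirv.Literal", 0), 21, 4, 4)
// encodes "OpTypeInt 32 0" (opcode 21, size 4, alignment 4): each
// spirv.Literal becomes an immediate operand, a spirv.IntegralConstant
// becomes a reference to a typed constant, and any other type parameter is
// lowered to a type id.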
3476
3477static SPIRVType *getVulkanBufferType(const TargetExtType *ExtensionType,
3478 MachineIRBuilder &MIRBuilder,
3479 SPIRVGlobalRegistry *GR) {
3480 assert(ExtensionType->getNumTypeParameters() == 1 &&
3481 "Vulkan buffers have exactly one type for the type of the buffer.");
3482 assert(ExtensionType->getNumIntParameters() == 2 &&
3483 "Vulkan buffer have 2 integer parameters: storage class and is "
3484 "writable.");
3485
3486 auto *T = ExtensionType->getTypeParameter(0);
3487 auto SC = static_cast<SPIRV::StorageClass::StorageClass>(
3488 ExtensionType->getIntParameter(0));
3489 bool IsWritable = ExtensionType->getIntParameter(1);
3490 return GR->getOrCreateVulkanBufferType(MIRBuilder, T, SC, IsWritable);
3491}
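// Illustrative (assumed IR spelling): target("spirv.VulkanBuffer", i32, 12, 1)
// requests a writable buffer of i32 elements in the StorageBuffer storage
// class (12 is the SPIR-V StorageBuffer enumerant).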
3492
3493static SPIRVType *getVulkanPushConstantType(const TargetExtType *ExtensionType,
3494 MachineIRBuilder &MIRBuilder,
3495 SPIRVGlobalRegistry *GR) {
3496 assert(ExtensionType->getNumTypeParameters() == 1 &&
3497 "Vulkan push constants have exactly one type as argument.");
3498 auto *T = ExtensionType->getTypeParameter(0);
3499 return GR->getOrCreateVulkanPushConstantType(MIRBuilder, T);
3500}
3501
3502static SPIRVType *getLayoutType(const TargetExtType *ExtensionType,
3503 MachineIRBuilder &MIRBuilder,
3504 SPIRVGlobalRegistry *GR) {
3505 return GR->getOrCreateLayoutType(MIRBuilder, ExtensionType);
3506}
3507
3508namespace SPIRV {
3509TargetExtType *parseBuiltinTypeNameToTargetExtType(std::string TypeName,
3510 LLVMContext &Context) {
3511 StringRef NameWithParameters = TypeName;
3512
3513 // Pointers-to-opaque-structs representing OpenCL types are first translated
3514 // to equivalent SPIR-V types. OpenCL builtin type names should have the
3515 // following format: e.g. %opencl.event_t
3516 if (NameWithParameters.starts_with("opencl.")) {
3517 const SPIRV::OpenCLType *OCLTypeRecord =
3518 SPIRV::lookupOpenCLType(NameWithParameters);
3519 if (!OCLTypeRecord)
3520 report_fatal_error("Missing TableGen record for OpenCL type: " +
3521 NameWithParameters);
3522 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
3523 // Continue with the SPIR-V builtin type...
3524 }
3525
3526 // Names of the opaque structs representing SPIR-V builtins without
3527 // parameters should have the following format: e.g. %spirv.Event
3528 assert(NameWithParameters.starts_with("spirv.") &&
3529 "Unknown builtin opaque type!");
3530
3531 // Parameterized SPIR-V builtin names follow this format:
3532 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
3533 if (!NameWithParameters.contains('_'))
3534 return TargetExtType::get(Context, NameWithParameters);
3535
3536 SmallVector<StringRef> Parameters;
3537 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
3538 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
3539
3540 SmallVector<Type *, 1> TypeParameters;
3541 bool HasTypeParameter = !isDigit(Parameters[0][0]);
3542 if (HasTypeParameter)
3543 TypeParameters.push_back(parseTypeString(Parameters[0], Context));
3544 SmallVector<unsigned> IntParameters;
3545 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
3546 unsigned IntParameter = 0;
3547 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
3548 (void)ValidLiteral;
3549 assert(ValidLiteral &&
3550 "Invalid format of SPIR-V builtin parameter literal!");
3551 IntParameters.push_back(IntParameter);
3552 }
3553 return TargetExtType::get(Context,
3554 NameWithParameters.substr(0, BaseNameLength),
3555 TypeParameters, IntParameters);
3556}
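// Illustrative translations (the input names appear in the comments above;
// the "opencl.event_t" mapping assumes its TableGen record points at
// "spirv.Event"):
//   "opencl.event_t" -> target("spirv.Event")
//   "spirv.Pipe._0"  -> target("spirv.Pipe", 0)
//   "spirv.Image._void_1_0_0_0_0_0_0"
//                    -> target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0)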
3557
3558SPIRVType *lowerBuiltinType(const Type *OpaqueType,
3559 SPIRV::AccessQualifier::AccessQualifier AccessQual,
3560 MachineIRBuilder &MIRBuilder,
3561 SPIRVGlobalRegistry *GR) {
3562 // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
3563 // target(...) target extension types or pointers-to-opaque-structs. The
3564 // approach relying on structs is deprecated and works only in the non-opaque
3565 // pointer mode (-opaque-pointers=0).
3566 // In order to maintain compatibility with LLVM IR generated by older versions
3567 // of Clang and the LLVM/SPIR-V Translator, the pointers-to-opaque-structs are
3568 // "translated" to target extension types. This translation is temporary and
3569 // will be removed in a future release of LLVM.
3570 const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
3571 if (!BuiltinType)
3572 BuiltinType = parseBuiltinTypeNameToTargetExtType(
3573 OpaqueType->getStructName().str(), MIRBuilder.getContext());
3574
3575 unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
3576
3577 const StringRef Name = BuiltinType->getName();
3578 LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
3579
3580 SPIRVType *TargetType;
3581 if (Name == "spirv.Type") {
3582 TargetType = getInlineSpirvType(BuiltinType, MIRBuilder, GR);
3583 } else if (Name == "spirv.VulkanBuffer") {
3584 TargetType = getVulkanBufferType(BuiltinType, MIRBuilder, GR);
3585 } else if (Name == "spirv.Padding") {
3586 TargetType = GR->getOrCreatePaddingType(MIRBuilder);
3587 } else if (Name == "spirv.PushConstant") {
3588 TargetType = getVulkanPushConstantType(BuiltinType, MIRBuilder, GR);
3589 } else if (Name == "spirv.Layout") {
3590 TargetType = getLayoutType(BuiltinType, MIRBuilder, GR);
3591 } else {
3592 // Lookup the demangled builtin type in the TableGen records.
3593 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
3594 if (!TypeRecord)
3595 report_fatal_error("Missing TableGen record for builtin type: " + Name);
3596
3597 // "Lower" the BuiltinType into TargetType. The following get<...>Type
3598 // methods use the implementation details from TableGen records or
3599 // TargetExtType parameters to either create a new OpType<...> machine
3600 // instruction or get an existing equivalent SPIRVType from
3601 // GlobalRegistry.
3602
3603 switch (TypeRecord->Opcode) {
3604 case SPIRV::OpTypeImage:
3605 TargetType = GR->getImageType(BuiltinType, AccessQual, MIRBuilder);
3606 break;
3607 case SPIRV::OpTypePipe:
3608 TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
3609 break;
3610 case SPIRV::OpTypeDeviceEvent:
3611 TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
3612 break;
3613 case SPIRV::OpTypeSampler:
3614 TargetType = getSamplerType(MIRBuilder, GR);
3615 break;
3616 case SPIRV::OpTypeSampledImage:
3617 TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
3618 break;
3619 case SPIRV::OpTypeCooperativeMatrixKHR:
3620 TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
3621 break;
3622 default:
3623 TargetType =
3624 getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
3625 break;
3626 }
3627 }
3628
3629 // Emit OpName instruction if a new OpType<...> instruction was added
3630 // (equivalent type was not found in GlobalRegistry).
3631 if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
3632 buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
3633
3634 return TargetType;
3635}
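// End-to-end sketch (editorial): a pre-opaque-pointers kernel argument typed
// as a pointer to the opaque struct %opencl.image2d_ro_t is first rewritten
// to a target extension type by parseBuiltinTypeNameToTargetExtType(), then
// dispatched through the switch above (here to GR->getImageType()), creating
// a new OpTypeImage or reusing an equivalent SPIRVType already present in
// the GlobalRegistry; OpName is emitted only when new virtual registers were
// created, i.e. when a genuinely new OpType<...> instruction was built.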
3636} // namespace SPIRV
3637} // namespace llvm