//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering builtin function calls and types using their
// demangled names and TableGen records.
//
//===----------------------------------------------------------------------===//

#include "SPIRVBuiltins.h"
#include "SPIRV.h"
#include "SPIRVSubtarget.h"
#include "SPIRVUtils.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include <regex>
#include <string>
#include <tuple>

#define DEBUG_TYPE "spirv-builtins"

namespace llvm {
namespace SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"

struct DemangledBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltinGroup Group;
  uint8_t MinNumArgs;
  uint8_t MaxNumArgs;
};

#define GET_DemangledBuiltins_DECL
#define GET_DemangledBuiltins_IMPL

struct IncomingCall {
  const std::string BuiltinName;
  const DemangledBuiltin *Builtin;
  const Register ReturnRegister;
  const SPIRVType *ReturnType;
  const SmallVectorImpl<Register> &Arguments;

  IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin,
               const Register ReturnRegister, const SPIRVType *ReturnType,
               const SmallVectorImpl<Register> &Arguments)
      : BuiltinName(BuiltinName), Builtin(Builtin),
        ReturnRegister(ReturnRegister), ReturnType(ReturnType),
        Arguments(Arguments) {}

  bool isSpirvOp() const { return BuiltinName.rfind("__spirv_", 0) == 0; }
};

struct NativeBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Opcode;
};

#define GET_NativeBuiltins_DECL
#define GET_NativeBuiltins_IMPL

struct GroupBuiltin {
  StringRef Name;
  uint32_t Opcode;
  uint32_t GroupOperation;
  bool IsElect;
  bool IsAllOrAny;
  bool IsAllEqual;
  bool IsBallot;
  bool IsInverseBallot;
  bool IsBallotBitExtract;
  bool IsBallotFindBit;
  bool IsLogical;
  bool NoGroupOperation;
  bool HasBoolArg;
};

#define GET_GroupBuiltins_DECL
#define GET_GroupBuiltins_IMPL

struct IntelSubgroupsBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsBlock;
  bool IsWrite;
  bool IsMedia;
};

#define GET_IntelSubgroupsBuiltins_DECL
#define GET_IntelSubgroupsBuiltins_IMPL

struct AtomicFloatingBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_AtomicFloatingBuiltins_DECL
#define GET_AtomicFloatingBuiltins_IMPL

struct GroupUniformBuiltin {
  StringRef Name;
  uint32_t Opcode;
  bool IsLogical;
};

#define GET_GroupUniformBuiltins_DECL
#define GET_GroupUniformBuiltins_IMPL

struct GetBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltIn::BuiltIn Value;
};

using namespace BuiltIn;
#define GET_GetBuiltins_DECL
#define GET_GetBuiltins_IMPL

struct ImageQueryBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Component;
};

#define GET_ImageQueryBuiltins_DECL
#define GET_ImageQueryBuiltins_IMPL

struct IntegerDotProductBuiltin {
  StringRef Name;
  uint32_t Opcode;
};

#define GET_IntegerDotProductBuiltins_DECL
#define GET_IntegerDotProductBuiltins_IMPL

struct ConvertBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  bool IsDestinationSigned;
  bool IsSaturated;
  bool IsRounded;
  bool IsBfloat16;
  bool IsTF32;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

struct VectorLoadStoreBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Number;
  uint32_t ElementCount;
  bool IsRounded;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

using namespace FPRoundingMode;
#define GET_ConvertBuiltins_DECL
#define GET_ConvertBuiltins_IMPL

using namespace InstructionSet;
#define GET_VectorLoadStoreBuiltins_DECL
#define GET_VectorLoadStoreBuiltins_IMPL

#define GET_CLMemoryScope_DECL
#define GET_CLSamplerAddressingMode_DECL
#define GET_CLMemoryFenceFlags_DECL
#define GET_ExtendedBuiltins_DECL
#include "SPIRVGenTables.inc"
} // namespace SPIRV

//===----------------------------------------------------------------------===//
// Misc functions for looking up builtins and verifying requirements using
// TableGen records
//===----------------------------------------------------------------------===//

namespace SPIRV {
/// Parses the name part of the demangled builtin call.
std::string lookupBuiltinNameHelper(StringRef DemangledCall,
                                    FPDecorationId *DecorationId) {
  StringRef PassPrefix = "(anonymous namespace)::";
  std::string BuiltinName;
  // The Itanium demangler result may have an "(anonymous namespace)::" prefix.
  if (DemangledCall.starts_with(PassPrefix))
    BuiltinName = DemangledCall.substr(PassPrefix.size());
  else
    BuiltinName = DemangledCall;
  // Extract the builtin function name and types of arguments from the call
  // skeleton.
  BuiltinName = BuiltinName.substr(0, BuiltinName.find('('));

  // Account for a possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR.
  if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
    BuiltinName = BuiltinName.substr(12);

  // Check if the extracted name contains type information between angle
  // brackets. If so, the builtin is an instantiated template, and the
  // information after the angle brackets and the return type must be removed.
  std::size_t Pos1 = BuiltinName.rfind('<');
  if (Pos1 != std::string::npos && BuiltinName.back() == '>') {
    std::size_t Pos2 = BuiltinName.rfind(' ', Pos1);
    if (Pos2 == std::string::npos)
      Pos2 = 0;
    else
      ++Pos2;
    BuiltinName = BuiltinName.substr(Pos2, Pos1 - Pos2);
    BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
  }

  // Check if the extracted name begins with:
  // - "__spirv_ImageSampleExplicitLod"
  // - "__spirv_ImageRead"
  // - "__spirv_ImageWrite"
  // - "__spirv_ImageQuerySizeLod"
  // - "__spirv_UDotKHR"
  // - "__spirv_SDotKHR"
  // - "__spirv_SUDotKHR"
  // - "__spirv_SDotAccSatKHR"
  // - "__spirv_UDotAccSatKHR"
  // - "__spirv_SUDotAccSatKHR"
  // - "__spirv_ReadClockKHR"
  // - "__spirv_SubgroupBlockReadINTEL"
  // - "__spirv_SubgroupImageBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockReadINTEL"
  // - "__spirv_SubgroupImageMediaBlockWriteINTEL"
  // - "__spirv_Convert"
  // - "__spirv_Round"
  // - "__spirv_UConvert"
  // - "__spirv_SConvert"
  // - "__spirv_FConvert"
  // - "__spirv_SatConvert"
  // and maybe contains return type information at the end "_R<type>".
  // If so, extract the plain builtin name without the type information.
  static const std::regex SpvWithR(
      "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageWrite|ImageQuerySizeLod|"
      "UDotKHR|"
      "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
      "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
      "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
      "Convert|Round|"
      "UConvert|SConvert|FConvert|SatConvert)[^_]*)(_R[^_]*_?(\\w+)?.*)?");
  std::smatch Match;
  if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
    std::ssub_match SubMatch;
    if (DecorationId && Match.size() > 3) {
      SubMatch = Match[4];
      *DecorationId = demangledPostfixToDecorationId(SubMatch.str());
    }
    SubMatch = Match[1];
    BuiltinName = SubMatch.str();
  }

  return BuiltinName;
}
} // namespace SPIRV
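
// Illustrative examples of what SPIRV::lookupBuiltinNameHelper extracts
// (assumed demangled inputs, shown only to document the parsing above):
//   "acos(float)"                        -> "acos"
//   "__spirv_ocl_fabs(double)"           -> "fabs"
//   "__spirv_FConvert_Rhalf_rtz(float)"  -> "__spirv_FConvert"
// In the last case the "_R..." postfix also yields the FPDecorationId (here
// the "rtz" rounding suffix) when DecorationId is non-null.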

/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
/// the provided \p DemangledCall and specified \p Set.
///
/// The lookup follows this algorithm, returning the first successful match:
/// 1. Search with the plain demangled name (expecting a 1:1 match).
/// 2. Search with the prefix before or suffix after the demangled name
///    signifying the type of the first argument.
///
/// \returns Wrapper around the demangled call and found builtin definition.
static std::unique_ptr<const SPIRV::IncomingCall>
lookupBuiltin(StringRef DemangledCall,
              SPIRV::InstructionSet::InstructionSet Set,
              Register ReturnRegister, const SPIRVType *ReturnType,
              const SmallVectorImpl<Register> &Arguments) {
  std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);

  SmallVector<StringRef, 10> BuiltinArgumentTypes;
  StringRef BuiltinArgs =
      DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
  BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);

  // Look up the builtin in the defined set. Start with the plain demangled
  // name, expecting a 1:1 match in the defined builtin set.
  const SPIRV::DemangledBuiltin *Builtin;
  if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
    return std::make_unique<SPIRV::IncomingCall>(
        BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

  // If the initial lookup was unsuccessful and the demangled call takes at
  // least 1 argument, add a prefix or suffix signifying the type of the first
  // argument and repeat the search.
  if (BuiltinArgumentTypes.size() >= 1) {
    char FirstArgumentType = BuiltinArgumentTypes[0][0];
    // Prefix to be added to the builtin's name for lookup.
    // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
    std::string Prefix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "u_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "s_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      if (Set == SPIRV::InstructionSet::OpenCL_std ||
          Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "f";
      break;
    }

    // If an argument-type name prefix was added, look up the builtin again.
    if (!Prefix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

    // If the lookup with a prefix failed, find a suffix to be added to the
    // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
    // an unsigned value has a suffix "u".
    std::string Suffix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      Suffix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      Suffix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      Suffix = "f";
      break;
    }

    // If an argument-type name suffix was added, look up the builtin again.
    if (!Suffix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
  }

  // No builtin with such a name was found in the set.
  return nullptr;
}
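
// For example (illustrative): OpenCL "abs" with an unsigned argument is found
// via the "u_" prefix ("u_abs"), while "group_reduce_max" with an unsigned
// argument is found via the "u" suffix ("group_reduce_maxu"); which step
// succeeds depends on the TableGen records for the given set.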

static MachineInstr *getBlockStructInstr(Register ParamReg,
                                         MachineRegisterInfo *MRI) {
  // We expect the following sequence of instructions:
  //   %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
  //   or       = G_GLOBAL_VALUE @block_literal_global
  //   %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
  //   %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
  MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
  assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
         MI->getOperand(1).isReg());
  Register BitcastReg = MI->getOperand(1).getReg();
  MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
  assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
         BitcastMI->getOperand(2).isReg());
  Register ValueReg = BitcastMI->getOperand(2).getReg();
  MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
  return ValueMI;
}

// Return an integer constant corresponding to the given register and
// defined in spv_track_constant.
// TODO: maybe unify with prelegalizer pass.
static unsigned getIConstVal(Register Reg, const MachineRegisterInfo *MRI) {
  MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
  assert(DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
         DefMI->getOperand(1).isCImm());
  return DefMI->getOperand(1).getCImm()->getValue().getZExtValue();
}

// Return the type of the instruction result from the spv_assign_type
// intrinsic.
// TODO: maybe unify with prelegalizer pass.
static const Type *getMachineInstrType(MachineInstr *MI) {
  MachineInstr *NextMI = MI->getNextNode();
  if (!NextMI)
    return nullptr;
  if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
    if ((NextMI = NextMI->getNextNode()) == nullptr)
      return nullptr;
  Register ValueReg = MI->getOperand(0).getReg();
  if ((!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) &&
       !isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_ptr_type)) ||
      NextMI->getOperand(1).getReg() != ValueReg)
    return nullptr;
  Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
  assert(Ty && "Type is expected");
  return Ty;
}

static const Type *getBlockStructType(Register ParamReg,
                                      MachineRegisterInfo *MRI) {
  // In principle, this information should be passed to us from Clang via
  // an elementtype attribute. However, said attribute requires that
  // the function call be an intrinsic, which it is not. Instead, we rely on
  // being able to trace this to the declaration of a variable: OpenCL C
  // specification section 6.12.5 should guarantee that we can do this.
  MachineInstr *MI = getBlockStructInstr(ParamReg, MRI);
  if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
    return MI->getOperand(1).getGlobal()->getType();
  assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
         "Blocks in OpenCL C must be traceable to allocation site");
  return getMachineInstrType(MI);
}

//===----------------------------------------------------------------------===//
// Helper functions for building misc instructions
//===----------------------------------------------------------------------===//

/// Helper function building either a resulting scalar or vector bool register
/// depending on the expected \p ResultType.
///
/// \returns Tuple of the resulting register and its type.
static std::tuple<Register, SPIRVType *>
buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
                  SPIRVGlobalRegistry *GR) {
  LLT Type;
  SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);

  if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned VectorElements = ResultType->getOperand(2).getImm();
    BoolType = GR->getOrCreateSPIRVVectorType(BoolType, VectorElements,
                                              MIRBuilder, true);
    const FixedVectorType *LLVMVectorType =
        cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
    Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
  } else {
    Type = LLT::scalar(1);
  }

  Register ResultRegister =
      MIRBuilder.getMRI()->createGenericVirtualRegister(Type);
  MIRBuilder.getMRI()->setRegClass(ResultRegister,
                                   GR->getRegClass(ResultType));
  GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
  return std::make_tuple(ResultRegister, BoolType);
}

/// Helper function for building either a vector or scalar select instruction
/// depending on the expected \p ResultType.
static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
                            Register ReturnRegister, Register SourceRegister,
                            const SPIRVType *ReturnType,
                            SPIRVGlobalRegistry *GR) {
  Register TrueConst, FalseConst;

  if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
    uint64_t AllOnes = APInt::getAllOnes(Bits).getZExtValue();
    TrueConst =
        GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType, true);
    FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType, true);
  } else {
    TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType, true);
    FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType, true);
  }

  return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
                                FalseConst);
}

/// Helper function for building a load instruction loading into the
/// \p DestinationReg.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR, LLT LowLevelType,
                              Register DestinationReg = Register(0)) {
  if (!DestinationReg.isValid())
    DestinationReg = createVirtualRegister(BaseType, GR, MIRBuilder);
  // TODO: consider using correct address space and alignment (p0 is canonical
  // type for selection though).
  MachinePointerInfo PtrInfo = MachinePointerInfo();
  MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
  return DestinationReg;
}

/// Helper function for building a load instruction for loading a builtin
/// global variable of \p BuiltinValue value.
static Register buildBuiltinVariableLoad(
    MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
    SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
    Register Reg = Register(0), bool isConst = true,
    const std::optional<SPIRV::LinkageType::LinkageType> &LinkageTy = {
        SPIRV::LinkageType::Import}) {
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::pIDRegClass);
  MIRBuilder.getMRI()->setType(
      NewRegister,
      LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
                   GR->getPointerSize()));
  SPIRVType *PtrType = GR->getOrCreateSPIRVPointerType(
      VariableType, MIRBuilder, SPIRV::StorageClass::Input);
  GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Set up the global OpVariable with the necessary builtin decorations.
  Register Variable = GR->buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst, LinkageTy,
      MIRBuilder, false);

  // Load the value from the global variable.
  Register LoadedRegister =
      buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
  return LoadedRegister;
}

/// Helper external function for assigning a SPIRVType to a register, ensuring
/// the register class and type are set in MRI. Defined in
/// SPIRVPreLegalizer.cpp.
extern void updateRegType(Register Reg, Type *Ty, SPIRVType *SpirvTy,
                          SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB,
                          MachineRegisterInfo &MRI);

// TODO: Move to TableGen.
static SPIRV::MemorySemantics::MemorySemantics
getSPIRVMemSemantics(std::memory_order MemOrder) {
  switch (MemOrder) {
  case std::memory_order_relaxed:
    return SPIRV::MemorySemantics::None;
  case std::memory_order_acquire:
    return SPIRV::MemorySemantics::Acquire;
  case std::memory_order_release:
    return SPIRV::MemorySemantics::Release;
  case std::memory_order_acq_rel:
    return SPIRV::MemorySemantics::AcquireRelease;
  case std::memory_order_seq_cst:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  default:
    report_fatal_error("Unknown memory order");
  }
}
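
// Note that std::memory_order_consume has no mapping above and falls through
// to the fatal-error default case.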

static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
  switch (ClScope) {
  case SPIRV::CLMemoryScope::memory_scope_work_item:
    return SPIRV::Scope::Invocation;
  case SPIRV::CLMemoryScope::memory_scope_work_group:
    return SPIRV::Scope::Workgroup;
  case SPIRV::CLMemoryScope::memory_scope_device:
    return SPIRV::Scope::Device;
  case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
    return SPIRV::Scope::CrossDevice;
  case SPIRV::CLMemoryScope::memory_scope_sub_group:
    return SPIRV::Scope::Subgroup;
  }
  report_fatal_error("Unknown CL memory scope");
}

static Register buildConstantIntReg32(uint64_t Val,
                                      MachineIRBuilder &MIRBuilder,
                                      SPIRVGlobalRegistry *GR) {
  return GR->buildConstantInt(
      Val, MIRBuilder, GR->getOrCreateSPIRVIntegerType(32, MIRBuilder), true);
}

static Register buildScopeReg(Register CLScopeRegister,
                              SPIRV::Scope::Scope Scope,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              MachineRegisterInfo *MRI) {
  if (CLScopeRegister.isValid()) {
    auto CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
    Scope = getSPIRVScope(CLScope);

    if (CLScope == static_cast<unsigned>(Scope)) {
      MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
      return CLScopeRegister;
    }
  }
  return buildConstantIntReg32(Scope, MIRBuilder, GR);
}

static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI,
                              SPIRVGlobalRegistry *GR) {
  if (MRI->getRegClassOrNull(Reg))
    return;
  SPIRVType *SpvType = GR->getSPIRVTypeForVReg(Reg);
  MRI->setRegClass(Reg,
                   SpvType ? GR->getRegClass(SpvType) : &SPIRV::iIDRegClass);
}

static Register buildMemSemanticsReg(Register SemanticsRegister,
                                     Register PtrRegister, unsigned &Semantics,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  if (SemanticsRegister.isValid()) {
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    std::memory_order Order =
        static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
    Semantics =
        getSPIRVMemSemantics(Order) |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    if (static_cast<unsigned>(Order) == Semantics) {
      MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
      return SemanticsRegister;
    }
  }
  return buildConstantIntReg32(Semantics, MIRBuilder, GR);
}

static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
                               const SPIRV::IncomingCall *Call,
                               Register TypeReg,
                               ArrayRef<uint32_t> ImmArgs = {}) {
  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (TypeReg.isValid())
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
  unsigned Sz = Call->Arguments.size() - ImmArgs.size();
  for (unsigned i = 0; i < Sz; ++i)
    MIB.addUse(Call->Arguments[i]);
  for (uint32_t ImmArg : ImmArgs)
    MIB.addImm(ImmArg);
  return true;
}
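
// Operand layout produced above for a "__spirv_" wrapper call (illustrative):
// __spirv_AtomicLoad(ptr, scope, semantics) becomes
//   %ret = OpAtomicLoad %retTy %ptr %scope %semantics
// i.e. the result def and type first (only when TypeReg is valid), then the
// call arguments in order, then any trailing immediate operands.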

/// Helper function for translating atomic init to OpStore.
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));

  assert(Call->Arguments.size() == 2 &&
         "Need 2 arguments for atomic init translation");
  MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic load instruction.
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);

  Register PtrRegister = Call->Arguments[0];
  // TODO: if true, insert a call to __translate_ocl_memory_scope before
  // OpAtomicLoad and the function implementation. We can use Translator's
  // output for transcoding/atomic_explicit_arguments.cl as an example.
  Register ScopeRegister =
      Call->Arguments.size() > 1
          ? Call->Arguments[1]
          : buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register MemSemanticsReg;
  if (Call->Arguments.size() > 2) {
    // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
    MemSemanticsReg = Call->Arguments[2];
  } else {
    int Semantics =
        SPIRV::MemorySemantics::SequentiallyConsistent |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  }

  MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
      .addDef(Call->ReturnRegister)
      .addUse(TypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building an atomic store instruction.
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call,
                              Register(0));

  Register ScopeRegister =
      buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
  Register PtrRegister = Call->Arguments[0];
  int Semantics =
      SPIRV::MemorySemantics::SequentiallyConsistent |
      getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
  Register MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
  MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic compare-exchange instruction.
static bool buildAtomicCompareExchangeInst(
    const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
    unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register ObjectPtr = Call->Arguments[0];   // Pointer (volatile A *object).
  Register ExpectedArg = Call->Arguments[1]; // Comparator (C *expected).
  Register Desired = Call->Arguments[2];     // Value (C desired).
  SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
  LLT DesiredLLT = MRI->getType(Desired);

  assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
         SPIRV::OpTypePointer);
  unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
  (void)ExpectedType;
  assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
                   : ExpectedType == SPIRV::OpTypePointer);
  assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));

  SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
  assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
      SpvObjectPtrTy->getOperand(1).getImm());
  auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);

  Register MemSemEqualReg;
  Register MemSemUnequalReg;
  uint64_t MemSemEqual =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  uint64_t MemSemUnequal =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  if (Call->Arguments.size() >= 4) {
    assert(Call->Arguments.size() >= 5 &&
           "Need 5+ args for explicit atomic cmpxchg");
    auto MemOrdEq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
    auto MemOrdNeq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
    MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
    MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
    if (static_cast<unsigned>(MemOrdEq) == MemSemEqual)
      MemSemEqualReg = Call->Arguments[3];
    if (static_cast<unsigned>(MemOrdNeq) == MemSemUnequal)
      MemSemUnequalReg = Call->Arguments[4];
  }
  if (!MemSemEqualReg.isValid())
    MemSemEqualReg = buildConstantIntReg32(MemSemEqual, MIRBuilder, GR);
  if (!MemSemUnequalReg.isValid())
    MemSemUnequalReg = buildConstantIntReg32(MemSemUnequal, MIRBuilder, GR);

  Register ScopeReg;
  auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
  if (Call->Arguments.size() >= 6) {
    assert(Call->Arguments.size() == 6 &&
           "Extra args for explicit atomic cmpxchg");
    auto ClScope = static_cast<SPIRV::CLMemoryScope>(
        getIConstVal(Call->Arguments[5], MRI));
    Scope = getSPIRVScope(ClScope);
    if (ClScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[5];
  }
  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register Expected = IsCmpxchg
                          ? ExpectedArg
                          : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
                                          GR, LLT::scalar(64));
  MRI->setType(Expected, DesiredLLT);
  Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
                            : Call->ReturnRegister;
  if (!MRI->getRegClassOrNull(Tmp))
    MRI->setRegClass(Tmp, GR->getRegClass(SpvDesiredTy));
  GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());

  MIRBuilder.buildInstr(Opcode)
      .addDef(Tmp)
      .addUse(GR->getSPIRVTypeID(SpvDesiredTy))
      .addUse(ObjectPtr)
      .addUse(ScopeReg)
      .addUse(MemSemEqualReg)
      .addUse(MemSemUnequalReg)
      .addUse(Desired)
      .addUse(Expected);
  if (!IsCmpxchg) {
    MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
  }
  return true;
}
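
// Note: on the non-"cmpxchg" path above, OpAtomicCompareExchange returns the
// original value, so the helper additionally stores that value back to
// "expected" and emits an integer compare to produce the bool result of the
// C11-style atomic_compare_exchange builtins.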

/// Helper function for building atomic instructions.
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register ScopeRegister =
      Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();

  assert(Call->Arguments.size() <= 4 &&
         "Too many args for explicit atomic RMW");
  ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
                                MIRBuilder, GR, MRI);

  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::None;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);
  Register ValueReg = Call->Arguments[1];
  Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  // Support cl_ext_float_atomics.
  if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
    if (Opcode == SPIRV::OpAtomicIAdd) {
      Opcode = SPIRV::OpAtomicFAddEXT;
    } else if (Opcode == SPIRV::OpAtomicISub) {
      // Translate OpAtomicISub applied to a floating-type argument to
      // OpAtomicFAddEXT with the negated value operand.
      Opcode = SPIRV::OpAtomicFAddEXT;
      Register NegValueReg =
          MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
      MRI->setRegClass(NegValueReg, GR->getRegClass(Call->ReturnType));
      GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
                                MIRBuilder.getMF());
      MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
          .addDef(NegValueReg)
          .addUse(ValueReg);
      updateRegType(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
                    MIRBuilder.getMF().getRegInfo());
      ValueReg = NegValueReg;
    }
  }
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(ValueTypeReg)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building an atomic floating-type instruction.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call,
                                       unsigned Opcode,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  assert(Call->Arguments.size() == 4 &&
         "Wrong number of arguments for atomic floating-type builtin");
  Register PtrReg = Call->Arguments[0];
  Register ScopeReg = Call->Arguments[1];
  Register MemSemanticsReg = Call->Arguments[2];
  Register ValueReg = Call->Arguments[3];
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrReg)
      .addUse(ScopeReg)
      .addUse(MemSemanticsReg)
      .addUse(ValueReg);
  return true;
}

/// Helper function for building atomic flag instructions (e.g.
/// OpAtomicFlagTestAndSet).
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
                                unsigned Opcode, MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              IsSet ? TypeReg : Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);

  assert((Opcode != SPIRV::OpAtomicFlagClear ||
          (Semantics != SPIRV::MemorySemantics::Acquire &&
           Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
         "Invalid memory order argument!");

  Register ScopeRegister =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  ScopeRegister =
      buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);

  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (IsSet)
    MIB.addDef(Call->ReturnRegister).addUse(TypeReg);

  MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building barriers, i.e., memory/control ordering
/// operations.
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                             MachineIRBuilder &MIRBuilder,
                             SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
       Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_split_barrier";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
  unsigned MemSemantics = SPIRV::MemorySemantics::None;

  if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;

  if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;

  if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::ImageMemory;

  if (Opcode == SPIRV::OpMemoryBarrier)
    MemSemantics = getSPIRVMemSemantics(static_cast<std::memory_order>(
                       getIConstVal(Call->Arguments[1], MRI))) |
                   MemSemantics;
  else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Release;
  else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Acquire;
  else
    MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;

  Register MemSemanticsReg =
      MemFlags == MemSemantics
          ? Call->Arguments[0]
          : buildConstantIntReg32(MemSemantics, MIRBuilder, GR);
  Register ScopeReg;
  SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
  SPIRV::Scope::Scope MemScope = Scope;
  if (Call->Arguments.size() >= 2) {
    assert(
        ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
         (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
        "Extra args for explicitly scoped barrier");
    Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
                                                           : Call->Arguments[1];
    SPIRV::CLMemoryScope CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
    MemScope = getSPIRVScope(CLScope);
    if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
        (Opcode == SPIRV::OpMemoryBarrier))
      Scope = MemScope;
    if (CLScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[1];
  }

  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);

  auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
  if (Opcode != SPIRV::OpMemoryBarrier)
    MIB.addUse(buildConstantIntReg32(MemScope, MIRBuilder, GR));
  MIB.addUse(MemSemanticsReg);
  return true;
}
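
// For example, OpenCL barrier(CLK_LOCAL_MEM_FENCE) lowers to OpControlBarrier
// with Workgroup execution and memory scopes and memory semantics
// (WorkgroupMemory | SequentiallyConsistent).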

/// Helper function for building extended bit operations.
static bool buildExtendedBitOpsInst(const SPIRV::IncomingCall *Call,
                                    unsigned Opcode,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const auto *ST =
      static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
  if ((Opcode == SPIRV::OpBitFieldInsert ||
       Opcode == SPIRV::OpBitFieldSExtract ||
       Opcode == SPIRV::OpBitFieldUExtract || Opcode == SPIRV::OpBitReverse) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_bit_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

/// Helper function for building Intel's bindless image instructions.
static bool buildBindlessImageINTELInst(const SPIRV::IncomingCall *Call,
                                        unsigned Opcode,
                                        MachineIRBuilder &MIRBuilder,
                                        SPIRVGlobalRegistry *GR) {
  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Call->Arguments[0]);

  return true;
}

/// Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
static bool buildTernaryBitwiseFunctionINTELInst(
    const SPIRV::IncomingCall *Call, unsigned Opcode,
    MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

/// Helper function for building Intel's 2D block I/O instructions.
static bool build2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
                                    unsigned Opcode,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  // Generate the SPIR-V instruction accordingly.
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (unsigned i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);

  return true;
}

static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                          unsigned Scope, MachineIRBuilder &MIRBuilder,
                          SPIRVGlobalRegistry *GR) {
  switch (Opcode) {
  case SPIRV::OpCommitReadPipe:
  case SPIRV::OpCommitWritePipe:
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
  case SPIRV::OpGroupCommitReadPipe:
  case SPIRV::OpGroupCommitWritePipe:
  case SPIRV::OpGroupReserveReadPipePackets:
  case SPIRV::OpGroupReserveWritePipePackets: {
    Register ScopeConstReg =
        MIRBuilder.buildConstant(LLT::scalar(32), Scope).getReg(0);
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    MRI->setRegClass(ScopeConstReg, &SPIRV::iIDRegClass);
    MachineInstrBuilder MIB;
    MIB = MIRBuilder.buildInstr(Opcode);
    // Add the return register and type.
    if (Opcode == SPIRV::OpGroupReserveReadPipePackets ||
        Opcode == SPIRV::OpGroupReserveWritePipePackets)
      MIB.addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType));

    MIB.addUse(ScopeConstReg);
    for (unsigned int i = 0; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);

    return true;
  }
  default:
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));
  }
}

static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
  switch (dim) {
  case SPIRV::Dim::DIM_1D:
  case SPIRV::Dim::DIM_Buffer:
    return 1;
  case SPIRV::Dim::DIM_2D:
  case SPIRV::Dim::DIM_Cube:
  case SPIRV::Dim::DIM_Rect:
    return 2;
  case SPIRV::Dim::DIM_3D:
    return 3;
  default:
    report_fatal_error("Cannot get num components for given Dim");
  }
}

/// Helper function for obtaining the number of size components.
static unsigned getNumSizeComponents(SPIRVType *imgType) {
  assert(imgType->getOpcode() == SPIRV::OpTypeImage);
  auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
  unsigned numComps = getNumComponentsForDim(dim);
  bool arrayed = imgType->getOperand(4).getImm() == 1;
  return arrayed ? numComps + 1 : numComps;
}
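
// For example, a 2D arrayed image has two components for its dimensionality
// plus one component for the array index, i.e. three size components in total.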

static bool builtinMayNeedPromotionToVec(uint32_t BuiltinNumber) {
  switch (BuiltinNumber) {
  case SPIRV::OpenCLExtInst::s_min:
  case SPIRV::OpenCLExtInst::u_min:
  case SPIRV::OpenCLExtInst::s_max:
  case SPIRV::OpenCLExtInst::u_max:
  case SPIRV::OpenCLExtInst::fmax:
  case SPIRV::OpenCLExtInst::fmin:
  case SPIRV::OpenCLExtInst::fmax_common:
  case SPIRV::OpenCLExtInst::fmin_common:
  case SPIRV::OpenCLExtInst::s_clamp:
  case SPIRV::OpenCLExtInst::fclamp:
  case SPIRV::OpenCLExtInst::u_clamp:
  case SPIRV::OpenCLExtInst::mix:
  case SPIRV::OpenCLExtInst::step:
  case SPIRV::OpenCLExtInst::smoothstep:
    return true;
  default:
    break;
  }
  return false;
}
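
// The extended instructions listed above have mixed scalar/vector overloads
// in OpenCL (e.g. min(float2, float)); getBuiltinCallArguments below splats
// such scalar operands into vectors of the result type so that all operands
// of the emitted instruction match.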

//===----------------------------------------------------------------------===//
// Implementation functions for each builtin group
//===----------------------------------------------------------------------===//

static SmallVector<Register>
getBuiltinCallArguments(const SPIRV::IncomingCall *Call,
                        uint32_t BuiltinNumber, MachineIRBuilder &MIRBuilder,
                        SPIRVGlobalRegistry *GR) {
  Register ReturnTypeId = GR->getSPIRVTypeID(Call->ReturnType);
  unsigned ResultElementCount =
      GR->getScalarOrVectorComponentCount(ReturnTypeId);
  bool MayNeedPromotionToVec =
      builtinMayNeedPromotionToVec(BuiltinNumber) && ResultElementCount > 1;

  if (!MayNeedPromotionToVec)
    return {Call->Arguments.begin(), Call->Arguments.end()};

  SmallVector<Register> Arguments;
  for (Register Argument : Call->Arguments) {
    Register VecArg = Argument;
    SPIRVType *ArgumentType = GR->getSPIRVTypeForVReg(Argument);
    if (ArgumentType != Call->ReturnType) {
      VecArg = createVirtualRegister(Call->ReturnType, GR, MIRBuilder);
      auto VecSplat = MIRBuilder.buildInstr(SPIRV::OpCompositeConstruct)
                          .addDef(VecArg)
                          .addUse(ReturnTypeId);
      for (unsigned I = 0; I != ResultElementCount; ++I)
        VecSplat.addUse(Argument);
    }
    Arguments.push_back(VecArg);
  }
  return Arguments;
}

static bool generateExtInst(const SPIRV::IncomingCall *Call,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR, const CallBase &CB) {
  // Lookup the extended instruction number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Number =
      SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
  // fmin_common and fmax_common are now deprecated, and we should use fmin and
  // fmax with the NotInf and NotNaN flags instead. Keep the original number so
  // the NoNans and NoInfs flags can be added later.
  uint32_t OrigNumber = Number;
  const SPIRVSubtarget &ST =
      cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
  if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2) &&
      (Number == SPIRV::OpenCLExtInst::fmin_common ||
       Number == SPIRV::OpenCLExtInst::fmax_common)) {
    Number = (Number == SPIRV::OpenCLExtInst::fmin_common)
                 ? SPIRV::OpenCLExtInst::fmin
                 : SPIRV::OpenCLExtInst::fmax;
  }

  Register ReturnTypeId = GR->getSPIRVTypeID(Call->ReturnType);
  SmallVector<Register> Arguments =
      getBuiltinCallArguments(Call, Number, MIRBuilder, GR);

  // Build the extended instruction.
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
          .addUse(ReturnTypeId)
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Number);

  for (Register Argument : Arguments)
    MIB.addUse(Argument);

  MIB.getInstr()->copyIRFlags(CB);
  if (OrigNumber == SPIRV::OpenCLExtInst::fmin_common ||
      OrigNumber == SPIRV::OpenCLExtInst::fmax_common) {
    // Add the NoNans and NoInfs flags to the fmin/fmax instruction.
    MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoNans);
    MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoInfs);
  }
  return true;
}

static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register CompareRegister;
  SPIRVType *RelationType;
  std::tie(CompareRegister, RelationType) =
      buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  // Build the relational instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(CompareRegister)
                 .addUse(GR->getSPIRVTypeID(RelationType));

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);

  // Build the select instruction.
  return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
                         Call->ReturnType, GR);
}

static bool generateGroupInst(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const SPIRV::GroupBuiltin *GroupBuiltin =
      SPIRV::lookupGroupBuiltin(Builtin->Name);

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (Call->isSpirvOp()) {
    if (GroupBuiltin->NoGroupOperation) {
      SmallVector<uint32_t, 1> ImmArgs;
      if (GroupBuiltin->Opcode ==
              SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL &&
          Call->Arguments.size() > 4)
        ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[4], MRI));
      return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
                                GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
    }

    // The Group Operation is a literal.
    Register GroupOpReg = Call->Arguments[1];
    const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
    if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
      report_fatal_error(
          "Group Operation parameter must be an integer constant");
    uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
    Register ScopeReg = Call->Arguments[0];
    auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                   .addUse(ScopeReg)
                   .addImm(GrpOp);
    for (unsigned i = 2; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);
    return true;
  }

  Register Arg0;
  if (GroupBuiltin->HasBoolArg) {
    SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
    Register BoolReg = Call->Arguments[0];
    SPIRVType *BoolRegType = GR->getSPIRVTypeForVReg(BoolReg);
    if (!BoolRegType)
      report_fatal_error("Can't find a register's type definition");
    MachineInstr *ArgInstruction = getDefInstrMaybeConstant(BoolReg, MRI);
    if (ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT) {
      if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)
        Arg0 = GR->buildConstantInt(getIConstVal(BoolReg, MRI), MIRBuilder,
                                    BoolType, true);
    } else {
      if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
        Arg0 = MRI->createGenericVirtualRegister(LLT::scalar(1));
        MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
        GR->assignSPIRVTypeToVReg(BoolType, Arg0, MIRBuilder.getMF());
        MIRBuilder.buildICmp(
            CmpInst::ICMP_NE, Arg0, BoolReg,
            GR->buildConstantInt(0, MIRBuilder, BoolRegType, true));
        updateRegType(Arg0, nullptr, BoolType, GR, MIRBuilder,
                      MIRBuilder.getMF().getRegInfo());
      } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
        report_fatal_error("Expect a boolean argument");
      }
      // If BoolReg is already a boolean register, nothing needs to be done.
    }
  }

  Register GroupResultRegister = Call->ReturnRegister;
  SPIRVType *GroupResultType = Call->ReturnType;

  // TODO: maybe we need to check whether the result type is already boolean
  // and in this case do not insert the select instruction.
  const bool HasBoolReturnTy =
      GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
      GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
      GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;

  if (HasBoolReturnTy)
    std::tie(GroupResultRegister, GroupResultType) =
        buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
                                                      : SPIRV::Scope::Workgroup;
  Register ScopeRegister = buildConstantIntReg32(Scope, MIRBuilder, GR);

  Register VecReg;
  if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
      Call->Arguments.size() > 2) {
    // For OpGroupBroadcast "LocalId must be an integer datatype. It must be a
    // scalar, a vector with 2 components, or a vector with 3 components.",
    // meaning that we must create a vector from the function arguments if
    // it's a work_group_broadcast(val, local_id_x, local_id_y) or
    // work_group_broadcast(val, local_id_x, local_id_y, local_id_z) call.
    Register ElemReg = Call->Arguments[1];
    SPIRVType *ElemType = GR->getSPIRVTypeForVReg(ElemReg);
    if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)
      report_fatal_error("Expect an integer <LocalId> argument");
    unsigned VecLen = Call->Arguments.size() - 1;
    VecReg = MRI->createGenericVirtualRegister(
        LLT::fixed_vector(VecLen, MRI->getType(ElemReg)));
    MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
    SPIRVType *VecType =
        GR->getOrCreateSPIRVVectorType(ElemType, VecLen, MIRBuilder, true);
    GR->assignSPIRVTypeToVReg(VecType, VecReg, MIRBuilder.getMF());
    auto MIB =
        MIRBuilder.buildInstr(TargetOpcode::G_BUILD_VECTOR).addDef(VecReg);
    for (unsigned i = 1; i < Call->Arguments.size(); i++) {
      MIB.addUse(Call->Arguments[i]);
      setRegClassIfNull(Call->Arguments[i], MRI, GR);
    }
    updateRegType(VecReg, nullptr, VecType, GR, MIRBuilder,
                  MIRBuilder.getMF().getRegInfo());
  }

  // Build the work/sub group instruction.
  auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                 .addDef(GroupResultRegister)
                 .addUse(GR->getSPIRVTypeID(GroupResultType))
                 .addUse(ScopeRegister);

  if (!GroupBuiltin->NoGroupOperation)
    MIB.addImm(GroupBuiltin->GroupOperation);
  if (Call->Arguments.size() > 0) {
    MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
    setRegClassIfNull(Call->Arguments[0], MRI, GR);
    if (VecReg.isValid())
      MIB.addUse(VecReg);
    else
      for (unsigned i = 1; i < Call->Arguments.size(); i++)
        MIB.addUse(Call->Arguments[i]);
  }

  // Build the select instruction.
  if (HasBoolReturnTy)
    buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
                    Call->ReturnType, GR);
  return true;
}

static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
      SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);

  if (IntelSubgroups->IsMedia &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_media_block_io";
    report_fatal_error(DiagMsg.c_str(), false);
  } else if (!IntelSubgroups->IsMedia &&
             !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_subgroups";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  uint32_t OpCode = IntelSubgroups->Opcode;
  if (Call->isSpirvOp()) {
    bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
    return buildOpFromWrapper(MIRBuilder, OpCode, Call,
                              IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
                                    : Register(0));
  }

  if (IntelSubgroups->IsBlock) {
    // The minimal number of arguments set in the TableGen records is 1.
    if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
      if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
        // TODO: add required validation from the specification:
        // "'Image' must be an object whose type is OpTypeImage with a 'Sampled'
        // operand of 0 or 2. If the 'Sampled' operand is 2, then some
        // dimensions require a capability."
        switch (OpCode) {
        case SPIRV::OpSubgroupBlockReadINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
          break;
        case SPIRV::OpSubgroupBlockWriteINTEL:
          OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
          break;
        }
      }
    }
  }

  // TODO: opaque pointer types should eventually be resolved in such a way
  // that validation of block reads is enabled with respect to the following
  // specification requirement:
  // "'Result Type' may be a scalar or vector type, and its component type must
  // be equal to the type pointed to by 'Ptr'."
  // For example, the function parameter type should not be the default i8
  // pointer, but should depend on the result type of the instruction where it
  // is used as a pointer argument of OpSubgroupBlockReadINTEL.

  // Build the Intel subgroups instruction.
  MachineInstrBuilder MIB =
      IntelSubgroups->IsWrite
          ? MIRBuilder.buildInstr(OpCode)
          : MIRBuilder.buildInstr(OpCode)
                .addDef(Call->ReturnRegister)
                .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  for (size_t i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);
  return true;
}

static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(
          SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_uniform_group_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  const SPIRV::GroupUniformBuiltin *GroupUniform =
      SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register GroupResultReg = Call->ReturnRegister;
  Register ScopeReg = Call->Arguments[0];
  Register ValueReg = Call->Arguments[2];

  // Group Operation
  Register ConstGroupOpReg = Call->Arguments[1];
  const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
  if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
    report_fatal_error(
        "expect a constant group operation for a uniform group instruction",
        false);
  const MachineOperand &ConstOperand = Const->getOperand(1);
  if (!ConstOperand.isCImm())
    report_fatal_error("uniform group instructions: group operation must be an "
                       "integer constant",
                       false);

  auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
                 .addDef(GroupResultReg)
                 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                 .addUse(ScopeReg);
  addNumImm(ConstOperand.getCImm()->getValue(), MIB);
  MIB.addUse(ValueReg);

  return true;
}

static bool generateKernelClockInst(const SPIRV::IncomingCall *Call,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  MachineFunction &MF = MIRBuilder.getMF();
  const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
  if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_shader_clock";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  Register ResultReg = Call->ReturnRegister;

  if (Builtin->Name == "__spirv_ReadClockKHR") {
    MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
        .addDef(ResultReg)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Call->Arguments[0]);
  } else {
    // Deduce the `Scope` operand from the builtin function name.
    SPIRV::Scope::Scope ScopeArg =
        StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
            .EndsWith("device", SPIRV::Scope::Scope::Device)
            .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
            .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
    Register ScopeReg = buildConstantIntReg32(ScopeArg, MIRBuilder, GR);

    MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
        .addDef(ResultReg)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(ScopeReg);
  }

  return true;
}

// These queries ask for a single size_t result for a given dimension index,
// e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
// corresponding to these values are all vec3 types, so we need to extract the
// correct index or return DefaultValue (0 or 1 depending on the query). We
// also handle extending or truncating in case size_t does not match the
// expected result type's bitwidth.
//
// For a constant index >= 3 we generate:
//   %res = OpConstant %SizeT DefaultValue
//
// For other indices we generate:
//   %g = OpVariable %ptr_V3_SizeT Input
//   OpDecorate %g BuiltIn XXX
//   OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
//   OpDecorate %g Constant
//   %loadedVec = OpLoad %V3_SizeT %g
//
// Then, if the index is constant < 3, we generate:
//   %res = OpCompositeExtract %SizeT %loadedVec idx
// If the index is dynamic, we generate:
//   %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
//   %cmp = OpULessThan %bool %idx %const_3
//   %res = OpSelect %SizeT %cmp %tmp %const_<DefaultValue>
//
// If the bitwidth of %res does not match the expected return type, we add an
// extend or truncate.
1607 MachineIRBuilder &MIRBuilder,
1609 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1610 uint64_t DefaultValue) {
1611 Register IndexRegister = Call->Arguments[0];
1612 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
1613 const unsigned PointerSize = GR->getPointerSize();
1614 const SPIRVType *PointerSizeType =
1615 GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
1616 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1617 auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);
1618
1619 // Set up the register that will be truncated or extended at the end.
1620 Register ToTruncate = Call->ReturnRegister;
1621
1622 // If the index is constant, we can statically determine if it is in range.
1623 bool IsConstantIndex =
1624 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1625
1626 // If it's out of range (max dimension is 3), we can just return the constant
1627 // default value (0 or 1 depending on which query function).
1628 if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
1629 Register DefaultReg = Call->ReturnRegister;
1630 if (PointerSize != ResultWidth) {
1631 DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1632 MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1633 GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
1634 MIRBuilder.getMF());
1635 ToTruncate = DefaultReg;
1636 }
1637 auto NewRegister =
1638 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1639 MIRBuilder.buildCopy(DefaultReg, NewRegister);
1640 } else { // If it could be in range, we need to load from the given builtin.
1641 auto Vec3Ty =
1642 GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder, true);
1643 Register LoadedVector =
1644 buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
1645 LLT::fixed_vector(3, PointerSize));
1646 // Set up the vreg to extract the result to (possibly a new temporary one).
1647 Register Extracted = Call->ReturnRegister;
1648 if (!IsConstantIndex || PointerSize != ResultWidth) {
1649 Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1650 MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
1651 GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
1652 }
1653 // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
1654 // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
1655 MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
1656 Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
1657 ExtractInst.addUse(LoadedVector).addUse(IndexRegister);
1658
1659 // If the index is dynamic, we need to check that it's < 3 and use a select.
1660 if (!IsConstantIndex) {
1661 updateRegType(Extracted, nullptr, PointerSizeType, GR, MIRBuilder, *MRI);
1662
1663 auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
1664 auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
1665
1666 Register CompareRegister =
1667 MRI->createGenericVirtualRegister(LLT::scalar(1));
1668 MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1669 GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());
1670
1671 // Use G_ICMP to check if idxVReg < 3.
1672 MIRBuilder.buildICmp(
1673 CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
1674 GR->buildConstantInt(3, MIRBuilder, IndexType, true));
1675
1676 // Get constant for the default value (0 or 1 depending on which
1677 // function).
1678 Register DefaultRegister =
1679 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1680
1681 // Get a register for the selection result (possibly a new temporary one).
1682 Register SelectionResult = Call->ReturnRegister;
1683 if (PointerSize != ResultWidth) {
1684 SelectionResult =
1685 MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1686 MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1687 GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
1688 MIRBuilder.getMF());
1689 }
1690 // Create the final G_SELECT to return the extracted value or the default.
1691 MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
1692 DefaultRegister);
1693 ToTruncate = SelectionResult;
1694 } else {
1695 ToTruncate = Extracted;
1696 }
1697 }
1698 // Alter the result's bitwidth if it does not match the SizeT value extracted.
1699 if (PointerSize != ResultWidth)
1700 MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
1701 return true;
1702}
1703
1704 static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
1705 MachineIRBuilder &MIRBuilder,
1706 SPIRVGlobalRegistry *GR) {
1707 // Lookup the builtin variable record.
1708 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1709 SPIRV::BuiltIn::BuiltIn Value =
1710 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1711
1712 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1713 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1714
1715 // Build a load instruction for the builtin variable.
1716 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1717 LLT LLType;
1718 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1719 LLType =
1720 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
1721 else
1722 LLType = LLT::scalar(BitWidth);
1723
1724 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1725 LLType, Call->ReturnRegister);
1726}
1727
1728 static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
1729 MachineIRBuilder &MIRBuilder,
1730 SPIRVGlobalRegistry *GR) {
1731 // Lookup the instruction opcode in the TableGen records.
1732 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1733 unsigned Opcode =
1734 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1735
1736 switch (Opcode) {
1737 case SPIRV::OpStore:
1738 return buildAtomicInitInst(Call, MIRBuilder);
1739 case SPIRV::OpAtomicLoad:
1740 return buildAtomicLoadInst(Call, MIRBuilder, GR);
1741 case SPIRV::OpAtomicStore:
1742 return buildAtomicStoreInst(Call, MIRBuilder, GR);
1743 case SPIRV::OpAtomicCompareExchange:
1744 case SPIRV::OpAtomicCompareExchangeWeak:
1745 return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
1746 GR);
1747 case SPIRV::OpAtomicIAdd:
1748 case SPIRV::OpAtomicISub:
1749 case SPIRV::OpAtomicOr:
1750 case SPIRV::OpAtomicXor:
1751 case SPIRV::OpAtomicAnd:
1752 case SPIRV::OpAtomicExchange:
1753 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1754 case SPIRV::OpMemoryBarrier:
1755 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1756 case SPIRV::OpAtomicFlagTestAndSet:
1757 case SPIRV::OpAtomicFlagClear:
1758 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1759 default:
1760 if (Call->isSpirvOp())
1761 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1762 GR->getSPIRVTypeID(Call->ReturnType));
1763 return false;
1764 }
1765}
1766
1767 static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call,
1768 MachineIRBuilder &MIRBuilder,
1769 SPIRVGlobalRegistry *GR) {
1770 // Lookup the instruction opcode in the TableGen records.
1771 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1772 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
1773
1774 switch (Opcode) {
1775 case SPIRV::OpAtomicFAddEXT:
1776 case SPIRV::OpAtomicFMinEXT:
1777 case SPIRV::OpAtomicFMaxEXT:
1778 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1779 default:
1780 return false;
1781 }
1782}
1783
1784 static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
1785 MachineIRBuilder &MIRBuilder,
1786 SPIRVGlobalRegistry *GR) {
1787 // Lookup the instruction opcode in the TableGen records.
1788 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1789 unsigned Opcode =
1790 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1791
1792 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1793}
1794
1795 static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call,
1796 MachineIRBuilder &MIRBuilder,
1797 SPIRVGlobalRegistry *GR) {
1798 // Lookup the instruction opcode in the TableGen records.
1799 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1800 unsigned Opcode =
1801 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1802
1803 if (Opcode == SPIRV::OpGenericCastToPtrExplicit) {
1804 SPIRV::StorageClass::StorageClass ResSC =
1805 GR->getPointerStorageClass(Call->ReturnRegister);
1806 if (!isGenericCastablePtr(ResSC))
1807 return false;
1808
1809 MIRBuilder.buildInstr(Opcode)
1810 .addDef(Call->ReturnRegister)
1811 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1812 .addUse(Call->Arguments[0])
1813 .addImm(ResSC);
1814 } else {
1815 MIRBuilder.buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1816 .addDef(Call->ReturnRegister)
1817 .addUse(Call->Arguments[0]);
1818 }
1819 return true;
1820}
1821
1822static bool generateDotOrFMulInst(const StringRef DemangledCall,
1823 const SPIRV::IncomingCall *Call,
1824 MachineIRBuilder &MIRBuilder,
1825 SPIRVGlobalRegistry *GR) {
1826 if (Call->isSpirvOp())
1827 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1828 GR->getSPIRVTypeID(Call->ReturnType));
1829
1830 bool IsVec = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() ==
1831 SPIRV::OpTypeVector;
1832 // Use OpDot for vector arguments and OpFMulS for scalar arguments.
1833 uint32_t OC = IsVec ? SPIRV::OpDot : SPIRV::OpFMulS;
1834 bool IsSwapReq = false;
1835
1836 const auto *ST =
1837 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1838 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt) &&
1839 (ST->canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
1840 ST->isAtLeastSPIRVVer(VersionTuple(1, 6)))) {
1841 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1842 const SPIRV::IntegerDotProductBuiltin *IntDot =
1843 SPIRV::lookupIntegerDotProductBuiltin(Builtin->Name);
1844 if (IntDot) {
1845 OC = IntDot->Opcode;
1846 IsSwapReq = IntDot->IsSwapReq;
1847 } else if (IsVec) {
1848 // Handling "dot" and "dot_acc_sat" builtins which use vectors of
1849 // integers.
1850 LLVMContext &Ctx = MIRBuilder.getContext();
1851 SmallVector<StringRef, 10> TypeStrs;
1852 SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledCall, Ctx);
1853 bool IsFirstSigned = TypeStrs[0].trim()[0] != 'u';
1854 bool IsSecondSigned = TypeStrs[1].trim()[0] != 'u';
1855
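// e.g. (illustrative): dot(int4, uint4) selects OpSUDot below, while
// dot(uint4, int4) selects OpSUDot with the operands swapped.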
1856 if (Call->BuiltinName == "dot") {
1857 if (IsFirstSigned && IsSecondSigned)
1858 OC = SPIRV::OpSDot;
1859 else if (!IsFirstSigned && !IsSecondSigned)
1860 OC = SPIRV::OpUDot;
1861 else {
1862 OC = SPIRV::OpSUDot;
1863 if (!IsFirstSigned)
1864 IsSwapReq = true;
1865 }
1866 } else if (Call->BuiltinName == "dot_acc_sat") {
1867 if (IsFirstSigned && IsSecondSigned)
1868 OC = SPIRV::OpSDotAccSat;
1869 else if (!IsFirstSigned && !IsSecondSigned)
1870 OC = SPIRV::OpUDotAccSat;
1871 else {
1872 OC = SPIRV::OpSUDotAccSat;
1873 if (!IsFirstSigned)
1874 IsSwapReq = true;
1875 }
1876 }
1877 }
1878 }
1879
1880 MachineInstrBuilder MIB = MIRBuilder.buildInstr(OC)
1881 .addDef(Call->ReturnRegister)
1882 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1883
1884 if (IsSwapReq) {
1885 MIB.addUse(Call->Arguments[1]);
1886 MIB.addUse(Call->Arguments[0]);
1887 // Needed for the dot_acc_sat* builtins.
1888 for (size_t i = 2; i < Call->Arguments.size(); ++i)
1889 MIB.addUse(Call->Arguments[i]);
1890 } else {
1891 for (size_t i = 0; i < Call->Arguments.size(); ++i)
1892 MIB.addUse(Call->Arguments[i]);
1893 }
1894
1895 // Add Packed Vector Format for Integer dot product builtins if arguments are
1896 // scalar
1897 if (!IsVec && OC != SPIRV::OpFMulS)
1898 MIB.addImm(SPIRV::PackedVectorFormat4x8Bit);
1899
1900 return true;
1901}
1902
1903 static bool generateWaveInst(const SPIRV::IncomingCall *Call,
1904 MachineIRBuilder &MIRBuilder,
1905 SPIRVGlobalRegistry *GR) {
1906 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1907 SPIRV::BuiltIn::BuiltIn Value =
1908 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1909
1910 // For now, we only support a single Wave intrinsic with a single return type.
1911 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1912 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1913
1914 return buildBuiltinVariableLoad(
1915 MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1916 /* isConst= */ false, /* LinkageType= */ std::nullopt);
1917}
1918
1919// We expect a builtin
1920 // Name(ptr sret([RetType]) %result, Type %operand1, Type %operand2)
1921// where %result is a pointer to where the result of the builtin execution
1922// is to be stored, and generate the following instructions:
1923 // Res = Opcode RetType Operand1 Operand2
1924// OpStore RetVariable Res
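//
// For example (illustrative, names abridged):
//     __spirv_IAddCarry(ptr sret(%struct) %res, i32 %a, i32 %b)
// becomes
//     %r = OpIAddCarry %struct %a %b
//     OpStore %res %r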
1925 static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call,
1926 MachineIRBuilder &MIRBuilder,
1927 SPIRVGlobalRegistry *GR) {
1928 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1929 unsigned Opcode =
1930 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1931
1932 Register SRetReg = Call->Arguments[0];
1933 SPIRVType *PtrRetType = GR->getSPIRVTypeForVReg(SRetReg);
1934 SPIRVType *RetType = GR->getPointeeType(PtrRetType);
1935 if (!RetType)
1936 report_fatal_error("The first parameter must be a pointer");
1937 if (RetType->getOpcode() != SPIRV::OpTypeStruct)
1938 report_fatal_error("Expected struct type result for the arithmetic with "
1939 "overflow builtins");
1940
1941 SPIRVType *OpType1 = GR->getSPIRVTypeForVReg(Call->Arguments[1]);
1942 SPIRVType *OpType2 = GR->getSPIRVTypeForVReg(Call->Arguments[2]);
1943 if (!OpType1 || !OpType2 || OpType1 != OpType2)
1944 report_fatal_error("Operands must have the same type");
1945 if (OpType1->getOpcode() == SPIRV::OpTypeVector)
1946 switch (Opcode) {
1947 case SPIRV::OpIAddCarryS:
1948 Opcode = SPIRV::OpIAddCarryV;
1949 break;
1950 case SPIRV::OpISubBorrowS:
1951 Opcode = SPIRV::OpISubBorrowV;
1952 break;
1953 }
1954
1955 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1956 Register ResReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1957 if (const TargetRegisterClass *DstRC =
1958 MRI->getRegClassOrNull(Call->Arguments[1])) {
1959 MRI->setRegClass(ResReg, DstRC);
1960 MRI->setType(ResReg, MRI->getType(Call->Arguments[1]));
1961 } else {
1962 MRI->setType(ResReg, LLT::scalar(64));
1963 }
1964 GR->assignSPIRVTypeToVReg(RetType, ResReg, MIRBuilder.getMF());
1965 MIRBuilder.buildInstr(Opcode)
1966 .addDef(ResReg)
1967 .addUse(GR->getSPIRVTypeID(RetType))
1968 .addUse(Call->Arguments[1])
1969 .addUse(Call->Arguments[2]);
1970 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(SRetReg).addUse(ResReg);
1971 return true;
1972}
1973
1974 static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
1975 MachineIRBuilder &MIRBuilder,
1976 SPIRVGlobalRegistry *GR) {
1977 // Lookup the builtin record.
1978 SPIRV::BuiltIn::BuiltIn Value =
1979 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1980 const bool IsDefaultOne = (Value == SPIRV::BuiltIn::GlobalSize ||
1981 Value == SPIRV::BuiltIn::NumWorkgroups ||
1982 Value == SPIRV::BuiltIn::WorkgroupSize ||
1983 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
1984 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefaultOne ? 1 : 0);
1985}
1986
1987 static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
1988 MachineIRBuilder &MIRBuilder,
1989 SPIRVGlobalRegistry *GR) {
1990 // Lookup the image size query component number in the TableGen records.
1991 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1992 uint32_t Component =
1993 SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
1994 // Query result may either be a vector or a scalar. If return type is not a
1995 // vector, expect only a single size component. Otherwise get the number of
1996 // expected components.
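// e.g. (illustrative): int get_image_width(image2d_t) expects a single
// component, so the two-component result of OpImageQuerySizeLod is narrowed
// with OpCompositeExtract below.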
1997 unsigned NumExpectedRetComponents =
1998 Call->ReturnType->getOpcode() == SPIRV::OpTypeVector
1999 ? Call->ReturnType->getOperand(2).getImm()
2000 : 1;
2001 // Get the actual number of query result/size components.
2002 SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2003 unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
2004 Register QueryResult = Call->ReturnRegister;
2005 SPIRVType *QueryResultType = Call->ReturnType;
2006 if (NumExpectedRetComponents != NumActualRetComponents) {
2007 unsigned Bitwidth = Call->ReturnType->getOpcode() == SPIRV::OpTypeInt
2008 ? Call->ReturnType->getOperand(1).getImm()
2009 : 32;
2010 QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
2011 LLT::fixed_vector(NumActualRetComponents, Bitwidth));
2012 MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::vIDRegClass);
2013 SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(Bitwidth, MIRBuilder);
2014 QueryResultType = GR->getOrCreateSPIRVVectorType(
2015 IntTy, NumActualRetComponents, MIRBuilder, true);
2016 GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
2017 }
2018 bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
2019 unsigned Opcode =
2020 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
2021 auto MIB = MIRBuilder.buildInstr(Opcode)
2022 .addDef(QueryResult)
2023 .addUse(GR->getSPIRVTypeID(QueryResultType))
2024 .addUse(Call->Arguments[0]);
2025 if (!IsDimBuf)
2026 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Lod id.
2027 if (NumExpectedRetComponents == NumActualRetComponents)
2028 return true;
2029 if (NumExpectedRetComponents == 1) {
2030 // Only 1 component is expected, build OpCompositeExtract instruction.
2031 unsigned ExtractedComposite =
2032 Component == 3 ? NumActualRetComponents - 1 : Component;
2033 assert(ExtractedComposite < NumActualRetComponents &&
2034 "Invalid composite index!");
2035 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2036 SPIRVType *NewType = nullptr;
2037 if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
2038 Register NewTypeReg = QueryResultType->getOperand(1).getReg();
2039 if (TypeReg != NewTypeReg &&
2040 (NewType = GR->getSPIRVTypeForVReg(NewTypeReg)) != nullptr)
2041 TypeReg = NewTypeReg;
2042 }
2043 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
2044 .addDef(Call->ReturnRegister)
2045 .addUse(TypeReg)
2046 .addUse(QueryResult)
2047 .addImm(ExtractedComposite);
2048 if (NewType != nullptr)
2049 updateRegType(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2050 MIRBuilder.getMF().getRegInfo());
2051 } else {
2052 // More than 1 component is expected, fill a new vector.
2053 auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
2054 .addDef(Call->ReturnRegister)
2055 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2056 .addUse(QueryResult)
2057 .addUse(QueryResult);
2058 for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
2059 MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
2060 }
2061 return true;
2062}
2063
2064 static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
2065 MachineIRBuilder &MIRBuilder,
2066 SPIRVGlobalRegistry *GR) {
2067 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
2068 "Image samples query result must be of int type!");
2069
2070 // Lookup the instruction opcode in the TableGen records.
2071 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2072 unsigned Opcode =
2073 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2074
2075 Register Image = Call->Arguments[0];
2076 SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
2077 GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
2078 (void)ImageDimensionality;
2079
2080 switch (Opcode) {
2081 case SPIRV::OpImageQuerySamples:
2082 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
2083 "Image must be of 2D dimensionality");
2084 break;
2085 case SPIRV::OpImageQueryLevels:
2086 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
2087 ImageDimensionality == SPIRV::Dim::DIM_2D ||
2088 ImageDimensionality == SPIRV::Dim::DIM_3D ||
2089 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
2090 "Image must be of 1D/2D/3D/Cube dimensionality");
2091 break;
2092 }
2093
2094 MIRBuilder.buildInstr(Opcode)
2095 .addDef(Call->ReturnRegister)
2096 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2097 .addUse(Image);
2098 return true;
2099}
2100
2101// TODO: Move to TableGen.
2102static SPIRV::SamplerAddressingMode::SamplerAddressingMode
2103 getSamplerAddressingModeFromBitmask(unsigned Bitmask) {
2104 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
2105 case SPIRV::CLK_ADDRESS_CLAMP:
2106 return SPIRV::SamplerAddressingMode::Clamp;
2107 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
2108 return SPIRV::SamplerAddressingMode::ClampToEdge;
2109 case SPIRV::CLK_ADDRESS_REPEAT:
2110 return SPIRV::SamplerAddressingMode::Repeat;
2111 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
2112 return SPIRV::SamplerAddressingMode::RepeatMirrored;
2113 case SPIRV::CLK_ADDRESS_NONE:
2114 return SPIRV::SamplerAddressingMode::None;
2115 default:
2116 report_fatal_error("Unknown CL address mode");
2117 }
2118}
2119
2120static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
2121 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
2122}
2123
2124static SPIRV::SamplerFilterMode::SamplerFilterMode
2125 getSamplerFilterModeFromBitmask(unsigned Bitmask) {
2126 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
2127 return SPIRV::SamplerFilterMode::Linear;
2128 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
2129 return SPIRV::SamplerFilterMode::Nearest;
2130 return SPIRV::SamplerFilterMode::Nearest;
2131}
2132
2133static bool generateReadImageInst(const StringRef DemangledCall,
2134 const SPIRV::IncomingCall *Call,
2135 MachineIRBuilder &MIRBuilder,
2136 SPIRVGlobalRegistry *GR) {
2137 if (Call->isSpirvOp())
2138 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageRead, Call,
2139 GR->getSPIRVTypeID(Call->ReturnType));
2140 Register Image = Call->Arguments[0];
2141 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2142 bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
2143 bool HasMsaa = DemangledCall.contains_insensitive("msaa");
2144 if (HasOclSampler) {
2145 Register Sampler = Call->Arguments[1];
2146
2147 if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
2148 getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
2149 uint64_t SamplerMask = getIConstVal(Sampler, MRI);
2150 Sampler = GR->buildConstantSampler(
2151 Register(), getSamplerAddressingModeFromBitmask(SamplerMask),
2152 getSamplerParamFromBitmask(SamplerMask),
2153 getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder);
2154 }
2155 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2156 SPIRVType *SampledImageType =
2157 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2158 Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2159
2160 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2161 .addDef(SampledImage)
2162 .addUse(GR->getSPIRVTypeID(SampledImageType))
2163 .addUse(Image)
2164 .addUse(Sampler);
2165
2166 Register Lod = GR->buildConstantFP(APFloat::getZero(APFloat::IEEEsingle()),
2167 MIRBuilder);
2168
2169 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
2170 SPIRVType *TempType =
2171 GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder, true);
2172 Register TempRegister =
2173 MRI->createGenericVirtualRegister(GR->getRegType(TempType));
2174 MRI->setRegClass(TempRegister, GR->getRegClass(TempType));
2175 GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
2176 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2177 .addDef(TempRegister)
2178 .addUse(GR->getSPIRVTypeID(TempType))
2179 .addUse(SampledImage)
2180 .addUse(Call->Arguments[2]) // Coordinate.
2181 .addImm(SPIRV::ImageOperand::Lod)
2182 .addUse(Lod);
2183 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
2184 .addDef(Call->ReturnRegister)
2185 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2186 .addUse(TempRegister)
2187 .addImm(0);
2188 } else {
2189 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2190 .addDef(Call->ReturnRegister)
2191 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2192 .addUse(SampledImage)
2193 .addUse(Call->Arguments[2]) // Coordinate.
2194 .addImm(SPIRV::ImageOperand::Lod)
2195 .addUse(Lod);
2196 }
2197 } else if (HasMsaa) {
2198 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2199 .addDef(Call->ReturnRegister)
2200 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2201 .addUse(Image)
2202 .addUse(Call->Arguments[1]) // Coordinate.
2203 .addImm(SPIRV::ImageOperand::Sample)
2204 .addUse(Call->Arguments[2]);
2205 } else {
2206 MIRBuilder.buildInstr(SPIRV::OpImageRead)
2207 .addDef(Call->ReturnRegister)
2208 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2209 .addUse(Image)
2210 .addUse(Call->Arguments[1]); // Coordinate.
2211 }
2212 return true;
2213}
2214
2215 static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
2216 MachineIRBuilder &MIRBuilder,
2217 SPIRVGlobalRegistry *GR) {
2218 if (Call->isSpirvOp())
2219 return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageWrite, Call,
2220 Register(0));
2221 MIRBuilder.buildInstr(SPIRV::OpImageWrite)
2222 .addUse(Call->Arguments[0]) // Image.
2223 .addUse(Call->Arguments[1]) // Coordinate.
2224 .addUse(Call->Arguments[2]); // Texel.
2225 return true;
2226}
2227
2228static bool generateSampleImageInst(const StringRef DemangledCall,
2229 const SPIRV::IncomingCall *Call,
2230 MachineIRBuilder &MIRBuilder,
2231 SPIRVGlobalRegistry *GR) {
2232 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2233 if (Call->Builtin->Name.contains_insensitive(
2234 "__translate_sampler_initializer")) {
2235 // Build sampler literal.
2236 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
2237 Register Sampler = GR->buildConstantSampler(
2238 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
2239 getSamplerParamFromBitmask(Bitmask),
2240 getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder);
2241 return Sampler.isValid();
2242 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
2243 // Create OpSampledImage.
2244 Register Image = Call->Arguments[0];
2245 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2246 SPIRVType *SampledImageType =
2247 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2248 Register SampledImage =
2249 Call->ReturnRegister.isValid()
2250 ? Call->ReturnRegister
2251 : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2252 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2253 .addDef(SampledImage)
2254 .addUse(GR->getSPIRVTypeID(SampledImageType))
2255 .addUse(Image)
2256 .addUse(Call->Arguments[1]); // Sampler.
2257 return true;
2258 } else if (Call->Builtin->Name.contains_insensitive(
2259 "__spirv_ImageSampleExplicitLod")) {
2260 // Sample an image using an explicit level of detail.
2261 std::string ReturnType = DemangledCall.str();
2262 if (DemangledCall.contains("_R")) {
2263 ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
2264 ReturnType = ReturnType.substr(0, ReturnType.find('('));
2265 }
2266 SPIRVType *Type =
2267 Call->ReturnType
2268 ? Call->ReturnType
2269 : GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder, true);
2270 if (!Type) {
2271 std::string DiagMsg =
2272 "Unable to recognize SPIRV type name: " + ReturnType;
2273 report_fatal_error(DiagMsg.c_str());
2274 }
2275 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2276 .addDef(Call->ReturnRegister)
2277 .addUse(GR->getSPIRVTypeID(Type))
2278 .addUse(Call->Arguments[0]) // Image.
2279 .addUse(Call->Arguments[1]) // Coordinate.
2280 .addImm(SPIRV::ImageOperand::Lod)
2281 .addUse(Call->Arguments[3]);
2282 return true;
2283 }
2284 return false;
2285}
2286
2287 static bool generateSelectInst(const SPIRV::IncomingCall *Call,
2288 MachineIRBuilder &MIRBuilder) {
2289 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
2290 Call->Arguments[1], Call->Arguments[2]);
2291 return true;
2292}
2293
2294 static bool generateConstructInst(const SPIRV::IncomingCall *Call,
2295 MachineIRBuilder &MIRBuilder,
2296 SPIRVGlobalRegistry *GR) {
2297 createContinuedInstructions(MIRBuilder, SPIRV::OpCompositeConstruct, 3,
2298 SPIRV::OpCompositeConstructContinuedINTEL,
2299 Call->Arguments, Call->ReturnRegister,
2300 GR->getSPIRVTypeID(Call->ReturnType));
2301 return true;
2302}
2303
2304 static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call,
2305 MachineIRBuilder &MIRBuilder,
2306 SPIRVGlobalRegistry *GR) {
2307 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2308 unsigned Opcode =
2309 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2310 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
2311 Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
2312 Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
2313 unsigned ArgSz = Call->Arguments.size();
2314 unsigned LiteralIdx = 0;
2315 switch (Opcode) {
2316 // Memory operand is optional and is literal.
2317 case SPIRV::OpCooperativeMatrixLoadKHR:
2318 LiteralIdx = ArgSz > 3 ? 3 : 0;
2319 break;
2320 case SPIRV::OpCooperativeMatrixStoreKHR:
2321 LiteralIdx = ArgSz > 4 ? 4 : 0;
2322 break;
2323 case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2324 LiteralIdx = ArgSz > 7 ? 7 : 0;
2325 break;
2326 case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2327 LiteralIdx = ArgSz > 8 ? 8 : 0;
2328 break;
2329 // Cooperative Matrix Operands operand is optional and is literal.
2330 case SPIRV::OpCooperativeMatrixMulAddKHR:
2331 LiteralIdx = ArgSz > 3 ? 3 : 0;
2332 break;
2333 };
2334 
2335 SmallVector<uint32_t, 1> ImmArgs;
2334
2336 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2337 if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2338 const uint32_t CacheLevel = getConstFromIntrinsic(Call->Arguments[3], MRI);
2339 auto MIB = MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
2340 .addUse(Call->Arguments[0]) // pointer
2341 .addUse(Call->Arguments[1]) // rows
2342 .addUse(Call->Arguments[2]) // columns
2343 .addImm(CacheLevel) // cache level
2344 .addUse(Call->Arguments[4]); // memory layout
2345 if (ArgSz > 5)
2346 MIB.addUse(Call->Arguments[5]); // stride
2347 if (ArgSz > 6) {
2348 const uint32_t MemOp = getConstFromIntrinsic(Call->Arguments[6], MRI);
2349 MIB.addImm(MemOp); // memory operand
2350 }
2351 return true;
2352 }
2353 if (LiteralIdx > 0)
2354 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
2355 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2356 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2357 SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2358 if (!CoopMatrType)
2359 report_fatal_error("Can't find a register's type definition");
2360 MIRBuilder.buildInstr(Opcode)
2361 .addDef(Call->ReturnRegister)
2362 .addUse(TypeReg)
2363 .addUse(CoopMatrType->getOperand(0).getReg());
2364 return true;
2365 }
2366 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2367 IsSet ? TypeReg : Register(0), ImmArgs);
2368}
2369
2370 static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
2371 MachineIRBuilder &MIRBuilder,
2372 SPIRVGlobalRegistry *GR) {
2373 // Lookup the instruction opcode in the TableGen records.
2374 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2375 unsigned Opcode =
2376 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2377 const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2378
2379 switch (Opcode) {
2380 case SPIRV::OpSpecConstant: {
2381 // Build the SpecID decoration.
2382 unsigned SpecId =
2383 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
2384 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
2385 {SpecId});
2386 // Determine the constant MI.
2387 Register ConstRegister = Call->Arguments[1];
2388 const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
2389 assert(Const &&
2390 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2391 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2392 "Argument should be either an int or floating-point constant");
2393 // Determine the opcode and build the OpSpec MI.
2394 const MachineOperand &ConstOperand = Const->getOperand(1);
2395 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2396 assert(ConstOperand.isCImm() && "Int constant operand is expected");
2397 Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
2398 ? SPIRV::OpSpecConstantTrue
2399 : SPIRV::OpSpecConstantFalse;
2400 }
2401 auto MIB = MIRBuilder.buildInstr(Opcode)
2402 .addDef(Call->ReturnRegister)
2403 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2404
2405 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2406 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2407 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
2408 else
2409 addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
2410 }
2411 return true;
2412 }
2413 case SPIRV::OpSpecConstantComposite: {
2414 createContinuedInstructions(MIRBuilder, Opcode, 3,
2415 SPIRV::OpSpecConstantCompositeContinuedINTEL,
2416 Call->Arguments, Call->ReturnRegister,
2417 GR->getSPIRVTypeID(Call->ReturnType));
2418 return true;
2419 }
2420 default:
2421 return false;
2422 }
2423}
2424
2425 static bool generateExtendedBitOpsInst(const SPIRV::IncomingCall *Call,
2426 MachineIRBuilder &MIRBuilder,
2427 SPIRVGlobalRegistry *GR) {
2428 // Lookup the instruction opcode in the TableGen records.
2429 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2430 unsigned Opcode =
2431 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2432
2433 return buildExtendedBitOpsInst(Call, Opcode, MIRBuilder, GR);
2434}
2435
2436 static bool generateBindlessImageINTELInst(const SPIRV::IncomingCall *Call,
2437 MachineIRBuilder &MIRBuilder,
2438 SPIRVGlobalRegistry *GR) {
2439 // Lookup the instruction opcode in the TableGen records.
2440 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2441 unsigned Opcode =
2442 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2443
2444 return buildBindlessImageINTELInst(Call, Opcode, MIRBuilder, GR);
2445}
2446
2448 MachineIRBuilder &MIRBuilder,
2449 SPIRVGlobalRegistry *GR) {
2450 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2451 unsigned Opcode =
2452 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2453 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
2454}
2455
2456 static bool buildAPFixedPointInst(const SPIRV::IncomingCall *Call,
2457 unsigned Opcode, MachineIRBuilder &MIRBuilder,
2458 SPIRVGlobalRegistry *GR) {
2459 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2461 Register InputReg = Call->Arguments[0];
2462 const Type *RetTy = GR->getTypeForSPIRVType(Call->ReturnType);
2463 bool IsSRet = RetTy->isVoidTy();
2464
2465 if (IsSRet) {
2466 const LLT ValTy = MRI->getType(InputReg);
2467 Register ActualRetValReg = MRI->createGenericVirtualRegister(ValTy);
2468 SPIRVType *InstructionType =
2469 GR->getPointeeType(GR->getSPIRVTypeForVReg(InputReg));
2470 InputReg = Call->Arguments[1];
2471 auto InputType = GR->getTypeForSPIRVType(GR->getSPIRVTypeForVReg(InputReg));
2472 Register PtrInputReg;
2473 if (InputType->getTypeID() == llvm::Type::TypeID::TypedPointerTyID) {
2474 LLT InputLLT = MRI->getType(InputReg);
2475 PtrInputReg = MRI->createGenericVirtualRegister(InputLLT);
2476 SPIRVType *PtrType =
2477 GR->getPointeeType(GR->getSPIRVTypeForVReg(InputReg));
2478 MachineMemOperand *MMO1 = MIRBuilder.getMF().getMachineMemOperand(
2479 MachinePointerInfo(), MachineMemOperand::MOLoad,
2480 InputLLT.getSizeInBytes(), Align(4));
2481 MIRBuilder.buildLoad(PtrInputReg, InputReg, *MMO1);
2482 MRI->setRegClass(PtrInputReg, &SPIRV::iIDRegClass);
2483 GR->assignSPIRVTypeToVReg(PtrType, PtrInputReg, MIRBuilder.getMF());
2484 }
2485
2486 for (unsigned index = 2; index < 7; index++) {
2487 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[index], MRI));
2488 }
2489
2490 // Emit the instruction
2491 auto MIB = MIRBuilder.buildInstr(Opcode)
2492 .addDef(ActualRetValReg)
2493 .addUse(GR->getSPIRVTypeID(InstructionType));
2494 if (PtrInputReg)
2495 MIB.addUse(PtrInputReg);
2496 else
2497 MIB.addUse(InputReg);
2498
2499 for (uint32_t Imm : ImmArgs)
2500 MIB.addImm(Imm);
2501 unsigned Size = ValTy.getSizeInBytes();
2502 // Store result to the pointer passed in Arg[0]
2503 MachineMemOperand *MMO = MIRBuilder.getMF().getMachineMemOperand(
2504 MachinePointerInfo(), MachineMemOperand::MOStore, Size, Align(4));
2505 MRI->setRegClass(ActualRetValReg, &SPIRV::pIDRegClass);
2506 MIRBuilder.buildStore(ActualRetValReg, Call->Arguments[0], *MMO);
2507 return true;
2508 } else {
2509 for (unsigned index = 1; index < 6; index++)
2510 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[index], MRI));
2511
2512 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2513 GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
2514 }
2515}
2516
2517 static bool generateAPFixedPointInst(const SPIRV::IncomingCall *Call,
2518 MachineIRBuilder &MIRBuilder,
2519 SPIRVGlobalRegistry *GR) {
2520 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2521 unsigned Opcode =
2522 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2523
2524 return buildAPFixedPointInst(Call, Opcode, MIRBuilder, GR);
2525}
2526
2527static bool
2528 generateTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call,
2529 MachineIRBuilder &MIRBuilder,
2530 SPIRVGlobalRegistry *GR) {
2531 // Lookup the instruction opcode in the TableGen records.
2532 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2533 unsigned Opcode =
2534 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2535
2536 return buildTernaryBitwiseFunctionINTELInst(Call, Opcode, MIRBuilder, GR);
2537}
2538
2539 static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call,
2540 MachineIRBuilder &MIRBuilder,
2541 SPIRVGlobalRegistry *GR) {
2542 // Lookup the instruction opcode in the TableGen records.
2543 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2544 unsigned Opcode =
2545 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2546
2547 return build2DBlockIOINTELInst(Call, Opcode, MIRBuilder, GR);
2548}
2549
2550 static bool generatePipeInst(const SPIRV::IncomingCall *Call,
2551 MachineIRBuilder &MIRBuilder,
2552 SPIRVGlobalRegistry *GR) {
2553 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2554 unsigned Opcode =
2555 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2556
2557 unsigned Scope = SPIRV::Scope::Workgroup;
2558 if (Builtin->Name.contains("sub_group"))
2559 Scope = SPIRV::Scope::Subgroup;
2560
2561 return buildPipeInst(Call, Opcode, Scope, MIRBuilder, GR);
2562}
2563
2565 MachineIRBuilder &MIRBuilder,
2566 SPIRVGlobalRegistry *GR) {
2567 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2568 unsigned Opcode =
2569 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2570
2571 bool IsSet = Opcode != SPIRV::OpPredicatedStoreINTEL;
2572 unsigned ArgSz = Call->Arguments.size();
2574 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2575 // Memory operand is optional and is literal.
2576 if (ArgSz > 3)
2577 ImmArgs.push_back(
2578 getConstFromIntrinsic(Call->Arguments[/*Literal index*/ 3], MRI));
2579
2580 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2581 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2582 IsSet ? TypeReg : Register(0), ImmArgs);
2583}
2584
2585 static bool buildNDRange(const SPIRV::IncomingCall *Call,
2586 MachineIRBuilder &MIRBuilder,
2587 SPIRVGlobalRegistry *GR) {
2588 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2589 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2590 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
2591 PtrType->getOperand(2).isReg());
2592 Register TypeReg = PtrType->getOperand(2).getReg();
2593 SPIRVType *StructType = GR->getSPIRVTypeForVReg(TypeReg);
2594 MachineFunction &MF = MIRBuilder.getMF();
2595 Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2596 GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
2597 // Skip the first arg, it's the destination pointer. OpBuildNDRange takes
2598 // three other arguments, so pass a zero constant for any that are absent.
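// e.g. (illustrative): ndrange_1D(%gws) supplies only GlobalWorkSize, so
// LocalWorkSize and GlobalWorkOffset fall back to zero constants below.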
2599 unsigned NumArgs = Call->Arguments.size();
2600 assert(NumArgs >= 2);
2601 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
2602 Register LocalWorkSize =
2603 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
2604 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
2605 if (NumArgs < 4) {
2606 Register Const;
2607 SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
2608 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
2609 MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
2610 assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
2611 DefInstr->getOperand(3).isReg());
2612 Register GWSPtr = DefInstr->getOperand(3).getReg();
2613 // TODO: Maybe simplify generation of the type of the fields.
2614 unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
2615 unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
2616 Type *BaseTy = IntegerType::get(MF.getFunction().getContext(), BitWidth);
2617 Type *FieldTy = ArrayType::get(BaseTy, Size);
2618 SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(
2619 FieldTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
2620 GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2621 GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
2622 MIRBuilder.buildInstr(SPIRV::OpLoad)
2623 .addDef(GlobalWorkSize)
2624 .addUse(GR->getSPIRVTypeID(SpvFieldTy))
2625 .addUse(GWSPtr);
2626 const SPIRVSubtarget &ST =
2627 cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
2628 Const = GR->getOrCreateConstIntArray(0, Size, *MIRBuilder.getInsertPt(),
2629 SpvFieldTy, *ST.getInstrInfo());
2630 } else {
2631 Const = GR->buildConstantInt(0, MIRBuilder, SpvTy, true);
2632 }
2633 if (!LocalWorkSize.isValid())
2634 LocalWorkSize = Const;
2635 if (!GlobalWorkOffset.isValid())
2636 GlobalWorkOffset = Const;
2637 }
2638 assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
2639 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
2640 .addDef(TmpReg)
2641 .addUse(TypeReg)
2642 .addUse(GlobalWorkSize)
2643 .addUse(LocalWorkSize)
2644 .addUse(GlobalWorkOffset);
2645 return MIRBuilder.buildInstr(SPIRV::OpStore)
2646 .addUse(Call->Arguments[0])
2647 .addUse(TmpReg);
2648}
2649
2650 // TODO: maybe move to the global registry.
2651static SPIRVType *
2652 getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder,
2653 SPIRVGlobalRegistry *GR) {
2654 LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
2655 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2656 Type *PtrType = PointerType::get(Context, SC1);
2657 return GR->getOrCreateSPIRVType(PtrType, MIRBuilder,
2658 SPIRV::AccessQualifier::ReadWrite, true);
2659}
2660
2661 static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
2662 MachineIRBuilder &MIRBuilder,
2663 SPIRVGlobalRegistry *GR) {
2664 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2665 const DataLayout &DL = MIRBuilder.getDataLayout();
2666 bool IsSpirvOp = Call->isSpirvOp();
2667 bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
2668 const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
2669
2670 // Build the vararg operands before OpEnqueueKernel.
2671 // Local size arguments: sizes of the block invoke arguments. Clang generates
2672 // the local size operands as an array, so we need to unpack them.
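// e.g. (illustrative): two local sizes stored in a [2 x i64] alloca are
// unpacked below into two spv_gep intrinsics, yielding one OpEnqueueKernel
// operand per element.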
2673 SmallVector<Register, 16> LocalSizes;
2674 if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
2675 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2676 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
2677 MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
2678 assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
2679 GepMI->getOperand(3).isReg());
2680 Register ArrayReg = GepMI->getOperand(3).getReg();
2681 MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
2682 const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
2683 assert(LocalSizeTy && "Local size type is expected");
2684 const uint64_t LocalSizeNum =
2685 cast<ArrayType>(LocalSizeTy)->getNumElements();
2686 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2687 const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
2688 const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
2689 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2690 for (unsigned I = 0; I < LocalSizeNum; ++I) {
2691 Register Reg = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
2692 MRI->setType(Reg, LLType);
2693 GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
2694 auto GEPInst = MIRBuilder.buildIntrinsic(
2695 Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
2696 GEPInst
2697 .addImm(GepMI->getOperand(2).getImm()) // In bound.
2698 .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
2699 .addUse(buildConstantIntReg32(0, MIRBuilder, GR)) // Indices.
2700 .addUse(buildConstantIntReg32(I, MIRBuilder, GR));
2701 LocalSizes.push_back(Reg);
2702 }
2703 }
2704
2705 // SPIRV OpEnqueueKernel instruction has 10+ arguments.
2706 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
2707 .addDef(Call->ReturnRegister)
2708 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2709
2710 // Copy all arguments before block invoke function pointer.
2711 const unsigned BlockFIdx = HasEvents ? 6 : 3;
2712 for (unsigned i = 0; i < BlockFIdx; i++)
2713 MIB.addUse(Call->Arguments[i]);
2714
2715 // If there are no event arguments in the original call, add dummy ones.
2716 if (!HasEvents) {
2717 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Dummy num events.
2718 Register NullPtr = GR->getOrCreateConstNullPtr(
2719 MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
2720 MIB.addUse(NullPtr); // Dummy wait events.
2721 MIB.addUse(NullPtr); // Dummy ret event.
2722 }
2723
2724 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
2725 assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
2726 // Invoke: Pointer to invoke function.
2727 MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
2728
2729 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
2730 // Param: Pointer to block literal.
2731 MIB.addUse(BlockLiteralReg);
2732
2733 Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
2734 // TODO: these numbers should be obtained from the block literal structure.
2735 // Param Size: Size of the block literal structure.
2736 MIB.addUse(buildConstantIntReg32(DL.getTypeStoreSize(PType), MIRBuilder, GR));
2737 // Param Alignment: Alignment of the block literal structure.
2738 MIB.addUse(buildConstantIntReg32(DL.getPrefTypeAlign(PType).value(),
2739 MIRBuilder, GR));
2740
2741 for (unsigned i = 0; i < LocalSizes.size(); i++)
2742 MIB.addUse(LocalSizes[i]);
2743 return true;
2744}
2745
2746 static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
2747 MachineIRBuilder &MIRBuilder,
2748 SPIRVGlobalRegistry *GR) {
2749 // Lookup the instruction opcode in the TableGen records.
2750 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2751 unsigned Opcode =
2752 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2753
2754 switch (Opcode) {
2755 case SPIRV::OpRetainEvent:
2756 case SPIRV::OpReleaseEvent:
2757 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
2758 case SPIRV::OpCreateUserEvent:
2759 case SPIRV::OpGetDefaultQueue:
2760 return MIRBuilder.buildInstr(Opcode)
2761 .addDef(Call->ReturnRegister)
2762 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2763 case SPIRV::OpIsValidEvent:
2764 return MIRBuilder.buildInstr(Opcode)
2765 .addDef(Call->ReturnRegister)
2766 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2767 .addUse(Call->Arguments[0]);
2768 case SPIRV::OpSetUserEventStatus:
2769 return MIRBuilder.buildInstr(Opcode)
2770 .addUse(Call->Arguments[0])
2771 .addUse(Call->Arguments[1]);
2772 case SPIRV::OpCaptureEventProfilingInfo:
2773 return MIRBuilder.buildInstr(Opcode)
2774 .addUse(Call->Arguments[0])
2775 .addUse(Call->Arguments[1])
2776 .addUse(Call->Arguments[2]);
2777 case SPIRV::OpBuildNDRange:
2778 return buildNDRange(Call, MIRBuilder, GR);
2779 case SPIRV::OpEnqueueKernel:
2780 return buildEnqueueKernel(Call, MIRBuilder, GR);
2781 default:
2782 return false;
2783 }
2784}
2785
2786 static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
2787 MachineIRBuilder &MIRBuilder,
2788 SPIRVGlobalRegistry *GR) {
2789 // Lookup the instruction opcode in the TableGen records.
2790 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2791 unsigned Opcode =
2792 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2793
2794 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2795 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2796 if (Call->isSpirvOp())
2797 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2798 IsSet ? TypeReg : Register(0));
2799
2800 auto Scope = buildConstantIntReg32(SPIRV::Scope::Workgroup, MIRBuilder, GR);
2801
2802 switch (Opcode) {
2803 case SPIRV::OpGroupAsyncCopy: {
2804 SPIRVType *NewType =
2805 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2806 ? nullptr
2807 : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder, true);
2808 Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
2809 unsigned NumArgs = Call->Arguments.size();
2810 Register EventReg = Call->Arguments[NumArgs - 1];
2811 bool Res = MIRBuilder.buildInstr(Opcode)
2812 .addDef(Call->ReturnRegister)
2813 .addUse(TypeReg)
2814 .addUse(Scope)
2815 .addUse(Call->Arguments[0])
2816 .addUse(Call->Arguments[1])
2817 .addUse(Call->Arguments[2])
2818 .addUse(Call->Arguments.size() > 4
2819 ? Call->Arguments[3]
2820 : buildConstantIntReg32(1, MIRBuilder, GR))
2821 .addUse(EventReg);
2822 if (NewType != nullptr)
2823 updateRegType(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2824 MIRBuilder.getMF().getRegInfo());
2825 return Res;
2826 }
2827 case SPIRV::OpGroupWaitEvents:
2828 return MIRBuilder.buildInstr(Opcode)
2829 .addUse(Scope)
2830 .addUse(Call->Arguments[0])
2831 .addUse(Call->Arguments[1]);
2832 default:
2833 return false;
2834 }
2835}
2836
2837static bool generateConvertInst(const StringRef DemangledCall,
2838 const SPIRV::IncomingCall *Call,
2839 MachineIRBuilder &MIRBuilder,
2840 SPIRVGlobalRegistry *GR) {
2841 // Lookup the conversion builtin in the TableGen records.
2842 const SPIRV::ConvertBuiltin *Builtin =
2843 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2844
2845 if (!Builtin && Call->isSpirvOp()) {
2846 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2847 unsigned Opcode =
2848 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2849 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2850 GR->getSPIRVTypeID(Call->ReturnType));
2851 }
2852
2853 assert(Builtin && "Conversion builtin not found.");
2854 if (Builtin->IsSaturated)
2855 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2856 SPIRV::Decoration::SaturatedConversion, {});
2857 if (Builtin->IsRounded)
2858 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2859 SPIRV::Decoration::FPRoundingMode,
2860 {(unsigned)Builtin->RoundingMode});
2861
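// e.g. (illustrative): convert_int_sat_rte(float) takes the Float -> Int
// path below and selects OpConvertFToS, with the SaturatedConversion and
// FPRoundingMode decorations already attached above.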
2862 std::string NeedExtMsg; // no errors if empty
2863 bool IsRightComponentsNumber = true; // check if input/output accepts vectors
2864 unsigned Opcode = SPIRV::OpNop;
2865 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2866 // Int -> ...
2867 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2868 // Int -> Int
2869 if (Builtin->IsSaturated)
2870 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2871 : SPIRV::OpSatConvertSToU;
2872 else
2873 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2874 : SPIRV::OpSConvert;
2875 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2876 SPIRV::OpTypeFloat)) {
2877 // Int -> Float
2878 if (Builtin->IsBfloat16) {
2879 const auto *ST = static_cast<const SPIRVSubtarget *>(
2880 &MIRBuilder.getMF().getSubtarget());
2881 if (!ST->canUseExtension(
2882 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2883 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2884 IsRightComponentsNumber =
2885 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2886 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2887 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2888 } else {
2889 bool IsSourceSigned =
2890 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
2891 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2892 }
2893 }
2894 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2895 SPIRV::OpTypeFloat)) {
2896 // Float -> ...
2897 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2898 // Float -> Int
2899 if (Builtin->IsBfloat16) {
2900 const auto *ST = static_cast<const SPIRVSubtarget *>(
2901 &MIRBuilder.getMF().getSubtarget());
2902 if (!ST->canUseExtension(
2903 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2904 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2905 IsRightComponentsNumber =
2906 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2907 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2908 Opcode = SPIRV::OpConvertFToBF16INTEL;
2909 } else {
2910 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2911 : SPIRV::OpConvertFToU;
2912 }
2913 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2914 SPIRV::OpTypeFloat)) {
2915 if (Builtin->IsTF32) {
2916 const auto *ST = static_cast<const SPIRVSubtarget *>(
2917 &MIRBuilder.getMF().getSubtarget());
2918 if (!ST->canUseExtension(
2919 SPIRV::Extension::SPV_INTEL_tensor_float32_conversion))
2920 NeedExtMsg = "SPV_INTEL_tensor_float32_conversion";
2921 IsRightComponentsNumber =
2922 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2923 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2924 Opcode = SPIRV::OpRoundFToTF32INTEL;
2925 } else {
2926 // Float -> Float
2927 Opcode = SPIRV::OpFConvert;
2928 }
2929 }
2930 }
2931
2932 if (!NeedExtMsg.empty()) {
2933 std::string DiagMsg = std::string(Builtin->Name) +
2934 ": the builtin requires the following SPIR-V "
2935 "extension: " +
2936 NeedExtMsg;
2937 report_fatal_error(DiagMsg.c_str(), false);
2938 }
2939 if (!IsRightComponentsNumber) {
2940 std::string DiagMsg =
2941 std::string(Builtin->Name) +
2942 ": result and argument must have the same number of components";
2943 report_fatal_error(DiagMsg.c_str(), false);
2944 }
2945 assert(Opcode != SPIRV::OpNop &&
2946 "Conversion between the types not implemented!");
2947
2948 MIRBuilder.buildInstr(Opcode)
2949 .addDef(Call->ReturnRegister)
2950 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2951 .addUse(Call->Arguments[0]);
2952 return true;
2953}
2954
2955 static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
2956 MachineIRBuilder &MIRBuilder,
2957 SPIRVGlobalRegistry *GR) {
2958 // Lookup the vector load/store builtin in the TableGen records.
2959 const SPIRV::VectorLoadStoreBuiltin *Builtin =
2960 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2961 Call->Builtin->Set);
2962 // Build extended instruction.
2963 auto MIB =
2964 MIRBuilder.buildInstr(SPIRV::OpExtInst)
2965 .addDef(Call->ReturnRegister)
2966 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2967 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2968 .addImm(Builtin->Number);
2969 for (auto Argument : Call->Arguments)
2970 MIB.addUse(Argument);
2971 if (Builtin->Name.contains("load") && Builtin->ElementCount > 1)
2972 MIB.addImm(Builtin->ElementCount);
2973
2974 // Rounding mode should be passed as a last argument in the MI for builtins
2975 // like "vstorea_halfn_r".
2976 if (Builtin->IsRounded)
2977 MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
2978 return true;
2979}
2980
2981 static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
2982 MachineIRBuilder &MIRBuilder,
2983 SPIRVGlobalRegistry *GR) {
2984 // Lookup the instruction opcode in the TableGen records.
2985 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2986 unsigned Opcode =
2987 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2988 bool IsLoad = Opcode == SPIRV::OpLoad;
2989 // Build the instruction.
2990 auto MIB = MIRBuilder.buildInstr(Opcode);
2991 if (IsLoad) {
2992 MIB.addDef(Call->ReturnRegister);
2993 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
2994 }
2995 // Add a pointer to the value to load/store.
2996 MIB.addUse(Call->Arguments[0]);
2997 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2998 // Add a value to store.
2999 if (!IsLoad)
3000 MIB.addUse(Call->Arguments[1]);
3001 // Add optional memory attributes and an alignment.
3002 unsigned NumArgs = Call->Arguments.size();
3003 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
3004 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
3005 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
3006 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
3007 return true;
3008}
3009
3010namespace SPIRV {
3011 // Try to find a builtin function's attributes by its demangled name and
3012// return a tuple <builtin group, op code, ext instruction number>, or a special
3013// tuple value <-1, 0, 0> if the builtin function is not found.
3014// Not all builtin functions are supported, only those with a ready-to-use op
3015// code or instruction number defined in TableGen.
3016 // TODO: consider a major rework of mapping demangled calls to builtin
3017 // functions to unify the search and decrease the number of individual cases.
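// e.g. (illustrative): a demangled "atomic_add" call in the OpenCL_std set
// resolves through the Atomic group to <SPIRV::Atomic, SPIRV::OpAtomicIAdd, 0>,
// while an unrecognized name yields <-1, 0, 0>.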
3018std::tuple<int, unsigned, unsigned>
3019mapBuiltinToOpcode(const StringRef DemangledCall,
3020 SPIRV::InstructionSet::InstructionSet Set) {
3021 Register Reg;
3022 SmallVector<Register> Args;
3023 std::unique_ptr<const IncomingCall> Call =
3024 lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
3025 if (!Call)
3026 return std::make_tuple(-1, 0, 0);
3027
3028 switch (Call->Builtin->Group) {
3029 case SPIRV::Relational:
3030 case SPIRV::Atomic:
3031 case SPIRV::Barrier:
3032 case SPIRV::CastToPtr:
3033 case SPIRV::ImageMiscQuery:
3034 case SPIRV::SpecConstant:
3035 case SPIRV::Enqueue:
3036 case SPIRV::AsyncCopy:
3037 case SPIRV::LoadStore:
3038 case SPIRV::CoopMatr:
3039 if (const auto *R =
3040 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
3041 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3042 break;
3043 case SPIRV::Extended:
3044 if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
3045 Call->Builtin->Set))
3046 return std::make_tuple(Call->Builtin->Group, 0, R->Number);
3047 break;
3048 case SPIRV::VectorLoadStore:
3049 if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
3050 Call->Builtin->Set))
3051 return std::make_tuple(SPIRV::Extended, 0, R->Number);
3052 break;
3053 case SPIRV::Group:
3054 if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
3055 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3056 break;
3057 case SPIRV::AtomicFloating:
3058 if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
3059 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3060 break;
3061 case SPIRV::IntelSubgroups:
3062 if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
3063 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3064 break;
3065 case SPIRV::GroupUniform:
3066 if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
3067 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3068 break;
3069 case SPIRV::IntegerDot:
3070 if (const auto *R =
3071 SPIRV::lookupIntegerDotProductBuiltin(Call->Builtin->Name))
3072 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
3073 break;
3074 case SPIRV::WriteImage:
3075 return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
3076 case SPIRV::Select:
3077 return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
3078 case SPIRV::Construct:
3079 return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
3080 0);
3081 case SPIRV::KernelClock:
3082 return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
3083 default:
3084 return std::make_tuple(-1, 0, 0);
3085 }
3086 return std::make_tuple(-1, 0, 0);
3087}
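// A minimal usage sketch (hypothetical caller; the demangled name below is an
// assumption chosen only for illustration):
//   auto [Group, Opcode, ExtNo] =
//       SPIRV::mapBuiltinToOpcode("__spirv_AtomicLoad",
//                                 SPIRV::InstructionSet::OpenCL_std);
//   if (Group == -1)
//     ; // not a builtin with a ready-to-use opcode/instruction number
//   else if (Group == SPIRV::Extended)
//     ; // ExtNo holds the extended-instruction number
//   else
//     ; // Opcode holds the native SPIR-V (or generic) opcode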
3088
3089std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
3090 SPIRV::InstructionSet::InstructionSet Set,
3091 MachineIRBuilder &MIRBuilder,
3092 const Register OrigRet, const Type *OrigRetTy,
3093 const SmallVectorImpl<Register> &Args,
3094 SPIRVGlobalRegistry *GR, const CallBase &CB) {
3095 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
3096
3097 // Lookup the builtin in the TableGen records.
3098 SPIRVType *SpvType = GR->getSPIRVTypeForVReg(OrigRet);
3099 assert(SpvType && "Inconsistent return register: expected valid type info");
3100 std::unique_ptr<const IncomingCall> Call =
3101 lookupBuiltin(DemangledCall, Set, OrigRet, SpvType, Args);
3102
3103 if (!Call) {
3104 LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
3105 return std::nullopt;
3106 }
3107
3108 // TODO: check if the provided args meet the builtin requirements.
3109 assert(Args.size() >= Call->Builtin->MinNumArgs &&
3110 "Too few arguments to generate the builtin");
3111 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
3112 LLVM_DEBUG(dbgs() << "More arguments provided than expected!\n");
3113
3114 // Match the builtin with implementation based on the grouping.
3115 switch (Call->Builtin->Group) {
3116 case SPIRV::Extended:
3117 return generateExtInst(Call.get(), MIRBuilder, GR, CB);
3118 case SPIRV::Relational:
3119 return generateRelationalInst(Call.get(), MIRBuilder, GR);
3120 case SPIRV::Group:
3121 return generateGroupInst(Call.get(), MIRBuilder, GR);
3122 case SPIRV::Variable:
3123 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
3124 case SPIRV::Atomic:
3125 return generateAtomicInst(Call.get(), MIRBuilder, GR);
3126 case SPIRV::AtomicFloating:
3127 return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
3128 case SPIRV::Barrier:
3129 return generateBarrierInst(Call.get(), MIRBuilder, GR);
3130 case SPIRV::CastToPtr:
3131 return generateCastToPtrInst(Call.get(), MIRBuilder, GR);
3132 case SPIRV::Dot:
3133 case SPIRV::IntegerDot:
3134 return generateDotOrFMulInst(DemangledCall, Call.get(), MIRBuilder, GR);
3135 case SPIRV::Wave:
3136 return generateWaveInst(Call.get(), MIRBuilder, GR);
3137 case SPIRV::ICarryBorrow:
3138 return generateICarryBorrowInst(Call.get(), MIRBuilder, GR);
3139 case SPIRV::GetQuery:
3140 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
3141 case SPIRV::ImageSizeQuery:
3142 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
3143 case SPIRV::ImageMiscQuery:
3144 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
3145 case SPIRV::ReadImage:
3146 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
3147 case SPIRV::WriteImage:
3148 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
3149 case SPIRV::SampleImage:
3150 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
3151 case SPIRV::Select:
3152 return generateSelectInst(Call.get(), MIRBuilder);
3153 case SPIRV::Construct:
3154 return generateConstructInst(Call.get(), MIRBuilder, GR);
3155 case SPIRV::SpecConstant:
3156 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
3157 case SPIRV::Enqueue:
3158 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
3159 case SPIRV::AsyncCopy:
3160 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
3161 case SPIRV::Convert:
3162 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
3163 case SPIRV::VectorLoadStore:
3164 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
3165 case SPIRV::LoadStore:
3166 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
3167 case SPIRV::IntelSubgroups:
3168 return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
3169 case SPIRV::GroupUniform:
3170 return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
3171 case SPIRV::KernelClock:
3172 return generateKernelClockInst(Call.get(), MIRBuilder, GR);
3173 case SPIRV::CoopMatr:
3174 return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
3175 case SPIRV::ExtendedBitOps:
3176 return generateExtendedBitOpsInst(Call.get(), MIRBuilder, GR);
3177 case SPIRV::BindlessINTEL:
3178 return generateBindlessImageINTELInst(Call.get(), MIRBuilder, GR);
3179 case SPIRV::TernaryBitwiseINTEL:
3180 return generateTernaryBitwiseFunctionINTELInst(Call.get(), MIRBuilder, GR);
3181 case SPIRV::Block2DLoadStore:
3182 return generate2DBlockIOINTELInst(Call.get(), MIRBuilder, GR);
3183 case SPIRV::Pipe:
3184 return generatePipeInst(Call.get(), MIRBuilder, GR);
3185 case SPIRV::PredicatedLoadStore:
3186 return generatePredicatedLoadStoreInst(Call.get(), MIRBuilder, GR);
3187 case SPIRV::BlockingPipes:
3188 return generateBlockingPipesInst(Call.get(), MIRBuilder, GR);
3189 case SPIRV::ArbitraryPrecisionFixedPoint:
3190 return generateAPFixedPointInst(Call.get(), MIRBuilder, GR);
3191 }
3192 return false;
3193}
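// Return-value contract of lowerBuiltin, summarized from the code above:
// std::nullopt means no TableGen record matched the demangled call (the caller
// should fall back to ordinary call lowering); otherwise the value is the
// success flag returned by the matched group's generate*() handler.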
3194
3195Type *parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx) {
3196 // Parse strings representing OpenCL builtin types.
3197 if (hasBuiltinTypePrefix(TypeStr)) {
3198 // OpenCL builtin types in demangled call strings have the following format:
3199 // e.g. ocl_image2d_ro
3200 [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
3201 assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");
3202
3203 // Check if this is a pointer to a builtin type and not just a pointer
3204 // representing a builtin type. If it is a pointer to a builtin type,
3205 // additional handling is required in the method calling
3206 // parseBuiltinCallArgumentBaseType(...), as this function only retrieves
3207 // the base types.
3208 if (TypeStr.ends_with("*"))
3209 TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));
3210
3211 return parseBuiltinTypeNameToTargetExtType("opencl." + TypeStr.str() + "_t",
3212 Ctx);
3213 }
3214
3215 // Parse type name in either "typeN" or "type vector[N]" format, where
3216 // N is the number of elements of the vector.
3217 Type *BaseType;
3218 unsigned VecElts = 0;
3219
3220 BaseType = parseBasicTypeName(TypeStr, Ctx);
3221 if (!BaseType)
3222 // Unable to recognize SPIRV type name.
3223 return nullptr;
3224
3225 // Handle "typeN*" or "type vector[N]*".
3226 TypeStr.consume_back("*");
3227
3228 if (TypeStr.consume_front(" vector["))
3229 TypeStr = TypeStr.substr(0, TypeStr.find(']'));
3230
3231 TypeStr.getAsInteger(10, VecElts);
3232 if (VecElts > 0)
3233 BaseType = VectorType::get(
3234 BaseType->isVoidTy() ? Type::getInt8Ty(Ctx) : BaseType, VecElts, false);
3235
3236 return BaseType;
3237}
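// Illustrative inputs and results for parseBuiltinCallArgumentType (a sketch;
// assumes parseBasicTypeName recognizes the usual OpenCL scalar names):
//   "ocl_image2d_ro"  -> TargetExtType for the matching "opencl.image2d_ro_t"
//   "int"             -> i32
//   "float4"          -> <4 x float>
//   "float vector[4]" -> <4 x float>
//   "float*"          -> float (pointee base type; the '*' is consumed above)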
3238
3239bool parseBuiltinTypeStr(SmallVector<StringRef, 10> &BuiltinArgsTypeStrs,
3240 const StringRef DemangledCall, LLVMContext &Ctx) {
3241 auto Pos1 = DemangledCall.find('(');
3242 if (Pos1 == StringRef::npos)
3243 return false;
3244 auto Pos2 = DemangledCall.find(')');
3245 if (Pos2 == StringRef::npos || Pos1 > Pos2)
3246 return false;
3247 DemangledCall.slice(Pos1 + 1, Pos2)
3248 .split(BuiltinArgsTypeStrs, ',', -1, false);
3249 return true;
3250}
3251
3252Type *parseBuiltinCallArgumentBaseType(const StringRef DemangledCall,
3253 unsigned ArgIdx, LLVMContext &Ctx) {
3254 SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
3255 parseBuiltinTypeStr(BuiltinArgsTypeStrs, DemangledCall, Ctx);
3256 if (ArgIdx >= BuiltinArgsTypeStrs.size())
3257 return nullptr;
3258 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
3259 return parseBuiltinCallArgumentType(TypeStr, Ctx);
3260}
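// Sketch of both helpers on a hypothetical demangled call skeleton:
//   DemangledCall = "vstoren(float4, uint, float*)"
//   parseBuiltinTypeStr(...) fills {"float4", " uint", " float*"}
//   parseBuiltinCallArgumentBaseType(DemangledCall, 2, Ctx)
//       trims " float*" and returns the base type float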
3261
3262struct BuiltinType {
3263 StringRef Name;
3264 uint32_t Opcode;
3265};
3266
3267#define GET_BuiltinTypes_DECL
3268#define GET_BuiltinTypes_IMPL
3269
3270struct OpenCLType {
3271 StringRef Name;
3272 StringRef SpirvTypeLiteral;
3273};
3274
3275#define GET_OpenCLTypes_DECL
3276#define GET_OpenCLTypes_IMPL
3277
3278#include "SPIRVGenTables.inc"
3279} // namespace SPIRV
3280
3281//===----------------------------------------------------------------------===//
3282// Misc functions for parsing builtin types.
3283//===----------------------------------------------------------------------===//
3284
3285static Type *parseTypeString(const StringRef Name, LLVMContext &Context) {
3286 if (Name.starts_with("void"))
3287 return Type::getVoidTy(Context);
3288 else if (Name.starts_with("int") || Name.starts_with("uint"))
3289 return Type::getInt32Ty(Context);
3290 else if (Name.starts_with("float"))
3291 return Type::getFloatTy(Context);
3292 else if (Name.starts_with("half"))
3293 return Type::getHalfTy(Context);
3294 report_fatal_error("Unable to recognize type!");
3295}
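// parseTypeString backs the "_<type>" parameter of the parameterized builtin
// names handled below, e.g. the "void" in "spirv.Image._void_1_0_0_0_0_0_0";
// any unlisted type name is a fatal error by design.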
3296
3297//===----------------------------------------------------------------------===//
3298// Implementation functions for builtin types.
3299//===----------------------------------------------------------------------===//
3300
3301static SPIRVType *getNonParameterizedType(const TargetExtType *ExtensionType,
3302 const SPIRV::BuiltinType *TypeRecord,
3303 MachineIRBuilder &MIRBuilder,
3304 SPIRVGlobalRegistry *GR) {
3305 unsigned Opcode = TypeRecord->Opcode;
3306 // Create or get an existing type from GlobalRegistry.
3307 return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
3308}
3309
3310static SPIRVType *getSamplerType(MachineIRBuilder &MIRBuilder,
3311 SPIRVGlobalRegistry *GR) {
3312 // Create or get an existing type from GlobalRegistry.
3313 return GR->getOrCreateOpTypeSampler(MIRBuilder);
3314}
3315
3316static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
3317 MachineIRBuilder &MIRBuilder,
3318 SPIRVGlobalRegistry *GR) {
3319 assert(ExtensionType->getNumIntParameters() == 1 &&
3320 "Invalid number of parameters for SPIR-V pipe builtin!");
3321 // Create or get an existing type from GlobalRegistry.
3322 return GR->getOrCreateOpTypePipe(MIRBuilder,
3323 SPIRV::AccessQualifier::AccessQualifier(
3324 ExtensionType->getIntParameter(0)));
3325}
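// E.g. the IR type target("spirv.Pipe", 0) carries the access qualifier in its
// single integer parameter (0 = ReadOnly, 1 = WriteOnly, 2 = ReadWrite in
// SPIR-V); an illustrative encoding matching the assert above.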
3326
3327static SPIRVType *getCoopMatrType(const TargetExtType *ExtensionType,
3328 MachineIRBuilder &MIRBuilder,
3329 SPIRVGlobalRegistry *GR) {
3330 assert(ExtensionType->getNumIntParameters() == 4 &&
3331 "Invalid number of parameters for SPIR-V coop matrices builtin!");
3332 assert(ExtensionType->getNumTypeParameters() == 1 &&
3333 "SPIR-V coop matrices builtin type must have a type parameter!");
3334 const SPIRVType *ElemType =
3335 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder,
3336 SPIRV::AccessQualifier::ReadWrite, true);
3337 // Create or get an existing type from GlobalRegistry.
3338 return GR->getOrCreateOpTypeCoopMatr(
3339 MIRBuilder, ExtensionType, ElemType, ExtensionType->getIntParameter(0),
3340 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
3341 ExtensionType->getIntParameter(3), true);
3342}
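// E.g. target("spirv.CooperativeMatrixKHR", float, 3, 8, 16, 0) would describe
// a matrix of float with Scope=3 (Subgroup), Rows=8, Columns=16, Use=0
// (MatrixAKHR); an illustrative reading of the one type parameter and the four
// integer parameters consumed above, with assumed example values.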
3343
3344static SPIRVType *getSampledImageType(const TargetExtType *OpaqueType,
3345 MachineIRBuilder &MIRBuilder,
3346 SPIRVGlobalRegistry *GR) {
3347 SPIRVType *OpaqueImageType = GR->getImageType(
3348 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder);
3349 // Create or get an existing type from GlobalRegistry.
3350 return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
3351}
3352
3353static SPIRVType *getInlineSpirvType(const TargetExtType *ExtensionType,
3354 MachineIRBuilder &MIRBuilder,
3355 SPIRVGlobalRegistry *GR) {
3356 assert(ExtensionType->getNumIntParameters() == 3 &&
3357 "Inline SPIR-V type builtin takes an opcode, size, and alignment "
3358 "parameter");
3359 auto Opcode = ExtensionType->getIntParameter(0);
3360
3361 SmallVector<MCOperand> Operands;
3362 for (Type *Param : ExtensionType->type_params()) {
3363 if (const TargetExtType *ParamEType = dyn_cast<TargetExtType>(Param)) {
3364 if (ParamEType->getName() == "spirv.IntegralConstant") {
3365 assert(ParamEType->getNumTypeParameters() == 1 &&
3366 "Inline SPIR-V integral constant builtin must have a type "
3367 "parameter");
3368 assert(ParamEType->getNumIntParameters() == 1 &&
3369 "Inline SPIR-V integral constant builtin must have a "
3370 "value parameter");
3371
3372 auto OperandValue = ParamEType->getIntParameter(0);
3373 auto *OperandType = ParamEType->getTypeParameter(0);
3374
3375 const SPIRVType *OperandSPIRVType = GR->getOrCreateSPIRVType(
3376 OperandType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3377
3378 Operands.push_back(MCOperand::createReg(GR->buildConstantInt(
3379 OperandValue, MIRBuilder, OperandSPIRVType, true)));
3380 continue;
3381 } else if (ParamEType->getName() == "spirv.Literal") {
3382 assert(ParamEType->getNumTypeParameters() == 0 &&
3383 "Inline SPIR-V literal builtin does not take type "
3384 "parameters");
3385 assert(ParamEType->getNumIntParameters() == 1 &&
3386 "Inline SPIR-V literal builtin must have an integer "
3387 "parameter");
3388
3389 auto OperandValue = ParamEType->getIntParameter(0);
3390
3391 Operands.push_back(MCOperand::createImm(OperandValue));
3392 continue;
3393 }
3394 }
3395 const SPIRVType *TypeOperand = GR->getOrCreateSPIRVType(
3396 Param, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3397 Operands.push_back(MCOperand::createReg(GR->getSPIRVTypeID(TypeOperand)));
3398 }
3399
3400 return GR->getOrCreateUnknownType(ExtensionType, MIRBuilder, Opcode,
3401 Operands);
3402}
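// A sketch of the encoding handled above (the operand values are hypothetical):
//   target("spirv.Type", i32, target("spirv.IntegralConstant", i32, 4),
//          28, 16, 4)
// would emit an unknown type with opcode 28 (OpTypeArray in SPIR-V) whose
// operands are the id of i32 followed by the id of the constant 4; the
// trailing 16 and 4 are the size and alignment parameters named in the assert.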
3403
3404static SPIRVType *getVulkanBufferType(const TargetExtType *ExtensionType,
3405 MachineIRBuilder &MIRBuilder,
3406 SPIRVGlobalRegistry *GR) {
3407 assert(ExtensionType->getNumTypeParameters() == 1 &&
3408 "Vulkan buffers have exactly one type parameter: the contained type.");
3409 assert(ExtensionType->getNumIntParameters() == 2 &&
3410 "Vulkan buffers have 2 integer parameters: the storage class and an "
3411 "is-writable flag.");
3412
3413 auto *T = ExtensionType->getTypeParameter(0);
3414 auto SC = static_cast<SPIRV::StorageClass::StorageClass>(
3415 ExtensionType->getIntParameter(0));
3416 bool IsWritable = ExtensionType->getIntParameter(1);
3417 return GR->getOrCreateVulkanBufferType(MIRBuilder, T, SC, IsWritable);
3418}
3419
3420static SPIRVType *getLayoutType(const TargetExtType *ExtensionType,
3421 MachineIRBuilder &MIRBuilder,
3422 SPIRVGlobalRegistry *GR) {
3423 return GR->getOrCreateLayoutType(MIRBuilder, ExtensionType);
3424}
3425
3426namespace SPIRV {
3427TargetExtType *parseBuiltinTypeNameToTargetExtType(std::string TypeName,
3428 LLVMContext &Context) {
3429 StringRef NameWithParameters = TypeName;
3430
3431 // Pointers-to-opaque-structs representing OpenCL types are first translated
3432 // to equivalent SPIR-V types. OpenCL builtin type names should have the
3433 // following format: e.g. %opencl.event_t
3434 if (NameWithParameters.starts_with("opencl.")) {
3435 const SPIRV::OpenCLType *OCLTypeRecord =
3436 SPIRV::lookupOpenCLType(NameWithParameters);
3437 if (!OCLTypeRecord)
3438 report_fatal_error("Missing TableGen record for OpenCL type: " +
3439 NameWithParameters);
3440 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
3441 // Continue with the SPIR-V builtin type...
3442 }
3443
3444 // Names of the opaque structs representing SPIR-V builtins without
3445 // parameters should have the following format: e.g. %spirv.Event
3446 assert(NameWithParameters.starts_with("spirv.") &&
3447 "Unknown builtin opaque type!");
3448
3449 // Parameterized SPIR-V builtin names follow this format:
3450 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
3451 if (!NameWithParameters.contains('_'))
3452 return TargetExtType::get(Context, NameWithParameters);
3453
3454 SmallVector<StringRef> Parameters;
3455 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
3456 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
3457
3458 SmallVector<Type *, 1> TypeParameters;
3459 bool HasTypeParameter = !isDigit(Parameters[0][0]);
3460 if (HasTypeParameter)
3461 TypeParameters.push_back(parseTypeString(Parameters[0], Context));
3462 SmallVector<unsigned> IntParameters;
3463 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
3464 unsigned IntParameter = 0;
3465 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
3466 (void)ValidLiteral;
3467 assert(ValidLiteral &&
3468 "Invalid format of SPIR-V builtin parameter literal!");
3469 IntParameters.push_back(IntParameter);
3470 }
3471 return TargetExtType::get(Context,
3472 NameWithParameters.substr(0, BaseNameLength),
3473 TypeParameters, IntParameters);
3474}
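// Worked example of the parsing above (names taken from the comments):
//   "opencl.event_t"  -> the TableGen lookup rewrites it to its SPIR-V
//                        equivalent, and target("spirv.Event") is returned.
//   "spirv.Image._void_1_0_0_0_0_0_0"
//       base name      : "spirv.Image"
//       type parameter : void        (via parseTypeString)
//       int parameters : {1, 0, 0, 0, 0, 0, 0}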
3475
3476SPIRVType *lowerBuiltinType(const Type *OpaqueType,
3477 SPIRV::AccessQualifier::AccessQualifier AccessQual,
3478 MachineIRBuilder &MIRBuilder,
3479 SPIRVGlobalRegistry *GR) {
3480 // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
3481 // target(...) target extension types or pointers-to-opaque-structs. The
3482 // approach relying on structs is deprecated and works only in the
3483 // non-opaque pointer mode (-opaque-pointers=0).
3484 // In order to maintain compatibility with LLVM IR generated by older
3485 // versions of Clang and the LLVM/SPIR-V Translator, pointers-to-opaque-
3486 // structs are "translated" to target extension types. This translation is
3487 // temporary and will be removed in a future release of LLVM.
3488 const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
3489 if (!BuiltinType)
3490 BuiltinType = parseBuiltinTypeNameToTargetExtType(
3491 OpaqueType->getStructName().str(), MIRBuilder.getContext());
3492
3493 unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
3494
3495 const StringRef Name = BuiltinType->getName();
3496 LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
3497
3498 SPIRVType *TargetType;
3499 if (Name == "spirv.Type") {
3500 TargetType = getInlineSpirvType(BuiltinType, MIRBuilder, GR);
3501 } else if (Name == "spirv.VulkanBuffer") {
3502 TargetType = getVulkanBufferType(BuiltinType, MIRBuilder, GR);
3503 } else if (Name == "spirv.Padding") {
3504 TargetType = GR->getOrCreatePaddingType(MIRBuilder);
3505 } else if (Name == "spirv.Layout") {
3506 TargetType = getLayoutType(BuiltinType, MIRBuilder, GR);
3507 } else {
3508 // Lookup the demangled builtin type in the TableGen records.
3509 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
3510 if (!TypeRecord)
3511 report_fatal_error("Missing TableGen record for builtin type: " + Name);
3512
3513 // "Lower" the BuiltinType into TargetType. The following get<...>Type
3514 // methods use the implementation details from TableGen records or
3515 // TargetExtType parameters to either create a new OpType<...> machine
3516 // instruction or get an existing equivalent SPIRVType from
3517 // GlobalRegistry.
3518
3519 switch (TypeRecord->Opcode) {
3520 case SPIRV::OpTypeImage:
3521 TargetType = GR->getImageType(BuiltinType, AccessQual, MIRBuilder);
3522 break;
3523 case SPIRV::OpTypePipe:
3524 TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
3525 break;
3526 case SPIRV::OpTypeDeviceEvent:
3527 TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
3528 break;
3529 case SPIRV::OpTypeSampler:
3530 TargetType = getSamplerType(MIRBuilder, GR);
3531 break;
3532 case SPIRV::OpTypeSampledImage:
3533 TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
3534 break;
3535 case SPIRV::OpTypeCooperativeMatrixKHR:
3536 TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
3537 break;
3538 default:
3539 TargetType =
3540 getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
3541 break;
3542 }
3543 }
3544
3545 // Emit OpName instruction if a new OpType<...> instruction was added
3546 // (equivalent type was not found in GlobalRegistry).
3547 if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
3548 buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
3549
3550 return TargetType;
3551}
3552} // namespace SPIRV
3553} // namespace llvm