LLVM 22.0.0git
SPIRVBuiltins.cpp
Go to the documentation of this file.
1//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements lowering builtin function calls and types using their
10// demangled names and TableGen records.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SPIRVBuiltins.h"
15#include "SPIRV.h"
16#include "SPIRVSubtarget.h"
17#include "SPIRVUtils.h"
20#include "llvm/IR/IntrinsicsSPIRV.h"
21#include <regex>
22#include <string>
23#include <tuple>
24
25#define DEBUG_TYPE "spirv-builtins"
26
27namespace llvm {
28namespace SPIRV {
29#define GET_BuiltinGroup_DECL
30#include "SPIRVGenTables.inc"
31
34 InstructionSet::InstructionSet Set;
35 BuiltinGroup Group;
38};
39
40#define GET_DemangledBuiltins_DECL
41#define GET_DemangledBuiltins_IMPL
42
60
63 InstructionSet::InstructionSet Set;
65};
66
67#define GET_NativeBuiltins_DECL
68#define GET_NativeBuiltins_IMPL
69
85
86#define GET_GroupBuiltins_DECL
87#define GET_GroupBuiltins_IMPL
88
96
97#define GET_IntelSubgroupsBuiltins_DECL
98#define GET_IntelSubgroupsBuiltins_IMPL
99
104
105#define GET_AtomicFloatingBuiltins_DECL
106#define GET_AtomicFloatingBuiltins_IMPL
112
113#define GET_GroupUniformBuiltins_DECL
114#define GET_GroupUniformBuiltins_IMPL
115
118 InstructionSet::InstructionSet Set;
119 BuiltIn::BuiltIn Value;
120};
121
122using namespace BuiltIn;
123#define GET_GetBuiltins_DECL
124#define GET_GetBuiltins_IMPL
125
128 InstructionSet::InstructionSet Set;
130};
131
132#define GET_ImageQueryBuiltins_DECL
133#define GET_ImageQueryBuiltins_IMPL
134
140
141#define GET_IntegerDotProductBuiltins_DECL
142#define GET_IntegerDotProductBuiltins_IMPL
143
146 InstructionSet::InstructionSet Set;
151 bool IsTF32;
152 FPRoundingMode::FPRoundingMode RoundingMode;
153};
154
157 InstructionSet::InstructionSet Set;
161 FPRoundingMode::FPRoundingMode RoundingMode;
162};
163
164using namespace FPRoundingMode;
165#define GET_ConvertBuiltins_DECL
166#define GET_ConvertBuiltins_IMPL
167
168using namespace InstructionSet;
169#define GET_VectorLoadStoreBuiltins_DECL
170#define GET_VectorLoadStoreBuiltins_IMPL
171
172#define GET_CLMemoryScope_DECL
173#define GET_CLSamplerAddressingMode_DECL
174#define GET_CLMemoryFenceFlags_DECL
175#define GET_ExtendedBuiltins_DECL
176#include "SPIRVGenTables.inc"
177} // namespace SPIRV
178
179//===----------------------------------------------------------------------===//
180// Misc functions for looking up builtins and verifying requirements using
181// TableGen records
182//===----------------------------------------------------------------------===//
183
184namespace SPIRV {
185/// Parses the name part of the demangled builtin call.
186std::string lookupBuiltinNameHelper(StringRef DemangledCall,
187 FPDecorationId *DecorationId) {
188 StringRef PassPrefix = "(anonymous namespace)::";
189 std::string BuiltinName;
190 // Itanium Demangler result may have "(anonymous namespace)::" prefix
191 if (DemangledCall.starts_with(PassPrefix))
192 BuiltinName = DemangledCall.substr(PassPrefix.size());
193 else
194 BuiltinName = DemangledCall;
195 // Extract the builtin function name and types of arguments from the call
196 // skeleton.
197 BuiltinName = BuiltinName.substr(0, BuiltinName.find('('));
198
199 // Account for possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR
200 if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
201 BuiltinName = BuiltinName.substr(12);
202
203 // Check if the extracted name contains type information between angle
204 // brackets. If so, the builtin is an instantiated template - needs to have
205 // the information after angle brackets and return type removed.
206 std::size_t Pos1 = BuiltinName.rfind('<');
207 if (Pos1 != std::string::npos && BuiltinName.back() == '>') {
208 std::size_t Pos2 = BuiltinName.rfind(' ', Pos1);
209 if (Pos2 == std::string::npos)
210 Pos2 = 0;
211 else
212 ++Pos2;
213 BuiltinName = BuiltinName.substr(Pos2, Pos1 - Pos2);
214 BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
215 }
216
217 // Check if the extracted name begins with:
218 // - "__spirv_ImageSampleExplicitLod"
219 // - "__spirv_ImageRead"
220 // - "__spirv_ImageWrite"
221 // - "__spirv_ImageQuerySizeLod"
222 // - "__spirv_UDotKHR"
223 // - "__spirv_SDotKHR"
224 // - "__spirv_SUDotKHR"
225 // - "__spirv_SDotAccSatKHR"
226 // - "__spirv_UDotAccSatKHR"
227 // - "__spirv_SUDotAccSatKHR"
228 // - "__spirv_ReadClockKHR"
229 // - "__spirv_SubgroupBlockReadINTEL"
230 // - "__spirv_SubgroupImageBlockReadINTEL"
231 // - "__spirv_SubgroupImageMediaBlockReadINTEL"
232 // - "__spirv_SubgroupImageMediaBlockWriteINTEL"
233 // - "__spirv_Convert"
234 // - "__spirv_Round"
235 // - "__spirv_UConvert"
236 // - "__spirv_SConvert"
237 // - "__spirv_FConvert"
238 // - "__spirv_SatConvert"
239 // and maybe contains return type information at the end "_R<type>".
240 // If so, extract the plain builtin name without the type information.
241 static const std::regex SpvWithR(
242 "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageWrite|ImageQuerySizeLod|"
243 "UDotKHR|"
244 "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
245 "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
246 "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
247 "Convert|Round|"
248 "UConvert|SConvert|FConvert|SatConvert)[^_]*)(_R[^_]*_?(\\w+)?.*)?");
249 std::smatch Match;
250 if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
251 std::ssub_match SubMatch;
252 if (DecorationId && Match.size() > 3) {
253 SubMatch = Match[4];
254 *DecorationId = demangledPostfixToDecorationId(SubMatch.str());
255 }
256 SubMatch = Match[1];
257 BuiltinName = SubMatch.str();
258 }
259
260 return BuiltinName;
261}
262} // namespace SPIRV
263
264/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
265/// the provided \p DemangledCall and specified \p Set.
266///
267/// The lookup follows the following algorithm, returning the first successful
268/// match:
269/// 1. Search with the plain demangled name (expecting a 1:1 match).
270/// 2. Search with the prefix before or suffix after the demangled name
271/// signifying the type of the first argument.
272///
273/// \returns Wrapper around the demangled call and found builtin definition.
274static std::unique_ptr<const SPIRV::IncomingCall>
276 SPIRV::InstructionSet::InstructionSet Set,
277 Register ReturnRegister, const SPIRVType *ReturnType,
279 std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);
280
281 SmallVector<StringRef, 10> BuiltinArgumentTypes;
282 StringRef BuiltinArgs =
283 DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
284 BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);
285
286 // Look up the builtin in the defined set. Start with the plain demangled
287 // name, expecting a 1:1 match in the defined builtin set.
288 const SPIRV::DemangledBuiltin *Builtin;
289 if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
290 return std::make_unique<SPIRV::IncomingCall>(
291 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
292
293 // If the initial look up was unsuccessful and the demangled call takes at
294 // least 1 argument, add a prefix or suffix signifying the type of the first
295 // argument and repeat the search.
296 if (BuiltinArgumentTypes.size() >= 1) {
297 char FirstArgumentType = BuiltinArgumentTypes[0][0];
298 // Prefix to be added to the builtin's name for lookup.
299 // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
300 std::string Prefix;
301
302 switch (FirstArgumentType) {
303 // Unsigned:
304 case 'u':
305 if (Set == SPIRV::InstructionSet::OpenCL_std)
306 Prefix = "u_";
307 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
308 Prefix = "u";
309 break;
310 // Signed:
311 case 'c':
312 case 's':
313 case 'i':
314 case 'l':
315 if (Set == SPIRV::InstructionSet::OpenCL_std)
316 Prefix = "s_";
317 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
318 Prefix = "s";
319 break;
320 // Floating-point:
321 case 'f':
322 case 'd':
323 case 'h':
324 if (Set == SPIRV::InstructionSet::OpenCL_std ||
325 Set == SPIRV::InstructionSet::GLSL_std_450)
326 Prefix = "f";
327 break;
328 }
329
330 // If argument-type name prefix was added, look up the builtin again.
331 if (!Prefix.empty() &&
332 (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
333 return std::make_unique<SPIRV::IncomingCall>(
334 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
335
336 // If lookup with a prefix failed, find a suffix to be added to the
337 // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
338 // an unsigned value has a suffix "u".
339 std::string Suffix;
340
341 switch (FirstArgumentType) {
342 // Unsigned:
343 case 'u':
344 Suffix = "u";
345 break;
346 // Signed:
347 case 'c':
348 case 's':
349 case 'i':
350 case 'l':
351 Suffix = "s";
352 break;
353 // Floating-point:
354 case 'f':
355 case 'd':
356 case 'h':
357 Suffix = "f";
358 break;
359 }
360
361 // If argument-type name suffix was added, look up the builtin again.
362 if (!Suffix.empty() &&
363 (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
364 return std::make_unique<SPIRV::IncomingCall>(
365 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
366 }
367
368 // No builtin with such name was found in the set.
369 return nullptr;
370}
371
374 // We expect the following sequence of instructions:
375 // %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
376 // or = G_GLOBAL_VALUE @block_literal_global
377 // %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
378 // %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
379 MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
380 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
381 MI->getOperand(1).isReg());
382 Register BitcastReg = MI->getOperand(1).getReg();
383 MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
384 assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
385 BitcastMI->getOperand(2).isReg());
386 Register ValueReg = BitcastMI->getOperand(2).getReg();
387 MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
388 return ValueMI;
389}
390
391// Return an integer constant corresponding to the given register and
392// defined in spv_track_constant.
393// TODO: maybe unify with prelegalizer pass.
395 MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
396 assert(DefMI->getOpcode() == TargetOpcode::G_CONSTANT &&
397 DefMI->getOperand(1).isCImm());
398 return DefMI->getOperand(1).getCImm()->getValue().getZExtValue();
399}
400
401// Return type of the instruction result from spv_assign_type intrinsic.
402// TODO: maybe unify with prelegalizer pass.
404 MachineInstr *NextMI = MI->getNextNode();
405 if (!NextMI)
406 return nullptr;
407 if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
408 if ((NextMI = NextMI->getNextNode()) == nullptr)
409 return nullptr;
410 Register ValueReg = MI->getOperand(0).getReg();
411 if ((!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) &&
412 !isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_ptr_type)) ||
413 NextMI->getOperand(1).getReg() != ValueReg)
414 return nullptr;
415 Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
416 assert(Ty && "Type is expected");
417 return Ty;
418}
419
420static const Type *getBlockStructType(Register ParamReg,
422 // In principle, this information should be passed to us from Clang via
423 // an elementtype attribute. However, said attribute requires that
424 // the function call be an intrinsic, which is not. Instead, we rely on being
425 // able to trace this to the declaration of a variable: OpenCL C specification
426 // section 6.12.5 should guarantee that we can do this.
428 if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
429 return MI->getOperand(1).getGlobal()->getType();
430 assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
431 "Blocks in OpenCL C must be traceable to allocation site");
432 return getMachineInstrType(MI);
433}
434
435//===----------------------------------------------------------------------===//
436// Helper functions for building misc instructions
437//===----------------------------------------------------------------------===//
438
439/// Helper function building either a resulting scalar or vector bool register
440/// depending on the expected \p ResultType.
441///
442/// \returns Tuple of the resulting register and its type.
443static std::tuple<Register, SPIRVType *>
444buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
446 LLT Type;
447 SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
448
449 if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
450 unsigned VectorElements = ResultType->getOperand(2).getImm();
451 BoolType = GR->getOrCreateSPIRVVectorType(BoolType, VectorElements,
452 MIRBuilder, true);
455 Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
456 } else {
457 Type = LLT::scalar(1);
458 }
459
460 Register ResultRegister =
462 MIRBuilder.getMRI()->setRegClass(ResultRegister, GR->getRegClass(ResultType));
463 GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
464 return std::make_tuple(ResultRegister, BoolType);
465}
466
467/// Helper function for building either a vector or scalar select instruction
468/// depending on the expected \p ResultType.
469static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
470 Register ReturnRegister, Register SourceRegister,
471 const SPIRVType *ReturnType,
473 Register TrueConst, FalseConst;
474
475 if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
476 unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
478 TrueConst =
479 GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType, true);
480 FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType, true);
481 } else {
482 TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType, true);
483 FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType, true);
484 }
485
486 return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
487 FalseConst);
488}
489
490/// Helper function for building a load instruction loading into the
491/// \p DestinationReg.
493 MachineIRBuilder &MIRBuilder,
494 SPIRVGlobalRegistry *GR, LLT LowLevelType,
495 Register DestinationReg = Register(0)) {
496 if (!DestinationReg.isValid())
497 DestinationReg = createVirtualRegister(BaseType, GR, MIRBuilder);
498 // TODO: consider using correct address space and alignment (p0 is canonical
499 // type for selection though).
501 MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
502 return DestinationReg;
503}
504
505/// Helper function for building a load instruction for loading a builtin global
506/// variable of \p BuiltinValue value.
508 MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
509 SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
510 Register Reg = Register(0), bool isConst = true,
511 const std::optional<SPIRV::LinkageType::LinkageType> &LinkageTy = {
512 SPIRV::LinkageType::Import}) {
513 Register NewRegister =
514 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::pIDRegClass);
515 MIRBuilder.getMRI()->setType(
516 NewRegister,
517 LLT::pointer(storageClassToAddressSpace(SPIRV::StorageClass::Function),
518 GR->getPointerSize()));
520 VariableType, MIRBuilder, SPIRV::StorageClass::Input);
521 GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
522
523 // Set up the global OpVariable with the necessary builtin decorations.
524 Register Variable = GR->buildGlobalVariable(
525 NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
526 SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst, LinkageTy,
527 MIRBuilder, false);
528
529 // Load the value from the global variable.
530 Register LoadedRegister =
531 buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
532 MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
533 return LoadedRegister;
534}
535
536/// Helper external function for inserting ASSIGN_TYPE instruction between \p Reg
537/// and its definition, set the new register as a destination of the definition,
538/// assign SPIRVType to both registers. If SpirvTy is provided, use it as
539/// SPIRVType in ASSIGN_TYPE, otherwise create it from \p Ty. Defined in
540/// SPIRVPreLegalizer.cpp.
541extern void insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
544
545// TODO: Move to TableGen.
546static SPIRV::MemorySemantics::MemorySemantics
547getSPIRVMemSemantics(std::memory_order MemOrder) {
548 switch (MemOrder) {
549 case std::memory_order_relaxed:
550 return SPIRV::MemorySemantics::None;
551 case std::memory_order_acquire:
552 return SPIRV::MemorySemantics::Acquire;
553 case std::memory_order_release:
554 return SPIRV::MemorySemantics::Release;
555 case std::memory_order_acq_rel:
556 return SPIRV::MemorySemantics::AcquireRelease;
557 case std::memory_order_seq_cst:
558 return SPIRV::MemorySemantics::SequentiallyConsistent;
559 default:
560 report_fatal_error("Unknown CL memory scope");
561 }
562}
563
564static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
565 switch (ClScope) {
566 case SPIRV::CLMemoryScope::memory_scope_work_item:
567 return SPIRV::Scope::Invocation;
568 case SPIRV::CLMemoryScope::memory_scope_work_group:
569 return SPIRV::Scope::Workgroup;
570 case SPIRV::CLMemoryScope::memory_scope_device:
571 return SPIRV::Scope::Device;
572 case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
573 return SPIRV::Scope::CrossDevice;
574 case SPIRV::CLMemoryScope::memory_scope_sub_group:
575 return SPIRV::Scope::Subgroup;
576 }
577 report_fatal_error("Unknown CL memory scope");
578}
579
581 MachineIRBuilder &MIRBuilder,
583 return GR->buildConstantInt(
584 Val, MIRBuilder, GR->getOrCreateSPIRVIntegerType(32, MIRBuilder), true);
585}
586
587static Register buildScopeReg(Register CLScopeRegister,
588 SPIRV::Scope::Scope Scope,
589 MachineIRBuilder &MIRBuilder,
592 if (CLScopeRegister.isValid()) {
593 auto CLScope =
594 static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
595 Scope = getSPIRVScope(CLScope);
596
597 if (CLScope == static_cast<unsigned>(Scope)) {
598 MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
599 return CLScopeRegister;
600 }
601 }
602 return buildConstantIntReg32(Scope, MIRBuilder, GR);
603}
604
607 if (MRI->getRegClassOrNull(Reg))
608 return;
609 SPIRVType *SpvType = GR->getSPIRVTypeForVReg(Reg);
610 MRI->setRegClass(Reg,
611 SpvType ? GR->getRegClass(SpvType) : &SPIRV::iIDRegClass);
612}
613
614static Register buildMemSemanticsReg(Register SemanticsRegister,
615 Register PtrRegister, unsigned &Semantics,
616 MachineIRBuilder &MIRBuilder,
618 if (SemanticsRegister.isValid()) {
619 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
620 std::memory_order Order =
621 static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
622 Semantics =
623 getSPIRVMemSemantics(Order) |
625 if (static_cast<unsigned>(Order) == Semantics) {
626 MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
627 return SemanticsRegister;
628 }
629 }
630 return buildConstantIntReg32(Semantics, MIRBuilder, GR);
631}
632
633static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
635 Register TypeReg,
636 ArrayRef<uint32_t> ImmArgs = {}) {
637 auto MIB = MIRBuilder.buildInstr(Opcode);
638 if (TypeReg.isValid())
639 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
640 unsigned Sz = Call->Arguments.size() - ImmArgs.size();
641 for (unsigned i = 0; i < Sz; ++i)
642 MIB.addUse(Call->Arguments[i]);
643 for (uint32_t ImmArg : ImmArgs)
644 MIB.addImm(ImmArg);
645 return true;
646}
647
648/// Helper function for translating atomic init to OpStore.
650 MachineIRBuilder &MIRBuilder) {
651 if (Call->isSpirvOp())
652 return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));
653
654 assert(Call->Arguments.size() == 2 &&
655 "Need 2 arguments for atomic init translation");
656 MIRBuilder.buildInstr(SPIRV::OpStore)
657 .addUse(Call->Arguments[0])
658 .addUse(Call->Arguments[1]);
659 return true;
660}
661
662/// Helper function for building an atomic load instruction.
664 MachineIRBuilder &MIRBuilder,
666 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
667 if (Call->isSpirvOp())
668 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);
669
670 Register PtrRegister = Call->Arguments[0];
671 // TODO: if true insert call to __translate_ocl_memory_scope before
672 // OpAtomicLoad and the function implementation. We can use Translator's
673 // output for transcoding/atomic_explicit_arguments.cl as an example.
674 Register ScopeRegister =
675 Call->Arguments.size() > 1
676 ? Call->Arguments[1]
677 : buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
678 Register MemSemanticsReg;
679 if (Call->Arguments.size() > 2) {
680 // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
681 MemSemanticsReg = Call->Arguments[2];
682 } else {
683 int Semantics =
684 SPIRV::MemorySemantics::SequentiallyConsistent |
686 MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
687 }
688
689 MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
690 .addDef(Call->ReturnRegister)
691 .addUse(TypeReg)
692 .addUse(PtrRegister)
693 .addUse(ScopeRegister)
694 .addUse(MemSemanticsReg);
695 return true;
696}
697
698/// Helper function for building an atomic store instruction.
700 MachineIRBuilder &MIRBuilder,
702 if (Call->isSpirvOp())
703 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call,
704 Register(0));
705
706 Register ScopeRegister =
707 buildConstantIntReg32(SPIRV::Scope::Device, MIRBuilder, GR);
708 Register PtrRegister = Call->Arguments[0];
709 int Semantics =
710 SPIRV::MemorySemantics::SequentiallyConsistent |
712 Register MemSemanticsReg = buildConstantIntReg32(Semantics, MIRBuilder, GR);
713 MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
714 .addUse(PtrRegister)
715 .addUse(ScopeRegister)
716 .addUse(MemSemanticsReg)
717 .addUse(Call->Arguments[1]);
718 return true;
719}
720
721/// Helper function for building an atomic compare-exchange instruction.
723 const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
724 unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
725 if (Call->isSpirvOp())
726 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
727 GR->getSPIRVTypeID(Call->ReturnType));
728
729 bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
730 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
731
732 Register ObjectPtr = Call->Arguments[0]; // Pointer (volatile A *object.)
733 Register ExpectedArg = Call->Arguments[1]; // Comparator (C* expected).
734 Register Desired = Call->Arguments[2]; // Value (C Desired).
735 SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
736 LLT DesiredLLT = MRI->getType(Desired);
737
738 assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
739 SPIRV::OpTypePointer);
740 unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
741 (void)ExpectedType;
742 assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
743 : ExpectedType == SPIRV::OpTypePointer);
744 assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));
745
746 SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
747 assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
748 auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
749 SpvObjectPtrTy->getOperand(1).getImm());
750 auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);
751
752 Register MemSemEqualReg;
753 Register MemSemUnequalReg;
754 uint64_t MemSemEqual =
755 IsCmpxchg
756 ? SPIRV::MemorySemantics::None
757 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
758 uint64_t MemSemUnequal =
759 IsCmpxchg
760 ? SPIRV::MemorySemantics::None
761 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
762 if (Call->Arguments.size() >= 4) {
763 assert(Call->Arguments.size() >= 5 &&
764 "Need 5+ args for explicit atomic cmpxchg");
765 auto MemOrdEq =
766 static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
767 auto MemOrdNeq =
768 static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
769 MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
770 MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
771 if (static_cast<unsigned>(MemOrdEq) == MemSemEqual)
772 MemSemEqualReg = Call->Arguments[3];
773 if (static_cast<unsigned>(MemOrdNeq) == MemSemEqual)
774 MemSemUnequalReg = Call->Arguments[4];
775 }
776 if (!MemSemEqualReg.isValid())
777 MemSemEqualReg = buildConstantIntReg32(MemSemEqual, MIRBuilder, GR);
778 if (!MemSemUnequalReg.isValid())
779 MemSemUnequalReg = buildConstantIntReg32(MemSemUnequal, MIRBuilder, GR);
780
781 Register ScopeReg;
782 auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
783 if (Call->Arguments.size() >= 6) {
784 assert(Call->Arguments.size() == 6 &&
785 "Extra args for explicit atomic cmpxchg");
786 auto ClScope = static_cast<SPIRV::CLMemoryScope>(
787 getIConstVal(Call->Arguments[5], MRI));
788 Scope = getSPIRVScope(ClScope);
789 if (ClScope == static_cast<unsigned>(Scope))
790 ScopeReg = Call->Arguments[5];
791 }
792 if (!ScopeReg.isValid())
793 ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);
794
795 Register Expected = IsCmpxchg
796 ? ExpectedArg
797 : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
798 GR, LLT::scalar(64));
799 MRI->setType(Expected, DesiredLLT);
800 Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
801 : Call->ReturnRegister;
802 if (!MRI->getRegClassOrNull(Tmp))
803 MRI->setRegClass(Tmp, GR->getRegClass(SpvDesiredTy));
804 GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());
805
806 MIRBuilder.buildInstr(Opcode)
807 .addDef(Tmp)
808 .addUse(GR->getSPIRVTypeID(SpvDesiredTy))
809 .addUse(ObjectPtr)
810 .addUse(ScopeReg)
811 .addUse(MemSemEqualReg)
812 .addUse(MemSemUnequalReg)
813 .addUse(Desired)
815 if (!IsCmpxchg) {
816 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
817 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
818 }
819 return true;
820}
821
822/// Helper function for building atomic instructions.
823static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
824 MachineIRBuilder &MIRBuilder,
826 if (Call->isSpirvOp())
827 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
828 GR->getSPIRVTypeID(Call->ReturnType));
829
830 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
831 Register ScopeRegister =
832 Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();
833
834 assert(Call->Arguments.size() <= 4 &&
835 "Too many args for explicit atomic RMW");
836 ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
837 MIRBuilder, GR, MRI);
838
839 Register PtrRegister = Call->Arguments[0];
840 unsigned Semantics = SPIRV::MemorySemantics::None;
841 Register MemSemanticsReg =
842 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
843 MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
844 Semantics, MIRBuilder, GR);
845 Register ValueReg = Call->Arguments[1];
846 Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
847 // support cl_ext_float_atomics
848 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
849 if (Opcode == SPIRV::OpAtomicIAdd) {
850 Opcode = SPIRV::OpAtomicFAddEXT;
851 } else if (Opcode == SPIRV::OpAtomicISub) {
852 // Translate OpAtomicISub applied to a floating type argument to
853 // OpAtomicFAddEXT with the negative value operand
854 Opcode = SPIRV::OpAtomicFAddEXT;
855 Register NegValueReg =
856 MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
857 MRI->setRegClass(NegValueReg, GR->getRegClass(Call->ReturnType));
858 GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
859 MIRBuilder.getMF());
860 MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
861 .addDef(NegValueReg)
862 .addUse(ValueReg);
863 insertAssignInstr(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
864 MIRBuilder.getMF().getRegInfo());
865 ValueReg = NegValueReg;
866 }
867 }
868 MIRBuilder.buildInstr(Opcode)
869 .addDef(Call->ReturnRegister)
870 .addUse(ValueTypeReg)
871 .addUse(PtrRegister)
872 .addUse(ScopeRegister)
873 .addUse(MemSemanticsReg)
874 .addUse(ValueReg);
875 return true;
876}
877
878/// Helper function for building an atomic floating-type instruction.
880 unsigned Opcode,
881 MachineIRBuilder &MIRBuilder,
883 assert(Call->Arguments.size() == 4 &&
884 "Wrong number of atomic floating-type builtin");
885 Register PtrReg = Call->Arguments[0];
886 Register ScopeReg = Call->Arguments[1];
887 Register MemSemanticsReg = Call->Arguments[2];
888 Register ValueReg = Call->Arguments[3];
889 MIRBuilder.buildInstr(Opcode)
890 .addDef(Call->ReturnRegister)
891 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
892 .addUse(PtrReg)
893 .addUse(ScopeReg)
894 .addUse(MemSemanticsReg)
895 .addUse(ValueReg);
896 return true;
897}
898
899/// Helper function for building atomic flag instructions (e.g.
900/// OpAtomicFlagTestAndSet).
902 unsigned Opcode, MachineIRBuilder &MIRBuilder,
904 bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
905 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
906 if (Call->isSpirvOp())
907 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
908 IsSet ? TypeReg : Register(0));
909
910 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
911 Register PtrRegister = Call->Arguments[0];
912 unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
913 Register MemSemanticsReg =
914 Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
915 MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
916 Semantics, MIRBuilder, GR);
917
918 assert((Opcode != SPIRV::OpAtomicFlagClear ||
919 (Semantics != SPIRV::MemorySemantics::Acquire &&
920 Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
921 "Invalid memory order argument!");
922
923 Register ScopeRegister =
924 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
925 ScopeRegister =
926 buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);
927
928 auto MIB = MIRBuilder.buildInstr(Opcode);
929 if (IsSet)
930 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
931
932 MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
933 return true;
934}
935
936/// Helper function for building barriers, i.e., memory/control ordering
937/// operations.
938static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
939 MachineIRBuilder &MIRBuilder,
941 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
942 const auto *ST =
943 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
944 if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
945 Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
946 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
947 std::string DiagMsg = std::string(Builtin->Name) +
948 ": the builtin requires the following SPIR-V "
949 "extension: SPV_INTEL_split_barrier";
950 report_fatal_error(DiagMsg.c_str(), false);
951 }
952
953 if (Call->isSpirvOp())
954 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
955
956 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
957 unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
958 unsigned MemSemantics = SPIRV::MemorySemantics::None;
959
960 if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
961 MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;
962
963 if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
964 MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;
965
966 if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
967 MemSemantics |= SPIRV::MemorySemantics::ImageMemory;
968
969 if (Opcode == SPIRV::OpMemoryBarrier)
970 MemSemantics = getSPIRVMemSemantics(static_cast<std::memory_order>(
971 getIConstVal(Call->Arguments[1], MRI))) |
972 MemSemantics;
973 else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
974 MemSemantics |= SPIRV::MemorySemantics::Release;
975 else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
976 MemSemantics |= SPIRV::MemorySemantics::Acquire;
977 else
978 MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
979
980 Register MemSemanticsReg =
981 MemFlags == MemSemantics
982 ? Call->Arguments[0]
983 : buildConstantIntReg32(MemSemantics, MIRBuilder, GR);
984 Register ScopeReg;
985 SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
986 SPIRV::Scope::Scope MemScope = Scope;
987 if (Call->Arguments.size() >= 2) {
988 assert(
989 ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
990 (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
991 "Extra args for explicitly scoped barrier");
992 Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
993 : Call->Arguments[1];
994 SPIRV::CLMemoryScope CLScope =
995 static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
996 MemScope = getSPIRVScope(CLScope);
997 if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
998 (Opcode == SPIRV::OpMemoryBarrier))
999 Scope = MemScope;
1000 if (CLScope == static_cast<unsigned>(Scope))
1001 ScopeReg = Call->Arguments[1];
1002 }
1003
1004 if (!ScopeReg.isValid())
1005 ScopeReg = buildConstantIntReg32(Scope, MIRBuilder, GR);
1006
1007 auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
1008 if (Opcode != SPIRV::OpMemoryBarrier)
1009 MIB.addUse(buildConstantIntReg32(MemScope, MIRBuilder, GR));
1010 MIB.addUse(MemSemanticsReg);
1011 return true;
1012}
1013
1014/// Helper function for building extended bit operations.
1016 unsigned Opcode,
1017 MachineIRBuilder &MIRBuilder,
1018 SPIRVGlobalRegistry *GR) {
1019 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1020 const auto *ST =
1021 static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1022 if ((Opcode == SPIRV::OpBitFieldInsert ||
1023 Opcode == SPIRV::OpBitFieldSExtract ||
1024 Opcode == SPIRV::OpBitFieldUExtract || Opcode == SPIRV::OpBitReverse) &&
1025 !ST->canUseExtension(SPIRV::Extension::SPV_KHR_bit_instructions)) {
1026 std::string DiagMsg = std::string(Builtin->Name) +
1027 ": the builtin requires the following SPIR-V "
1028 "extension: SPV_KHR_bit_instructions";
1029 report_fatal_error(DiagMsg.c_str(), false);
1030 }
1031
1032 // Generate SPIRV instruction accordingly.
1033 if (Call->isSpirvOp())
1034 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1035 GR->getSPIRVTypeID(Call->ReturnType));
1036
1037 auto MIB = MIRBuilder.buildInstr(Opcode)
1038 .addDef(Call->ReturnRegister)
1039 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1040 for (unsigned i = 0; i < Call->Arguments.size(); ++i)
1041 MIB.addUse(Call->Arguments[i]);
1042
1043 return true;
1044}
1045
1046/// Helper function for building Intel's bindless image instructions.
1048 unsigned Opcode,
1049 MachineIRBuilder &MIRBuilder,
1050 SPIRVGlobalRegistry *GR) {
1051 // Generate SPIRV instruction accordingly.
1052 if (Call->isSpirvOp())
1053 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1054 GR->getSPIRVTypeID(Call->ReturnType));
1055
1056 MIRBuilder.buildInstr(Opcode)
1057 .addDef(Call->ReturnRegister)
1058 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1059 .addUse(Call->Arguments[0]);
1060
1061 return true;
1062}
1063
1064/// Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
1066 const SPIRV::IncomingCall *Call, unsigned Opcode,
1067 MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
1068 // Generate SPIRV instruction accordingly.
1069 if (Call->isSpirvOp())
1070 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1071 GR->getSPIRVTypeID(Call->ReturnType));
1072
1073 auto MIB = MIRBuilder.buildInstr(Opcode)
1074 .addDef(Call->ReturnRegister)
1075 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1076 for (unsigned i = 0; i < Call->Arguments.size(); ++i)
1077 MIB.addUse(Call->Arguments[i]);
1078
1079 return true;
1080}
1081
1082/// Helper function for building Intel's 2d block io instructions.
1084 unsigned Opcode,
1085 MachineIRBuilder &MIRBuilder,
1086 SPIRVGlobalRegistry *GR) {
1087 // Generate SPIRV instruction accordingly.
1088 if (Call->isSpirvOp())
1089 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
1090
1091 auto MIB = MIRBuilder.buildInstr(Opcode)
1092 .addDef(Call->ReturnRegister)
1093 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1094 for (unsigned i = 0; i < Call->Arguments.size(); ++i)
1095 MIB.addUse(Call->Arguments[i]);
1096
1097 return true;
1098}
1099
1100static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
1101 unsigned Scope, MachineIRBuilder &MIRBuilder,
1102 SPIRVGlobalRegistry *GR) {
1103 switch (Opcode) {
1104 case SPIRV::OpCommitReadPipe:
1105 case SPIRV::OpCommitWritePipe:
1106 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
1107 case SPIRV::OpGroupCommitReadPipe:
1108 case SPIRV::OpGroupCommitWritePipe:
1109 case SPIRV::OpGroupReserveReadPipePackets:
1110 case SPIRV::OpGroupReserveWritePipePackets: {
1111 Register ScopeConstReg =
1112 MIRBuilder.buildConstant(LLT::scalar(32), Scope).getReg(0);
1113 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1114 MRI->setRegClass(ScopeConstReg, &SPIRV::iIDRegClass);
1116 MIB = MIRBuilder.buildInstr(Opcode);
1117 // Add Return register and type.
1118 if (Opcode == SPIRV::OpGroupReserveReadPipePackets ||
1119 Opcode == SPIRV::OpGroupReserveWritePipePackets)
1120 MIB.addDef(Call->ReturnRegister)
1121 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1122
1123 MIB.addUse(ScopeConstReg);
1124 for (unsigned int i = 0; i < Call->Arguments.size(); ++i)
1125 MIB.addUse(Call->Arguments[i]);
1126
1127 return true;
1128 }
1129 default:
1130 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1131 GR->getSPIRVTypeID(Call->ReturnType));
1132 }
1133}
1134
1135static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
1136 switch (dim) {
1137 case SPIRV::Dim::DIM_1D:
1138 case SPIRV::Dim::DIM_Buffer:
1139 return 1;
1140 case SPIRV::Dim::DIM_2D:
1141 case SPIRV::Dim::DIM_Cube:
1142 case SPIRV::Dim::DIM_Rect:
1143 return 2;
1144 case SPIRV::Dim::DIM_3D:
1145 return 3;
1146 default:
1147 report_fatal_error("Cannot get num components for given Dim");
1148 }
1149}
1150
1151/// Helper function for obtaining the number of size components.
1152static unsigned getNumSizeComponents(SPIRVType *imgType) {
1153 assert(imgType->getOpcode() == SPIRV::OpTypeImage);
1154 auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
1155 unsigned numComps = getNumComponentsForDim(dim);
1156 bool arrayed = imgType->getOperand(4).getImm() == 1;
1157 return arrayed ? numComps + 1 : numComps;
1158}
1159
1160//===----------------------------------------------------------------------===//
1161// Implementation functions for each builtin group
1162//===----------------------------------------------------------------------===//
1163
1165 MachineIRBuilder &MIRBuilder,
1166 SPIRVGlobalRegistry *GR, const CallBase &CB) {
1167 // Lookup the extended instruction number in the TableGen records.
1168 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1170 SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
1171 // fmin_common and fmax_common are now deprecated, and we should use fmin and
1172 // fmax with NotInf and NotNaN flags instead. Keep original number to add
1173 // later the NoNans and NoInfs flags.
1174 uint32_t OrigNumber = Number;
1175 const SPIRVSubtarget &ST =
1176 cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
1177 if (ST.canUseExtension(SPIRV::Extension::SPV_KHR_float_controls2) &&
1178 (Number == SPIRV::OpenCLExtInst::fmin_common ||
1179 Number == SPIRV::OpenCLExtInst::fmax_common)) {
1180 Number = (Number == SPIRV::OpenCLExtInst::fmin_common)
1181 ? SPIRV::OpenCLExtInst::fmin
1182 : SPIRV::OpenCLExtInst::fmax;
1183 }
1184
1185 // Build extended instruction.
1186 auto MIB =
1187 MIRBuilder.buildInstr(SPIRV::OpExtInst)
1188 .addDef(Call->ReturnRegister)
1189 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1190 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
1191 .addImm(Number);
1192
1193 for (auto Argument : Call->Arguments)
1194 MIB.addUse(Argument);
1195 MIB.getInstr()->copyIRFlags(CB);
1196 if (OrigNumber == SPIRV::OpenCLExtInst::fmin_common ||
1197 OrigNumber == SPIRV::OpenCLExtInst::fmax_common) {
1198 // Add NoNans and NoInfs flags to fmin/fmax instruction.
1199 MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoNans);
1200 MIB.getInstr()->setFlag(MachineInstr::MIFlag::FmNoInfs);
1201 }
1202 return true;
1203}
1204
1206 MachineIRBuilder &MIRBuilder,
1207 SPIRVGlobalRegistry *GR) {
1208 // Lookup the instruction opcode in the TableGen records.
1209 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1210 unsigned Opcode =
1211 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1212
1213 Register CompareRegister;
1214 SPIRVType *RelationType;
1215 std::tie(CompareRegister, RelationType) =
1216 buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
1217
1218 // Build relational instruction.
1219 auto MIB = MIRBuilder.buildInstr(Opcode)
1220 .addDef(CompareRegister)
1221 .addUse(GR->getSPIRVTypeID(RelationType));
1222
1223 for (auto Argument : Call->Arguments)
1224 MIB.addUse(Argument);
1225
1226 // Build select instruction.
1227 return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
1228 Call->ReturnType, GR);
1229}
1230
1232 MachineIRBuilder &MIRBuilder,
1233 SPIRVGlobalRegistry *GR) {
1234 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1235 const SPIRV::GroupBuiltin *GroupBuiltin =
1236 SPIRV::lookupGroupBuiltin(Builtin->Name);
1237
1238 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1239 if (Call->isSpirvOp()) {
1240 if (GroupBuiltin->NoGroupOperation) {
1242 if (GroupBuiltin->Opcode ==
1243 SPIRV::OpSubgroupMatrixMultiplyAccumulateINTEL &&
1244 Call->Arguments.size() > 4)
1245 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[4], MRI));
1246 return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
1247 GR->getSPIRVTypeID(Call->ReturnType), ImmArgs);
1248 }
1249
1250 // Group Operation is a literal
1251 Register GroupOpReg = Call->Arguments[1];
1252 const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
1253 if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
1255 "Group Operation parameter must be an integer constant");
1256 uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
1257 Register ScopeReg = Call->Arguments[0];
1258 auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
1259 .addDef(Call->ReturnRegister)
1260 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1261 .addUse(ScopeReg)
1262 .addImm(GrpOp);
1263 for (unsigned i = 2; i < Call->Arguments.size(); ++i)
1264 MIB.addUse(Call->Arguments[i]);
1265 return true;
1266 }
1267
1268 Register Arg0;
1269 if (GroupBuiltin->HasBoolArg) {
1270 SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
1271 Register BoolReg = Call->Arguments[0];
1272 SPIRVType *BoolRegType = GR->getSPIRVTypeForVReg(BoolReg);
1273 if (!BoolRegType)
1274 report_fatal_error("Can't find a register's type definition");
1275 MachineInstr *ArgInstruction = getDefInstrMaybeConstant(BoolReg, MRI);
1276 if (ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT) {
1277 if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)
1278 Arg0 = GR->buildConstantInt(getIConstVal(BoolReg, MRI), MIRBuilder,
1279 BoolType, true);
1280 } else {
1281 if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
1282 Arg0 = MRI->createGenericVirtualRegister(LLT::scalar(1));
1283 MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);
1284 GR->assignSPIRVTypeToVReg(BoolType, Arg0, MIRBuilder.getMF());
1285 MIRBuilder.buildICmp(
1286 CmpInst::ICMP_NE, Arg0, BoolReg,
1287 GR->buildConstantInt(0, MIRBuilder, BoolRegType, true));
1288 insertAssignInstr(Arg0, nullptr, BoolType, GR, MIRBuilder,
1289 MIRBuilder.getMF().getRegInfo());
1290 } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
1291 report_fatal_error("Expect a boolean argument");
1292 }
1293 // if BoolReg is a boolean register, we don't need to do anything
1294 }
1295 }
1296
1297 Register GroupResultRegister = Call->ReturnRegister;
1298 SPIRVType *GroupResultType = Call->ReturnType;
1299
1300 // TODO: maybe we need to check whether the result type is already boolean
1301 // and in this case do not insert select instruction.
1302 const bool HasBoolReturnTy =
1303 GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
1304 GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
1305 GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;
1306
1307 if (HasBoolReturnTy)
1308 std::tie(GroupResultRegister, GroupResultType) =
1309 buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
1310
1311 auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
1312 : SPIRV::Scope::Workgroup;
1313 Register ScopeRegister = buildConstantIntReg32(Scope, MIRBuilder, GR);
1314
1315 Register VecReg;
1316 if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
1317 Call->Arguments.size() > 2) {
1318 // For OpGroupBroadcast "LocalId must be an integer datatype. It must be a
1319 // scalar, a vector with 2 components, or a vector with 3 components.",
1320 // meaning that we must create a vector from the function arguments if
1321 // it's a work_group_broadcast(val, local_id_x, local_id_y) or
1322 // work_group_broadcast(val, local_id_x, local_id_y, local_id_z) call.
1323 Register ElemReg = Call->Arguments[1];
1324 SPIRVType *ElemType = GR->getSPIRVTypeForVReg(ElemReg);
1325 if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)
1326 report_fatal_error("Expect an integer <LocalId> argument");
1327 unsigned VecLen = Call->Arguments.size() - 1;
1328 VecReg = MRI->createGenericVirtualRegister(
1329 LLT::fixed_vector(VecLen, MRI->getType(ElemReg)));
1330 MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
1331 SPIRVType *VecType =
1332 GR->getOrCreateSPIRVVectorType(ElemType, VecLen, MIRBuilder, true);
1333 GR->assignSPIRVTypeToVReg(VecType, VecReg, MIRBuilder.getMF());
1334 auto MIB =
1335 MIRBuilder.buildInstr(TargetOpcode::G_BUILD_VECTOR).addDef(VecReg);
1336 for (unsigned i = 1; i < Call->Arguments.size(); i++) {
1337 MIB.addUse(Call->Arguments[i]);
1338 setRegClassIfNull(Call->Arguments[i], MRI, GR);
1339 }
1340 insertAssignInstr(VecReg, nullptr, VecType, GR, MIRBuilder,
1341 MIRBuilder.getMF().getRegInfo());
1342 }
1343
1344 // Build work/sub group instruction.
1345 auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
1346 .addDef(GroupResultRegister)
1347 .addUse(GR->getSPIRVTypeID(GroupResultType))
1348 .addUse(ScopeRegister);
1349
1350 if (!GroupBuiltin->NoGroupOperation)
1351 MIB.addImm(GroupBuiltin->GroupOperation);
1352 if (Call->Arguments.size() > 0) {
1353 MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
1354 setRegClassIfNull(Call->Arguments[0], MRI, GR);
1355 if (VecReg.isValid())
1356 MIB.addUse(VecReg);
1357 else
1358 for (unsigned i = 1; i < Call->Arguments.size(); i++)
1359 MIB.addUse(Call->Arguments[i]);
1360 }
1361
1362 // Build select instruction.
1363 if (HasBoolReturnTy)
1364 buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
1365 Call->ReturnType, GR);
1366 return true;
1367}
1368
1370 MachineIRBuilder &MIRBuilder,
1371 SPIRVGlobalRegistry *GR) {
1372 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1373 MachineFunction &MF = MIRBuilder.getMF();
1374 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1375 const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
1376 SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);
1377
1378 if (IntelSubgroups->IsMedia &&
1379 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
1380 std::string DiagMsg = std::string(Builtin->Name) +
1381 ": the builtin requires the following SPIR-V "
1382 "extension: SPV_INTEL_media_block_io";
1383 report_fatal_error(DiagMsg.c_str(), false);
1384 } else if (!IntelSubgroups->IsMedia &&
1385 !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1386 std::string DiagMsg = std::string(Builtin->Name) +
1387 ": the builtin requires the following SPIR-V "
1388 "extension: SPV_INTEL_subgroups";
1389 report_fatal_error(DiagMsg.c_str(), false);
1390 }
1391
1392 uint32_t OpCode = IntelSubgroups->Opcode;
1393 if (Call->isSpirvOp()) {
1394 bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
1395 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
1396 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
1397 return buildOpFromWrapper(MIRBuilder, OpCode, Call,
1398 IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
1399 : Register(0));
1400 }
1401
1402 if (IntelSubgroups->IsBlock) {
1403 // Minimal number or arguments set in TableGen records is 1
1404 if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
1405 if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
1406 // TODO: add required validation from the specification:
1407 // "'Image' must be an object whose type is OpTypeImage with a 'Sampled'
1408 // operand of 0 or 2. If the 'Sampled' operand is 2, then some
1409 // dimensions require a capability."
1410 switch (OpCode) {
1411 case SPIRV::OpSubgroupBlockReadINTEL:
1412 OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
1413 break;
1414 case SPIRV::OpSubgroupBlockWriteINTEL:
1415 OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
1416 break;
1417 }
1418 }
1419 }
1420 }
1421
1422 // TODO: opaque pointers types should be eventually resolved in such a way
1423 // that validation of block read is enabled with respect to the following
1424 // specification requirement:
1425 // "'Result Type' may be a scalar or vector type, and its component type must
1426 // be equal to the type pointed to by 'Ptr'."
1427 // For example, function parameter type should not be default i8 pointer, but
1428 // depend on the result type of the instruction where it is used as a pointer
1429 // argument of OpSubgroupBlockReadINTEL
1430
1431 // Build Intel subgroups instruction
1433 IntelSubgroups->IsWrite
1434 ? MIRBuilder.buildInstr(OpCode)
1435 : MIRBuilder.buildInstr(OpCode)
1436 .addDef(Call->ReturnRegister)
1437 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1438 for (size_t i = 0; i < Call->Arguments.size(); ++i)
1439 MIB.addUse(Call->Arguments[i]);
1440 return true;
1441}
1442
1444 MachineIRBuilder &MIRBuilder,
1445 SPIRVGlobalRegistry *GR) {
1446 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1447 MachineFunction &MF = MIRBuilder.getMF();
1448 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1449 if (!ST->canUseExtension(
1450 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1451 std::string DiagMsg = std::string(Builtin->Name) +
1452 ": the builtin requires the following SPIR-V "
1453 "extension: SPV_KHR_uniform_group_instructions";
1454 report_fatal_error(DiagMsg.c_str(), false);
1455 }
1456 const SPIRV::GroupUniformBuiltin *GroupUniform =
1457 SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
1458 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1459
1460 Register GroupResultReg = Call->ReturnRegister;
1461 Register ScopeReg = Call->Arguments[0];
1462 Register ValueReg = Call->Arguments[2];
1463
1464 // Group Operation
1465 Register ConstGroupOpReg = Call->Arguments[1];
1466 const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
1467 if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
1469 "expect a constant group operation for a uniform group instruction",
1470 false);
1471 const MachineOperand &ConstOperand = Const->getOperand(1);
1472 if (!ConstOperand.isCImm())
1473 report_fatal_error("uniform group instructions: group operation must be an "
1474 "integer constant",
1475 false);
1476
1477 auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
1478 .addDef(GroupResultReg)
1479 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1480 .addUse(ScopeReg);
1481 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
1482 MIB.addUse(ValueReg);
1483
1484 return true;
1485}
1486
1488 MachineIRBuilder &MIRBuilder,
1489 SPIRVGlobalRegistry *GR) {
1490 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1491 MachineFunction &MF = MIRBuilder.getMF();
1492 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1493 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
1494 std::string DiagMsg = std::string(Builtin->Name) +
1495 ": the builtin requires the following SPIR-V "
1496 "extension: SPV_KHR_shader_clock";
1497 report_fatal_error(DiagMsg.c_str(), false);
1498 }
1499
1500 Register ResultReg = Call->ReturnRegister;
1501
1502 if (Builtin->Name == "__spirv_ReadClockKHR") {
1503 MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
1504 .addDef(ResultReg)
1505 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1506 .addUse(Call->Arguments[0]);
1507 } else {
1508 // Deduce the `Scope` operand from the builtin function name.
1509 SPIRV::Scope::Scope ScopeArg =
1511 .EndsWith("device", SPIRV::Scope::Scope::Device)
1512 .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
1513 .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
1514 Register ScopeReg = buildConstantIntReg32(ScopeArg, MIRBuilder, GR);
1515
1516 MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
1517 .addDef(ResultReg)
1518 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1519 .addUse(ScopeReg);
1520 }
1521
1522 return true;
1523}
1524
1525// These queries ask for a single size_t result for a given dimension index,
1526// e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
1527// corresponding to these values are all vec3 types, so we need to extract the
1528// correct index or return DefaultValue (0 or 1 depending on the query). We also
1529// handle extending or truncating in case size_t does not match the expected
1530// result type's bitwidth.
1531//
1532// For a constant index >= 3 we generate:
1533// %res = OpConstant %SizeT DefaultValue
1534//
1535// For other indices we generate:
1536// %g = OpVariable %ptr_V3_SizeT Input
1537// OpDecorate %g BuiltIn XXX
1538// OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
1539// OpDecorate %g Constant
1540// %loadedVec = OpLoad %V3_SizeT %g
1541//
1542// Then, if the index is constant < 3, we generate:
1543// %res = OpCompositeExtract %SizeT %loadedVec idx
1544// If the index is dynamic, we generate:
1545// %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
1546// %cmp = OpULessThan %bool %idx %const_3
1547// %res = OpSelect %SizeT %cmp %tmp %const_<DefaultValue>
1548//
1549// If the bitwidth of %res does not match the expected return type, we add an
1550// extend or truncate.
1552 MachineIRBuilder &MIRBuilder,
1554 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1555 uint64_t DefaultValue) {
1556 Register IndexRegister = Call->Arguments[0];
1557 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
1558 const unsigned PointerSize = GR->getPointerSize();
1559 const SPIRVType *PointerSizeType =
1560 GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
1561 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1562 auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);
1563
1564 // Set up the final register to do truncation or extension on at the end.
1565 Register ToTruncate = Call->ReturnRegister;
1566
1567 // If the index is constant, we can statically determine if it is in range.
1568 bool IsConstantIndex =
1569 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1570
1571 // If it's out of range (max dimension is 3), we can just return the constant
1572 // default value (0 or 1 depending on which query function).
1573 if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
1574 Register DefaultReg = Call->ReturnRegister;
1575 if (PointerSize != ResultWidth) {
1576 DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1577 MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1578 GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
1579 MIRBuilder.getMF());
1580 ToTruncate = DefaultReg;
1581 }
1582 auto NewRegister =
1583 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1584 MIRBuilder.buildCopy(DefaultReg, NewRegister);
1585 } else { // If it could be in range, we need to load from the given builtin.
1586 auto Vec3Ty =
1587 GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder, true);
1588 Register LoadedVector =
1589 buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
1590 LLT::fixed_vector(3, PointerSize));
1591 // Set up the vreg to extract the result to (possibly a new temporary one).
1592 Register Extracted = Call->ReturnRegister;
1593 if (!IsConstantIndex || PointerSize != ResultWidth) {
1594 Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1595 MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
1596 GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
1597 }
1598 // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
1599 // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
1600 MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
1601 Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
1602 ExtractInst.addUse(LoadedVector).addUse(IndexRegister);
1603
1604 // If the index is dynamic, need check if it's < 3, and then use a select.
1605 if (!IsConstantIndex) {
1606 insertAssignInstr(Extracted, nullptr, PointerSizeType, GR, MIRBuilder,
1607 *MRI);
1608
1609 auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
1610 auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder, true);
1611
1612 Register CompareRegister =
1613 MRI->createGenericVirtualRegister(LLT::scalar(1));
1614 MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1615 GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());
1616
1617 // Use G_ICMP to check if idxVReg < 3.
1618 MIRBuilder.buildICmp(
1619 CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
1620 GR->buildConstantInt(3, MIRBuilder, IndexType, true));
1621
1622 // Get constant for the default value (0 or 1 depending on which
1623 // function).
1624 Register DefaultRegister =
1625 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType, true);
1626
1627 // Get a register for the selection result (possibly a new temporary one).
1628 Register SelectionResult = Call->ReturnRegister;
1629 if (PointerSize != ResultWidth) {
1630 SelectionResult =
1631 MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1632 MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1633 GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
1634 MIRBuilder.getMF());
1635 }
1636 // Create the final G_SELECT to return the extracted value or the default.
1637 MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
1638 DefaultRegister);
1639 ToTruncate = SelectionResult;
1640 } else {
1641 ToTruncate = Extracted;
1642 }
1643 }
1644 // Alter the result's bitwidth if it does not match the SizeT value extracted.
1645 if (PointerSize != ResultWidth)
1646 MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
1647 return true;
1648}
1649
1651 MachineIRBuilder &MIRBuilder,
1652 SPIRVGlobalRegistry *GR) {
1653 // Lookup the builtin variable record.
1654 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1655 SPIRV::BuiltIn::BuiltIn Value =
1656 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1657
1658 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1659 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1660
1661 // Build a load instruction for the builtin variable.
1662 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1663 LLT LLType;
1664 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1665 LLType =
1666 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
1667 else
1668 LLType = LLT::scalar(BitWidth);
1669
1670 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1671 LLType, Call->ReturnRegister);
1672}
1673
1675 MachineIRBuilder &MIRBuilder,
1676 SPIRVGlobalRegistry *GR) {
1677 // Lookup the instruction opcode in the TableGen records.
1678 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1679 unsigned Opcode =
1680 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1681
1682 switch (Opcode) {
1683 case SPIRV::OpStore:
1684 return buildAtomicInitInst(Call, MIRBuilder);
1685 case SPIRV::OpAtomicLoad:
1686 return buildAtomicLoadInst(Call, MIRBuilder, GR);
1687 case SPIRV::OpAtomicStore:
1688 return buildAtomicStoreInst(Call, MIRBuilder, GR);
1689 case SPIRV::OpAtomicCompareExchange:
1690 case SPIRV::OpAtomicCompareExchangeWeak:
1691 return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
1692 GR);
1693 case SPIRV::OpAtomicIAdd:
1694 case SPIRV::OpAtomicISub:
1695 case SPIRV::OpAtomicOr:
1696 case SPIRV::OpAtomicXor:
1697 case SPIRV::OpAtomicAnd:
1698 case SPIRV::OpAtomicExchange:
1699 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1700 case SPIRV::OpMemoryBarrier:
1701 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1702 case SPIRV::OpAtomicFlagTestAndSet:
1703 case SPIRV::OpAtomicFlagClear:
1704 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1705 default:
1706 if (Call->isSpirvOp())
1707 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1708 GR->getSPIRVTypeID(Call->ReturnType));
1709 return false;
1710 }
1711}
1712
1714 MachineIRBuilder &MIRBuilder,
1715 SPIRVGlobalRegistry *GR) {
1716 // Lookup the instruction opcode in the TableGen records.
1717 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1718 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
1719
1720 switch (Opcode) {
1721 case SPIRV::OpAtomicFAddEXT:
1722 case SPIRV::OpAtomicFMinEXT:
1723 case SPIRV::OpAtomicFMaxEXT:
1724 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1725 default:
1726 return false;
1727 }
1728}
1729
1731 MachineIRBuilder &MIRBuilder,
1732 SPIRVGlobalRegistry *GR) {
1733 // Lookup the instruction opcode in the TableGen records.
1734 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1735 unsigned Opcode =
1736 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1737
1738 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1739}
1740
1742 MachineIRBuilder &MIRBuilder,
1743 SPIRVGlobalRegistry *GR) {
1744 // Lookup the instruction opcode in the TableGen records.
1745 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1746 unsigned Opcode =
1747 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1748
1749 if (Opcode == SPIRV::OpGenericCastToPtrExplicit) {
1750 SPIRV::StorageClass::StorageClass ResSC =
1751 GR->getPointerStorageClass(Call->ReturnRegister);
1752 if (!isGenericCastablePtr(ResSC))
1753 return false;
1754
1755 MIRBuilder.buildInstr(Opcode)
1756 .addDef(Call->ReturnRegister)
1757 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1758 .addUse(Call->Arguments[0])
1759 .addImm(ResSC);
1760 } else {
1761 MIRBuilder.buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1762 .addDef(Call->ReturnRegister)
1763 .addUse(Call->Arguments[0]);
1764 }
1765 return true;
1766}
1767
// Lowers dot()/fmul-style builtins. Vector arguments map to OpDot and scalar
// arguments to OpFMulS, except that integer results on targets with
// SPV_KHR_integer_dot_product (or SPIR-V >= 1.6) are routed to the
// OpSDot/OpUDot/OpSUDot(AccSat) family. For the mixed-signedness OpSUDot*
// forms the two vector operands are swapped when the unsigned operand comes
// first (IsSwapReq), since OpSUDot expects signed-then-unsigned. Scalar
// integer-dot forms additionally carry the PackedVectorFormat4x8Bit
// immediate required by the spec.
// NOTE(review): doxygen lines 1769 (the Call parameter of this signature)
// and 1797 (presumably the declaration of TypeStrs, a SmallVector<StringRef>
// filled by parseBuiltinTypeStr) are missing from this extraction.
1768 static bool generateDotOrFMulInst(const StringRef DemangledCall,
1770                                   MachineIRBuilder &MIRBuilder,
1771                                   SPIRVGlobalRegistry *GR) {
1772   if (Call->isSpirvOp())
1773     return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1774                               GR->getSPIRVTypeID(Call->ReturnType));
1775
1776   bool IsVec = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() ==
1777                SPIRV::OpTypeVector;
1778   // Use OpDot only in case of vector args and OpFMul in case of scalar args.
1779   uint32_t OC = IsVec ? SPIRV::OpDot : SPIRV::OpFMulS;
1780   bool IsSwapReq = false;
1781
1782   const auto *ST =
1783       static_cast<const SPIRVSubtarget *>(&MIRBuilder.getMF().getSubtarget());
1784   if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt) &&
1785       (ST->canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
1786        ST->isAtLeastSPIRVVer(VersionTuple(1, 6)))) {
1787     const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1788     const SPIRV::IntegerDotProductBuiltin *IntDot =
1789         SPIRV::lookupIntegerDotProductBuiltin(Builtin->Name);
1790     if (IntDot) {
1791       OC = IntDot->Opcode;
1792       IsSwapReq = IntDot->IsSwapReq;
1793     } else if (IsVec) {
1794       // Handling "dot" and "dot_acc_sat" builtins which use vectors of
1795       // integers.
1796       LLVMContext &Ctx = MIRBuilder.getContext();
1798       SPIRV::parseBuiltinTypeStr(TypeStrs, DemangledCall, Ctx);
// Signedness is derived from the demangled parameter type spelling: a
// leading 'u' (e.g. "uint4") marks an unsigned operand.
1799       bool IsFirstSigned = TypeStrs[0].trim()[0] != 'u';
1800       bool IsSecondSigned = TypeStrs[1].trim()[0] != 'u';
1801
1802       if (Call->BuiltinName == "dot") {
1803         if (IsFirstSigned && IsSecondSigned)
1804           OC = SPIRV::OpSDot;
1805         else if (!IsFirstSigned && !IsSecondSigned)
1806           OC = SPIRV::OpUDot;
1807         else {
1808           OC = SPIRV::OpSUDot;
1809           if (!IsFirstSigned)
1810             IsSwapReq = true;
1811         }
1812       } else if (Call->BuiltinName == "dot_acc_sat") {
1813         if (IsFirstSigned && IsSecondSigned)
1814           OC = SPIRV::OpSDotAccSat;
1815         else if (!IsFirstSigned && !IsSecondSigned)
1816           OC = SPIRV::OpUDotAccSat;
1817         else {
1818           OC = SPIRV::OpSUDotAccSat;
1819           if (!IsFirstSigned)
1820             IsSwapReq = true;
1821         }
1822       }
1823     }
1824   }
1825
1826   MachineInstrBuilder MIB = MIRBuilder.buildInstr(OC)
1827                                 .addDef(Call->ReturnRegister)
1828                                 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1829
1830   if (IsSwapReq) {
1831     MIB.addUse(Call->Arguments[1]);
1832     MIB.addUse(Call->Arguments[0]);
1833     // needed for dot_acc_sat* builtins
1834     for (size_t i = 2; i < Call->Arguments.size(); ++i)
1835       MIB.addUse(Call->Arguments[i]);
1836   } else {
1837     for (size_t i = 0; i < Call->Arguments.size(); ++i)
1838       MIB.addUse(Call->Arguments[i]);
1839   }
1840
1841   // Add Packed Vector Format for Integer dot product builtins if arguments are
1842   // scalar
1843   if (!IsVec && OC != SPIRV::OpFMulS)
1844     MIB.addImm(SPIRV::PackedVectorFormat4x8Bit);
1845
1846   return true;
1847 }
1848
// Lowers a wave builtin by loading the corresponding SPIR-V built-in variable
// (resolved through the TableGen GetBuiltins table) into the call's return
// register. Only scalar integer return types are supported (see the assert).
// NOTE(review): doxygen lines 1849 (function name + Call parameter) and 1860
// (presumably `return buildBuiltinVariableLoad(`) are missing from this
// extraction.
1850                                 MachineIRBuilder &MIRBuilder,
1851                                 SPIRVGlobalRegistry *GR) {
1852   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1853   SPIRV::BuiltIn::BuiltIn Value =
1854       SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1855
1856   // For now, we only support a single Wave intrinsic with a single return type.
1857   assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1858   LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1859
1861       MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1862       /* isConst= */ false, /* LinkageType= */ std::nullopt);
1863 }
1864
1865 // We expect a builtin
1866 //     Name(ptr sret([RetType]) %result, Type %operand1, Type %operand1)
1867 // where %result is a pointer to where the result of the builtin execution
1868 // is to be stored, and generate the following instructions:
1869 //     Res = Opcode RetType Operand1 Operand1
1870 //     OpStore RetVariable Res
// Validates the sret pointer/struct contract (fatal errors otherwise),
// switches the scalar opcode to its vector form when operands are vectors,
// emits the arithmetic-with-overflow instruction into a fresh virtual
// register typed as the result struct, and stores that struct through the
// sret pointer.
// NOTE(review): doxygen line 1871 (function name + Call parameter —
// presumably `static bool generateOverflowInst(...)`) is missing from this
// extraction.
1872                                  MachineIRBuilder &MIRBuilder,
1873                                  SPIRVGlobalRegistry *GR) {
1874   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1875   unsigned Opcode =
1876       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1877
1878   Register SRetReg = Call->Arguments[0];
1879   SPIRVType *PtrRetType = GR->getSPIRVTypeForVReg(SRetReg);
1880   SPIRVType *RetType = GR->getPointeeType(PtrRetType);
1881   if (!RetType)
1882     report_fatal_error("The first parameter must be a pointer");
1883   if (RetType->getOpcode() != SPIRV::OpTypeStruct)
1884     report_fatal_error("Expected struct type result for the arithmetic with "
1885                        "overflow builtins");
1886
1887   SPIRVType *OpType1 = GR->getSPIRVTypeForVReg(Call->Arguments[1]);
1888   SPIRVType *OpType2 = GR->getSPIRVTypeForVReg(Call->Arguments[2]);
1889   if (!OpType1 || !OpType2 || OpType1 != OpType2)
1890     report_fatal_error("Operands must have the same type");
1891   if (OpType1->getOpcode() == SPIRV::OpTypeVector)
1892     switch (Opcode) {
1893     case SPIRV::OpIAddCarryS:
1894       Opcode = SPIRV::OpIAddCarryV;
1895       break;
1896     case SPIRV::OpISubBorrowS:
1897       Opcode = SPIRV::OpISubBorrowV;
1898       break;
1899     }
1900
1901   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1902   Register ResReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
// Mirror the operand's register class/LLT when known; fall back to s64.
1903   if (const TargetRegisterClass *DstRC =
1904           MRI->getRegClassOrNull(Call->Arguments[1])) {
1905     MRI->setRegClass(ResReg, DstRC);
1906     MRI->setType(ResReg, MRI->getType(Call->Arguments[1]));
1907   } else {
1908     MRI->setType(ResReg, LLT::scalar(64));
1909   }
1910   GR->assignSPIRVTypeToVReg(RetType, ResReg, MIRBuilder.getMF());
1911   MIRBuilder.buildInstr(Opcode)
1912       .addDef(ResReg)
1913       .addUse(GR->getSPIRVTypeID(RetType))
1914       .addUse(Call->Arguments[1])
1915       .addUse(Call->Arguments[2]);
1916   MIRBuilder.buildInstr(SPIRV::OpStore).addUse(SRetReg).addUse(ResReg);
1917   return true;
1918 }
1919
// Lowers work-item/work-group query builtins through genWorkgroupQuery. The
// out-of-range default is 1 for the size-like builtins enumerated below and
// 0 for the remaining (id-like) queries.
// NOTE(review): doxygen line 1920 (function name + Call parameter) is missing
// from this extraction.
1921                                  MachineIRBuilder &MIRBuilder,
1922                                  SPIRVGlobalRegistry *GR) {
1923   // Lookup the builtin record.
1924   SPIRV::BuiltIn::BuiltIn Value =
1925       SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1926   const bool IsDefaultOne = (Value == SPIRV::BuiltIn::GlobalSize ||
1927                              Value == SPIRV::BuiltIn::NumWorkgroups ||
1928                              Value == SPIRV::BuiltIn::WorkgroupSize ||
1929                              Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
1930   return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefaultOne ? 1 : 0);
1931 }
1932
// Lowers image size queries. Emits OpImageQuerySize for buffer images or
// OpImageQuerySizeLod (with Lod 0) otherwise, then reconciles the image's
// actual size-component count with the component count the builtin's return
// type expects: extracting a single component with OpCompositeExtract, or
// widening via OpVectorShuffle where 0xffffffff marks undefined lanes.
// NOTE(review): doxygen line 1933 (function name + Call parameter) is missing
// from this extraction.
1934                                       MachineIRBuilder &MIRBuilder,
1935                                       SPIRVGlobalRegistry *GR) {
1936   // Lookup the image size query component number in the TableGen records.
1937   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1938   uint32_t Component =
1939       SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
1940   // Query result may either be a vector or a scalar. If return type is not a
1941   // vector, expect only a single size component. Otherwise get the number of
1942   // expected components.
1943   unsigned NumExpectedRetComponents =
1944       Call->ReturnType->getOpcode() == SPIRV::OpTypeVector
1945           ? Call->ReturnType->getOperand(2).getImm()
1946           : 1;
1947   // Get the actual number of query result/size components.
1948   SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1949   unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
1950   Register QueryResult = Call->ReturnRegister;
1951   SPIRVType *QueryResultType = Call->ReturnType;
// Mismatch: query into a temporary vector register sized for the actual
// component count; the expected shape is reconstructed afterwards.
1952   if (NumExpectedRetComponents != NumActualRetComponents) {
1953     unsigned Bitwidth = Call->ReturnType->getOpcode() == SPIRV::OpTypeInt
1954                             ? Call->ReturnType->getOperand(1).getImm()
1955                             : 32;
1956     QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
1957         LLT::fixed_vector(NumActualRetComponents, Bitwidth));
1958     MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::vIDRegClass);
1959     SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(Bitwidth, MIRBuilder);
1960     QueryResultType = GR->getOrCreateSPIRVVectorType(
1961         IntTy, NumActualRetComponents, MIRBuilder, true);
1962     GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
1963   }
1964   bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
1965   unsigned Opcode =
1966       IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
1967   auto MIB = MIRBuilder.buildInstr(Opcode)
1968                  .addDef(QueryResult)
1969                  .addUse(GR->getSPIRVTypeID(QueryResultType))
1970                  .addUse(Call->Arguments[0]);
1971   if (!IsDimBuf)
1972     MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Lod id.
1973   if (NumExpectedRetComponents == NumActualRetComponents)
1974     return true;
1975   if (NumExpectedRetComponents == 1) {
1976     // Only 1 component is expected, build OpCompositeExtract instruction.
// Component 3 encodes "last component" (used by get_image_dim-style queries).
1977     unsigned ExtractedComposite =
1978         Component == 3 ? NumActualRetComponents - 1 : Component;
1979     assert(ExtractedComposite < NumActualRetComponents &&
1980            "Invalid composite index!");
1981     Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
1982     SPIRVType *NewType = nullptr;
1983     if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
1984       Register NewTypeReg = QueryResultType->getOperand(1).getReg();
1985       if (TypeReg != NewTypeReg &&
1986           (NewType = GR->getSPIRVTypeForVReg(NewTypeReg)) != nullptr)
1987         TypeReg = NewTypeReg;
1988     }
1989     MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1990         .addDef(Call->ReturnRegister)
1991         .addUse(TypeReg)
1992         .addUse(QueryResult)
1993         .addImm(ExtractedComposite);
1994     if (NewType != nullptr)
1995       insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
1996                         MIRBuilder.getMF().getRegInfo());
1997   } else {
1998     // More than 1 component is expected, fill a new vector.
1999     auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
2000                    .addDef(Call->ReturnRegister)
2001                    .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2002                    .addUse(QueryResult)
2003                    .addUse(QueryResult);
2004     for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
2005       MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
2006   }
2007   return true;
2008 }
2009
// Lowers OpImageQuerySamples/OpImageQueryLevels. The dimensionality checks
// are assert-only (compiled out in release builds); ImageDimensionality is
// otherwise unused, hence the (void) cast to silence -Wunused-variable.
// NOTE(review): doxygen line 2010 (function name + Call parameter) is missing
// from this extraction.
2011                                     MachineIRBuilder &MIRBuilder,
2012                                     SPIRVGlobalRegistry *GR) {
2013   assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
2014          "Image samples query result must be of int type!");
2015
2016   // Lookup the instruction opcode in the TableGen records.
2017   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2018   unsigned Opcode =
2019       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2020
2021   Register Image = Call->Arguments[0];
2022   SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
2023       GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
2024   (void)ImageDimensionality;
2025
2026   switch (Opcode) {
2027   case SPIRV::OpImageQuerySamples:
2028     assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
2029            "Image must be of 2D dimensionality");
2030     break;
2031   case SPIRV::OpImageQueryLevels:
2032     assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
2033             ImageDimensionality == SPIRV::Dim::DIM_2D ||
2034             ImageDimensionality == SPIRV::Dim::DIM_3D ||
2035             ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
2036            "Image must be of 1D/2D/3D/Cube dimensionality");
2037     break;
2038   }
2039
2040   MIRBuilder.buildInstr(Opcode)
2041       .addDef(Call->ReturnRegister)
2042       .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2043       .addUse(Image);
2044   return true;
2045 }
2046
2047 // TODO: Move to TableGen.
// Maps the OpenCL sampler addressing-mode bits (CLK_ADDRESS_*) of a sampler
// literal to the SPIR-V SamplerAddressingMode enumeration; any unrecognized
// mode is a fatal error.
// NOTE(review): doxygen line 2049 (the function name + Bitmask parameter) is
// missing from this extraction.
2048 static SPIRV::SamplerAddressingMode::SamplerAddressingMode
2050   switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
2051   case SPIRV::CLK_ADDRESS_CLAMP:
2052     return SPIRV::SamplerAddressingMode::Clamp;
2053   case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
2054     return SPIRV::SamplerAddressingMode::ClampToEdge;
2055   case SPIRV::CLK_ADDRESS_REPEAT:
2056     return SPIRV::SamplerAddressingMode::Repeat;
2057   case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
2058     return SPIRV::SamplerAddressingMode::RepeatMirrored;
2059   case SPIRV::CLK_ADDRESS_NONE:
2060     return SPIRV::SamplerAddressingMode::None;
2061   default:
2062     report_fatal_error("Unknown CL address mode");
2063   }
2064 }
2065
2066static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
2067 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
2068}
2069
// Maps the OpenCL sampler filter bits of a sampler literal to the SPIR-V
// SamplerFilterMode; Nearest serves both as the CLK_FILTER_NEAREST mapping
// and as the fallback default when neither filter bit is set.
// NOTE(review): doxygen line 2071 (the function name + Bitmask parameter) is
// missing from this extraction.
2070 static SPIRV::SamplerFilterMode::SamplerFilterMode
2072   if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
2073     return SPIRV::SamplerFilterMode::Linear;
2074   if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
2075     return SPIRV::SamplerFilterMode::Nearest;
2076   return SPIRV::SamplerFilterMode::Nearest;
2077 }
2078
// Lowers read_image* builtins, in three shapes distinguished by the
// demangled call string: (1) with an OCL sampler — wraps image+sampler in an
// OpSampledImage and samples with an explicit Lod, extracting component 0
// when the builtin's return type is scalar (the sample itself is a
// 4-component vector); (2) MSAA reads — OpImageRead with a Sample image
// operand; (3) plain OpImageRead. The __spirv_ wrapper form is forwarded
// through buildOpFromWrapper.
// NOTE(review): doxygen lines 2080 (the Call parameter of this signature),
// 2097 (middle of the buildConstantSampler argument list — presumably the
// result register and addressing-mode arguments) and 2112 (presumably the
// declaration of Lod as a zero floating-point constant) are missing from
// this extraction.
2079 static bool generateReadImageInst(const StringRef DemangledCall,
2081                                   MachineIRBuilder &MIRBuilder,
2082                                   SPIRVGlobalRegistry *GR) {
2083   if (Call->isSpirvOp())
2084     return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageRead, Call,
2085                               GR->getSPIRVTypeID(Call->ReturnType));
2086   Register Image = Call->Arguments[0];
2087   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2088   bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
2089   bool HasMsaa = DemangledCall.contains_insensitive("msaa");
2090   if (HasOclSampler) {
2091     Register Sampler = Call->Arguments[1];
2092
// A constant integer sampler argument is a sampler literal bitmask;
// materialize it as an OpConstantSampler.
2093     if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
2094         getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
2095       uint64_t SamplerMask = getIConstVal(Sampler, MRI);
2096       Sampler = GR->buildConstantSampler(
2098           getSamplerParamFromBitmask(SamplerMask),
2099           getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder);
2100     }
2101     SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2102     SPIRVType *SampledImageType =
2103         GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2104     Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2105
2106     MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2107         .addDef(SampledImage)
2108         .addUse(GR->getSPIRVTypeID(SampledImageType))
2109         .addUse(Image)
2110         .addUse(Sampler);
2111
2113                                     MIRBuilder);
2114
2115     if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
2116       SPIRVType *TempType =
2117           GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder, true);
2118       Register TempRegister =
2119           MRI->createGenericVirtualRegister(GR->getRegType(TempType));
2120       MRI->setRegClass(TempRegister, GR->getRegClass(TempType));
2121       GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
2122       MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2123           .addDef(TempRegister)
2124           .addUse(GR->getSPIRVTypeID(TempType))
2125           .addUse(SampledImage)
2126           .addUse(Call->Arguments[2]) // Coordinate.
2127           .addImm(SPIRV::ImageOperand::Lod)
2128           .addUse(Lod);
2129       MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
2130           .addDef(Call->ReturnRegister)
2131           .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2132           .addUse(TempRegister)
2133           .addImm(0);
2134     } else {
2135       MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2136           .addDef(Call->ReturnRegister)
2137           .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2138           .addUse(SampledImage)
2139           .addUse(Call->Arguments[2]) // Coordinate.
2140           .addImm(SPIRV::ImageOperand::Lod)
2141           .addUse(Lod);
2142     }
2143   } else if (HasMsaa) {
2144     MIRBuilder.buildInstr(SPIRV::OpImageRead)
2145         .addDef(Call->ReturnRegister)
2146         .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2147         .addUse(Image)
2148         .addUse(Call->Arguments[1]) // Coordinate.
2149         .addImm(SPIRV::ImageOperand::Sample)
2150         .addUse(Call->Arguments[2]);
2151   } else {
2152     MIRBuilder.buildInstr(SPIRV::OpImageRead)
2153         .addDef(Call->ReturnRegister)
2154         .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2155         .addUse(Image)
2156         .addUse(Call->Arguments[1]); // Coordinate.
2157   }
2158   return true;
2159 }
2160
// Lowers write_image* builtins to OpImageWrite (image, coordinate, texel);
// the instruction produces no result id, so the wrapper form passes
// Register(0) as the return type. Always succeeds.
// NOTE(review): doxygen line 2161 (function name + Call parameter) is missing
// from this extraction.
2162                                    MachineIRBuilder &MIRBuilder,
2163                                    SPIRVGlobalRegistry *GR) {
2164   if (Call->isSpirvOp())
2165     return buildOpFromWrapper(MIRBuilder, SPIRV::OpImageWrite, Call,
2166                               Register(0));
2167   MIRBuilder.buildInstr(SPIRV::OpImageWrite)
2168       .addUse(Call->Arguments[0]) // Image.
2169       .addUse(Call->Arguments[1]) // Coordinate.
2170       .addUse(Call->Arguments[2]); // Texel.
2171   return true;
2172 }
2173
// Lowers three sampling-related builtin families, dispatched on the builtin
// name: __translate_sampler_initializer (materialize an OpConstantSampler
// from a literal bitmask), __spirv_SampledImage (create an OpSampledImage),
// and __spirv_ImageSampleExplicitLod (sample with explicit Lod; when the
// return type is not already known, it is parsed from the "_R<type>" suffix
// of the demangled call). Any other name returns false so the caller can
// report an unhandled builtin.
// NOTE(review): doxygen lines 2175 (the Call parameter of this signature),
// 2185 (presumably the getSamplerParamFromBitmask(Bitmask) argument) and
// 2223 (presumably `.addUse(GR->getSPIRVTypeID(Type))`) are missing from
// this extraction.
2174 static bool generateSampleImageInst(const StringRef DemangledCall,
2176                                     MachineIRBuilder &MIRBuilder,
2177                                     SPIRVGlobalRegistry *GR) {
2178   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2179   if (Call->Builtin->Name.contains_insensitive(
2180           "__translate_sampler_initializer")) {
2181     // Build sampler literal.
2182     uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
2183     Register Sampler = GR->buildConstantSampler(
2184         Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
2186         getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder);
2187     return Sampler.isValid();
2188   } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
2189     // Create OpSampledImage.
2190     Register Image = Call->Arguments[0];
2191     SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
2192     SPIRVType *SampledImageType =
2193         GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
2194     Register SampledImage =
2195         Call->ReturnRegister.isValid()
2196             ? Call->ReturnRegister
2197             : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2198     MIRBuilder.buildInstr(SPIRV::OpSampledImage)
2199         .addDef(SampledImage)
2200         .addUse(GR->getSPIRVTypeID(SampledImageType))
2201         .addUse(Image)
2202         .addUse(Call->Arguments[1]); // Sampler.
2203     return true;
2204   } else if (Call->Builtin->Name.contains_insensitive(
2205                  "__spirv_ImageSampleExplicitLod")) {
2206     // Sample an image using an explicit level of detail.
2207     std::string ReturnType = DemangledCall.str();
2208     if (DemangledCall.contains("_R")) {
2209       ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
2210       ReturnType = ReturnType.substr(0, ReturnType.find('('));
2211     }
2212     SPIRVType *Type =
2213         Call->ReturnType
2214             ? Call->ReturnType
2215             : GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder, true);
2216     if (!Type) {
2217       std::string DiagMsg =
2218           "Unable to recognize SPIRV type name: " + ReturnType;
2219       report_fatal_error(DiagMsg.c_str());
2220     }
2221     MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
2222         .addDef(Call->ReturnRegister)
2224         .addUse(Call->Arguments[0]) // Image.
2225         .addUse(Call->Arguments[1]) // Coordinate.
2226         .addImm(SPIRV::ImageOperand::Lod)
2227         .addUse(Call->Arguments[3]);
2228     return true;
2229   }
2230   return false;
2231 }
2232
// Lowers a select builtin to a generic G_SELECT with operands
// (condition, true-value, false-value); always succeeds.
// NOTE(review): doxygen line 2233 (function name + Call parameter) is missing
// from this extraction.
2234                                MachineIRBuilder &MIRBuilder) {
2235   MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
2236                          Call->Arguments[1], Call->Arguments[2]);
2237   return true;
2238 }
2239
// Lowers a composite-construct builtin. createContinuedInstructions chunks
// constituent lists that exceed the per-instruction operand limit into
// OpCompositeConstructContinuedINTEL follow-ups; always succeeds.
// NOTE(review): doxygen line 2240 (function name + Call parameter) is missing
// from this extraction.
2241                                   MachineIRBuilder &MIRBuilder,
2242                                   SPIRVGlobalRegistry *GR) {
2243   createContinuedInstructions(MIRBuilder, SPIRV::OpCompositeConstruct, 3,
2244                               SPIRV::OpCompositeConstructContinuedINTEL,
2245                               Call->Arguments, Call->ReturnRegister,
2246                               GR->getSPIRVTypeID(Call->ReturnType));
2247   return true;
2248 }
2249
// Lowers cooperative-matrix builtins (KHR and INTEL variants). Store and
// prefetch forms yield no result id (IsSet). The position of the optional
// trailing literal — a memory operand, or Cooperative Matrix Operands for
// MulAdd — depends on the opcode. Prefetch is emitted by hand because of its
// immediate cache-level operand; LengthKHR takes the matrix *type* id
// (recovered from the first argument's type) rather than a value operand;
// everything else funnels through buildOpFromWrapper.
// NOTE(review): doxygen lines 2250 (function name + Call parameter) and 2281
// (presumably the declaration of ImmArgs, a SmallVector<uint32_t>) are
// missing from this extraction.
2251                                  MachineIRBuilder &MIRBuilder,
2252                                  SPIRVGlobalRegistry *GR) {
2253   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2254   unsigned Opcode =
2255       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2256   bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
2257                Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
2258                Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
2259   unsigned ArgSz = Call->Arguments.size();
2260   unsigned LiteralIdx = 0;
2261   switch (Opcode) {
2262   // Memory operand is optional and is literal.
2263   case SPIRV::OpCooperativeMatrixLoadKHR:
2264     LiteralIdx = ArgSz > 3 ? 3 : 0;
2265     break;
2266   case SPIRV::OpCooperativeMatrixStoreKHR:
2267     LiteralIdx = ArgSz > 4 ? 4 : 0;
2268     break;
2269   case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
2270     LiteralIdx = ArgSz > 7 ? 7 : 0;
2271     break;
2272   case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
2273     LiteralIdx = ArgSz > 8 ? 8 : 0;
2274     break;
2275   // Cooperative Matrix Operands operand is optional and is literal.
2276   case SPIRV::OpCooperativeMatrixMulAddKHR:
2277     LiteralIdx = ArgSz > 3 ? 3 : 0;
2278     break;
2279   };
2280
2282   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2283   if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {
2284     const uint32_t CacheLevel = getConstFromIntrinsic(Call->Arguments[3], MRI);
2285     auto MIB = MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
2286                    .addUse(Call->Arguments[0]) // pointer
2287                    .addUse(Call->Arguments[1]) // rows
2288                    .addUse(Call->Arguments[2]) // columns
2289                    .addImm(CacheLevel) // cache level
2290                    .addUse(Call->Arguments[4]); // memory layout
2291     if (ArgSz > 5)
2292       MIB.addUse(Call->Arguments[5]); // stride
2293     if (ArgSz > 6) {
2294       const uint32_t MemOp = getConstFromIntrinsic(Call->Arguments[6], MRI);
2295       MIB.addImm(MemOp); // memory operand
2296     }
2297     return true;
2298   }
2299   if (LiteralIdx > 0)
2300     ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
2301   Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2302   if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
2303     SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2304     if (!CoopMatrType)
2305       report_fatal_error("Can't find a register's type definition");
2306     MIRBuilder.buildInstr(Opcode)
2307         .addDef(Call->ReturnRegister)
2308         .addUse(TypeReg)
2309         .addUse(CoopMatrType->getOperand(0).getReg());
2310     return true;
2311   }
2312   return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2313                             IsSet ? TypeReg : Register(0), ImmArgs);
2314 }
2315
// Lowers specialization-constant builtins. For OpSpecConstant: decorates the
// result with SpecId (from the first, constant, argument), then emits
// OpSpecConstantTrue/False for boolean results — chosen by the default
// value — or OpSpecConstant carrying the integer (or bitcast floating-point)
// default as an immediate. OpSpecConstantComposite is chunked through
// OpSpecConstantCompositeContinuedINTEL when the constituent list is long.
// Unknown opcodes return false.
// NOTE(review): doxygen line 2316 (function name + Call parameter) is missing
// from this extraction.
2317                                      MachineIRBuilder &MIRBuilder,
2318                                      SPIRVGlobalRegistry *GR) {
2319   // Lookup the instruction opcode in the TableGen records.
2320   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2321   unsigned Opcode =
2322       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2323   const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2324
2325   switch (Opcode) {
2326   case SPIRV::OpSpecConstant: {
2327     // Build the SpecID decoration.
2328     unsigned SpecId =
2329         static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
2330     buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
2331                     {SpecId});
2332     // Determine the constant MI.
2333     Register ConstRegister = Call->Arguments[1];
2334     const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
2335     assert(Const &&
2336            (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
2337             Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
2338            "Argument should be either an int or floating-point constant");
2339     // Determine the opcode and built the OpSpec MI.
2340     const MachineOperand &ConstOperand = Const->getOperand(1);
2341     if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
2342       assert(ConstOperand.isCImm() && "Int constant operand is expected");
2343       Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
2344                    ? SPIRV::OpSpecConstantTrue
2345                    : SPIRV::OpSpecConstantFalse;
2346     }
2347     auto MIB = MIRBuilder.buildInstr(Opcode)
2348                    .addDef(Call->ReturnRegister)
2349                    .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2350
// Booleans encode the default in the opcode itself; other types append it.
2351     if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
2352       if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
2353         addNumImm(ConstOperand.getCImm()->getValue(), MIB);
2354       else
2355         addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
2356     }
2357     return true;
2358   }
2359   case SPIRV::OpSpecConstantComposite: {
2360     createContinuedInstructions(MIRBuilder, Opcode, 3,
2361                                 SPIRV::OpSpecConstantCompositeContinuedINTEL,
2362                                 Call->Arguments, Call->ReturnRegister,
2363                                 GR->getSPIRVTypeID(Call->ReturnType));
2364     return true;
2365   }
2366   default:
2367     return false;
2368   }
2369 }
2370
// Resolves the extended bit-op builtin's native opcode from the TableGen
// table and delegates emission to buildExtendedBitOpsInst.
// NOTE(review): doxygen line 2371 (function name + Call parameter) is missing
// from this extraction.
2372                                       MachineIRBuilder &MIRBuilder,
2373                                       SPIRVGlobalRegistry *GR) {
2374   // Lookup the instruction opcode in the TableGen records.
2375   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2376   unsigned Opcode =
2377       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2378
2379   return buildExtendedBitOpsInst(Call, Opcode, MIRBuilder, GR);
2380 }
2381
// Resolves the SPV_INTEL_bindless_images builtin's native opcode from the
// TableGen table and delegates emission to buildBindlessImageINTELInst.
// NOTE(review): doxygen line 2382 (function name + Call parameter) is missing
// from this extraction.
2383                                        MachineIRBuilder &MIRBuilder,
2384                                        SPIRVGlobalRegistry *GR) {
2385   // Lookup the instruction opcode in the TableGen records.
2386   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2387   unsigned Opcode =
2388       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2389
2390   return buildBindlessImageINTELInst(Call, Opcode, MIRBuilder, GR);
2391 }
2392
// Resolves the ternary-bitwise-function builtin's native opcode from the
// TableGen table and delegates emission to
// buildTernaryBitwiseFunctionINTELInst.
// NOTE(review): doxygen line 2394 (function name + Call parameter) is missing
// from this extraction.
2393 static bool
2395                                             MachineIRBuilder &MIRBuilder,
2396                                             SPIRVGlobalRegistry *GR) {
2397   // Lookup the instruction opcode in the TableGen records.
2398   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2399   unsigned Opcode =
2400       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2401
2402   return buildTernaryBitwiseFunctionINTELInst(Call, Opcode, MIRBuilder, GR);
2403 }
2404
// Resolves the 2D block-IO builtin's native opcode from the TableGen table
// and delegates emission to build2DBlockIOINTELInst.
// NOTE(review): doxygen line 2405 (function name + Call parameter) is missing
// from this extraction.
2406                                     MachineIRBuilder &MIRBuilder,
2407                                     SPIRVGlobalRegistry *GR) {
2408   // Lookup the instruction opcode in the TableGen records.
2409   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2410   unsigned Opcode =
2411       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2412
2413   return build2DBlockIOINTELInst(Call, Opcode, MIRBuilder, GR);
2414 }
2415
// Resolves the pipe builtin's native opcode and emits it via buildPipeInst
// with Workgroup scope, or Subgroup scope when the builtin name contains
// "sub_group".
// NOTE(review): doxygen line 2416 (function name + Call parameter) is missing
// from this extraction.
2417                              MachineIRBuilder &MIRBuilder,
2418                              SPIRVGlobalRegistry *GR) {
2419   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2420   unsigned Opcode =
2421       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2422
2423   unsigned Scope = SPIRV::Scope::Workgroup;
2424   if (Builtin->Name.contains("sub_group"))
2425     Scope = SPIRV::Scope::Subgroup;
2426
2427   return buildPipeInst(Call, Opcode, Scope, MIRBuilder, GR);
2428 }
2429
// Lowers SPV_INTEL_predicated_io load/store builtins through
// buildOpFromWrapper. Stores (OpPredicatedStoreINTEL) produce no result id;
// a fourth argument, when present, is the memory-operand literal and is
// forwarded as an immediate.
// NOTE(review): doxygen lines 2430 (function name + Call parameter) and 2439
// (presumably the declaration of ImmArgs, a SmallVector<uint32_t>) are
// missing from this extraction.
2431                                             MachineIRBuilder &MIRBuilder,
2432                                             SPIRVGlobalRegistry *GR) {
2433   const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2434   unsigned Opcode =
2435       SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2436
2437   bool IsSet = Opcode != SPIRV::OpPredicatedStoreINTEL;
2438   unsigned ArgSz = Call->Arguments.size();
2440   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2441   // Memory operand is optional and is literal.
2442   if (ArgSz > 3)
2443     ImmArgs.push_back(
2444         getConstFromIntrinsic(Call->Arguments[/*Literal index*/ 3], MRI));
2445
2446   Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2447   return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2448                             IsSet ? TypeReg : Register(0), ImmArgs);
2449 }
2450
// Builds an OpBuildNDRange + OpStore pair for ndrange_{1,2,3}D builtins. The
// first call argument is the destination ndrange_t pointer; the remaining
// 1-3 arguments map onto GlobalWorkSize/LocalWorkSize/GlobalWorkOffset, with
// a matching zero constant (scalar, or integer array for pointer-typed work
// sizes) substituted for absent operands. A pointer-typed work size — clang
// passes 2- and 3-element arrays indirectly — is first loaded through the
// spv_gep that produced it.
// NOTE(review): doxygen lines 2451 (function name + Call parameter), 2459
// (presumably the StructType lookup for TypeReg used below) and 2482
// (presumably the construction of integer BaseTy with BitWidth) are missing
// from this extraction.
2452                              MachineIRBuilder &MIRBuilder,
2453                              SPIRVGlobalRegistry *GR) {
2454   MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2455   SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
2456   assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
2457          PtrType->getOperand(2).isReg());
2458   Register TypeReg = PtrType->getOperand(2).getReg();
2460   MachineFunction &MF = MIRBuilder.getMF();
2461   Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2462   GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
2463   // Skip the first arg, it's the destination pointer. OpBuildNDRange takes
2464   // three other arguments, so pass zero constant on absence.
2465   unsigned NumArgs = Call->Arguments.size();
2466   assert(NumArgs >= 2);
2467   Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
2468   Register LocalWorkSize =
2469       NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
2470   Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
2471   if (NumArgs < 4) {
2472     Register Const;
2473     SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
2474     if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
2475       MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
2476       assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
2477              DefInstr->getOperand(3).isReg());
2478       Register GWSPtr = DefInstr->getOperand(3).getReg();
2479       // TODO: Maybe simplify generation of the type of the fields.
2480       unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
2481       unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
2483       Type *FieldTy = ArrayType::get(BaseTy, Size);
2484       SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(
2485           FieldTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
2486       GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2487       GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
2488       MIRBuilder.buildInstr(SPIRV::OpLoad)
2489           .addDef(GlobalWorkSize)
2490           .addUse(GR->getSPIRVTypeID(SpvFieldTy))
2491           .addUse(GWSPtr);
2492       const SPIRVSubtarget &ST =
2493           cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
2494       Const = GR->getOrCreateConstIntArray(0, Size, *MIRBuilder.getInsertPt(),
2495                                            SpvFieldTy, *ST.getInstrInfo());
2496     } else {
2497       Const = GR->buildConstantInt(0, MIRBuilder, SpvTy, true);
2498     }
2499     if (!LocalWorkSize.isValid())
2500       LocalWorkSize = Const;
2501     if (!GlobalWorkOffset.isValid())
2502       GlobalWorkOffset = Const;
2503   }
2504   assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
2505   MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
2506       .addDef(TmpReg)
2507       .addUse(TypeReg)
2508       .addUse(GlobalWorkSize)
2509       .addUse(LocalWorkSize)
2510       .addUse(GlobalWorkOffset);
2511   return MIRBuilder.buildInstr(SPIRV::OpStore)
2512       .addUse(Call->Arguments[0])
2513       .addUse(TmpReg);
2514 }
2515
2516 // TODO: maybe move to the global register.
// Returns (creating on first use) the SPIR-V type of a Generic-storage-class
// pointer; used below for the dummy event operands of OpEnqueueKernel.
// NOTE(review): doxygen line 2518 (the function name + MIRBuilder parameter)
// is missing from this extraction.
2517 static SPIRVType *
2519                                        SPIRVGlobalRegistry *GR) {
2520   LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
2521   unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2522   Type *PtrType = PointerType::get(Context, SC1);
2523   return GR->getOrCreateSPIRVType(PtrType, MIRBuilder,
2524                                   SPIRV::AccessQualifier::ReadWrite, true);
2525 }
2526
2528 MachineIRBuilder &MIRBuilder,
2529 SPIRVGlobalRegistry *GR) {
2530 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2531 const DataLayout &DL = MIRBuilder.getDataLayout();
2532 bool IsSpirvOp = Call->isSpirvOp();
2533 bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
2534 const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
2535
2536 // Make vararg instructions before OpEnqueueKernel.
2537 // Local sizes arguments: Sizes of block invoke arguments. Clang generates
2538 // local size operands as an array, so we need to unpack them.
2539 SmallVector<Register, 16> LocalSizes;
2540 if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
2541 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2542 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
2543 MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
2544 assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
2545 GepMI->getOperand(3).isReg());
2546 Register ArrayReg = GepMI->getOperand(3).getReg();
2547 MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
2548 const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
2549 assert(LocalSizeTy && "Local size type is expected");
2550 const uint64_t LocalSizeNum =
2551 cast<ArrayType>(LocalSizeTy)->getNumElements();
2552 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2553 const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
2554 const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
2555 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2556 for (unsigned I = 0; I < LocalSizeNum; ++I) {
2557 Register Reg = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
2558 MRI->setType(Reg, LLType);
2559 GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
2560 auto GEPInst = MIRBuilder.buildIntrinsic(
2561 Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
2562 GEPInst
2563 .addImm(GepMI->getOperand(2).getImm()) // In bound.
2564 .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
2565 .addUse(buildConstantIntReg32(0, MIRBuilder, GR)) // Indices.
2566 .addUse(buildConstantIntReg32(I, MIRBuilder, GR));
2567 LocalSizes.push_back(Reg);
2568 }
2569 }
2570
2571 // SPIRV OpEnqueueKernel instruction has 10+ arguments.
2572 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
2573 .addDef(Call->ReturnRegister)
2575
2576 // Copy all arguments before block invoke function pointer.
2577 const unsigned BlockFIdx = HasEvents ? 6 : 3;
2578 for (unsigned i = 0; i < BlockFIdx; i++)
2579 MIB.addUse(Call->Arguments[i]);
2580
2581 // If there are no event arguments in the original call, add dummy ones.
2582 if (!HasEvents) {
2583 MIB.addUse(buildConstantIntReg32(0, MIRBuilder, GR)); // Dummy num events.
2584 Register NullPtr = GR->getOrCreateConstNullPtr(
2585 MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
2586 MIB.addUse(NullPtr); // Dummy wait events.
2587 MIB.addUse(NullPtr); // Dummy ret event.
2588 }
2589
2590 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
2591 assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
2592 // Invoke: Pointer to invoke function.
2593 MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
2594
2595 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
2596 // Param: Pointer to block literal.
2597 MIB.addUse(BlockLiteralReg);
2598
2599 Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
2600 // TODO: these numbers should be obtained from block literal structure.
2601 // Param Size: Size of block literal structure.
2602 MIB.addUse(buildConstantIntReg32(DL.getTypeStoreSize(PType), MIRBuilder, GR));
2603 // Param Alignment: Alignment of block literal structure.
2604 MIB.addUse(buildConstantIntReg32(DL.getPrefTypeAlign(PType).value(),
2605 MIRBuilder, GR));
2606
2607 for (unsigned i = 0; i < LocalSizes.size(); i++)
2608 MIB.addUse(LocalSizes[i]);
2609 return true;
2610}
2611
2613 MachineIRBuilder &MIRBuilder,
2614 SPIRVGlobalRegistry *GR) {
2615 // Lookup the instruction opcode in the TableGen records.
2616 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2617 unsigned Opcode =
2618 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2619
2620 switch (Opcode) {
2621 case SPIRV::OpRetainEvent:
2622 case SPIRV::OpReleaseEvent:
2623 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
2624 case SPIRV::OpCreateUserEvent:
2625 case SPIRV::OpGetDefaultQueue:
2626 return MIRBuilder.buildInstr(Opcode)
2627 .addDef(Call->ReturnRegister)
2628 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2629 case SPIRV::OpIsValidEvent:
2630 return MIRBuilder.buildInstr(Opcode)
2631 .addDef(Call->ReturnRegister)
2632 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2633 .addUse(Call->Arguments[0]);
2634 case SPIRV::OpSetUserEventStatus:
2635 return MIRBuilder.buildInstr(Opcode)
2636 .addUse(Call->Arguments[0])
2637 .addUse(Call->Arguments[1]);
2638 case SPIRV::OpCaptureEventProfilingInfo:
2639 return MIRBuilder.buildInstr(Opcode)
2640 .addUse(Call->Arguments[0])
2641 .addUse(Call->Arguments[1])
2642 .addUse(Call->Arguments[2]);
2643 case SPIRV::OpBuildNDRange:
2644 return buildNDRange(Call, MIRBuilder, GR);
2645 case SPIRV::OpEnqueueKernel:
2646 return buildEnqueueKernel(Call, MIRBuilder, GR);
2647 default:
2648 return false;
2649 }
2650}
2651
2653 MachineIRBuilder &MIRBuilder,
2654 SPIRVGlobalRegistry *GR) {
2655 // Lookup the instruction opcode in the TableGen records.
2656 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2657 unsigned Opcode =
2658 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2659
2660 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2661 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2662 if (Call->isSpirvOp())
2663 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2664 IsSet ? TypeReg : Register(0));
2665
2666 auto Scope = buildConstantIntReg32(SPIRV::Scope::Workgroup, MIRBuilder, GR);
2667
2668 switch (Opcode) {
2669 case SPIRV::OpGroupAsyncCopy: {
2670 SPIRVType *NewType =
2671 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2672 ? nullptr
2673 : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder, true);
2674 Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
2675 unsigned NumArgs = Call->Arguments.size();
2676 Register EventReg = Call->Arguments[NumArgs - 1];
2677 bool Res = MIRBuilder.buildInstr(Opcode)
2678 .addDef(Call->ReturnRegister)
2679 .addUse(TypeReg)
2680 .addUse(Scope)
2681 .addUse(Call->Arguments[0])
2682 .addUse(Call->Arguments[1])
2683 .addUse(Call->Arguments[2])
2684 .addUse(Call->Arguments.size() > 4
2685 ? Call->Arguments[3]
2686 : buildConstantIntReg32(1, MIRBuilder, GR))
2687 .addUse(EventReg);
2688 if (NewType != nullptr)
2689 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2690 MIRBuilder.getMF().getRegInfo());
2691 return Res;
2692 }
2693 case SPIRV::OpGroupWaitEvents:
2694 return MIRBuilder.buildInstr(Opcode)
2695 .addUse(Scope)
2696 .addUse(Call->Arguments[0])
2697 .addUse(Call->Arguments[1]);
2698 default:
2699 return false;
2700 }
2701}
2702
2703static bool generateConvertInst(const StringRef DemangledCall,
2705 MachineIRBuilder &MIRBuilder,
2706 SPIRVGlobalRegistry *GR) {
2707 // Lookup the conversion builtin in the TableGen records.
2708 const SPIRV::ConvertBuiltin *Builtin =
2709 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2710
2711 if (!Builtin && Call->isSpirvOp()) {
2712 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2713 unsigned Opcode =
2714 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2715 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2716 GR->getSPIRVTypeID(Call->ReturnType));
2717 }
2718
2719 assert(Builtin && "Conversion builtin not found.");
2720 if (Builtin->IsSaturated)
2721 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2722 SPIRV::Decoration::SaturatedConversion, {});
2723 if (Builtin->IsRounded)
2724 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2725 SPIRV::Decoration::FPRoundingMode,
2726 {(unsigned)Builtin->RoundingMode});
2727
2728 std::string NeedExtMsg; // no errors if empty
2729 bool IsRightComponentsNumber = true; // check if input/output accepts vectors
2730 unsigned Opcode = SPIRV::OpNop;
2731 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2732 // Int -> ...
2733 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2734 // Int -> Int
2735 if (Builtin->IsSaturated)
2736 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2737 : SPIRV::OpSatConvertSToU;
2738 else
2739 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2740 : SPIRV::OpSConvert;
2741 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2742 SPIRV::OpTypeFloat)) {
2743 // Int -> Float
2744 if (Builtin->IsBfloat16) {
2745 const auto *ST = static_cast<const SPIRVSubtarget *>(
2746 &MIRBuilder.getMF().getSubtarget());
2747 if (!ST->canUseExtension(
2748 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2749 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2750 IsRightComponentsNumber =
2751 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2752 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2753 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2754 } else {
2755 bool IsSourceSigned =
2756 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
2757 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2758 }
2759 }
2760 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2761 SPIRV::OpTypeFloat)) {
2762 // Float -> ...
2763 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2764 // Float -> Int
2765 if (Builtin->IsBfloat16) {
2766 const auto *ST = static_cast<const SPIRVSubtarget *>(
2767 &MIRBuilder.getMF().getSubtarget());
2768 if (!ST->canUseExtension(
2769 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2770 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2771 IsRightComponentsNumber =
2772 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2773 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2774 Opcode = SPIRV::OpConvertFToBF16INTEL;
2775 } else {
2776 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2777 : SPIRV::OpConvertFToU;
2778 }
2779 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2780 SPIRV::OpTypeFloat)) {
2781 if (Builtin->IsTF32) {
2782 const auto *ST = static_cast<const SPIRVSubtarget *>(
2783 &MIRBuilder.getMF().getSubtarget());
2784 if (!ST->canUseExtension(
2785 SPIRV::Extension::SPV_INTEL_tensor_float32_conversion))
2786 NeedExtMsg = "SPV_INTEL_tensor_float32_conversion";
2787 IsRightComponentsNumber =
2788 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2789 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2790 Opcode = SPIRV::OpRoundFToTF32INTEL;
2791 } else {
2792 // Float -> Float
2793 Opcode = SPIRV::OpFConvert;
2794 }
2795 }
2796 }
2797
2798 if (!NeedExtMsg.empty()) {
2799 std::string DiagMsg = std::string(Builtin->Name) +
2800 ": the builtin requires the following SPIR-V "
2801 "extension: " +
2802 NeedExtMsg;
2803 report_fatal_error(DiagMsg.c_str(), false);
2804 }
2805 if (!IsRightComponentsNumber) {
2806 std::string DiagMsg =
2807 std::string(Builtin->Name) +
2808 ": result and argument must have the same number of components";
2809 report_fatal_error(DiagMsg.c_str(), false);
2810 }
2811 assert(Opcode != SPIRV::OpNop &&
2812 "Conversion between the types not implemented!");
2813
2814 MIRBuilder.buildInstr(Opcode)
2815 .addDef(Call->ReturnRegister)
2816 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2817 .addUse(Call->Arguments[0]);
2818 return true;
2819}
2820
2822 MachineIRBuilder &MIRBuilder,
2823 SPIRVGlobalRegistry *GR) {
2824 // Lookup the vector load/store builtin in the TableGen records.
2825 const SPIRV::VectorLoadStoreBuiltin *Builtin =
2826 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2827 Call->Builtin->Set);
2828 // Build extended instruction.
2829 auto MIB =
2830 MIRBuilder.buildInstr(SPIRV::OpExtInst)
2831 .addDef(Call->ReturnRegister)
2832 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2833 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2834 .addImm(Builtin->Number);
2835 for (auto Argument : Call->Arguments)
2836 MIB.addUse(Argument);
2837 if (Builtin->Name.contains("load") && Builtin->ElementCount > 1)
2838 MIB.addImm(Builtin->ElementCount);
2839
2840 // Rounding mode should be passed as a last argument in the MI for builtins
2841 // like "vstorea_halfn_r".
2842 if (Builtin->IsRounded)
2843 MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
2844 return true;
2845}
2846
2848 MachineIRBuilder &MIRBuilder,
2849 SPIRVGlobalRegistry *GR) {
2850 // Lookup the instruction opcode in the TableGen records.
2851 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2852 unsigned Opcode =
2853 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2854 bool IsLoad = Opcode == SPIRV::OpLoad;
2855 // Build the instruction.
2856 auto MIB = MIRBuilder.buildInstr(Opcode);
2857 if (IsLoad) {
2858 MIB.addDef(Call->ReturnRegister);
2859 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
2860 }
2861 // Add a pointer to the value to load/store.
2862 MIB.addUse(Call->Arguments[0]);
2863 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2864 // Add a value to store.
2865 if (!IsLoad)
2866 MIB.addUse(Call->Arguments[1]);
2867 // Add optional memory attributes and an alignment.
2868 unsigned NumArgs = Call->Arguments.size();
2869 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
2870 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
2871 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
2872 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
2873 return true;
2874}
2875
2876namespace SPIRV {
2877// Try to find a builtin function attributes by a demangled function name and
2878// return a tuple <builtin group, op code, ext instruction number>, or a special
2879// tuple value <-1, 0, 0> if the builtin function is not found.
2880// Not all builtin functions are supported, only those with a ready-to-use op
2881// code or instruction number defined in TableGen.
2882// TODO: consider a major rework of mapping demangled calls into a builtin
2883// functions to unify search and decrease number of individual cases.
2884std::tuple<int, unsigned, unsigned>
2885mapBuiltinToOpcode(const StringRef DemangledCall,
2886 SPIRV::InstructionSet::InstructionSet Set) {
2887 Register Reg;
// NOTE(review): extraction gap — original line 2888 (the declaration of
// `Args` passed to lookupBuiltin below, presumably an empty
// SmallVector<Register>) is not visible here; confirm against upstream.
2889 std::unique_ptr<const IncomingCall> Call =
2890 lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
2891 if (!Call)
2892 return std::make_tuple(-1, 0, 0);
2893
// Groups that share the native-opcode TableGen table are folded into one
// label; groups with a single fixed opcode return it directly. Any lookup
// miss falls through to the not-found tuple at the end.
2894 switch (Call->Builtin->Group) {
2895 case SPIRV::Relational:
2896 case SPIRV::Atomic:
2897 case SPIRV::Barrier:
2898 case SPIRV::CastToPtr:
2899 case SPIRV::ImageMiscQuery:
2900 case SPIRV::SpecConstant:
2901 case SPIRV::Enqueue:
2902 case SPIRV::AsyncCopy:
2903 case SPIRV::LoadStore:
2904 case SPIRV::CoopMatr:
2905 if (const auto *R =
2906 SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
2907 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2908 break;
2909 case SPIRV::Extended:
2910 if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
2911 Call->Builtin->Set))
2912 return std::make_tuple(Call->Builtin->Group, 0, R->Number);
2913 break;
2914 case SPIRV::VectorLoadStore:
// Vector load/store builtins lower to extended-set instructions, so they
// are reported under the Extended group rather than their own.
2915 if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2916 Call->Builtin->Set))
2917 return std::make_tuple(SPIRV::Extended, 0, R->Number);
2918 break;
2919 case SPIRV::Group:
2920 if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
2921 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2922 break;
2923 case SPIRV::AtomicFloating:
2924 if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
2925 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2926 break;
2927 case SPIRV::IntelSubgroups:
2928 if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
2929 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2930 break;
2931 case SPIRV::GroupUniform:
2932 if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
2933 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2934 break;
2935 case SPIRV::IntegerDot:
2936 if (const auto *R =
2937 SPIRV::lookupIntegerDotProductBuiltin(Call->Builtin->Name))
2938 return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
2939 break;
2940 case SPIRV::WriteImage:
2941 return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
2942 case SPIRV::Select:
2943 return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
2944 case SPIRV::Construct:
2945 return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
2946 0);
2947 case SPIRV::KernelClock:
2948 return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
2949 default:
2950 return std::make_tuple(-1, 0, 0);
2951 }
2952 return std::make_tuple(-1, 0, 0);
2953}
2954
// Main entry point for lowering a demangled builtin call into MIR.
// Returns std::nullopt when no TableGen builtin record matches DemangledCall
// (the caller should then treat it as an ordinary call); otherwise forwards
// the true/false success result of the group-specific generate* handler.
2955std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
2956 SPIRV::InstructionSet::InstructionSet Set,
2957 MachineIRBuilder &MIRBuilder,
2958 const Register OrigRet, const Type *OrigRetTy,
2959 const SmallVectorImpl<Register> &Args,
2960 SPIRVGlobalRegistry *GR, const CallBase &CB) {
2961 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
2962
2963 // Lookup the builtin in the TableGen records.
2964 SPIRVType *SpvType = GR->getSPIRVTypeForVReg(OrigRet);
2965 assert(SpvType && "Inconsistent return register: expected valid type info");
2966 std::unique_ptr<const IncomingCall> Call =
2967 lookupBuiltin(DemangledCall, Set, OrigRet, SpvType, Args);
2968
2969 if (!Call) {
2970 LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
2971 return std::nullopt;
2972 }
2973
2974 // TODO: check if the provided args meet the builtin requirements.
2975 assert(Args.size() >= Call->Builtin->MinNumArgs &&
2976 "Too few arguments to generate the builtin");
// Extra arguments are tolerated (debug-logged only); MaxNumArgs == 0 means
// the record places no upper bound.
2977 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
2978 LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");
2979
2980 // Match the builtin with implementation based on the grouping.
// Each case delegates to the dedicated generator for that TableGen group;
// the generators own all instruction building.
2981 switch (Call->Builtin->Group) {
2982 case SPIRV::Extended:
2983 return generateExtInst(Call.get(), MIRBuilder, GR, CB);
2984 case SPIRV::Relational:
2985 return generateRelationalInst(Call.get(), MIRBuilder, GR);
2986 case SPIRV::Group:
2987 return generateGroupInst(Call.get(), MIRBuilder, GR);
2988 case SPIRV::Variable:
2989 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
2990 case SPIRV::Atomic:
2991 return generateAtomicInst(Call.get(), MIRBuilder, GR);
2992 case SPIRV::AtomicFloating:
2993 return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
2994 case SPIRV::Barrier:
2995 return generateBarrierInst(Call.get(), MIRBuilder, GR);
2996 case SPIRV::CastToPtr:
2997 return generateCastToPtrInst(Call.get(), MIRBuilder, GR);
2998 case SPIRV::Dot:
2999 case SPIRV::IntegerDot:
3000 return generateDotOrFMulInst(DemangledCall, Call.get(), MIRBuilder, GR);
3001 case SPIRV::Wave:
3002 return generateWaveInst(Call.get(), MIRBuilder, GR);
3003 case SPIRV::ICarryBorrow:
3004 return generateICarryBorrowInst(Call.get(), MIRBuilder, GR);
3005 case SPIRV::GetQuery:
3006 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
3007 case SPIRV::ImageSizeQuery:
3008 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
3009 case SPIRV::ImageMiscQuery:
3010 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
3011 case SPIRV::ReadImage:
3012 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
3013 case SPIRV::WriteImage:
3014 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
3015 case SPIRV::SampleImage:
3016 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
3017 case SPIRV::Select:
3018 return generateSelectInst(Call.get(), MIRBuilder);
3019 case SPIRV::Construct:
3020 return generateConstructInst(Call.get(), MIRBuilder, GR);
3021 case SPIRV::SpecConstant:
3022 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
3023 case SPIRV::Enqueue:
3024 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
3025 case SPIRV::AsyncCopy:
3026 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
3027 case SPIRV::Convert:
3028 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
3029 case SPIRV::VectorLoadStore:
3030 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
3031 case SPIRV::LoadStore:
3032 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
3033 case SPIRV::IntelSubgroups:
3034 return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
3035 case SPIRV::GroupUniform:
3036 return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
3037 case SPIRV::KernelClock:
3038 return generateKernelClockInst(Call.get(), MIRBuilder, GR);
3039 case SPIRV::CoopMatr:
3040 return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
3041 case SPIRV::ExtendedBitOps:
3042 return generateExtendedBitOpsInst(Call.get(), MIRBuilder, GR);
3043 case SPIRV::BindlessINTEL:
3044 return generateBindlessImageINTELInst(Call.get(), MIRBuilder, GR);
3045 case SPIRV::TernaryBitwiseINTEL:
3046 return generateTernaryBitwiseFunctionINTELInst(Call.get(), MIRBuilder, GR);
3047 case SPIRV::Block2DLoadStore:
3048 return generate2DBlockIOINTELInst(Call.get(), MIRBuilder, GR);
3049 case SPIRV::Pipe:
3050 return generatePipeInst(Call.get(), MIRBuilder, GR);
3051 case SPIRV::PredicatedLoadStore:
3052 return generatePredicatedLoadStoreInst(Call.get(), MIRBuilder, GR);
3053 }
// Unreached for the enumerated groups above; keeps a defined result if a new
// group value is added without a case.
3054 return false;
3055}
3056
3058 // Parse strings representing OpenCL builtin types.
3059 if (hasBuiltinTypePrefix(TypeStr)) {
3060 // OpenCL builtin types in demangled call strings have the following format:
3061 // e.g. ocl_image2d_ro
3062 [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
3063 assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");
3064
3065 // Check if this is pointer to a builtin type and not just pointer
3066 // representing a builtin type. In case it is a pointer to builtin type,
3067 // this will require additional handling in the method calling
3068 // parseBuiltinCallArgumentBaseType(...) as this function only retrieves the
3069 // base types.
3070 if (TypeStr.ends_with("*"))
3071 TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));
3072
3073 return parseBuiltinTypeNameToTargetExtType("opencl." + TypeStr.str() + "_t",
3074 Ctx);
3075 }
3076
3077 // Parse type name in either "typeN" or "type vector[N]" format, where
3078 // N is the number of elements of the vector.
3079 Type *BaseType;
3080 unsigned VecElts = 0;
3081
3082 BaseType = parseBasicTypeName(TypeStr, Ctx);
3083 if (!BaseType)
3084 // Unable to recognize SPIRV type name.
3085 return nullptr;
3086
3087 // Handle "typeN*" or "type vector[N]*".
3088 TypeStr.consume_back("*");
3089
3090 if (TypeStr.consume_front(" vector["))
3091 TypeStr = TypeStr.substr(0, TypeStr.find(']'));
3092
3093 TypeStr.getAsInteger(10, VecElts);
3094 if (VecElts > 0)
3096 BaseType->isVoidTy() ? Type::getInt8Ty(Ctx) : BaseType, VecElts, false);
3097
3098 return BaseType;
3099}
3100
3102 const StringRef DemangledCall, LLVMContext &Ctx) {
3103 auto Pos1 = DemangledCall.find('(');
3104 if (Pos1 == StringRef::npos)
3105 return false;
3106 auto Pos2 = DemangledCall.find(')');
3107 if (Pos2 == StringRef::npos || Pos1 > Pos2)
3108 return false;
3109 DemangledCall.slice(Pos1 + 1, Pos2)
3110 .split(BuiltinArgsTypeStrs, ',', -1, false);
3111 return true;
3112}
3113
3115 unsigned ArgIdx, LLVMContext &Ctx) {
3116 SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
3117 parseBuiltinTypeStr(BuiltinArgsTypeStrs, DemangledCall, Ctx);
3118 if (ArgIdx >= BuiltinArgsTypeStrs.size())
3119 return nullptr;
3120 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
3121 return parseBuiltinCallArgumentType(TypeStr, Ctx);
3122}
3123
3128
3129#define GET_BuiltinTypes_DECL
3130#define GET_BuiltinTypes_IMPL
3131
3136
3137#define GET_OpenCLTypes_DECL
3138#define GET_OpenCLTypes_IMPL
3139
3140#include "SPIRVGenTables.inc"
3141} // namespace SPIRV
3142
3143//===----------------------------------------------------------------------===//
3144// Misc functions for parsing builtin types.
3145//===----------------------------------------------------------------------===//
3146
3147static Type *parseTypeString(const StringRef Name, LLVMContext &Context) {
3148 if (Name.starts_with("void"))
3149 return Type::getVoidTy(Context);
3150 else if (Name.starts_with("int") || Name.starts_with("uint"))
3151 return Type::getInt32Ty(Context);
3152 else if (Name.starts_with("float"))
3153 return Type::getFloatTy(Context);
3154 else if (Name.starts_with("half"))
3155 return Type::getHalfTy(Context);
3156 report_fatal_error("Unable to recognize type!");
3157}
3158
3159//===----------------------------------------------------------------------===//
3160// Implementation functions for builtin types.
3161//===----------------------------------------------------------------------===//
3162
3164 const SPIRV::BuiltinType *TypeRecord,
3165 MachineIRBuilder &MIRBuilder,
3166 SPIRVGlobalRegistry *GR) {
3167 unsigned Opcode = TypeRecord->Opcode;
3168 // Create or get an existing type from GlobalRegistry.
3169 return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
3170}
3171
3173 SPIRVGlobalRegistry *GR) {
3174 // Create or get an existing type from GlobalRegistry.
3175 return GR->getOrCreateOpTypeSampler(MIRBuilder);
3176}
3177
3178static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
3179 MachineIRBuilder &MIRBuilder,
3180 SPIRVGlobalRegistry *GR) {
3181 assert(ExtensionType->getNumIntParameters() == 1 &&
3182 "Invalid number of parameters for SPIR-V pipe builtin!");
3183 // Create or get an existing type from GlobalRegistry.
3184 return GR->getOrCreateOpTypePipe(MIRBuilder,
3185 SPIRV::AccessQualifier::AccessQualifier(
3186 ExtensionType->getIntParameter(0)));
3187}
3188
3189static SPIRVType *getCoopMatrType(const TargetExtType *ExtensionType,
3190 MachineIRBuilder &MIRBuilder,
3191 SPIRVGlobalRegistry *GR) {
3192 assert(ExtensionType->getNumIntParameters() == 4 &&
3193 "Invalid number of parameters for SPIR-V coop matrices builtin!");
3194 assert(ExtensionType->getNumTypeParameters() == 1 &&
3195 "SPIR-V coop matrices builtin type must have a type parameter!");
3196 const SPIRVType *ElemType =
3197 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder,
3198 SPIRV::AccessQualifier::ReadWrite, true);
3199 // Create or get an existing type from GlobalRegistry.
3200 return GR->getOrCreateOpTypeCoopMatr(
3201 MIRBuilder, ExtensionType, ElemType, ExtensionType->getIntParameter(0),
3202 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
3203 ExtensionType->getIntParameter(3), true);
3204}
3205
3207 MachineIRBuilder &MIRBuilder,
3208 SPIRVGlobalRegistry *GR) {
3209 SPIRVType *OpaqueImageType = GR->getImageType(
3210 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder);
3211 // Create or get an existing type from GlobalRegistry.
3212 return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
3213}
3214
3215static SPIRVType *getInlineSpirvType(const TargetExtType *ExtensionType,
3216 MachineIRBuilder &MIRBuilder,
3217 SPIRVGlobalRegistry *GR) {
3218 assert(ExtensionType->getNumIntParameters() == 3 &&
3219 "Inline SPIR-V type builtin takes an opcode, size, and alignment "
3220 "parameter");
3221 auto Opcode = ExtensionType->getIntParameter(0);
3222
3223 SmallVector<MCOperand> Operands;
3224 for (Type *Param : ExtensionType->type_params()) {
3225 if (const TargetExtType *ParamEType = dyn_cast<TargetExtType>(Param)) {
3226 if (ParamEType->getName() == "spirv.IntegralConstant") {
3227 assert(ParamEType->getNumTypeParameters() == 1 &&
3228 "Inline SPIR-V integral constant builtin must have a type "
3229 "parameter");
3230 assert(ParamEType->getNumIntParameters() == 1 &&
3231 "Inline SPIR-V integral constant builtin must have a "
3232 "value parameter");
3233
3234 auto OperandValue = ParamEType->getIntParameter(0);
3235 auto *OperandType = ParamEType->getTypeParameter(0);
3236
3237 const SPIRVType *OperandSPIRVType = GR->getOrCreateSPIRVType(
3238 OperandType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3239
3241 OperandValue, MIRBuilder, OperandSPIRVType, true)));
3242 continue;
3243 } else if (ParamEType->getName() == "spirv.Literal") {
3244 assert(ParamEType->getNumTypeParameters() == 0 &&
3245 "Inline SPIR-V literal builtin does not take type "
3246 "parameters");
3247 assert(ParamEType->getNumIntParameters() == 1 &&
3248 "Inline SPIR-V literal builtin must have an integer "
3249 "parameter");
3250
3251 auto OperandValue = ParamEType->getIntParameter(0);
3252
3253 Operands.push_back(MCOperand::createImm(OperandValue));
3254 continue;
3255 }
3256 }
3257 const SPIRVType *TypeOperand = GR->getOrCreateSPIRVType(
3258 Param, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, true);
3259 Operands.push_back(MCOperand::createReg(GR->getSPIRVTypeID(TypeOperand)));
3260 }
3261
3262 return GR->getOrCreateUnknownType(ExtensionType, MIRBuilder, Opcode,
3263 Operands);
3264}
3265
3266static SPIRVType *getVulkanBufferType(const TargetExtType *ExtensionType,
3267 MachineIRBuilder &MIRBuilder,
3268 SPIRVGlobalRegistry *GR) {
3269 assert(ExtensionType->getNumTypeParameters() == 1 &&
3270 "Vulkan buffers have exactly one type for the type of the buffer.");
3271 assert(ExtensionType->getNumIntParameters() == 2 &&
3272 "Vulkan buffer have 2 integer parameters: storage class and is "
3273 "writable.");
3274
3275 auto *T = ExtensionType->getTypeParameter(0);
3276 auto SC = static_cast<SPIRV::StorageClass::StorageClass>(
3277 ExtensionType->getIntParameter(0));
3278 bool IsWritable = ExtensionType->getIntParameter(1);
3279 return GR->getOrCreateVulkanBufferType(MIRBuilder, T, SC, IsWritable);
3280}
3281
3282static SPIRVType *getLayoutType(const TargetExtType *ExtensionType,
3283 MachineIRBuilder &MIRBuilder,
3284 SPIRVGlobalRegistry *GR) {
3285 return GR->getOrCreateLayoutType(MIRBuilder, ExtensionType);
3286}
3287
3288namespace SPIRV {
3290 LLVMContext &Context) {
3291 StringRef NameWithParameters = TypeName;
3292
3293 // Pointers-to-opaque-structs representing OpenCL types are first translated
3294 // to equivalent SPIR-V types. OpenCL builtin type names should have the
3295 // following format: e.g. %opencl.event_t
3296 if (NameWithParameters.starts_with("opencl.")) {
3297 const SPIRV::OpenCLType *OCLTypeRecord =
3298 SPIRV::lookupOpenCLType(NameWithParameters);
3299 if (!OCLTypeRecord)
3300 report_fatal_error("Missing TableGen record for OpenCL type: " +
3301 NameWithParameters);
3302 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
3303 // Continue with the SPIR-V builtin type...
3304 }
3305
3306 // Names of the opaque structs representing a SPIR-V builtins without
3307 // parameters should have the following format: e.g. %spirv.Event
3308 assert(NameWithParameters.starts_with("spirv.") &&
3309 "Unknown builtin opaque type!");
3310
3311 // Parameterized SPIR-V builtins names follow this format:
3312 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
3313 if (!NameWithParameters.contains('_'))
3314 return TargetExtType::get(Context, NameWithParameters);
3315
3316 SmallVector<StringRef> Parameters;
3317 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
3318 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
3319
3320 SmallVector<Type *, 1> TypeParameters;
3321 bool HasTypeParameter = !isDigit(Parameters[0][0]);
3322 if (HasTypeParameter)
3323 TypeParameters.push_back(parseTypeString(Parameters[0], Context));
3324 SmallVector<unsigned> IntParameters;
3325 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
3326 unsigned IntParameter = 0;
3327 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
3328 (void)ValidLiteral;
3329 assert(ValidLiteral &&
3330 "Invalid format of SPIR-V builtin parameter literal!");
3331 IntParameters.push_back(IntParameter);
3332 }
3333 return TargetExtType::get(Context,
3334 NameWithParameters.substr(0, BaseNameLength),
3335 TypeParameters, IntParameters);
3336}
3337
3339 SPIRV::AccessQualifier::AccessQualifier AccessQual,
3340 MachineIRBuilder &MIRBuilder,
3341 SPIRVGlobalRegistry *GR) {
3342 // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
3343 // target(...) target extension types or pointers-to-opaque-structs. The
3344 // approach relying on structs is deprecated and works only in the non-opaque
3345 // pointer mode (-opaque-pointers=0).
3346 // In order to maintain compatibility with LLVM IR generated by older versions
3347 // of Clang and LLVM/SPIR-V Translator, the pointers-to-opaque-structs are
3348 // "translated" to target extension types. This translation is temporary and
3349 // will be removed in the future release of LLVM.
3351 if (!BuiltinType)
3353 OpaqueType->getStructName().str(), MIRBuilder.getContext());
3354
3355 unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
3356
3357 const StringRef Name = BuiltinType->getName();
3358 LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
3359
3360 SPIRVType *TargetType;
3361 if (Name == "spirv.Type") {
3362 TargetType = getInlineSpirvType(BuiltinType, MIRBuilder, GR);
3363 } else if (Name == "spirv.VulkanBuffer") {
3364 TargetType = getVulkanBufferType(BuiltinType, MIRBuilder, GR);
3365 } else if (Name == "spirv.Layout") {
3366 TargetType = getLayoutType(BuiltinType, MIRBuilder, GR);
3367 } else {
3368 // Lookup the demangled builtin type in the TableGen records.
3369 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
3370 if (!TypeRecord)
3371 report_fatal_error("Missing TableGen record for builtin type: " + Name);
3372
3373 // "Lower" the BuiltinType into TargetType. The following get<...>Type
3374 // methods use the implementation details from TableGen records or
3375 // TargetExtType parameters to either create a new OpType<...> machine
3376 // instruction or get an existing equivalent SPIRVType from
3377 // GlobalRegistry.
3378
3379 switch (TypeRecord->Opcode) {
3380 case SPIRV::OpTypeImage:
3381 TargetType = GR->getImageType(BuiltinType, AccessQual, MIRBuilder);
3382 break;
3383 case SPIRV::OpTypePipe:
3384 TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
3385 break;
3386 case SPIRV::OpTypeDeviceEvent:
3387 TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
3388 break;
3389 case SPIRV::OpTypeSampler:
3390 TargetType = getSamplerType(MIRBuilder, GR);
3391 break;
3392 case SPIRV::OpTypeSampledImage:
3393 TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
3394 break;
3395 case SPIRV::OpTypeCooperativeMatrixKHR:
3396 TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
3397 break;
3398 default:
3399 TargetType =
3400 getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
3401 break;
3402 }
3403 }
3404
3405 // Emit OpName instruction if a new OpType<...> instruction was added
3406 // (equivalent type was not found in GlobalRegistry).
3407 if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
3408 buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
3409
3410 return TargetType;
3411}
3412} // namespace SPIRV
3413} // namespace llvm
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
AMDGPU Lower Kernel Arguments
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition MD5.cpp:58
Register Reg
Promote Memory to Register
Definition Mem2Reg.cpp:110
#define T
spirv structurize SPIRV
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file contains some functions that are useful when dealing with strings.
#define LLVM_DEBUG(...)
Definition Debug.h:114
static const fltSemantics & IEEEsingle()
Definition APFloat.h:296
APInt bitcastToAPInt() const
Definition APFloat.h:1335
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Definition APFloat.h:1061
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition APInt.h:234
uint64_t getZExtValue() const
Get zero extended value.
Definition APInt.h:1540
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
static LLVM_ABI ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ ICMP_NE
not equal
Definition InstrTypes.h:698
const APFloat & getValueAPF() const
Definition Constants.h:320
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition Constants.h:154
A parsed version of the target data layout string in and methods for querying it.
Definition DataLayout.h:63
Tagged union holding either a T or a Error.
Definition Error.h:485
Class to represent fixed width SIMD vectors.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition Function.cpp:359
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition Type.cpp:319
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
This is an important class for using LLVM in a threaded context.
Definition LLVMContext.h:68
static MCOperand createReg(MCRegister Reg)
Definition MCInst.h:138
static MCOperand createImm(int64_t Val)
Definition MCInst.h:145
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
LLVMContext & getContext() const
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
const DataLayout & getDataLayout() const
virtual MachineInstrBuilder buildConstant(const DstOp &Res, const ConstantInt &Val)
Build and insert Res = G_CONSTANT Val.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
LLVM_ABI void copyIRFlags(const Instruction &I)
Copy all flags to MachineInst MIFlags.
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
const MDNode * getMetadata() const
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
LLVM_ABI void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
static LLVM_ABI PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Definition Register.h:19
constexpr bool isValid() const
Definition Register.h:107
SPIRVType * getOrCreateOpTypePipe(MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder, bool EmitIR)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, const MachineFunction &MF)
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, const std::optional< SPIRV::LinkageType::LinkageType > &LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
SPIRVType * getOrCreateUnknownType(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode, const ArrayRef< MCOperand > Operands)
unsigned getScalarOrVectorComponentCount(Register VReg) const
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
SPIRVType * getOrCreateSPIRVPointerType(const Type *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC)
SPIRVType * getOrCreateOpTypeByOpcode(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
SPIRVType * getPointeeType(SPIRVType *PtrType)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
SPIRVType * getOrCreateOpTypeSampledImage(SPIRVType *ImageType, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateVulkanBufferType(MachineIRBuilder &MIRBuilder, Type *ElemType, SPIRV::StorageClass::StorageClass SC, bool IsWritable, bool EmitIr=false)
SPIRVType * getOrCreateSPIRVTypeByName(StringRef TypeStr, MachineIRBuilder &MIRBuilder, bool EmitIR, SPIRV::StorageClass::StorageClass SC=SPIRV::StorageClass::Function, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite)
SPIRVType * getOrCreateLayoutType(MachineIRBuilder &MIRBuilder, const TargetExtType *T, bool EmitIr=false)
Register getOrCreateConsIntVector(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR)
const TargetRegisterClass * getRegClass(SPIRVType *SpvType) const
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder, bool EmitIR)
SPIRVType * getOrCreateOpTypeCoopMatr(MachineIRBuilder &MIRBuilder, const TargetExtType *ExtensionType, const SPIRVType *ElemType, uint32_t Scope, uint32_t Rows, uint32_t Columns, uint32_t Use, bool EmitIR)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRVType * getOrCreateOpTypeDeviceEvent(MachineIRBuilder &MIRBuilder)
SPIRVType * getImageType(const TargetExtType *ExtensionType, const SPIRV::AccessQualifier::AccessQualifier Qualifier, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
Register buildConstantInt(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR, bool ZeroAsNull=true)
LLT getRegType(SPIRVType *SpvType) const
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
SPIRVType * getOrCreateOpTypeSampler(MachineIRBuilder &MIRBuilder)
Register buildConstantSampler(Register Res, unsigned AddrMode, unsigned Param, unsigned FilerMode, MachineIRBuilder &MIRBuilder)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
Definition StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition StringRef.h:702
static constexpr size_t npos
Definition StringRef.h:57
bool consume_back(StringRef Suffix)
Returns true if this StringRef has the given suffix and removes that suffix.
Definition StringRef.h:657
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition StringRef.h:472
std::string str() const
str - Get the contents as an std::string.
Definition StringRef.h:225
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition StringRef.h:573
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition StringRef.h:261
bool contains_insensitive(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition StringRef.h:438
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Definition StringRef.h:686
constexpr size_t size() const
size - Get the string size.
Definition StringRef.h:146
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition StringRef.h:426
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition StringRef.h:637
size_t find_first_of(char C, size_t From=0) const
Find the first character in the string that is C, or npos if not found.
Definition StringRef.h:376
size_t rfind(char C, size_t From=npos) const
Search for the last character C in the string.
Definition StringRef.h:345
size_t find(char C, size_t From=0) const
Search for the first character C in the string.
Definition StringRef.h:293
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition StringRef.h:273
A switch()-like statement whose cases are string literals.
StringSwitch & EndsWith(StringLiteral S, T Value)
Class to represent struct types.
Class to represent target extensions types, which are generally unintrospectable from target-independ...
ArrayRef< Type * > type_params() const
Return the type parameters for this particular target extension type.
unsigned getNumIntParameters() const
static LLVM_ABI TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
Definition Type.cpp:908
Type * getTypeParameter(unsigned i) const
unsigned getNumTypeParameters() const
unsigned getIntParameter(unsigned i) const
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:297
LLVM_ABI StringRef getStructName() const
static LLVM_ABI Type * getVoidTy(LLVMContext &C)
Definition Type.cpp:281
static LLVM_ABI IntegerType * getInt8Ty(LLVMContext &C)
Definition Type.cpp:295
static LLVM_ABI Type * getFloatTy(LLVMContext &C)
Definition Type.cpp:285
static LLVM_ABI Type * getHalfTy(LLVMContext &C)
Definition Type.cpp:283
LLVM Value Representation.
Definition Value.h:75
LLVM_ABI Value(Type *Ty, unsigned scid)
Definition Value.cpp:53
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Represents a version number in the form major[.minor[.subminor[.build]]].
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition ilist_node.h:348
CallInst * Call
LLVM_C_ABI LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount)
Create a vector type that contains a defined type and has a specific number of elements.
Definition Core.cpp:883
std::string lookupBuiltinNameHelper(StringRef DemangledCall, FPDecorationId *DecorationId)
Parses the name part of the demangled builtin call.
Type * parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx)
bool parseBuiltinTypeStr(SmallVector< StringRef, 10 > &BuiltinArgsTypeStrs, const StringRef DemangledCall, LLVMContext &Ctx)
std::optional< bool > lowerBuiltin(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, MachineIRBuilder &MIRBuilder, const Register OrigRet, const Type *OrigRetTy, const SmallVectorImpl< Register > &Args, SPIRVGlobalRegistry *GR, const CallBase &CB)
std::tuple< int, unsigned, unsigned > mapBuiltinToOpcode(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set)
Helper function for finding a builtin function attributes by a demangled function name.
Type * parseBuiltinCallArgumentBaseType(const StringRef DemangledCall, unsigned ArgIdx, LLVMContext &Ctx)
Parses the provided ArgIdx argument base type in the DemangledCall skeleton.
TargetExtType * parseBuiltinTypeNameToTargetExtType(std::string TypeName, LLVMContext &Context)
Translates a string representing a SPIR-V or OpenCL builtin type to a TargetExtType that can be furth...
SPIRVType * lowerBuiltinType(const Type *OpaqueType, SPIRV::AccessQualifier::AccessQualifier AccessQual, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
static bool build2DBlockIOINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's 2d block io instructions.
static SPIRVType * getVulkanBufferType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, const CallBase &CB)
void insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB, MachineRegisterInfo &MRI)
Helper external function for inserting ASSIGN_TYPE instuction between Reg and its definition,...
static bool generateBindlessImageINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateGetQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getInlineSpirvType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConstructInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic flag instructions (e.g.
static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::SamplerFilterMode::SamplerFilterMode getSamplerFilterModeFromBitmask(unsigned Bitmask)
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic store instruction.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:643
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
Definition InstrProf.h:296
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
static bool buildExtendedBitOpsInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building extended bit operations.
static const Type * getBlockStructType(Register ParamReg, MachineRegisterInfo *MRI)
static bool generateGroupInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
FPDecorationId demangledPostfixToDecorationId(const std::string &S)
Definition SPIRVUtils.h:529
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim)
static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildScopeReg(Register CLScopeRegister, SPIRV::Scope::Scope Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI)
FPDecorationId
Definition SPIRVUtils.h:527
static std::tuple< Register, SPIRVType * > buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType, SPIRVGlobalRegistry *GR)
Helper function building either a resulting scalar or vector bool register depending on the expected ...
static unsigned getNumSizeComponents(SPIRVType *imgType)
Helper function for obtaining the number of size components.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
static Register buildConstantIntReg32(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getSampledImageType(const TargetExtType *OpaqueType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
Definition SPIRVUtils.h:239
static bool generateSampleImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBarrierInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI void SplitString(StringRef Source, SmallVectorImpl< StringRef > &OutFragments, StringRef Delimiters=" \t\n\v\f\r")
SplitString - Split up the specified string according to the specified delimiters,...
static SPIRVType * getCoopMatrType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateKernelClockInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI, SPIRVGlobalRegistry *GR)
static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateWaveInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building barriers, i.e., memory/control ordering operations.
static bool generateAsyncCopy(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope)
static SPIRVType * getSamplerType(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition Debug.cpp:207
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, LLT LowLevelType, Register DestinationReg=Register(0))
Helper function for building a load instruction loading into the DestinationReg.
static bool generateEnqueueInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
Definition Error.cpp:167
static bool buildSelectInst(MachineIRBuilder &MIRBuilder, Register ReturnRegister, Register SourceRegister, const SPIRVType *ReturnType, SPIRVGlobalRegistry *GR)
Helper function for building either a vector or scalar select instruction depending on the expected R...
static const Type * getMachineInstrType(MachineInstr *MI)
bool isDigit(char C)
Checks if character C is one of the 10 decimal digits.
static SPIRV::SamplerAddressingMode::SamplerAddressingMode getSamplerAddressingModeFromBitmask(unsigned Bitmask)
static bool generateAtomicInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
const MachineInstr SPIRVType
static SPIRVType * getLayoutType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateDotOrFMulInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder, SPIRVType *VariableType, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType, Register Reg=Register(0), bool isConst=true, const std::optional< SPIRV::LinkageType::LinkageType > &LinkageTy={ SPIRV::LinkageType::Import})
Helper function for building a load instruction for loading a builtin global variable of BuiltinValue...
static bool generateConvertInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
Definition SPIRVUtils.h:224
static Register buildMemSemanticsReg(Register SemanticsRegister, Register PtrRegister, unsigned &Semantics, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI)
static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSelectInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic load instruction.
static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtendedBitOpsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildPipeInst(const SPIRV::IncomingCall *Call, unsigned Opcode, unsigned Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx)
static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, uint64_t DefaultValue)
static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildBindlessImageINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's bindless image instructions.
static std::unique_ptr< const SPIRV::IncomingCall > lookupBuiltin(StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
Looks up the demangled builtin call in the SPIRVBuiltins.td records using the provided DemangledCall ...
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic floating-type instruction.
MachineInstr * getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI)
constexpr unsigned BitWidth
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1867
static bool generate2DBlockIOINTELInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateReadImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:559
bool hasBuiltinTypePrefix(StringRef Name)
static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * getMDOperandAsType(const MDNode *N, unsigned I)
static bool generatePipeInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildTernaryBitwiseFunctionINTELInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building Intel's OpBitwiseFunctionINTEL instruction.
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic instructions.
static SPIRV::MemorySemantics::MemorySemantics getSPIRVMemSemantics(std::memory_order MemOrder)
static bool generateRelationalInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
Helper function for translating atomic init to OpStore.
static bool generateWriteImageInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getPipeType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Type * parseTypeString(const StringRef Name, LLVMContext &Context)
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
static bool generatePredicatedLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildNDRange(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getNonParameterizedType(const TargetExtType *ExtensionType, const SPIRV::BuiltinType *TypeRecord, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static MachineInstr * getBlockStructInstr(Register ParamReg, MachineRegisterInfo *MRI)
static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode, const SPIRV::IncomingCall *Call, Register TypeReg, ArrayRef< uint32_t > ImmArgs={})
static unsigned getSamplerParamFromBitmask(unsigned Bitmask)
static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic compare-exchange instruction.
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
static bool generateBuiltinVar(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Implement std::hash so that hash_code can be used in STL containers.
Definition BitVector.h:867
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition Alignment.h:39
This class contains a discriminated union of information about pointers in memory operands,...
FPRoundingMode::FPRoundingMode RoundingMode
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
const SmallVectorImpl< Register > & Arguments
const std::string BuiltinName
const SPIRVType * ReturnType
const DemangledBuiltin * Builtin
IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin, const Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
FPRoundingMode::FPRoundingMode RoundingMode