LLVM 20.0.0git
SPIRVBuiltins.cpp
Go to the documentation of this file.
1//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements lowering builtin function calls and types using their
10// demangled names and TableGen records.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SPIRVBuiltins.h"
15#include "SPIRV.h"
16#include "SPIRVSubtarget.h"
17#include "SPIRVUtils.h"
20#include "llvm/IR/IntrinsicsSPIRV.h"
21#include <string>
22#include <tuple>
23
24#define DEBUG_TYPE "spirv-builtins"
25
26namespace llvm {
27namespace SPIRV {
28#define GET_BuiltinGroup_DECL
29#include "SPIRVGenTables.inc"
30
33 InstructionSet::InstructionSet Set;
34 BuiltinGroup Group;
35 uint8_t MinNumArgs;
36 uint8_t MaxNumArgs;
37};
38
39#define GET_DemangledBuiltins_DECL
40#define GET_DemangledBuiltins_IMPL
41
43 const std::string BuiltinName;
45
49
56
57 bool isSpirvOp() const { return BuiltinName.rfind("__spirv_", 0) == 0; }
58};
59
62 InstructionSet::InstructionSet Set;
64};
65
66#define GET_NativeBuiltins_DECL
67#define GET_NativeBuiltins_IMPL
68
73 bool IsElect;
83};
84
85#define GET_GroupBuiltins_DECL
86#define GET_GroupBuiltins_IMPL
87
91 bool IsBlock;
92 bool IsWrite;
93};
94
95#define GET_IntelSubgroupsBuiltins_DECL
96#define GET_IntelSubgroupsBuiltins_IMPL
97
101};
102
103#define GET_AtomicFloatingBuiltins_DECL
104#define GET_AtomicFloatingBuiltins_IMPL
109};
110
111#define GET_GroupUniformBuiltins_DECL
112#define GET_GroupUniformBuiltins_IMPL
113
116 InstructionSet::InstructionSet Set;
117 BuiltIn::BuiltIn Value;
118};
119
120using namespace BuiltIn;
121#define GET_GetBuiltins_DECL
122#define GET_GetBuiltins_IMPL
123
126 InstructionSet::InstructionSet Set;
128};
129
130#define GET_ImageQueryBuiltins_DECL
131#define GET_ImageQueryBuiltins_IMPL
132
135 InstructionSet::InstructionSet Set;
140 FPRoundingMode::FPRoundingMode RoundingMode;
141};
142
145 InstructionSet::InstructionSet Set;
149 FPRoundingMode::FPRoundingMode RoundingMode;
150};
151
152using namespace FPRoundingMode;
153#define GET_ConvertBuiltins_DECL
154#define GET_ConvertBuiltins_IMPL
155
156using namespace InstructionSet;
157#define GET_VectorLoadStoreBuiltins_DECL
158#define GET_VectorLoadStoreBuiltins_IMPL
159
160#define GET_CLMemoryScope_DECL
161#define GET_CLSamplerAddressingMode_DECL
162#define GET_CLMemoryFenceFlags_DECL
163#define GET_ExtendedBuiltins_DECL
164#include "SPIRVGenTables.inc"
165} // namespace SPIRV
166
167//===----------------------------------------------------------------------===//
168// Misc functions for looking up builtins and verifying requirements using
169// TableGen records
170//===----------------------------------------------------------------------===//
171
172namespace SPIRV {
173/// Parses the name part of the demangled builtin call.
174std::string lookupBuiltinNameHelper(StringRef DemangledCall) {
175 const static std::string PassPrefix = "(anonymous namespace)::";
176 std::string BuiltinName;
177 // Itanium Demangler result may have "(anonymous namespace)::" prefix
178 if (DemangledCall.starts_with(PassPrefix.c_str()))
179 BuiltinName = DemangledCall.substr(PassPrefix.length());
180 else
181 BuiltinName = DemangledCall;
182 // Extract the builtin function name and types of arguments from the call
183 // skeleton.
184 BuiltinName = BuiltinName.substr(0, BuiltinName.find('('));
185
186 // Account for possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR
187 if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
188 BuiltinName = BuiltinName.substr(12);
189
190 // Check if the extracted name contains type information between angle
191 // brackets. If so, the builtin is an instantiated template - needs to have
192 // the information after angle brackets and return type removed.
193 if (BuiltinName.find('<') && BuiltinName.back() == '>') {
194 BuiltinName = BuiltinName.substr(0, BuiltinName.find('<'));
195 BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
196 }
197
198 // Check if the extracted name begins with "__spirv_ImageSampleExplicitLod"
199 // contains return type information at the end "_R<type>", if so extract the
200 // plain builtin name without the type information.
201 if (StringRef(BuiltinName).contains("__spirv_ImageSampleExplicitLod") &&
202 StringRef(BuiltinName).contains("_R")) {
203 BuiltinName = BuiltinName.substr(0, BuiltinName.find("_R"));
204 }
205
206 return BuiltinName;
207}
208} // namespace SPIRV
209
210/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
211/// the provided \p DemangledCall and specified \p Set.
212///
213/// The lookup follows the following algorithm, returning the first successful
214/// match:
215/// 1. Search with the plain demangled name (expecting a 1:1 match).
216/// 2. Search with the prefix before or suffix after the demangled name
217///    signifying the type of the first argument.
218///
219/// \returns Wrapper around the demangled call and found builtin definition.
// Resolves a demangled call against the TableGen builtin records: first by
// the plain name, then retrying with an argument-type prefix ("u_"/"s_"/"f"
// style) and finally an argument-type suffix ("u"/"s"/"f"). Returns nullptr
// when no record matches.
// NOTE(review): the declarator line naming this function and its leading
// parameters (doxygen lines 221 and 224, e.g. DemangledCall / Arguments) is
// missing from this extract — confirm against the original file.
220static std::unique_ptr<const SPIRV::IncomingCall>
222 SPIRV::InstructionSet::InstructionSet Set,
223 Register ReturnRegister, const SPIRVType *ReturnType,
225 std::string BuiltinName = SPIRV::lookupBuiltinNameHelper(DemangledCall);
226
// Split the raw argument list (text between the first '(' and first ')')
// on commas; empty entries are dropped (KeepEmpty = false).
227 SmallVector<StringRef, 10> BuiltinArgumentTypes;
228 StringRef BuiltinArgs =
229 DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
230 BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);
231
232 // Look up the builtin in the defined set. Start with the plain demangled
233 // name, expecting a 1:1 match in the defined builtin set.
234 const SPIRV::DemangledBuiltin *Builtin;
235 if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
236 return std::make_unique<SPIRV::IncomingCall>(
237 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
238
239 // If the initial look up was unsuccessful and the demangled call takes at
240 // least 1 argument, add a prefix or suffix signifying the type of the first
241 // argument and repeat the search.
242 if (BuiltinArgumentTypes.size() >= 1) {
// Only the first character of the first argument's type name drives the
// prefix/suffix selection (e.g. 'u' for "uint", 'f' for "float").
243 char FirstArgumentType = BuiltinArgumentTypes[0][0];
244 // Prefix to be added to the builtin's name for lookup.
245 // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
246 std::string Prefix;
247
248 switch (FirstArgumentType) {
249 // Unsigned:
250 case 'u':
251 if (Set == SPIRV::InstructionSet::OpenCL_std)
252 Prefix = "u_";
253 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
254 Prefix = "u";
255 break;
256 // Signed:
257 case 'c':
258 case 's':
259 case 'i':
260 case 'l':
261 if (Set == SPIRV::InstructionSet::OpenCL_std)
262 Prefix = "s_";
263 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
264 Prefix = "s";
265 break;
266 // Floating-point:
267 case 'f':
268 case 'd':
269 case 'h':
270 if (Set == SPIRV::InstructionSet::OpenCL_std ||
271 Set == SPIRV::InstructionSet::GLSL_std_450)
272 Prefix = "f";
273 break;
274 }
275
276 // If argument-type name prefix was added, look up the builtin again.
277 if (!Prefix.empty() &&
278 (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
279 return std::make_unique<SPIRV::IncomingCall>(
280 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
281
282 // If lookup with a prefix failed, find a suffix to be added to the
283 // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
284 // an unsigned value has a suffix "u".
285 std::string Suffix;
286
287 switch (FirstArgumentType) {
288 // Unsigned:
289 case 'u':
290 Suffix = "u";
291 break;
292 // Signed:
293 case 'c':
294 case 's':
295 case 'i':
296 case 'l':
297 Suffix = "s";
298 break;
299 // Floating-point:
300 case 'f':
301 case 'd':
302 case 'h':
303 Suffix = "f";
304 break;
305 }
306
307 // If argument-type name suffix was added, look up the builtin again.
308 if (!Suffix.empty() &&
309 (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
310 return std::make_unique<SPIRV::IncomingCall>(
311 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
312 }
313
314 // No builtin with such name was found in the set.
315 return nullptr;
316}
317
// Walks the definition chain of \p ParamReg back to the instruction that
// produced the block value: an addrspace cast of a spv_bitcast whose source
// is either an spv_alloca intrinsic or a G_GLOBAL_VALUE.
// NOTE(review): the declarator lines for this helper (doxygen lines 318-319)
// are missing from this extract — confirm the signature (ParamReg, MRI) in
// the original file.
320 // We expect the following sequence of instructions:
321 //   %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
322 //   or       = G_GLOBAL_VALUE @block_literal_global
323 //   %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
324 //   %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
325 MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
326 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
327 MI->getOperand(1).isReg());
328 Register BitcastReg = MI->getOperand(1).getReg();
329 MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
330 assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
331 BitcastMI->getOperand(2).isReg());
// Operand 2 of the bitcast intrinsic is its (sole) source value.
332 Register ValueReg = BitcastMI->getOperand(2).getReg();
333 MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
334 return ValueMI;
335}
336
337// Return an integer constant corresponding to the given register and
338// defined in spv_track_constant.
339// TODO: maybe unify with prelegalizer pass.
// NOTE(review): the declarator line (doxygen line 340, taking Reg and MRI)
// is missing from this extract — confirm against the original file.
341 MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
342 assert(isSpvIntrinsic(*DefMI, Intrinsic::spv_track_constant) &&
343 DefMI->getOperand(2).isReg());
// Follow spv_track_constant's operand to the underlying G_CONSTANT.
344 MachineInstr *DefMI2 = MRI->getUniqueVRegDef(DefMI->getOperand(2).getReg());
345 assert(DefMI2->getOpcode() == TargetOpcode::G_CONSTANT &&
346 DefMI2->getOperand(1).isCImm());
// The APInt value is zero-extended into the returned integer.
347 return DefMI2->getOperand(1).getCImm()->getValue().getZExtValue();
348}
349
350// Return type of the instruction result from spv_assign_type intrinsic.
351// TODO: maybe unify with prelegalizer pass.
// Scans the instruction(s) immediately following \p MI for an
// spv_assign_type / spv_assign_ptr_type intrinsic attached to MI's result
// register, optionally skipping one interleaved spv_assign_name.
// NOTE(review): the declarator line (doxygen line 352) is missing from this
// extract — confirm the signature in the original file.
353 MachineInstr *NextMI = MI->getNextNode();
354 if (!NextMI)
355 return nullptr;
// An spv_assign_name intrinsic may sit between MI and its type assignment.
356 if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
357 if ((NextMI = NextMI->getNextNode()) == nullptr)
358 return nullptr;
359 Register ValueReg = MI->getOperand(0).getReg();
// Bail out unless the next instruction assigns a type to MI's own result.
360 if ((!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) &&
361 !isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_ptr_type)) ||
362 NextMI->getOperand(1).getReg() != ValueReg)
363 return nullptr;
// The IR type is carried as metadata on operand 2 of the intrinsic.
364 Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
365 assert(Ty && "Type is expected");
366 return Ty;
367}
368
// Resolves the IR type of an OpenCL block argument by tracing \p ParamReg to
// its allocation site (global value or spv_alloca).
369static const Type *getBlockStructType(Register ParamReg,
// NOTE(review): the second parameter line (doxygen line 370, presumably the
// MachineRegisterInfo) and line 376 (the call producing MI, presumably via
// the block-struct walker above) are missing from this extract — confirm
// against the original file.
371 // In principle, this information should be passed to us from Clang via
372 // an elementtype attribute. However, said attribute requires that
373 // the function call be an intrinsic, which is not. Instead, we rely on being
374 // able to trace this to the declaration of a variable: OpenCL C specification
375 // section 6.12.5 should guarantee that we can do this.
377 if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
378 return MI->getOperand(1).getGlobal()->getType();
379 assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
380 "Blocks in OpenCL C must be traceable to allocation site");
// For allocas, recover the type from the adjacent spv_assign_type intrinsic.
381 return getMachineInstrType(MI);
382}
383
384//===----------------------------------------------------------------------===//
385// Helper functions for building misc instructions
386//===----------------------------------------------------------------------===//
387
388/// Helper function building either a resulting scalar or vector bool register
389/// depending on the expected \p ResultType.
390///
391/// \returns Tuple of the resulting register and its type.
// Creates a fresh virtual register typed as a scalar bool, or as a bool
// vector matching ResultType's element count when ResultType is a vector.
392static std::tuple<Register, SPIRVType *>
393buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
// NOTE(review): the trailing parameter line (doxygen line 394, presumably
// SPIRVGlobalRegistry *GR) is missing from this extract — confirm against
// the original file.
395 LLT Type;
396 SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);
397
398 if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
// Operand 2 of OpTypeVector holds the component count.
399 unsigned VectorElements = ResultType->getOperand(2).getImm();
400 BoolType =
401 GR->getOrCreateSPIRVVectorType(BoolType, VectorElements, MIRBuilder);
// NOTE(review): doxygen line 402 (the declaration consumed by this cast,
// presumably the FixedVectorType local) is missing from this extract.
403 cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
404 Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
405 } else {
406 Type = LLT::scalar(1);
407 }
408
409 Register ResultRegister =
// NOTE(review): doxygen line 410 (the initializer creating the virtual
// register with the LLT computed above) is missing from this extract.
411 MIRBuilder.getMRI()->setRegClass(ResultRegister, &SPIRV::iIDRegClass);
412 GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
413 return std::make_tuple(ResultRegister, BoolType);
414}
415
416/// Helper function for building either a vector or scalar select instruction
417/// depending on the expected \p ResultType.
// Lowers a boolean SourceRegister into ReturnRegister as a select between
// all-ones/1 (true) and 0 (false), vector- or scalar-shaped per ReturnType.
418static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
419 Register ReturnRegister, Register SourceRegister,
420 const SPIRVType *ReturnType,
// NOTE(review): the trailing parameter line (doxygen line 421, presumably
// SPIRVGlobalRegistry *GR) is missing from this extract — confirm against
// the original file.
422 Register TrueConst, FalseConst;
423
424 if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
425 unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
// NOTE(review): doxygen line 426 (defining AllOnes from Bits, presumably an
// all-ones APInt) is missing from this extract.
427 TrueConst = GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType);
428 FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType);
429 } else {
430 TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType);
431 FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType);
432 }
433 return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
434 FalseConst);
435}
436
437/// Helper function for building a load instruction loading into the
438/// \p DestinationReg.
// When no destination is supplied, a fresh 32-bit iID-class register typed
// with BaseType is created. Returns the (possibly new) destination register.
// NOTE(review): the declarator line (doxygen line 439, presumably taking
// BaseType and PtrRegister) is missing from this extract — confirm against
// the original file.
440 MachineIRBuilder &MIRBuilder,
441 SPIRVGlobalRegistry *GR, LLT LowLevelType,
442 Register DestinationReg = Register(0)) {
443 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
444 if (!DestinationReg.isValid()) {
445 DestinationReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
446 MRI->setType(DestinationReg, LLT::scalar(32));
447 GR->assignSPIRVTypeToVReg(BaseType, DestinationReg, MIRBuilder.getMF());
448 }
449 // TODO: consider using correct address space and alignment (p0 is canonical
450 // type for selection though).
// NOTE(review): doxygen line 451 (defining PtrInfo used below) is missing
// from this extract.
452 MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
453 return DestinationReg;
454}
455
456/// Helper function for building a load instruction for loading a builtin global
457/// variable of \p BuiltinValue value.
// Creates an Input-storage-class pointer register, materializes the global
// OpVariable decorated for BuiltinValue (Import linkage when hasLinkageTy),
// and loads its value; returns the loaded register typed as LLType.
// NOTE(review): the declarator line (doxygen line 458) is missing from this
// extract — confirm the function name/signature in the original file.
459 MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
460 SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
461 Register Reg = Register(0), bool isConst = true, bool hasLinkageTy = true) {
462 Register NewRegister =
463 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::iIDRegClass);
464 MIRBuilder.getMRI()->setType(NewRegister,
465 LLT::pointer(0, GR->getPointerSize()));
// NOTE(review): doxygen line 466 (the declaration of PtrType, presumably a
// pointer-type creation call taking these arguments) is missing from this
// extract.
467 VariableType, MIRBuilder, SPIRV::StorageClass::Input);
468 GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
469
470 // Set up the global OpVariable with the necessary builtin decorations.
471 Register Variable = GR->buildGlobalVariable(
472 NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
473 SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst,
474 /* HasLinkageTy */ hasLinkageTy, SPIRV::LinkageType::Import, MIRBuilder,
475 false);
476
477 // Load the value from the global variable.
478 Register LoadedRegister =
479 buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
480 MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
481 return LoadedRegister;
482}
483
484/// Helper external function for inserting ASSIGN_TYPE instruction between \p Reg
485/// and its definition, set the new register as a destination of the definition,
486/// assign SPIRVType to both registers. If SpirvTy is provided, use it as
487/// SPIRVType in ASSIGN_TYPE, otherwise create it from \p Ty. Defined in
488/// SPIRVPreLegalizer.cpp.
489extern Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
490 SPIRVGlobalRegistry *GR,
491 MachineIRBuilder &MIB,
492 MachineRegisterInfo &MRI);
493
494// TODO: Move to TableGen.
495static SPIRV::MemorySemantics::MemorySemantics
496getSPIRVMemSemantics(std::memory_order MemOrder) {
497 switch (MemOrder) {
498 case std::memory_order::memory_order_relaxed:
499 return SPIRV::MemorySemantics::None;
500 case std::memory_order::memory_order_acquire:
501 return SPIRV::MemorySemantics::Acquire;
502 case std::memory_order::memory_order_release:
503 return SPIRV::MemorySemantics::Release;
504 case std::memory_order::memory_order_acq_rel:
505 return SPIRV::MemorySemantics::AcquireRelease;
506 case std::memory_order::memory_order_seq_cst:
507 return SPIRV::MemorySemantics::SequentiallyConsistent;
508 default:
509 report_fatal_error("Unknown CL memory scope");
510 }
511}
512
513static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
514 switch (ClScope) {
515 case SPIRV::CLMemoryScope::memory_scope_work_item:
516 return SPIRV::Scope::Invocation;
517 case SPIRV::CLMemoryScope::memory_scope_work_group:
518 return SPIRV::Scope::Workgroup;
519 case SPIRV::CLMemoryScope::memory_scope_device:
520 return SPIRV::Scope::Device;
521 case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
522 return SPIRV::Scope::CrossDevice;
523 case SPIRV::CLMemoryScope::memory_scope_sub_group:
524 return SPIRV::Scope::Subgroup;
525 }
526 report_fatal_error("Unknown CL memory scope");
527}
528
// Materializes an integer constant \p Val of width BitWidth (default 32) as
// a register via the global registry.
// NOTE(review): the leading declarator lines (doxygen lines 529-530, with
// the function name and the Val/MIRBuilder/GR parameters) are missing from
// this extract — confirm against the original file.
531 unsigned BitWidth = 32) {
532 SPIRVType *IntType = GR->getOrCreateSPIRVIntegerType(BitWidth, MIRBuilder);
533 return GR->buildConstantInt(Val, MIRBuilder, IntType);
534}
535
// Produces the scope operand register: reuses CLScopeRegister when it holds
// a CL scope constant that is numerically equal to the mapped SPIR-V scope,
// otherwise builds a fresh constant for the (possibly remapped) Scope.
536static Register buildScopeReg(Register CLScopeRegister,
537 SPIRV::Scope::Scope Scope,
538 MachineIRBuilder &MIRBuilder,
// NOTE(review): the trailing parameter lines (doxygen lines 539-540,
// presumably GR and MRI) are missing from this extract — confirm against
// the original file.
541 if (CLScopeRegister.isValid()) {
542 auto CLScope =
543 static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
544 Scope = getSPIRVScope(CLScope);
545
// Only when the CL encoding equals the SPIR-V encoding can the incoming
// register be used directly as the SPIR-V scope operand.
546 if (CLScope == static_cast<unsigned>(Scope)) {
547 MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
548 return CLScopeRegister;
549 }
550 }
551 return buildConstantIntReg(Scope, MIRBuilder, GR);
552}
553
// Produces the memory-semantics operand register and updates \p Semantics:
// reuses SemanticsRegister when its constant memory order already equals the
// computed semantics mask, otherwise builds a fresh constant.
554static Register buildMemSemanticsReg(Register SemanticsRegister,
555 Register PtrRegister, unsigned &Semantics,
556 MachineIRBuilder &MIRBuilder,
// NOTE(review): the trailing parameter line (doxygen line 557, presumably
// SPIRVGlobalRegistry *GR) is missing from this extract — confirm against
// the original file.
558 if (SemanticsRegister.isValid()) {
559 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
560 std::memory_order Order =
561 static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
562 Semantics =
563 getSPIRVMemSemantics(Order) |
// NOTE(review): doxygen line 564 (the second operand of this '|',
// presumably storage-class-derived semantics from PtrRegister) is missing
// from this extract.
565
566 if (Order == Semantics) {
567 MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
568 return SemanticsRegister;
569 }
570 }
571 return buildConstantIntReg(Semantics, MIRBuilder, GR);
572}
573
574static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
575 const SPIRV::IncomingCall *Call,
576 Register TypeReg,
577 ArrayRef<uint32_t> ImmArgs = {}) {
578 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
579 auto MIB = MIRBuilder.buildInstr(Opcode);
580 if (TypeReg.isValid())
581 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
582 unsigned Sz = Call->Arguments.size() - ImmArgs.size();
583 for (unsigned i = 0; i < Sz; ++i) {
584 Register ArgReg = Call->Arguments[i];
585 if (!MRI->getRegClassOrNull(ArgReg))
586 MRI->setRegClass(ArgReg, &SPIRV::iIDRegClass);
587 MIB.addUse(ArgReg);
588 }
589 for (uint32_t ImmArg : ImmArgs)
590 MIB.addImm(ImmArg);
591 return true;
592}
593
594/// Helper function for translating atomic init to OpStore.
// NOTE(review): the declarator line (doxygen line 595, with the function
// name and the Call parameter) is missing from this extract — confirm
// against the original file.
596 MachineIRBuilder &MIRBuilder) {
// "__spirv_" wrapper calls are forwarded verbatim (OpStore has no result).
597 if (Call->isSpirvOp())
598 return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));
599
600 assert(Call->Arguments.size() == 2 &&
601 "Need 2 arguments for atomic init translation");
602 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::iIDRegClass);
603 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::iIDRegClass);
// atomic_init is not itself atomic: a plain OpStore of value into pointer.
604 MIRBuilder.buildInstr(SPIRV::OpStore)
605 .addUse(Call->Arguments[0])
606 .addUse(Call->Arguments[1]);
607 return true;
608}
609
610/// Helper function for building an atomic load instruction.
// Defaults: Device scope and SequentiallyConsistent (plus storage-derived)
// semantics when the call does not pass them explicitly.
// NOTE(review): the declarator lines (doxygen lines 611 and 613, with the
// function name, Call, and GR parameters) are missing from this extract —
// confirm against the original file.
612 MachineIRBuilder &MIRBuilder,
614 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
615 if (Call->isSpirvOp())
616 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);
617
618 Register PtrRegister = Call->Arguments[0];
619 MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::iIDRegClass);
620 // TODO: if true insert call to __translate_ocl_memory_sccope before
621 // OpAtomicLoad and the function implementation. We can use Translator's
622 // output for transcoding/atomic_explicit_arguments.cl as an example.
623 Register ScopeRegister;
624 if (Call->Arguments.size() > 1) {
625 ScopeRegister = Call->Arguments[1];
626 MIRBuilder.getMRI()->setRegClass(ScopeRegister, &SPIRV::iIDRegClass);
627 } else
628 ScopeRegister = buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);
629
630 Register MemSemanticsReg;
631 if (Call->Arguments.size() > 2) {
632 // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
633 MemSemanticsReg = Call->Arguments[2];
634 MIRBuilder.getMRI()->setRegClass(MemSemanticsReg, &SPIRV::iIDRegClass);
635 } else {
636 int Semantics =
637 SPIRV::MemorySemantics::SequentiallyConsistent |
// NOTE(review): doxygen line 638 (the second operand of this '|',
// presumably storage-class-derived semantics for PtrRegister) is missing
// from this extract.
639 MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GR);
640 }
641
642 MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
643 .addDef(Call->ReturnRegister)
644 .addUse(TypeReg)
645 .addUse(PtrRegister)
646 .addUse(ScopeRegister)
647 .addUse(MemSemanticsReg);
648 return true;
649}
650
651/// Helper function for building an atomic store instruction.
// Always uses Device scope and SequentiallyConsistent (plus storage-derived)
// semantics; the non-wrapper form takes no explicit order/scope arguments.
// NOTE(review): the declarator lines (doxygen lines 652 and 654, with the
// function name, Call, and GR parameters) are missing from this extract —
// confirm against the original file.
653 MachineIRBuilder &MIRBuilder,
655 if (Call->isSpirvOp())
656 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call, Register(0));
657
658 Register ScopeRegister =
659 buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);
660 Register PtrRegister = Call->Arguments[0];
661 MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::iIDRegClass);
662 int Semantics =
663 SPIRV::MemorySemantics::SequentiallyConsistent |
// NOTE(review): doxygen line 664 (the second operand of this '|',
// presumably storage-class-derived semantics for PtrRegister) is missing
// from this extract.
665 Register MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GR);
666 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::iIDRegClass);
// OpAtomicStore has no result: pointer, scope, semantics, value.
667 MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
668 .addUse(PtrRegister)
669 .addUse(ScopeRegister)
670 .addUse(MemSemanticsReg)
671 .addUse(Call->Arguments[1]);
672 return true;
673}
674
675/// Helper function for building an atomic compare-exchange instruction.
// Handles both the OpenCL atomic_compare_exchange_* form (Expected is a
// pointer; the result is a bool and *expected is updated on failure) and the
// legacy "cmpxchg" form (Expected is a value; the result is the old value).
// NOTE(review): the declarator line (doxygen line 676) is missing from this
// extract — confirm the function name in the original file.
677 const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
678 unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
679 if (Call->isSpirvOp())
680 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
681 GR->getSPIRVTypeID(Call->ReturnType));
682
683 bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
684 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
685
686 Register ObjectPtr = Call->Arguments[0]; // Pointer (volatile A *object.)
687 Register ExpectedArg = Call->Arguments[1]; // Comparator (C* expected).
688 Register Desired = Call->Arguments[2]; // Value (C Desired).
689 MRI->setRegClass(ObjectPtr, &SPIRV::iIDRegClass);
690 MRI->setRegClass(ExpectedArg, &SPIRV::iIDRegClass);
691 MRI->setRegClass(Desired, &SPIRV::iIDRegClass);
692 SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
693 LLT DesiredLLT = MRI->getType(Desired);
694
695 assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
696 SPIRV::OpTypePointer);
697 unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
698 (void)ExpectedType;
699 assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
700 : ExpectedType == SPIRV::OpTypePointer);
701 assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));
702
// Derive default memory semantics from the pointer's storage class.
703 SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
704 assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
705 auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
706 SpvObjectPtrTy->getOperand(1).getImm());
707 auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);
708
709 Register MemSemEqualReg;
710 Register MemSemUnequalReg;
711 uint64_t MemSemEqual =
712 IsCmpxchg
713 ? SPIRV::MemorySemantics::None
714 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
715 uint64_t MemSemUnequal =
716 IsCmpxchg
717 ? SPIRV::MemorySemantics::None
718 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
719 if (Call->Arguments.size() >= 4) {
720 assert(Call->Arguments.size() >= 5 &&
721 "Need 5+ args for explicit atomic cmpxchg");
722 auto MemOrdEq =
723 static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
724 auto MemOrdNeq =
725 static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
726 MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
727 MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
// Reuse the incoming registers only when the order value already equals the
// encoded semantics mask.
728 if (MemOrdEq == MemSemEqual)
729 MemSemEqualReg = Call->Arguments[3];
// NOTE(review): this compares MemOrdNeq against MemSemEqual; by symmetry
// with the line above it looks like MemSemUnequal was intended — confirm
// against the original file / upstream history.
730 if (MemOrdNeq == MemSemEqual)
731 MemSemUnequalReg = Call->Arguments[4];
732 MRI->setRegClass(Call->Arguments[3], &SPIRV::iIDRegClass);
733 MRI->setRegClass(Call->Arguments[4], &SPIRV::iIDRegClass);
734 }
735 if (!MemSemEqualReg.isValid())
736 MemSemEqualReg = buildConstantIntReg(MemSemEqual, MIRBuilder, GR);
737 if (!MemSemUnequalReg.isValid())
738 MemSemUnequalReg = buildConstantIntReg(MemSemUnequal, MIRBuilder, GR);
739
740 Register ScopeReg;
741 auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
742 if (Call->Arguments.size() >= 6) {
743 assert(Call->Arguments.size() == 6 &&
744 "Extra args for explicit atomic cmpxchg");
745 auto ClScope = static_cast<SPIRV::CLMemoryScope>(
746 getIConstVal(Call->Arguments[5], MRI));
747 Scope = getSPIRVScope(ClScope);
748 if (ClScope == static_cast<unsigned>(Scope))
749 ScopeReg = Call->Arguments[5];
750 MRI->setRegClass(Call->Arguments[5], &SPIRV::iIDRegClass);
751 }
752 if (!ScopeReg.isValid())
753 ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GR);
754
// For the pointer form, the comparator value must first be loaded from
// *expected; for cmpxchg it is already a value.
755 Register Expected = IsCmpxchg
756 ? ExpectedArg
757 : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
758 GR, LLT::scalar(32));
759 MRI->setType(Expected, DesiredLLT);
// The raw OpAtomicCompareExchange result (old value) goes into Tmp; only
// cmpxchg returns it directly.
760 Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
761 : Call->ReturnRegister;
762 if (!MRI->getRegClassOrNull(Tmp))
763 MRI->setRegClass(Tmp, &SPIRV::iIDRegClass);
764 GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());
765
// NOTE(review): the result type operand below is a fresh 32-bit int type
// rather than SpvDesiredTy — confirm this is intentional in the original.
766 SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
767 MIRBuilder.buildInstr(Opcode)
768 .addDef(Tmp)
769 .addUse(GR->getSPIRVTypeID(IntTy))
770 .addUse(ObjectPtr)
771 .addUse(ScopeReg)
772 .addUse(MemSemEqualReg)
773 .addUse(MemSemUnequalReg)
774 .addUse(Desired)
// NOTE(review): doxygen line 775 (the final operand terminating this
// builder chain, presumably ".addUse(Expected);") is missing from this
// extract.
776 if (!IsCmpxchg) {
// Pointer form: write the old value back to *expected and return the
// success flag (old value == comparator).
777 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
778 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
779 }
780 return true;
781}
782
783/// Helper function for building atomic instructions.
784static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
785 MachineIRBuilder &MIRBuilder,
787 if (Call->isSpirvOp())
788 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
789 GR->getSPIRVTypeID(Call->ReturnType));
790
791 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
792 Register ScopeRegister =
793 Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();
794
795 assert(Call->Arguments.size() <= 4 &&
796 "Too many args for explicit atomic RMW");
797 ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
798 MIRBuilder, GR, MRI);
799
800 Register PtrRegister = Call->Arguments[0];
801 unsigned Semantics = SPIRV::MemorySemantics::None;
802 MRI->setRegClass(PtrRegister, &SPIRV::iIDRegClass);
803 Register MemSemanticsReg =
804 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
805 MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
806 Semantics, MIRBuilder, GR);
807 MRI->setRegClass(Call->Arguments[1], &SPIRV::iIDRegClass);
808 Register ValueReg = Call->Arguments[1];
809 Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
810 // support cl_ext_float_atomics
811 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
812 if (Opcode == SPIRV::OpAtomicIAdd) {
813 Opcode = SPIRV::OpAtomicFAddEXT;
814 } else if (Opcode == SPIRV::OpAtomicISub) {
815 // Translate OpAtomicISub applied to a floating type argument to
816 // OpAtomicFAddEXT with the negative value operand
817 Opcode = SPIRV::OpAtomicFAddEXT;
818 Register NegValueReg =
819 MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
820 MRI->setRegClass(NegValueReg, &SPIRV::iIDRegClass);
821 GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
822 MIRBuilder.getMF());
823 MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
824 .addDef(NegValueReg)
825 .addUse(ValueReg);
826 insertAssignInstr(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
827 MIRBuilder.getMF().getRegInfo());
828 ValueReg = NegValueReg;
829 }
830 }
831 MIRBuilder.buildInstr(Opcode)
832 .addDef(Call->ReturnRegister)
833 .addUse(ValueTypeReg)
834 .addUse(PtrRegister)
835 .addUse(ScopeRegister)
836 .addUse(MemSemanticsReg)
837 .addUse(ValueReg);
838 return true;
839}
840
841/// Helper function for building an atomic floating-type instruction.
// Emits Opcode with explicit pointer/scope/semantics/value arguments taken
// verbatim from the call (exactly 4 arguments, no defaults).
// NOTE(review): the declarator lines (doxygen lines 842 and 845, with the
// function name, Call, and GR parameters) are missing from this extract —
// confirm against the original file.
843 unsigned Opcode,
844 MachineIRBuilder &MIRBuilder,
846 assert(Call->Arguments.size() == 4 &&
847 "Wrong number of atomic floating-type builtin");
848
849 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
850
851 Register PtrReg = Call->Arguments[0];
852 MRI->setRegClass(PtrReg, &SPIRV::iIDRegClass);
853
854 Register ScopeReg = Call->Arguments[1];
855 MRI->setRegClass(ScopeReg, &SPIRV::iIDRegClass);
856
857 Register MemSemanticsReg = Call->Arguments[2];
858 MRI->setRegClass(MemSemanticsReg, &SPIRV::iIDRegClass);
859
860 Register ValueReg = Call->Arguments[3];
861 MRI->setRegClass(ValueReg, &SPIRV::iIDRegClass);
862
863 MIRBuilder.buildInstr(Opcode)
864 .addDef(Call->ReturnRegister)
865 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
866 .addUse(PtrReg)
867 .addUse(ScopeReg)
868 .addUse(MemSemanticsReg)
869 .addUse(ValueReg);
870 return true;
871}
872
873/// Helper function for building atomic flag instructions (e.g.
874/// OpAtomicFlagTestAndSet).
// OpAtomicFlagTestAndSet produces a result; OpAtomicFlagClear does not.
// Defaults: Device scope, SequentiallyConsistent semantics.
// NOTE(review): the declarator lines (doxygen lines 875 and 877, with the
// function name, Call, and GR parameters) are missing from this extract —
// confirm against the original file.
876 unsigned Opcode, MachineIRBuilder &MIRBuilder,
878 bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
879 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
880 if (Call->isSpirvOp())
881 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
882 IsSet ? TypeReg : Register(0));
883
884 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
885 Register PtrRegister = Call->Arguments[0];
886 unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
// Optional 2nd argument is an explicit memory order.
887 Register MemSemanticsReg =
888 Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
889 MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
890 Semantics, MIRBuilder, GR);
891
// SPIR-V forbids Acquire/AcquireRelease semantics on OpAtomicFlagClear.
892 assert((Opcode != SPIRV::OpAtomicFlagClear ||
893 (Semantics != SPIRV::MemorySemantics::Acquire &&
894 Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
895 "Invalid memory order argument!");
896
897 Register ScopeRegister =
898 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
899 ScopeRegister =
900 buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);
901
902 auto MIB = MIRBuilder.buildInstr(Opcode);
903 if (IsSet)
904 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
905
906 MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
907 return true;
908}
909
910/// Helper function for building barriers, i.e., memory/control ordering
911/// operations.
// Translates OpenCL barrier/mem_fence-style calls into OpControlBarrier /
// OpMemoryBarrier, mapping CLK_*_MEM_FENCE flags to memory-semantics bits
// and optional CL scope arguments to SPIR-V scopes.
912static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
913 MachineIRBuilder &MIRBuilder,
// NOTE(review): the trailing parameter line (doxygen line 914, presumably
// SPIRVGlobalRegistry *GR) is missing from this extract — confirm against
// the original file.
915 if (Call->isSpirvOp())
916 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
917
918 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
919 unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
920 unsigned MemSemantics = SPIRV::MemorySemantics::None;
921
922 if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
923 MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;
924
925 if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
926 MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;
927
928 if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
929 MemSemantics |= SPIRV::MemorySemantics::ImageMemory;
930
// OpMemoryBarrier carries an explicit memory order (argument 1); other
// barriers default to SequentiallyConsistent.
931 if (Opcode == SPIRV::OpMemoryBarrier) {
932 std::memory_order MemOrder =
933 static_cast<std::memory_order>(getIConstVal(Call->Arguments[1], MRI));
934 MemSemantics = getSPIRVMemSemantics(MemOrder) | MemSemantics;
935 } else {
936 MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
937 }
938
// Reuse the flags register when its value already equals the computed
// semantics mask; otherwise materialize a constant.
939 Register MemSemanticsReg;
940 if (MemFlags == MemSemantics) {
941 MemSemanticsReg = Call->Arguments[0];
942 MRI->setRegClass(MemSemanticsReg, &SPIRV::iIDRegClass);
943 } else
944 MemSemanticsReg = buildConstantIntReg(MemSemantics, MIRBuilder, GR);
945
946 Register ScopeReg;
947 SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
948 SPIRV::Scope::Scope MemScope = Scope;
949 if (Call->Arguments.size() >= 2) {
950 assert(
951 ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
952 (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
953 "Extra args for explicitly scoped barrier");
// For OpMemoryBarrier the scope is argument 2 (argument 1 is the order).
954 Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
955 : Call->Arguments[1];
956 SPIRV::CLMemoryScope CLScope =
957 static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
958 MemScope = getSPIRVScope(CLScope);
959 if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
960 (Opcode == SPIRV::OpMemoryBarrier))
961 Scope = MemScope;
962
// NOTE(review): this reuses Arguments[1] although ScopeArg above may be
// Arguments[2] for OpMemoryBarrier — confirm this asymmetry is intended.
963 if (CLScope == static_cast<unsigned>(Scope)) {
964 ScopeReg = Call->Arguments[1];
965 MRI->setRegClass(ScopeReg, &SPIRV::iIDRegClass);
966 }
967 }
968
969 if (!ScopeReg.isValid())
970 ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GR);
971
// OpControlBarrier takes (execution scope, memory scope, semantics);
// OpMemoryBarrier takes (memory scope, semantics).
972 auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
973 if (Opcode != SPIRV::OpMemoryBarrier)
974 MIB.addUse(buildConstantIntReg(MemScope, MIRBuilder, GR));
975 MIB.addUse(MemSemanticsReg);
976 return true;
977}
978
979static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
980 switch (dim) {
981 case SPIRV::Dim::DIM_1D:
982 case SPIRV::Dim::DIM_Buffer:
983 return 1;
984 case SPIRV::Dim::DIM_2D:
985 case SPIRV::Dim::DIM_Cube:
986 case SPIRV::Dim::DIM_Rect:
987 return 2;
988 case SPIRV::Dim::DIM_3D:
989 return 3;
990 default:
991 report_fatal_error("Cannot get num components for given Dim");
992 }
993}
994
995/// Helper function for obtaining the number of size components.
996static unsigned getNumSizeComponents(SPIRVType *imgType) {
997 assert(imgType->getOpcode() == SPIRV::OpTypeImage);
998 auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
999 unsigned numComps = getNumComponentsForDim(dim);
1000 bool arrayed = imgType->getOperand(4).getImm() == 1;
1001 return arrayed ? numComps + 1 : numComps;
1002}
1003
1004//===----------------------------------------------------------------------===//
1005// Implementation functions for each builtin group
1006//===----------------------------------------------------------------------===//
1007
// Lowers a builtin call to an OpExtInst from the OpenCL extended
// instruction set, forwarding all call arguments as operands.
1008static bool generateExtInst(const SPIRV::IncomingCall *Call,
1009 MachineIRBuilder &MIRBuilder,
1010 SPIRVGlobalRegistry *GR) {
1011 // Lookup the extended instruction number in the TableGen records.
1012 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
// NOTE(review): listing artifact — line 1013 is missing from this
// extraction; it presumably declares the 'Number' variable that is
// consumed by .addImm(Number) below — TODO confirm against upstream.
1014 SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
1015
1016 // Build extended instruction.
1017 auto MIB =
1018 MIRBuilder.buildInstr(SPIRV::OpExtInst)
1019 .addDef(Call->ReturnRegister)
1020 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1021 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
1022 .addImm(Number);
1023
// Forward every call argument as an instruction operand.
1024 for (auto Argument : Call->Arguments)
1025 MIB.addUse(Argument);
1026 return true;
1027}
1028
// Lowers a relational builtin: emits the comparison opcode into a bool (or
// bool-vector) register, then a select that converts that result into the
// builtin's declared return type.
// NOTE(review): listing artifact — the first signature line (1029) is
// missing from this extraction.
1030 MachineIRBuilder &MIRBuilder,
1031 SPIRVGlobalRegistry *GR) {
1032 // Lookup the instruction opcode in the TableGen records.
1033 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1034 unsigned Opcode =
1035 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1036
// Intermediate bool/bool-vector register that receives the raw comparison.
1037 Register CompareRegister;
1038 SPIRVType *RelationType;
1039 std::tie(CompareRegister, RelationType) =
1040 buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
1041
1042 // Build relational instruction.
1043 auto MIB = MIRBuilder.buildInstr(Opcode)
1044 .addDef(CompareRegister)
1045 .addUse(GR->getSPIRVTypeID(RelationType));
1046
1047 for (auto Argument : Call->Arguments)
1048 MIB.addUse(Argument);
1049
1050 // Build select instruction.
1051 return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
1052 Call->ReturnType, GR);
1053}
1054
// Lowers work-group / sub-group collective builtins into OpGroup*
// instructions, coercing boolean arguments/results and building LocalId
// vectors for OpGroupBroadcast as required.
// NOTE(review): listing artifact — the first signature line (1055) is
// missing from this extraction.
1056 MachineIRBuilder &MIRBuilder,
1057 SPIRVGlobalRegistry *GR) {
1058 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1059 const SPIRV::GroupBuiltin *GroupBuiltin =
1060 SPIRV::lookupGroupBuiltin(Builtin->Name);
1061
1062 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
// Direct __spirv_* wrapper calls: forward operands, except that a group
// operation operand must be emitted as an immediate literal.
1063 if (Call->isSpirvOp()) {
1064 if (GroupBuiltin->NoGroupOperation)
1065 return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
1066 GR->getSPIRVTypeID(Call->ReturnType));
1067
1068 // Group Operation is a literal
1069 Register GroupOpReg = Call->Arguments[1];
1070 const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
1071 if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
// NOTE(review): listing artifact — line 1072 (the opening of the
// report_fatal_error( call) is missing from this extraction.
1073 "Group Operation parameter must be an integer constant");
1074 uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
1075 Register ScopeReg = Call->Arguments[0];
1076 if (!MRI->getRegClassOrNull(ScopeReg))
1077 MRI->setRegClass(ScopeReg, &SPIRV::iIDRegClass);
1078 auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
1079 .addDef(Call->ReturnRegister)
1080 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1081 .addUse(ScopeReg)
1082 .addImm(GrpOp);
1083 for (unsigned i = 2; i < Call->Arguments.size(); ++i) {
1084 Register ArgReg = Call->Arguments[i];
1085 if (!MRI->getRegClassOrNull(ArgReg))
1086 MRI->setRegClass(ArgReg, &SPIRV::iIDRegClass);
1087 MIB.addUse(ArgReg);
1088 }
1089 return true;
1090 }
1091
// Predicate-style builtins take a boolean first argument; coerce an integer
// argument into an OpTypeBool value (constant-fold it, or emit ICmp != 0).
1092 Register Arg0;
1093 if (GroupBuiltin->HasBoolArg) {
1094 SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);
1095 Register BoolReg = Call->Arguments[0];
1096 SPIRVType *BoolRegType = GR->getSPIRVTypeForVReg(BoolReg);
1097 if (!BoolRegType)
1098 report_fatal_error("Can't find a register's type definition");
1099 MachineInstr *ArgInstruction = getDefInstrMaybeConstant(BoolReg, MRI);
1100 if (ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT) {
1101 if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)
1102 Arg0 = GR->buildConstantInt(getIConstVal(BoolReg, MRI), MIRBuilder,
1103 BoolType);
1104 } else {
1105 if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
1106 Arg0 = MRI->createGenericVirtualRegister(LLT::scalar(1));
1107 MRI->setRegClass(Arg0, &SPIRV::IDRegClass);
1108 GR->assignSPIRVTypeToVReg(BoolType, Arg0, MIRBuilder.getMF());
1109 MIRBuilder.buildICmp(CmpInst::ICMP_NE, Arg0, BoolReg,
1110 GR->buildConstantInt(0, MIRBuilder, BoolRegType));
1111 insertAssignInstr(Arg0, nullptr, BoolType, GR, MIRBuilder,
1112 MIRBuilder.getMF().getRegInfo());
1113 } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
1114 report_fatal_error("Expect a boolean argument");
1115 }
1116 // if BoolReg is a boolean register, we don't need to do anything
1117 }
1118 }
1119
1120 Register GroupResultRegister = Call->ReturnRegister;
1121 SPIRVType *GroupResultType = Call->ReturnType;
1122
1123 // TODO: maybe we need to check whether the result type is already boolean
1124 // and in this case do not insert select instruction.
1125 const bool HasBoolReturnTy =
1126 GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
1127 GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
1128 GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;
1129
// Boolean-returning builtins compute into a temporary bool register; a
// final select converts it to the declared return type (see below).
1130 if (HasBoolReturnTy)
1131 std::tie(GroupResultRegister, GroupResultType) =
1132 buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
1133
// "sub_group_*" builtins use Subgroup scope, the rest Workgroup scope.
1134 auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
1135 : SPIRV::Scope::Workgroup;
1136 Register ScopeRegister = buildConstantIntReg(Scope, MIRBuilder, GR);
1137
1138 Register VecReg;
1139 if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
1140 Call->Arguments.size() > 2) {
1141 // For OpGroupBroadcast "LocalId must be an integer datatype. It must be a
1142 // scalar, a vector with 2 components, or a vector with 3 components.",
1143 // meaning that we must create a vector from the function arguments if
1144 // it's a work_group_broadcast(val, local_id_x, local_id_y) or
1145 // work_group_broadcast(val, local_id_x, local_id_y, local_id_z) call.
1146 Register ElemReg = Call->Arguments[1];
1147 SPIRVType *ElemType = GR->getSPIRVTypeForVReg(ElemReg);
1148 if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)
1149 report_fatal_error("Expect an integer <LocalId> argument");
1150 unsigned VecLen = Call->Arguments.size() - 1;
1151 VecReg = MRI->createGenericVirtualRegister(
1152 LLT::fixed_vector(VecLen, MRI->getType(ElemReg)));
1153 MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);
1154 SPIRVType *VecType =
1155 GR->getOrCreateSPIRVVectorType(ElemType, VecLen, MIRBuilder);
1156 GR->assignSPIRVTypeToVReg(VecType, VecReg, MIRBuilder.getMF());
1157 auto MIB =
1158 MIRBuilder.buildInstr(TargetOpcode::G_BUILD_VECTOR).addDef(VecReg);
1159 for (unsigned i = 1; i < Call->Arguments.size(); i++) {
1160 MIB.addUse(Call->Arguments[i]);
1161 MRI->setRegClass(Call->Arguments[i], &SPIRV::iIDRegClass);
1162 }
1163 insertAssignInstr(VecReg, nullptr, VecType, GR, MIRBuilder,
1164 MIRBuilder.getMF().getRegInfo());
1165 }
1166
1167 // Build work/sub group instruction.
1168 auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
1169 .addDef(GroupResultRegister)
1170 .addUse(GR->getSPIRVTypeID(GroupResultType))
1171 .addUse(ScopeRegister);
1172
1173 if (!GroupBuiltin->NoGroupOperation)
1174 MIB.addImm(GroupBuiltin->GroupOperation);
1175 if (Call->Arguments.size() > 0) {
// Use the coerced boolean value (Arg0) in place of the raw first argument
// when one was created above.
1176 MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
1177 MRI->setRegClass(Call->Arguments[0], &SPIRV::iIDRegClass);
1178 if (VecReg.isValid())
1179 MIB.addUse(VecReg);
1180 else
1181 for (unsigned i = 1; i < Call->Arguments.size(); i++) {
1182 MIB.addUse(Call->Arguments[i]);
1183 MRI->setRegClass(Call->Arguments[i], &SPIRV::iIDRegClass);
1184 }
1185 }
1186
1187 // Build select instruction.
1188 if (HasBoolReturnTy)
1189 buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
1190 Call->ReturnType, GR);
1191 return true;
1192}
1193
// Lowers SPV_INTEL_subgroups builtins (shuffle/block read/block write),
// promoting block read/write opcodes to their image variants when the first
// argument is an image.
// NOTE(review): listing artifact — the first signature line (1194) is
// missing from this extraction.
1195 MachineIRBuilder &MIRBuilder,
1196 SPIRVGlobalRegistry *GR) {
1197 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1198 MachineFunction &MF = MIRBuilder.getMF();
1199 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
// These builtins are only legal when the subtarget enables the extension.
1200 if (!ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1201 std::string DiagMsg = std::string(Builtin->Name) +
1202 ": the builtin requires the following SPIR-V "
1203 "extension: SPV_INTEL_subgroups";
1204 report_fatal_error(DiagMsg.c_str(), false);
1205 }
1206 const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
1207 SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);
1208
1209 uint32_t OpCode = IntelSubgroups->Opcode;
// Direct __spirv_* wrapper call: block writes produce no result value.
1210 if (Call->isSpirvOp()) {
1211 bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
1212 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL;
1213 return buildOpFromWrapper(MIRBuilder, OpCode, Call,
1214 IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
1215 : Register(0));
1216 }
1217
1218 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1219 if (IntelSubgroups->IsBlock) {
1220 // Minimal number or arguments set in TableGen records is 1
1221 if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
1222 if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
1223 // TODO: add required validation from the specification:
1224 // "'Image' must be an object whose type is OpTypeImage with a 'Sampled'
1225 // operand of 0 or 2. If the 'Sampled' operand is 2, then some
1226 // dimensions require a capability."
1227 switch (OpCode) {
1228 case SPIRV::OpSubgroupBlockReadINTEL:
1229 OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
1230 break;
1231 case SPIRV::OpSubgroupBlockWriteINTEL:
1232 OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
1233 break;
1234 }
1235 }
1236 }
1237 }
1238
1239 // TODO: opaque pointers types should be eventually resolved in such a way
1240 // that validation of block read is enabled with respect to the following
1241 // specification requirement:
1242 // "'Result Type' may be a scalar or vector type, and its component type must
1243 // be equal to the type pointed to by 'Ptr'."
1244 // For example, function parameter type should not be default i8 pointer, but
1245 // depend on the result type of the instruction where it is used as a pointer
1246 // argument of OpSubgroupBlockReadINTEL
1247
1248 // Build Intel subgroups instruction
// NOTE(review): listing artifact — line 1249 (presumably
// "MachineInstrBuilder MIB =") is missing from this extraction.
1250 IntelSubgroups->IsWrite
1251 ? MIRBuilder.buildInstr(OpCode)
1252 : MIRBuilder.buildInstr(OpCode)
1253 .addDef(Call->ReturnRegister)
1254 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1255 for (size_t i = 0; i < Call->Arguments.size(); ++i) {
1256 MIB.addUse(Call->Arguments[i]);
1257 MRI->setRegClass(Call->Arguments[i], &SPIRV::iIDRegClass);
1258 }
1259
1260 return true;
1261}
1262
// Lowers SPV_KHR_uniform_group_instructions builtins: operands are scope
// (arg #1), a constant group operation literal (arg #2) and a value (arg #3).
// NOTE(review): listing artifact — the first signature line (1263) is
// missing from this extraction.
1264 MachineIRBuilder &MIRBuilder,
1265 SPIRVGlobalRegistry *GR) {
1266 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1267 MachineFunction &MF = MIRBuilder.getMF();
1268 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1269 if (!ST->canUseExtension(
1270 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1271 std::string DiagMsg = std::string(Builtin->Name) +
1272 ": the builtin requires the following SPIR-V "
1273 "extension: SPV_KHR_uniform_group_instructions";
1274 report_fatal_error(DiagMsg.c_str(), false);
1275 }
1276 const SPIRV::GroupUniformBuiltin *GroupUniform =
1277 SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
1278 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1279
1280 Register GroupResultReg = Call->ReturnRegister;
1281 MRI->setRegClass(GroupResultReg, &SPIRV::iIDRegClass);
1282
1283 // Scope
1284 Register ScopeReg = Call->Arguments[0];
1285 MRI->setRegClass(ScopeReg, &SPIRV::iIDRegClass);
1286
1287 // Group Operation
1288 Register ConstGroupOpReg = Call->Arguments[1];
1289 const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
1290 if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
// NOTE(review): listing artifact — line 1291 (the opening of the
// report_fatal_error( call) is missing from this extraction.
1292 "expect a constant group operation for a uniform group instruction",
1293 false);
1294 const MachineOperand &ConstOperand = Const->getOperand(1);
1295 if (!ConstOperand.isCImm())
1296 report_fatal_error("uniform group instructions: group operation must be an "
1297 "integer constant",
1298 false);
1299
1300 // Value
1301 Register ValueReg = Call->Arguments[2];
1302 MRI->setRegClass(ValueReg, &SPIRV::iIDRegClass);
1303
1304 auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
1305 .addDef(GroupResultReg)
1306 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1307 .addUse(ScopeReg);
// The group operation must be emitted as an immediate literal operand.
1308 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
1309 MIB.addUse(ValueReg);
1310
1311 return true;
1312}
1313
// Lowers clock-read builtins to OpReadClockKHR (SPV_KHR_shader_clock),
// deriving the Scope operand from the builtin name's suffix.
// NOTE(review): listing artifact — the first signature line (1314) is
// missing from this extraction.
1315 MachineIRBuilder &MIRBuilder,
1316 SPIRVGlobalRegistry *GR) {
1317 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1318 MachineFunction &MF = MIRBuilder.getMF();
1319 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1320 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
1321 std::string DiagMsg = std::string(Builtin->Name) +
1322 ": the builtin requires the following SPIR-V "
1323 "extension: SPV_KHR_shader_clock";
1324 report_fatal_error(DiagMsg.c_str(), false);
1325 }
1326
1327 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1328 Register ResultReg = Call->ReturnRegister;
1329 MRI->setRegClass(ResultReg, &SPIRV::iIDRegClass);
1330
1331 // Deduce the `Scope` operand from the builtin function name.
1332 SPIRV::Scope::Scope ScopeArg =
// NOTE(review): listing artifact — line 1333 is missing here; it presumably
// constructs the StringSwitch over Builtin->Name that the .EndsWith chain
// below is applied to — TODO confirm against upstream.
1334 .EndsWith("device", SPIRV::Scope::Scope::Device)
1335 .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
1336 .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
1337 Register ScopeReg = buildConstantIntReg(ScopeArg, MIRBuilder, GR);
1338
1339 MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
1340 .addDef(ResultReg)
1341 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1342 .addUse(ScopeReg);
1343
1344 return true;
1345}
1346
1347// These queries ask for a single size_t result for a given dimension index, e.g
1348// size_t get_global_id(uint dimindex). In SPIR-V, the builtins corresonding to
1349// these values are all vec3 types, so we need to extract the correct index or
1350// return defaultVal (0 or 1 depending on the query). We also handle extending
1351// or tuncating in case size_t does not match the expected result type's
1352// bitwidth.
1353//
1354// For a constant index >= 3 we generate:
1355// %res = OpConstant %SizeT 0
1356//
1357// For other indices we generate:
1358// %g = OpVariable %ptr_V3_SizeT Input
1359// OpDecorate %g BuiltIn XXX
1360// OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
1361// OpDecorate %g Constant
1362// %loadedVec = OpLoad %V3_SizeT %g
1363//
1364// Then, if the index is constant < 3, we generate:
1365// %res = OpCompositeExtract %SizeT %loadedVec idx
1366// If the index is dynamic, we generate:
1367// %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
1368// %cmp = OpULessThan %bool %idx %const_3
1369// %res = OpSelect %SizeT %cmp %tmp %const_0
1370//
1371// If the bitwidth of %res does not match the expected return type, we add an
1372// extend or truncate.
// NOTE(review): listing artifact — signature lines 1373/1375 are missing
// from this extraction; the body also uses Call and GR.
1374 MachineIRBuilder &MIRBuilder,
1376 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1377 uint64_t DefaultValue) {
1378 Register IndexRegister = Call->Arguments[0];
1379 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
1380 const unsigned PointerSize = GR->getPointerSize();
1381 const SPIRVType *PointerSizeType =
1382 GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
1383 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1384 auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);
1385
1386 // Set up the final register to do truncation or extension on at the end.
1387 Register ToTruncate = Call->ReturnRegister;
1388
1389 // If the index is constant, we can statically determine if it is in range.
1390 bool IsConstantIndex =
1391 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1392
1393 // If it's out of range (max dimension is 3), we can just return the constant
1394 // default value (0 or 1 depending on which query function).
1395 if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
1396 Register DefaultReg = Call->ReturnRegister;
// When the pointer width differs from the declared result width, compute
// into a temporary register of pointer width and convert at the end.
1397 if (PointerSize != ResultWidth) {
1398 DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1399 MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
1400 GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
1401 MIRBuilder.getMF());
1402 ToTruncate = DefaultReg;
1403 }
1404 auto NewRegister =
1405 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);
1406 MIRBuilder.buildCopy(DefaultReg, NewRegister);
1407 } else { // If it could be in range, we need to load from the given builtin.
1408 auto Vec3Ty =
1409 GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder);
1410 Register LoadedVector =
1411 buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
1412 LLT::fixed_vector(3, PointerSize));
1413 // Set up the vreg to extract the result to (possibly a new temporary one).
1414 Register Extracted = Call->ReturnRegister;
1415 if (!IsConstantIndex || PointerSize != ResultWidth) {
1416 Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1417 MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
1418 GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
1419 }
1420 // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
1421 // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
1422 MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
1423 Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
1424 ExtractInst.addUse(LoadedVector).addUse(IndexRegister);
1425
1426 // If the index is dynamic, need check if it's < 3, and then use a select.
1427 if (!IsConstantIndex) {
1428 insertAssignInstr(Extracted, nullptr, PointerSizeType, GR, MIRBuilder,
1429 *MRI);
1430
1431 auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
1432 auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);
1433
1434 Register CompareRegister =
1435 MRI->createGenericVirtualRegister(LLT::scalar(1));
1436 MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);
1437 GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());
1438
1439 // Use G_ICMP to check if idxVReg < 3.
1440 MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
1441 GR->buildConstantInt(3, MIRBuilder, IndexType));
1442
1443 // Get constant for the default value (0 or 1 depending on which
1444 // function).
1445 Register DefaultRegister =
1446 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);
1447
1448 // Get a register for the selection result (possibly a new temporary one).
1449 Register SelectionResult = Call->ReturnRegister;
1450 if (PointerSize != ResultWidth) {
1451 SelectionResult =
1452 MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1453 MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
1454 GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
1455 MIRBuilder.getMF());
1456 }
1457 // Create the final G_SELECT to return the extracted value or the default.
1458 MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
1459 DefaultRegister);
1460 ToTruncate = SelectionResult;
1461 } else {
1462 ToTruncate = Extracted;
1463 }
1464 }
1465 // Alter the result's bitwidth if it does not match the SizeT value extracted.
1466 if (PointerSize != ResultWidth)
1467 MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
1468 return true;
1469}
1470
// Lowers a "get" builtin that maps to a SPIR-V builtin variable: either via
// the vec3 workgroup-query path (GlobalInvocationId) or via a direct load of
// the decorated builtin variable.
// NOTE(review): listing artifact — the first signature line (1471) is
// missing from this extraction.
1472 MachineIRBuilder &MIRBuilder,
1473 SPIRVGlobalRegistry *GR) {
1474 // Lookup the builtin variable record.
1475 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1476 SPIRV::BuiltIn::BuiltIn Value =
1477 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1478
// GlobalInvocationId is a vec3 builtin queried per-dimension with default 0.
1479 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1480 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1481
1482 // Build a load instruction for the builtin variable.
1483 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1484 LLT LLType;
1485 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1486 LLType =
1487 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth)
1488 else
1489 LLType = LLT::scalar(BitWidth);
1490
1491 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1492 LLType, Call->ReturnRegister);
1493}
1494
// Dispatches an atomic builtin to the matching build* helper based on the
// opcode looked up in the TableGen records.
// NOTE(review): listing artifact — the first signature line (1495) is
// missing from this extraction.
1496 MachineIRBuilder &MIRBuilder,
1497 SPIRVGlobalRegistry *GR) {
1498 // Lookup the instruction opcode in the TableGen records.
1499 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1500 unsigned Opcode =
1501 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1502
1503 switch (Opcode) {
// atomic_init lowers to a plain OpStore.
1504 case SPIRV::OpStore:
1505 return buildAtomicInitInst(Call, MIRBuilder);
1506 case SPIRV::OpAtomicLoad:
1507 return buildAtomicLoadInst(Call, MIRBuilder, GR);
1508 case SPIRV::OpAtomicStore:
1509 return buildAtomicStoreInst(Call, MIRBuilder, GR);
1510 case SPIRV::OpAtomicCompareExchange:
1511 case SPIRV::OpAtomicCompareExchangeWeak:
1512 return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
1513 GR);
1514 case SPIRV::OpAtomicIAdd:
1515 case SPIRV::OpAtomicISub:
1516 case SPIRV::OpAtomicOr:
1517 case SPIRV::OpAtomicXor:
1518 case SPIRV::OpAtomicAnd:
1519 case SPIRV::OpAtomicExchange:
1520 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1521 case SPIRV::OpMemoryBarrier:
1522 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1523 case SPIRV::OpAtomicFlagTestAndSet:
1524 case SPIRV::OpAtomicFlagClear:
1525 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1526 default:
// Unknown opcodes are still honored when spelled as __spirv_* wrappers.
1527 if (Call->isSpirvOp())
1528 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1529 GR->getSPIRVTypeID(Call->ReturnType));
1530 return false;
1531 }
1532}
1533
// Dispatches floating-point atomic builtins (SPV_EXT_shader_atomic_float_*)
// to the RMW builder; anything else is rejected.
// NOTE(review): listing artifact — the first signature line (1534) is
// missing from this extraction.
1535 MachineIRBuilder &MIRBuilder,
1536 SPIRVGlobalRegistry *GR) {
1537 // Lookup the instruction opcode in the TableGen records.
1538 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1539 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
1540
1541 switch (Opcode) {
1542 case SPIRV::OpAtomicFAddEXT:
1543 case SPIRV::OpAtomicFMinEXT:
1544 case SPIRV::OpAtomicFMaxEXT:
1545 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1546 default:
1547 return false;
1548 }
1549}
1550
// Lowers barrier builtins by looking up the native opcode and delegating to
// buildBarrierInst.
// NOTE(review): listing artifact — the first signature line (1551) is
// missing from this extraction.
1552 MachineIRBuilder &MIRBuilder,
1553 SPIRVGlobalRegistry *GR) {
1554 // Lookup the instruction opcode in the TableGen records.
1555 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1556 unsigned Opcode =
1557 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1558
1559 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1560}
1561
// Lowers an address-space cast builtin to a generic G_ADDRSPACE_CAST from
// the first argument to the return register.
// NOTE(review): listing artifact — the first signature line (1562) is
// missing from this extraction.
1563 MachineIRBuilder &MIRBuilder) {
1564 MIRBuilder.buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1565 .addDef(Call->ReturnRegister)
1566 .addUse(Call->Arguments[0]);
1567 return true;
1568}
1569
// Lowers dot(a, b): OpDot for vector operands, OpFMulS for scalar operands.
// NOTE(review): listing artifact — the first signature line (1570) is
// missing from this extraction.
1571 MachineIRBuilder &MIRBuilder,
1572 SPIRVGlobalRegistry *GR) {
// Direct __spirv_* wrapper call: always lowered as OpDot.
1573 if (Call->isSpirvOp())
1574 return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
1575 GR->getSPIRVTypeID(Call->ReturnType));
1576 unsigned Opcode = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode();
1577 bool IsVec = Opcode == SPIRV::OpTypeVector;
1578 // Use OpDot only in case of vector args and OpFMul in case of scalar args.
1579 MIRBuilder.buildInstr(IsVec ? SPIRV::OpDot : SPIRV::OpFMulS)
1580 .addDef(Call->ReturnRegister)
1581 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1582 .addUse(Call->Arguments[0])
1583 .addUse(Call->Arguments[1]);
1584 return true;
1585}
1586
// Lowers an HLSL-style Wave builtin to a load of the corresponding SPIR-V
// builtin variable (non-constant, no linkage attributes).
// NOTE(review): listing artifact — the first signature line (1587) is
// missing from this extraction.
1588 MachineIRBuilder &MIRBuilder,
1589 SPIRVGlobalRegistry *GR) {
1590 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1591 SPIRV::BuiltIn::BuiltIn Value =
1592 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1593
1594 // For now, we only support a single Wave intrinsic with a single return type.
1595 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1596 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1597
// NOTE(review): listing artifact — line 1598 (presumably
// "return buildBuiltinVariableLoad(") is missing from this extraction.
1599 MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1600 /* isConst= */ false, /* hasLinkageTy= */ false);
1601}
1602
// Lowers size_t get_*(uint dim) queries via genWorkgroupQuery; size-style
// queries default to 1 for out-of-range dimensions, id-style queries to 0.
// NOTE(review): listing artifact — the first signature line (1603) is
// missing from this extraction.
1604 MachineIRBuilder &MIRBuilder,
1605 SPIRVGlobalRegistry *GR) {
1606 // Lookup the builtin record.
1607 SPIRV::BuiltIn::BuiltIn Value =
1608 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1609 uint64_t IsDefault = (Value == SPIRV::BuiltIn::GlobalSize ||
1610 Value == SPIRV::BuiltIn::WorkgroupSize ||
1611 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
1612 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefault ? 1 : 0);
1613}
1614
// Lowers image size query builtins to OpImageQuerySize/OpImageQuerySizeLod,
// then reshapes the raw query result (extract or shuffle) to match the
// builtin's declared component count.
// NOTE(review): listing artifact — the first signature line (1615) is
// missing from this extraction.
1616 MachineIRBuilder &MIRBuilder,
1617 SPIRVGlobalRegistry *GR) {
1618 // Lookup the image size query component number in the TableGen records.
1619 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1620 uint32_t Component =
1621 SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
1622 // Query result may either be a vector or a scalar. If return type is not a
1623 // vector, expect only a single size component. Otherwise get the number of
1624 // expected components.
1625 SPIRVType *RetTy = Call->ReturnType;
1626 unsigned NumExpectedRetComponents = RetTy->getOpcode() == SPIRV::OpTypeVector
1627 ? RetTy->getOperand(2).getImm()
1628 : 1;
1629 // Get the actual number of query result/size components.
1630 SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1631 unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
1632 Register QueryResult = Call->ReturnRegister;
1633 SPIRVType *QueryResultType = Call->ReturnType;
// If the counts differ, query into a temporary i32 vector of the actual
// size and convert to the expected shape afterwards.
1634 if (NumExpectedRetComponents != NumActualRetComponents) {
1635 QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
1636 LLT::fixed_vector(NumActualRetComponents, 32));
1637 MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::iIDRegClass);
1638 SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
1639 QueryResultType = GR->getOrCreateSPIRVVectorType(
1640 IntTy, NumActualRetComponents, MIRBuilder);
1641 GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
1642 }
// Buffer images have no LOD: use OpImageQuerySize; others take a 0 Lod.
1643 bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
1644 unsigned Opcode =
1645 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
1646 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::iIDRegClass);
1647 auto MIB = MIRBuilder.buildInstr(Opcode)
1648 .addDef(QueryResult)
1649 .addUse(GR->getSPIRVTypeID(QueryResultType))
1650 .addUse(Call->Arguments[0]);
1651 if (!IsDimBuf)
1652 MIB.addUse(buildConstantIntReg(0, MIRBuilder, GR)); // Lod id.
1653 if (NumExpectedRetComponents == NumActualRetComponents)
1654 return true;
1655 if (NumExpectedRetComponents == 1) {
1656 // Only 1 component is expected, build OpCompositeExtract instruction.
// Component 3 conventionally selects the last (array-size) component.
1657 unsigned ExtractedComposite =
1658 Component == 3 ? NumActualRetComponents - 1 : Component;
1659 assert(ExtractedComposite < NumActualRetComponents &&
1660 "Invalid composite index!");
1661 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
1662 SPIRVType *NewType = nullptr;
1663 if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
1664 Register NewTypeReg = QueryResultType->getOperand(1).getReg();
1665 if (TypeReg != NewTypeReg &&
1666 (NewType = GR->getSPIRVTypeForVReg(NewTypeReg)) != nullptr)
1667 TypeReg = NewTypeReg;
1668 }
1669 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1670 .addDef(Call->ReturnRegister)
1671 .addUse(TypeReg)
1672 .addUse(QueryResult)
1673 .addImm(ExtractedComposite);
1674 if (NewType != nullptr)
1675 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
1676 MIRBuilder.getMF().getRegInfo());
1677 } else {
1678 // More than 1 component is expected, fill a new vector.
// Shuffle pads missing components with undef (0xffffffff index).
1679 auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
1680 .addDef(Call->ReturnRegister)
1681 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1682 .addUse(QueryResult)
1683 .addUse(QueryResult);
1684 for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
1685 MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
1686 }
1687 return true;
1688}
1689
// Lowers OpImageQuerySamples/OpImageQueryLevels builtins, asserting (in
// debug builds only) that the image dimensionality is legal for the query.
// NOTE(review): listing artifact — the first signature line (1690) is
// missing from this extraction.
1691 MachineIRBuilder &MIRBuilder,
1692 SPIRVGlobalRegistry *GR) {
1693 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
1694 "Image samples query result must be of int type!");
1695
1696 // Lookup the instruction opcode in the TableGen records.
1697 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1698 unsigned Opcode =
1699 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1700
1701 Register Image = Call->Arguments[0];
1702 MIRBuilder.getMRI()->setRegClass(Image, &SPIRV::iIDRegClass);
// Dim is operand 2 of the image's OpTypeImage; only used by the asserts.
1703 SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
1704 GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
1705 (void)ImageDimensionality;
1706
1707 switch (Opcode) {
1708 case SPIRV::OpImageQuerySamples:
1709 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
1710 "Image must be of 2D dimensionality");
1711 break;
1712 case SPIRV::OpImageQueryLevels:
1713 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
1714 ImageDimensionality == SPIRV::Dim::DIM_2D ||
1715 ImageDimensionality == SPIRV::Dim::DIM_3D ||
1716 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
1717 "Image must be of 1D/2D/3D/Cube dimensionality");
1718 break;
1719 }
1720
1721 MIRBuilder.buildInstr(Opcode)
1722 .addDef(Call->ReturnRegister)
1723 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1724 .addUse(Image);
1725 return true;
1726}
1727
// TODO: Move to TableGen.
// Map the CLK_ADDRESS_* bits of an OpenCL sampler bitmask to the
// corresponding SPIR-V sampler addressing mode enumerant. Fatal error for
// bit patterns outside the known CLK_ADDRESS_* set.
static SPIRV::SamplerAddressingMode::SamplerAddressingMode
getSamplerAddressingModeFromBitmask(unsigned Bitmask) {
  switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
  case SPIRV::CLK_ADDRESS_CLAMP:
    return SPIRV::SamplerAddressingMode::Clamp;
  case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
    return SPIRV::SamplerAddressingMode::ClampToEdge;
  case SPIRV::CLK_ADDRESS_REPEAT:
    return SPIRV::SamplerAddressingMode::Repeat;
  case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
    return SPIRV::SamplerAddressingMode::RepeatMirrored;
  case SPIRV::CLK_ADDRESS_NONE:
    return SPIRV::SamplerAddressingMode::None;
  default:
    report_fatal_error("Unknown CL address mode");
  }
}
1746
1747static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
1748 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
1749}
1750
// Map the CLK_FILTER_* bits of an OpenCL sampler bitmask to the SPIR-V
// sampler filter mode. Nearest is also the fallback when neither filter
// bit is set.
static SPIRV::SamplerFilterMode::SamplerFilterMode
getSamplerFilterModeFromBitmask(unsigned Bitmask) {
  if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
    return SPIRV::SamplerFilterMode::Linear;
  if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
    return SPIRV::SamplerFilterMode::Nearest;
  return SPIRV::SamplerFilterMode::Nearest;
}
1759
// Lower OpenCL read_image* builtins. Three forms are distinguished by the
// demangled call string:
// - image + sampler reads become OpSampledImage + OpImageSampleExplicitLod,
// - MSAA reads become OpImageRead with a Sample image operand,
// - plain reads become OpImageRead.
static bool generateReadImageInst(const StringRef DemangledCall,
                                  const SPIRV::IncomingCall *Call,
                                  MachineIRBuilder &MIRBuilder,
                                  SPIRVGlobalRegistry *GR) {
  Register Image = Call->Arguments[0];
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  MRI->setRegClass(Image, &SPIRV::iIDRegClass);
  MRI->setRegClass(Call->Arguments[1], &SPIRV::iIDRegClass);
  bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
  bool HasMsaa = DemangledCall.contains_insensitive("msaa");
  if (HasOclSampler || HasMsaa)
    MRI->setRegClass(Call->Arguments[2], &SPIRV::iIDRegClass);
  if (HasOclSampler) {
    Register Sampler = Call->Arguments[1];

    // If the sampler operand is a constant integer bitmask rather than an
    // OpTypeSampler value, materialize a constant sampler from the bits.
    if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
        getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
      uint64_t SamplerMask = getIConstVal(Sampler, MRI);
      Sampler = GR->buildConstantSampler(
          Register(), getSamplerAddressingModeFromBitmask(SamplerMask),
          getSamplerParamFromBitmask(SamplerMask),
          getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder,
          GR->getSPIRVTypeForVReg(Sampler));
    }
    SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
    SPIRVType *SampledImageType =
        GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
    Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);

    // Combine the image and the sampler into one sampled-image value.
    MIRBuilder.buildInstr(SPIRV::OpSampledImage)
        .addDef(SampledImage)
        .addUse(GR->getSPIRVTypeID(SampledImageType))
        .addUse(Image)
        .addUse(Sampler);

    // Sample at a constant LOD of 0.0f.
    Register Lod = GR->buildConstantFP(APFloat::getZero(APFloat::IEEEsingle()),
                                       MIRBuilder);
    // The sample result is built as a vector; when the builtin returns a
    // scalar, sample into a 4-component temporary and extract component 0.
    SPIRVType *TempType = Call->ReturnType;
    bool NeedsExtraction = false;
    if (TempType->getOpcode() != SPIRV::OpTypeVector) {
      TempType =
          GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder);
      NeedsExtraction = true;
    }
    LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(TempType));
    Register TempRegister = MRI->createGenericVirtualRegister(LLType);
    MRI->setRegClass(TempRegister, &SPIRV::iIDRegClass);
    GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());

    MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
        .addDef(NeedsExtraction ? TempRegister : Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(TempType))
        .addUse(SampledImage)
        .addUse(Call->Arguments[2]) // Coordinate.
        .addImm(SPIRV::ImageOperand::Lod)
        .addUse(Lod);

    if (NeedsExtraction)
      MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
          .addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
          .addUse(TempRegister)
          .addImm(0);
  } else if (HasMsaa) {
    // Multisampled read: the third argument selects the sample index.
    MIRBuilder.buildInstr(SPIRV::OpImageRead)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Image)
        .addUse(Call->Arguments[1]) // Coordinate.
        .addImm(SPIRV::ImageOperand::Sample)
        .addUse(Call->Arguments[2]);
  } else {
    // Plain sampler-less read.
    MIRBuilder.buildInstr(SPIRV::OpImageRead)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Image)
        .addUse(Call->Arguments[1]); // Coordinate.
  }
  return true;
}
1840
// Lower OpenCL write_image* builtins to OpImageWrite. OpImageWrite has no
// result, so Call->ReturnRegister is not used.
static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::iIDRegClass);
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::iIDRegClass);
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::iIDRegClass);
  MIRBuilder.buildInstr(SPIRV::OpImageWrite)
      .addUse(Call->Arguments[0])  // Image.
      .addUse(Call->Arguments[1])  // Coordinate.
      .addUse(Call->Arguments[2]); // Texel.
  return true;
}
1853
// Lower sampler/sampled-image builtins: __translate_sampler_initializer,
// __spirv_SampledImage and __spirv_ImageSampleExplicitLod. Returns false
// when the builtin name matches none of these.
static bool generateSampleImageInst(const StringRef DemangledCall,
                                    const SPIRV::IncomingCall *Call,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (Call->Builtin->Name.contains_insensitive(
          "__translate_sampler_initializer")) {
    // Build sampler literal.
    uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
    Register Sampler = GR->buildConstantSampler(
        Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
        getSamplerParamFromBitmask(Bitmask),
        getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder, Call->ReturnType);
    return Sampler.isValid();
  } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
    // Create OpSampledImage.
    Register Image = Call->Arguments[0];
    SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
    SPIRVType *SampledImageType =
        GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
    // Reuse the call's return register when it exists; otherwise make one.
    Register SampledImage =
        Call->ReturnRegister.isValid()
            ? Call->ReturnRegister
            : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    MIRBuilder.buildInstr(SPIRV::OpSampledImage)
        .addDef(SampledImage)
        .addUse(GR->getSPIRVTypeID(SampledImageType))
        .addUse(Image)
        .addUse(Call->Arguments[1]); // Sampler.
    return true;
  } else if (Call->Builtin->Name.contains_insensitive(
                 "__spirv_ImageSampleExplicitLod")) {
    // Sample an image using an explicit level of detail.
    // The result type name may be encoded in the mangling after "_R".
    std::string ReturnType = DemangledCall.str();
    if (DemangledCall.contains("_R")) {
      ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
      ReturnType = ReturnType.substr(0, ReturnType.find('('));
    }
    // Prefer the already-assigned return type; fall back to parsing the
    // type name extracted from the demangled call.
    SPIRVType *Type =
        Call->ReturnType
            ? Call->ReturnType
            : GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder);
    if (!Type) {
      std::string DiagMsg =
          "Unable to recognize SPIRV type name: " + ReturnType;
      report_fatal_error(DiagMsg.c_str());
    }
    MRI->setRegClass(Call->Arguments[0], &SPIRV::iIDRegClass);
    MRI->setRegClass(Call->Arguments[1], &SPIRV::iIDRegClass);
    MRI->setRegClass(Call->Arguments[3], &SPIRV::iIDRegClass);

    MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Type))
        .addUse(Call->Arguments[0]) // Image.
        .addUse(Call->Arguments[1]) // Coordinate.
        .addImm(SPIRV::ImageOperand::Lod)
        .addUse(Call->Arguments[3]);
    return true;
  }
  return false;
}
1916
// Lower a select builtin to G_SELECT:
// ReturnRegister = Arguments[0] ? Arguments[1] : Arguments[2].
static bool generateSelectInst(const SPIRV::IncomingCall *Call,
                               MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
                         Call->Arguments[1], Call->Arguments[2]);
  return true;
}
1923
// Lower a composite-construct builtin to OpCompositeConstruct, forwarding
// all call arguments as constituents via the wrapper helper.
static bool generateConstructInst(const SPIRV::IncomingCall *Call,
                                  MachineIRBuilder &MIRBuilder,
                                  SPIRVGlobalRegistry *GR) {
  return buildOpFromWrapper(MIRBuilder, SPIRV::OpCompositeConstruct, Call,
                            GR->getSPIRVTypeID(Call->ReturnType));
}
1930
// Lower SPV_KHR_cooperative_matrix builtins (load/store/length).
static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
  // Stores produce no result id; everything else defines the return type.
  bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR;
  unsigned ArgSz = Call->Arguments.size();
  // Optional trailing literal operand for load/store (memory operand).
  unsigned LiteralIdx = 0;
  if (Opcode == SPIRV::OpCooperativeMatrixLoadKHR && ArgSz > 3)
    LiteralIdx = 3;
  else if (Opcode == SPIRV::OpCooperativeMatrixStoreKHR && ArgSz > 4)
    LiteralIdx = 4;
  SmallVector<uint32_t, 1> ImmArgs;
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (LiteralIdx > 0)
    ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
    // Length takes the matrix *type* id, recovered from the first argument's
    // type definition.
    SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
    if (!CoopMatrType)
      report_fatal_error("Can't find a register's type definition");
    MIRBuilder.buildInstr(Opcode)
        .addDef(Call->ReturnRegister)
        .addUse(TypeReg)
        .addUse(CoopMatrType->getOperand(0).getReg());
    return true;
  }
  return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                            IsSet ? TypeReg : Register(0), ImmArgs);
}
1962
// Lower spec-constant builtins to OpSpecConstant{,True,False} or
// OpSpecConstantComposite, attaching the SpecId decoration for scalars.
// Returns false for unhandled opcodes.
static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
  const MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  switch (Opcode) {
  case SPIRV::OpSpecConstant: {
    // Build the SpecID decoration.
    unsigned SpecId =
        static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
    buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
                    {SpecId});
    // Determine the constant MI.
    Register ConstRegister = Call->Arguments[1];
    const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
    assert(Const &&
           (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
            Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
           "Argument should be either an int or floating-point constant");
    // Determine the opcode and build the OpSpec MI.
    const MachineOperand &ConstOperand = Const->getOperand(1);
    if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
      // Booleans use the dedicated true/false spec-constant opcodes and
      // carry no literal operand.
      assert(ConstOperand.isCImm() && "Int constant operand is expected");
      Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
                   ? SPIRV::OpSpecConstantTrue
                   : SPIRV::OpSpecConstantFalse;
    }
    auto MIB = MIRBuilder.buildInstr(Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType));

    if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
      // Non-boolean spec constants carry the default value as a literal;
      // floats are emitted via their bit pattern.
      if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
        addNumImm(ConstOperand.getCImm()->getValue(), MIB);
      else
        addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
    }
    return true;
  }
  case SPIRV::OpSpecConstantComposite: {
    // All call arguments become constituents of the composite.
    auto MIB = MIRBuilder.buildInstr(Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType));
    for (unsigned i = 0; i < Call->Arguments.size(); i++)
      MIB.addUse(Call->Arguments[i]);
    return true;
  }
  default:
    return false;
  }
}
2018
// Lower an ndrange_{1,2,3}D builtin: assemble an ndrange value with
// OpBuildNDRange and store it through the destination pointer
// (Arguments[0]).
static bool buildNDRange(const SPIRV::IncomingCall *Call,
                         MachineIRBuilder &MIRBuilder,
                         SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  MRI->setRegClass(Call->Arguments[0], &SPIRV::iIDRegClass);
  SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
  assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
         PtrType->getOperand(2).isReg());
  // The destination's pointee type is the ndrange struct type.
  Register TypeReg = PtrType->getOperand(2).getReg();
  SPIRVType *StructType = GR->getSPIRVTypeForVReg(TypeReg);
  MachineFunction &MF = MIRBuilder.getMF();
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
  // Skip the first arg, it's the destination pointer. OpBuildNDRange takes
  // three other arguments, so pass zero constant on absence.
  unsigned NumArgs = Call->Arguments.size();
  assert(NumArgs >= 2);
  Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
  MRI->setRegClass(GlobalWorkSize, &SPIRV::iIDRegClass);
  Register LocalWorkSize =
      NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
  if (LocalWorkSize.isValid())
    MRI->setRegClass(LocalWorkSize, &SPIRV::iIDRegClass);
  Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
  if (GlobalWorkOffset.isValid())
    MRI->setRegClass(GlobalWorkOffset, &SPIRV::iIDRegClass);
  if (NumArgs < 4) {
    Register Const;
    SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
    if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
      // The global work size is passed indirectly via a spv_gep into an
      // array alloca; load the whole array value instead.
      MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
      assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
             DefInstr->getOperand(3).isReg());
      Register GWSPtr = DefInstr->getOperand(3).getReg();
      if (!MRI->getRegClassOrNull(GWSPtr))
        MRI->setRegClass(GWSPtr, &SPIRV::iIDRegClass);
      // TODO: Maybe simplify generation of the type of the fields.
      unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
      unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
      Type *BaseTy = IntegerType::get(MF.getFunction().getContext(), BitWidth);
      Type *FieldTy = ArrayType::get(BaseTy, Size);
      SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(FieldTy, MIRBuilder);
      GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
      GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
      MIRBuilder.buildInstr(SPIRV::OpLoad)
          .addDef(GlobalWorkSize)
          .addUse(GR->getSPIRVTypeID(SpvFieldTy))
          .addUse(GWSPtr);
      const SPIRVSubtarget &ST =
          cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
      // Zero-filled array constant for the absent operands.
      Const = GR->getOrCreateConstIntArray(0, Size, *MIRBuilder.getInsertPt(),
                                           SpvFieldTy, *ST.getInstrInfo());
    } else {
      Const = GR->buildConstantInt(0, MIRBuilder, SpvTy);
    }
    if (!LocalWorkSize.isValid())
      LocalWorkSize = Const;
    if (!GlobalWorkOffset.isValid())
      GlobalWorkOffset = Const;
  }
  assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
  MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
      .addDef(TmpReg)
      .addUse(TypeReg)
      .addUse(GlobalWorkSize)
      .addUse(LocalWorkSize)
      .addUse(GlobalWorkOffset);
  // Store the assembled ndrange value through the destination pointer.
  return MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(TmpReg);
}
2090
// TODO: maybe move to the global register.
// Return the SPIR-V type for a generic pointer to a function-local pointer
// to the opaque device-event struct, reusing an existing struct type under
// either of its known names before creating a fresh one.
static SPIRVType *
getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
  Type *OpaqueType = StructType::getTypeByName(Context, "spirv.DeviceEvent");
  if (!OpaqueType)
    OpaqueType = StructType::getTypeByName(Context, "opencl.clk_event_t");
  if (!OpaqueType)
    OpaqueType = StructType::create(Context, "spirv.DeviceEvent");
  unsigned SC0 = storageClassToAddressSpace(SPIRV::StorageClass::Function);
  unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
  Type *PtrType = PointerType::get(PointerType::get(OpaqueType, SC0), SC1);
  return GR->getOrCreateSPIRVType(PtrType, MIRBuilder);
}
2106
// Lower enqueue_kernel builtins to OpEnqueueKernel, handling the overloads
// with/without event arguments and with trailing variadic local-size
// arguments.
static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  const DataLayout &DL = MIRBuilder.getDataLayout();
  bool IsSpirvOp = Call->isSpirvOp();
  bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
  const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);

  // Make vararg instructions before OpEnqueueKernel.
  // Local sizes arguments: Sizes of block invoke arguments. Clang generates
  // local size operands as an array, so we need to unpack them.
  SmallVector<Register, 16> LocalSizes;
  if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
    const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
    Register GepReg = Call->Arguments[LocalSizeArrayIdx];
    MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
    assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
           GepMI->getOperand(3).isReg());
    Register ArrayReg = GepMI->getOperand(3).getReg();
    MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
    const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
    assert(LocalSizeTy && "Local size type is expected");
    const uint64_t LocalSizeNum =
        cast<ArrayType>(LocalSizeTy)->getNumElements();
    unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
    const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
    const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
        Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
    // Emit one GEP per array element; each result becomes a separate
    // local-size operand of OpEnqueueKernel.
    for (unsigned I = 0; I < LocalSizeNum; ++I) {
      Register Reg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
      MRI->setType(Reg, LLType);
      GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
      auto GEPInst = MIRBuilder.buildIntrinsic(
          Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
      GEPInst
          .addImm(GepMI->getOperand(2).getImm())          // In bound.
          .addUse(ArrayMI->getOperand(0).getReg())        // Alloca.
          .addUse(buildConstantIntReg(0, MIRBuilder, GR)) // Indices.
          .addUse(buildConstantIntReg(I, MIRBuilder, GR));
      LocalSizes.push_back(Reg);
    }
  }

  // SPIRV OpEnqueueKernel instruction has 10+ arguments.
  auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
                 .addDef(Call->ReturnRegister)
                 .addUse(GR->getSPIRVTypeID(Int32Ty));

  // Copy all arguments before block invoke function pointer.
  const unsigned BlockFIdx = HasEvents ? 6 : 3;
  for (unsigned i = 0; i < BlockFIdx; i++)
    MIB.addUse(Call->Arguments[i]);

  // If there are no event arguments in the original call, add dummy ones.
  if (!HasEvents) {
    MIB.addUse(buildConstantIntReg(0, MIRBuilder, GR)); // Dummy num events.
    Register NullPtr = GR->getOrCreateConstNullPtr(
        MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
    MIB.addUse(NullPtr); // Dummy wait events.
    MIB.addUse(NullPtr); // Dummy ret event.
  }

  MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
  assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  // Invoke: Pointer to invoke function.
  MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());

  Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
  // Param: Pointer to block literal.
  MIB.addUse(BlockLiteralReg);

  Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
  // TODO: these numbers should be obtained from block literal structure.
  // Param Size: Size of block literal structure.
  MIB.addUse(buildConstantIntReg(DL.getTypeStoreSize(PType), MIRBuilder, GR));
  // Param Alignment: Alignment of block literal structure.
  MIB.addUse(
      buildConstantIntReg(DL.getPrefTypeAlign(PType).value(), MIRBuilder, GR));

  // Trailing variadic local sizes, if any were unpacked above.
  for (unsigned i = 0; i < LocalSizes.size(); i++)
    MIB.addUse(LocalSizes[i]);
  return true;
}
2191
// Lower device-side enqueue builtins (events, queues, ndrange, kernel
// enqueue) to their SPIR-V instructions. Returns false for unhandled
// opcodes.
static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  switch (Opcode) {
  case SPIRV::OpRetainEvent:
  case SPIRV::OpReleaseEvent:
    // One event operand, no result.
    MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::iIDRegClass);
    return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
  case SPIRV::OpCreateUserEvent:
  case SPIRV::OpGetDefaultQueue:
    // No operands, just a typed result.
    return MIRBuilder.buildInstr(Opcode)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  case SPIRV::OpIsValidEvent:
    MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::iIDRegClass);
    return MIRBuilder.buildInstr(Opcode)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Call->Arguments[0]);
  case SPIRV::OpSetUserEventStatus:
    MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::iIDRegClass);
    MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::iIDRegClass);
    return MIRBuilder.buildInstr(Opcode)
        .addUse(Call->Arguments[0])
        .addUse(Call->Arguments[1]);
  case SPIRV::OpCaptureEventProfilingInfo:
    MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::iIDRegClass);
    MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::iIDRegClass);
    MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::iIDRegClass);
    return MIRBuilder.buildInstr(Opcode)
        .addUse(Call->Arguments[0])
        .addUse(Call->Arguments[1])
        .addUse(Call->Arguments[2]);
  case SPIRV::OpBuildNDRange:
    // Delegate: needs nontrivial argument marshalling.
    return buildNDRange(Call, MIRBuilder, GR);
  case SPIRV::OpEnqueueKernel:
    return buildEnqueueKernel(Call, MIRBuilder, GR);
  default:
    return false;
  }
}
2238
// Lower async copy / wait builtins to OpGroupAsyncCopy / OpGroupWaitEvents
// with Workgroup scope. Returns false for unhandled opcodes.
static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  // Only OpGroupAsyncCopy produces a typed result.
  bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
  Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              IsSet ? TypeReg : Register(0));

  auto Scope = buildConstantIntReg(SPIRV::Scope::Workgroup, MIRBuilder, GR);

  switch (Opcode) {
  case SPIRV::OpGroupAsyncCopy: {
    // When the declared return type is not an event, produce the result as
    // "spirv.Event" and re-assign the return register's type afterwards.
    SPIRVType *NewType =
        Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
            ? nullptr
            : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder);
    Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
    unsigned NumArgs = Call->Arguments.size();
    Register EventReg = Call->Arguments[NumArgs - 1];
    // Use Arguments[3] as the stride when 5+ args are present; otherwise
    // default the stride operand to a constant 1.
    bool Res = MIRBuilder.buildInstr(Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(TypeReg)
                   .addUse(Scope)
                   .addUse(Call->Arguments[0])
                   .addUse(Call->Arguments[1])
                   .addUse(Call->Arguments[2])
                   .addUse(Call->Arguments.size() > 4
                               ? Call->Arguments[3]
                               : buildConstantIntReg(1, MIRBuilder, GR))
                   .addUse(EventReg);
    if (NewType != nullptr)
      insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
                        MIRBuilder.getMF().getRegInfo());
    return Res;
  }
  case SPIRV::OpGroupWaitEvents:
    return MIRBuilder.buildInstr(Opcode)
        .addUse(Scope)
        .addUse(Call->Arguments[0])
        .addUse(Call->Arguments[1]);
  default:
    return false;
  }
}
2289
2290static bool generateConvertInst(const StringRef DemangledCall,
2291 const SPIRV::IncomingCall *Call,
2292 MachineIRBuilder &MIRBuilder,
2293 SPIRVGlobalRegistry *GR) {
2294 // Lookup the conversion builtin in the TableGen records.
2295 const SPIRV::ConvertBuiltin *Builtin =
2296 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2297
2298 if (!Builtin && Call->isSpirvOp()) {
2299 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2300 unsigned Opcode =
2301 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2302 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2303 GR->getSPIRVTypeID(Call->ReturnType));
2304 }
2305
2306 if (Builtin->IsSaturated)
2307 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2308 SPIRV::Decoration::SaturatedConversion, {});
2309 if (Builtin->IsRounded)
2310 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2311 SPIRV::Decoration::FPRoundingMode,
2312 {(unsigned)Builtin->RoundingMode});
2313
2314 std::string NeedExtMsg; // no errors if empty
2315 bool IsRightComponentsNumber = true; // check if input/output accepts vectors
2316 unsigned Opcode = SPIRV::OpNop;
2317 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2318 // Int -> ...
2319 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2320 // Int -> Int
2321 if (Builtin->IsSaturated)
2322 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2323 : SPIRV::OpSatConvertSToU;
2324 else
2325 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2326 : SPIRV::OpSConvert;
2327 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2328 SPIRV::OpTypeFloat)) {
2329 // Int -> Float
2330 if (Builtin->IsBfloat16) {
2331 const auto *ST = static_cast<const SPIRVSubtarget *>(
2332 &MIRBuilder.getMF().getSubtarget());
2333 if (!ST->canUseExtension(
2334 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2335 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2336 IsRightComponentsNumber =
2337 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2338 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2339 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2340 } else {
2341 bool IsSourceSigned =
2342 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
2343 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2344 }
2345 }
2346 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2347 SPIRV::OpTypeFloat)) {
2348 // Float -> ...
2349 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2350 // Float -> Int
2351 if (Builtin->IsBfloat16) {
2352 const auto *ST = static_cast<const SPIRVSubtarget *>(
2353 &MIRBuilder.getMF().getSubtarget());
2354 if (!ST->canUseExtension(
2355 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2356 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2357 IsRightComponentsNumber =
2358 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2359 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2360 Opcode = SPIRV::OpConvertFToBF16INTEL;
2361 } else {
2362 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2363 : SPIRV::OpConvertFToU;
2364 }
2365 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2366 SPIRV::OpTypeFloat)) {
2367 // Float -> Float
2368 Opcode = SPIRV::OpFConvert;
2369 }
2370 }
2371
2372 if (!NeedExtMsg.empty()) {
2373 std::string DiagMsg = std::string(Builtin->Name) +
2374 ": the builtin requires the following SPIR-V "
2375 "extension: " +
2376 NeedExtMsg;
2377 report_fatal_error(DiagMsg.c_str(), false);
2378 }
2379 if (!IsRightComponentsNumber) {
2380 std::string DiagMsg =
2381 std::string(Builtin->Name) +
2382 ": result and argument must have the same number of components";
2383 report_fatal_error(DiagMsg.c_str(), false);
2384 }
2385 assert(Opcode != SPIRV::OpNop &&
2386 "Conversion between the types not implemented!");
2387
2388 MIRBuilder.buildInstr(Opcode)
2389 .addDef(Call->ReturnRegister)
2390 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2391 .addUse(Call->Arguments[0]);
2392 return true;
2393}
2394
// Lower vload*/vstore* builtins to the matching OpenCL extended
// instruction (OpExtInst), appending element-count and rounding-mode
// literals when the builtin record carries them.
static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
                                        MachineIRBuilder &MIRBuilder,
                                        SPIRVGlobalRegistry *GR) {
  // Lookup the vector load/store builtin in the TableGen records.
  const SPIRV::VectorLoadStoreBuiltin *Builtin =
      SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
                                          Call->Builtin->Set);
  // Build extended instruction.
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Builtin->Number);
  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);
  // Multi-element loads carry the element count as a trailing literal.
  if (Builtin->Name.contains("load") && Builtin->ElementCount > 1)
    MIB.addImm(Builtin->ElementCount);

  // Rounding mode should be passed as a last argument in the MI for builtins
  // like "vstorea_halfn_r".
  if (Builtin->IsRounded)
    MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
  return true;
}
2420
// Lower load/store builtin wrappers to OpLoad/OpStore, forwarding the
// optional memory-operand and alignment literals that may follow the
// pointer (and, for stores, the value) arguments.
static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
                                  MachineIRBuilder &MIRBuilder,
                                  SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
  bool IsLoad = Opcode == SPIRV::OpLoad;
  // Build the instruction. Only loads define a typed result.
  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (IsLoad) {
    MIB.addDef(Call->ReturnRegister);
    MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
  }
  // Add a pointer to the value to load/store.
  MIB.addUse(Call->Arguments[0]);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  MRI->setRegClass(Call->Arguments[0], &SPIRV::iIDRegClass);
  // Add a value to store.
  if (!IsLoad) {
    MIB.addUse(Call->Arguments[1]);
    MRI->setRegClass(Call->Arguments[1], &SPIRV::iIDRegClass);
  }
  // Add optional memory attributes and an alignment. Their positions shift
  // by one for stores because of the extra value operand.
  unsigned NumArgs = Call->Arguments.size();
  if ((IsLoad && NumArgs >= 2) || NumArgs >= 3) {
    MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
    MRI->setRegClass(Call->Arguments[IsLoad ? 1 : 2], &SPIRV::iIDRegClass);
  }
  if ((IsLoad && NumArgs >= 3) || NumArgs >= 4) {
    MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
    MRI->setRegClass(Call->Arguments[IsLoad ? 2 : 3], &SPIRV::iIDRegClass);
  }
  return true;
}
2456
2457namespace SPIRV {
2458// Try to find a builtin function attributes by a demangled function name and
2459// return a tuple <builtin group, op code, ext instruction number>, or a special
2460// tuple value <-1, 0, 0> if the builtin function is not found.
2461// Not all builtin functions are supported, only those with a ready-to-use op
2462// code or instruction number defined in TableGen.
// TODO: consider a major rework of mapping demangled calls into builtin
// functions to unify search and decrease the number of individual cases.
// Map a demangled builtin call to a <group, opcode, ext-instruction number>
// tuple by looking it up in the TableGen records; <-1, 0, 0> when the call
// is not a known builtin or the group carries no usable opcode/number.
std::tuple<int, unsigned, unsigned>
mapBuiltinToOpcode(const StringRef DemangledCall,
                   SPIRV::InstructionSet::InstructionSet Set) {
  // Dummy return register/arguments: only the record lookup is needed here.
  Register Reg;
  SmallVector<Register> Args;
  std::unique_ptr<const IncomingCall> Call =
      lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
  if (!Call)
    return std::make_tuple(-1, 0, 0);

  switch (Call->Builtin->Group) {
  // Groups whose records carry a native opcode.
  case SPIRV::Relational:
  case SPIRV::Atomic:
  case SPIRV::Barrier:
  case SPIRV::CastToPtr:
  case SPIRV::ImageMiscQuery:
  case SPIRV::SpecConstant:
  case SPIRV::Enqueue:
  case SPIRV::AsyncCopy:
  case SPIRV::LoadStore:
  case SPIRV::CoopMatr:
    if (const auto *R =
            SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  // Groups whose records carry an extended-instruction number instead.
  case SPIRV::Extended:
    if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
                                                     Call->Builtin->Set))
      return std::make_tuple(Call->Builtin->Group, 0, R->Number);
    break;
  case SPIRV::VectorLoadStore:
    if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
                                                            Call->Builtin->Set))
      return std::make_tuple(SPIRV::Extended, 0, R->Number);
    break;
  case SPIRV::Group:
    if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::AtomicFloating:
    if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::IntelSubgroups:
    if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::GroupUniform:
    if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  // Groups with a single fixed opcode.
  case SPIRV::WriteImage:
    return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
  case SPIRV::Select:
    return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
  case SPIRV::Construct:
    return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
                           0);
  case SPIRV::KernelClock:
    return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
  default:
    return std::make_tuple(-1, 0, 0);
  }
  return std::make_tuple(-1, 0, 0);
}
2530
/// Lowers a demangled OpenCL/SPIR-V builtin call to SPIR-V MIR.
///
/// Looks up \p DemangledCall in the TableGen-generated builtin tables and
/// dispatches to the per-group generate* implementation.
///
/// \returns std::nullopt when no builtin record matches (the call is not a
/// known builtin); otherwise the bool result of the matched generate*
/// function, indicating whether lowering succeeded.
std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
                                 SPIRV::InstructionSet::InstructionSet Set,
                                 MachineIRBuilder &MIRBuilder,
                                 const Register OrigRet, const Type *OrigRetTy,
                                 const SmallVectorImpl<Register> &Args,
                                 SPIRVGlobalRegistry *GR) {
  LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");

  // SPIR-V type and return register.
  Register ReturnRegister = OrigRet;
  SPIRVType *ReturnType = nullptr;
  if (OrigRetTy && !OrigRetTy->isVoidTy()) {
    // Non-void call: reuse the caller's result register and make sure it has
    // a register class before the builtin implementation defines it.
    ReturnType = GR->assignTypeToVReg(OrigRetTy, OrigRet, MIRBuilder);
    if (!MIRBuilder.getMRI()->getRegClassOrNull(ReturnRegister))
      MIRBuilder.getMRI()->setRegClass(ReturnRegister, &SPIRV::iIDRegClass);
  } else if (OrigRetTy && OrigRetTy->isVoidTy()) {
    // Void call: create a fresh virtual register to carry the instruction's
    // type-id operand, since the caller supplies no usable result register.
    // NOTE(review): this uses IDRegClass while the branch above uses
    // SPIRV::iIDRegClass — looks inconsistent; confirm both resolve to the
    // intended register class.
    ReturnRegister = MIRBuilder.getMRI()->createVirtualRegister(&IDRegClass);
    MIRBuilder.getMRI()->setType(ReturnRegister, LLT::scalar(32));
    ReturnType = GR->assignTypeToVReg(OrigRetTy, ReturnRegister, MIRBuilder);
  }

  // Lookup the builtin in the TableGen records.
  std::unique_ptr<const IncomingCall> Call =
      lookupBuiltin(DemangledCall, Set, ReturnRegister, ReturnType, Args);

  if (!Call) {
    LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
    return std::nullopt;
  }

  // TODO: check if the provided args meet the builtin requirments.
  assert(Args.size() >= Call->Builtin->MinNumArgs &&
         "Too few arguments to generate the builtin");
  if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
    LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");

  // Match the builtin with implementation based on the grouping.
  switch (Call->Builtin->Group) {
  case SPIRV::Extended:
    return generateExtInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Relational:
    return generateRelationalInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Group:
    return generateGroupInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Variable:
    return generateBuiltinVar(Call.get(), MIRBuilder, GR);
  case SPIRV::Atomic:
    return generateAtomicInst(Call.get(), MIRBuilder, GR);
  case SPIRV::AtomicFloating:
    return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Barrier:
    return generateBarrierInst(Call.get(), MIRBuilder, GR);
  case SPIRV::CastToPtr:
    return generateCastToPtrInst(Call.get(), MIRBuilder);
  case SPIRV::Dot:
    return generateDotOrFMulInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Wave:
    return generateWaveInst(Call.get(), MIRBuilder, GR);
  case SPIRV::GetQuery:
    return generateGetQueryInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ImageSizeQuery:
    return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ImageMiscQuery:
    return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ReadImage:
    return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::WriteImage:
    return generateWriteImageInst(Call.get(), MIRBuilder, GR);
  case SPIRV::SampleImage:
    return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::Select:
    return generateSelectInst(Call.get(), MIRBuilder);
  case SPIRV::Construct:
    return generateConstructInst(Call.get(), MIRBuilder, GR);
  case SPIRV::SpecConstant:
    return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Enqueue:
    return generateEnqueueInst(Call.get(), MIRBuilder, GR);
  case SPIRV::AsyncCopy:
    return generateAsyncCopy(Call.get(), MIRBuilder, GR);
  case SPIRV::Convert:
    return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::VectorLoadStore:
    return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
  case SPIRV::LoadStore:
    return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
  case SPIRV::IntelSubgroups:
    return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
  case SPIRV::GroupUniform:
    return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
  case SPIRV::KernelClock:
    return generateKernelClockInst(Call.get(), MIRBuilder, GR);
  case SPIRV::CoopMatr:
    return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
  }
  // Unhandled group: report that the builtin was matched but not lowered.
  return false;
}
2628
2630 unsigned ArgIdx, LLVMContext &Ctx) {
2631 SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
2632 StringRef BuiltinArgs =
2633 DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
2634 BuiltinArgs.split(BuiltinArgsTypeStrs, ',', -1, false);
2635 if (ArgIdx >= BuiltinArgsTypeStrs.size())
2636 return nullptr;
2637 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
2638
2639 // Parse strings representing OpenCL builtin types.
2640 if (hasBuiltinTypePrefix(TypeStr)) {
2641 // OpenCL builtin types in demangled call strings have the following format:
2642 // e.g. ocl_image2d_ro
2643 [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
2644 assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");
2645
2646 // Check if this is pointer to a builtin type and not just pointer
2647 // representing a builtin type. In case it is a pointer to builtin type,
2648 // this will require additional handling in the method calling
2649 // parseBuiltinCallArgumentBaseType(...) as this function only retrieves the
2650 // base types.
2651 if (TypeStr.ends_with("*"))
2652 TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));
2653
2654 return parseBuiltinTypeNameToTargetExtType("opencl." + TypeStr.str() + "_t",
2655 Ctx);
2656 }
2657
2658 // Parse type name in either "typeN" or "type vector[N]" format, where
2659 // N is the number of elements of the vector.
2660 Type *BaseType;
2661 unsigned VecElts = 0;
2662
2663 BaseType = parseBasicTypeName(TypeStr, Ctx);
2664 if (!BaseType)
2665 // Unable to recognize SPIRV type name.
2666 return nullptr;
2667
2668 // Handle "typeN*" or "type vector[N]*".
2669 TypeStr.consume_back("*");
2670
2671 if (TypeStr.consume_front(" vector["))
2672 TypeStr = TypeStr.substr(0, TypeStr.find(']'));
2673
2674 TypeStr.getAsInteger(10, VecElts);
2675 if (VecElts > 0)
2677 BaseType->isVoidTy() ? Type::getInt8Ty(Ctx) : BaseType, VecElts, false);
2678
2679 return BaseType;
2680}
2681
2685};
2686
2687#define GET_BuiltinTypes_DECL
2688#define GET_BuiltinTypes_IMPL
2689
2693};
2694
2695#define GET_OpenCLTypes_DECL
2696#define GET_OpenCLTypes_IMPL
2697
2698#include "SPIRVGenTables.inc"
2699} // namespace SPIRV
2700
2701//===----------------------------------------------------------------------===//
2702// Misc functions for parsing builtin types.
2703//===----------------------------------------------------------------------===//
2704
2706 if (Name.starts_with("void"))
2707 return Type::getVoidTy(Context);
2708 else if (Name.starts_with("int") || Name.starts_with("uint"))
2709 return Type::getInt32Ty(Context);
2710 else if (Name.starts_with("float"))
2711 return Type::getFloatTy(Context);
2712 else if (Name.starts_with("half"))
2713 return Type::getHalfTy(Context);
2714 report_fatal_error("Unable to recognize type!");
2715}
2716
2717//===----------------------------------------------------------------------===//
2718// Implementation functions for builtin types.
2719//===----------------------------------------------------------------------===//
2720
2722 const SPIRV::BuiltinType *TypeRecord,
2723 MachineIRBuilder &MIRBuilder,
2724 SPIRVGlobalRegistry *GR) {
2725 unsigned Opcode = TypeRecord->Opcode;
2726 // Create or get an existing type from GlobalRegistry.
2727 return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
2728}
2729
2731 SPIRVGlobalRegistry *GR) {
2732 // Create or get an existing type from GlobalRegistry.
2733 return GR->getOrCreateOpTypeSampler(MIRBuilder);
2734}
2735
2736static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
2737 MachineIRBuilder &MIRBuilder,
2738 SPIRVGlobalRegistry *GR) {
2739 assert(ExtensionType->getNumIntParameters() == 1 &&
2740 "Invalid number of parameters for SPIR-V pipe builtin!");
2741 // Create or get an existing type from GlobalRegistry.
2742 return GR->getOrCreateOpTypePipe(MIRBuilder,
2743 SPIRV::AccessQualifier::AccessQualifier(
2744 ExtensionType->getIntParameter(0)));
2745}
2746
2747static SPIRVType *getCoopMatrType(const TargetExtType *ExtensionType,
2748 MachineIRBuilder &MIRBuilder,
2749 SPIRVGlobalRegistry *GR) {
2750 assert(ExtensionType->getNumIntParameters() == 4 &&
2751 "Invalid number of parameters for SPIR-V coop matrices builtin!");
2752 assert(ExtensionType->getNumTypeParameters() == 1 &&
2753 "SPIR-V coop matrices builtin type must have a type parameter!");
2754 const SPIRVType *ElemType =
2755 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder);
2756 // Create or get an existing type from GlobalRegistry.
2757 return GR->getOrCreateOpTypeCoopMatr(
2758 MIRBuilder, ExtensionType, ElemType, ExtensionType->getIntParameter(0),
2759 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
2760 ExtensionType->getIntParameter(3));
2761}
2762
2763static SPIRVType *
2764getImageType(const TargetExtType *ExtensionType,
2765 const SPIRV::AccessQualifier::AccessQualifier Qualifier,
2766 MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
2767 assert(ExtensionType->getNumTypeParameters() == 1 &&
2768 "SPIR-V image builtin type must have sampled type parameter!");
2769 const SPIRVType *SampledType =
2770 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder);
2771 assert(ExtensionType->getNumIntParameters() == 7 &&
2772 "Invalid number of parameters for SPIR-V image builtin!");
2773 // Create or get an existing type from GlobalRegistry.
2774 return GR->getOrCreateOpTypeImage(
2775 MIRBuilder, SampledType,
2776 SPIRV::Dim::Dim(ExtensionType->getIntParameter(0)),
2777 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
2778 ExtensionType->getIntParameter(3), ExtensionType->getIntParameter(4),
2779 SPIRV::ImageFormat::ImageFormat(ExtensionType->getIntParameter(5)),
2780 Qualifier == SPIRV::AccessQualifier::WriteOnly
2781 ? SPIRV::AccessQualifier::WriteOnly
2782 : SPIRV::AccessQualifier::AccessQualifier(
2783 ExtensionType->getIntParameter(6)));
2784}
2785
2787 MachineIRBuilder &MIRBuilder,
2788 SPIRVGlobalRegistry *GR) {
2789 SPIRVType *OpaqueImageType = getImageType(
2790 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder, GR);
2791 // Create or get an existing type from GlobalRegistry.
2792 return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
2793}
2794
2795namespace SPIRV {
2797 LLVMContext &Context) {
2798 StringRef NameWithParameters = TypeName;
2799
2800 // Pointers-to-opaque-structs representing OpenCL types are first translated
2801 // to equivalent SPIR-V types. OpenCL builtin type names should have the
2802 // following format: e.g. %opencl.event_t
2803 if (NameWithParameters.starts_with("opencl.")) {
2804 const SPIRV::OpenCLType *OCLTypeRecord =
2805 SPIRV::lookupOpenCLType(NameWithParameters);
2806 if (!OCLTypeRecord)
2807 report_fatal_error("Missing TableGen record for OpenCL type: " +
2808 NameWithParameters);
2809 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
2810 // Continue with the SPIR-V builtin type...
2811 }
2812
2813 // Names of the opaque structs representing a SPIR-V builtins without
2814 // parameters should have the following format: e.g. %spirv.Event
2815 assert(NameWithParameters.starts_with("spirv.") &&
2816 "Unknown builtin opaque type!");
2817
2818 // Parameterized SPIR-V builtins names follow this format:
2819 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
2820 if (!NameWithParameters.contains('_'))
2821 return TargetExtType::get(Context, NameWithParameters);
2822
2823 SmallVector<StringRef> Parameters;
2824 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
2825 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
2826
2827 SmallVector<Type *, 1> TypeParameters;
2828 bool HasTypeParameter = !isDigit(Parameters[0][0]);
2829 if (HasTypeParameter)
2830 TypeParameters.push_back(parseTypeString(Parameters[0], Context));
2831 SmallVector<unsigned> IntParameters;
2832 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
2833 unsigned IntParameter = 0;
2834 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
2835 (void)ValidLiteral;
2836 assert(ValidLiteral &&
2837 "Invalid format of SPIR-V builtin parameter literal!");
2838 IntParameters.push_back(IntParameter);
2839 }
2840 return TargetExtType::get(Context,
2841 NameWithParameters.substr(0, BaseNameLength),
2842 TypeParameters, IntParameters);
2843}
2844
2846 SPIRV::AccessQualifier::AccessQualifier AccessQual,
2847 MachineIRBuilder &MIRBuilder,
2848 SPIRVGlobalRegistry *GR) {
2849 // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
2850 // target(...) target extension types or pointers-to-opaque-structs. The
2851 // approach relying on structs is deprecated and works only in the non-opaque
2852 // pointer mode (-opaque-pointers=0).
2853 // In order to maintain compatibility with LLVM IR generated by older versions
2854 // of Clang and LLVM/SPIR-V Translator, the pointers-to-opaque-structs are
2855 // "translated" to target extension types. This translation is temporary and
2856 // will be removed in the future release of LLVM.
2857 const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
2858 if (!BuiltinType)
2860 OpaqueType->getStructName().str(), MIRBuilder.getContext());
2861
2862 unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
2863
2864 const StringRef Name = BuiltinType->getName();
2865 LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
2866
2867 // Lookup the demangled builtin type in the TableGen records.
2868 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
2869 if (!TypeRecord)
2870 report_fatal_error("Missing TableGen record for builtin type: " + Name);
2871
2872 // "Lower" the BuiltinType into TargetType. The following get<...>Type methods
2873 // use the implementation details from TableGen records or TargetExtType
2874 // parameters to either create a new OpType<...> machine instruction or get an
2875 // existing equivalent SPIRVType from GlobalRegistry.
2876 SPIRVType *TargetType;
2877 switch (TypeRecord->Opcode) {
2878 case SPIRV::OpTypeImage:
2879 TargetType = getImageType(BuiltinType, AccessQual, MIRBuilder, GR);
2880 break;
2881 case SPIRV::OpTypePipe:
2882 TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
2883 break;
2884 case SPIRV::OpTypeDeviceEvent:
2885 TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
2886 break;
2887 case SPIRV::OpTypeSampler:
2888 TargetType = getSamplerType(MIRBuilder, GR);
2889 break;
2890 case SPIRV::OpTypeSampledImage:
2891 TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
2892 break;
2893 case SPIRV::OpTypeCooperativeMatrixKHR:
2894 TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
2895 break;
2896 default:
2897 TargetType =
2898 getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
2899 break;
2900 }
2901
2902 // Emit OpName instruction if a new OpType<...> instruction was added
2903 // (equivalent type was not found in GlobalRegistry).
2904 if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
2905 buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
2906
2907 return TargetType;
2908}
2909} // namespace SPIRV
2910} // namespace llvm
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
AMDGPU Lower Kernel Arguments
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
return RetTy
#define LLVM_DEBUG(X)
Definition: Debug.h:101
std::string Name
uint64_t Size
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned Reg
static bool isDigit(const char C)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file contains some functions that are useful when dealing with strings.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
APInt bitcastToAPInt() const
Definition: APFloat.h:1266
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Definition: APFloat.h:994
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:212
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1498
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Definition: Type.cpp:635
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:782
@ ICMP_EQ
equal
Definition: InstrTypes.h:778
@ ICMP_NE
not equal
Definition: InstrTypes.h:779
const APFloat & getValueAPF() const
Definition: Constants.h:312
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:146
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:63
Tagged union holding either a T or a Error.
Definition: Error.h:481
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:539
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:380
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:266
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelType.h:64
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
Definition: LowLevelType.h:57
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelType.h:100
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
LLVMContext & getContext() const
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
const DataLayout & getDataLayout() const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:569
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:579
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
const MDNode * getMetadata() const
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
const TargetRegisterClass * getRegClassOrNull(Register Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet.
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isValid() const
Definition: Register.h:116
SPIRVType * getOrCreateOpTypePipe(MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder)
Register getOrCreateConsIntVector(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR=true)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
Register buildConstantSampler(Register Res, unsigned AddrMode, unsigned Param, unsigned FilerMode, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorComponentCount(Register VReg) const
SPIRVType * getOrCreateOpTypeImage(MachineIRBuilder &MIRBuilder, SPIRVType *SampledType, SPIRV::Dim::Dim Dim, uint32_t Depth, uint32_t Arrayed, uint32_t Multisampled, uint32_t Sampled, SPIRV::ImageFormat::ImageFormat ImageFormat, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVType * getOrCreateOpTypeByOpcode(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite, bool EmitIR=true)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, MachineFunction &MF)
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, bool HasLinkageTy, SPIRV::LinkageType::LinkageType LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
SPIRVType * getOrCreateOpTypeSampledImage(SPIRVType *ImageType, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVTypeByName(StringRef TypeStr, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC=SPIRV::StorageClass::Function, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite)
SPIRVType * assignTypeToVReg(const Type *Type, Register VReg, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite, bool EmitIR=true)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRVType * getOrCreateOpTypeDeviceEvent(MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVPointerType(SPIRVType *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SClass=SPIRV::StorageClass::Function)
SPIRVType * getOrCreateOpTypeCoopMatr(MachineIRBuilder &MIRBuilder, const TargetExtType *ExtensionType, const SPIRVType *ElemType, uint32_t Scope, uint32_t Rows, uint32_t Columns, uint32_t Use)
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
SPIRVType * getOrCreateOpTypeSampler(MachineIRBuilder &MIRBuilder)
Register buildConstantInt(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr, bool EmitIR=true)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:685
bool consume_back(StringRef Suffix)
Returns true if this StringRef has the given suffix and removes that suffix.
Definition: StringRef.h:640
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:455
std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:215
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition: StringRef.h:556
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:250
bool contains_insensitive(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:421
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Definition: StringRef.h:669
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:409
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition: StringRef.h:620
size_t find_first_of(char C, size_t From=0) const
Find the first character in the string that is C, or npos if not found.
Definition: StringRef.h:362
size_t find(char C, size_t From=0) const
Search for the first character C in the string.
Definition: StringRef.h:282
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition: StringRef.h:262
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & EndsWith(StringLiteral S, T Value)
Definition: StringSwitch.h:76
Class to represent struct types.
Definition: DerivedTypes.h:216
static StructType * getTypeByName(LLVMContext &C, StringRef Name)
Return the type with the specified name, or null if there is none by that name.
Definition: Type.cpp:620
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition: Type.cpp:501
Class to represent target extensions types, which are generally unintrospectable from target-independ...
Definition: DerivedTypes.h:720
unsigned getNumIntParameters() const
Definition: DerivedTypes.h:765
static TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types=std::nullopt, ArrayRef< unsigned > Ints=std::nullopt)
Return a target extension type having the specified name and optional type and integer parameters.
Definition: Type.cpp:784
Type * getTypeParameter(unsigned i) const
Definition: DerivedTypes.h:755
unsigned getNumTypeParameters() const
Definition: DerivedTypes.h:756
unsigned getIntParameter(unsigned i) const
Definition: DerivedTypes.h:764
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static Type * getHalfTy(LLVMContext &C)
StringRef getStructName() const
static Type * getVoidTy(LLVMContext &C)
static IntegerType * getInt8Ty(LLVMContext &C)
static IntegerType * getInt32Ty(LLVMContext &C)
static Type * getFloatTy(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:139
LLVM Value Representation.
Definition: Value.h:74
Value(Type *Ty, unsigned scid)
Definition: Value.cpp:53
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Definition: Type.cpp:664
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount)
Create a vector type that contains a defined type and has a specific number of elements.
Definition: Core.cpp:881
std::tuple< int, unsigned, unsigned > mapBuiltinToOpcode(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set)
Helper function for finding a builtin function attributes by a demangled function name.
Type * parseBuiltinCallArgumentBaseType(const StringRef DemangledCall, unsigned ArgIdx, LLVMContext &Ctx)
Parses the provided ArgIdx argument base type in the DemangledCall skeleton.
TargetExtType * parseBuiltinTypeNameToTargetExtType(std::string TypeName, LLVMContext &Context)
Translates a string representing a SPIR-V or OpenCL builtin type to a TargetExtType that can be furth...
std::optional< bool > lowerBuiltin(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, MachineIRBuilder &MIRBuilder, const Register OrigRet, const Type *OrigRetTy, const SmallVectorImpl< Register > &Args, SPIRVGlobalRegistry *GR)
SPIRVType * lowerBuiltinType(const Type *OpaqueType, SPIRV::AccessQualifier::AccessQualifier AccessQual, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
std::string lookupBuiltinNameHelper(StringRef DemangledCall)
Parses the name part of the demangled builtin call.
StorageClass
Definition: XCOFF.h:170
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
Definition: SPIRVUtils.cpp:100
unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
Definition: SPIRVUtils.cpp:166
static bool generateGetQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildConstantIntReg(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, unsigned BitWidth=32)
static bool generateConstructInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic flag instructions (e.g.
static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder, SPIRVType *VariableType, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType, Register Reg=Register(0), bool isConst=true, bool hasLinkageTy=true)
Helper function for building a load instruction for loading a builtin global variable of BuiltinValue...
static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::SamplerFilterMode::SamplerFilterMode getSamplerFilterModeFromBitmask(unsigned Bitmask)
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic store instruction.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
Definition: SPIRVUtils.cpp:80
static const Type * getBlockStructType(Register ParamReg, MachineRegisterInfo *MRI)
static bool generateGroupInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim)
Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB, MachineRegisterInfo &MRI)
Helper external function for inserting ASSIGN_TYPE instruction between Reg and its definition,...
static Register buildScopeReg(Register CLScopeRegister, SPIRV::Scope::Scope Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI)
static std::tuple< Register, SPIRVType * > buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType, SPIRVGlobalRegistry *GR)
Helper function building either a resulting scalar or vector bool register depending on the expected ...
static unsigned getNumSizeComponents(SPIRVType *imgType)
Helper function for obtaining the number of size components.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
Definition: SPIRVUtils.cpp:273
static SPIRVType * getSampledImageType(const TargetExtType *OpaqueType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
Definition: SPIRVUtils.cpp:218
static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSampleImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBarrierInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getCoopMatrType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateKernelClockInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateWaveInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getImageType(const TargetExtType *ExtensionType, const SPIRV::AccessQualifier::AccessQualifier Qualifier, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
Definition: SPIRVUtils.cpp:117
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building barriers, i.e., memory/control ordering operations.
static bool generateAsyncCopy(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope)
static SPIRVType * getSamplerType(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, LLT LowLevelType, Register DestinationReg=Register(0))
Helper function for building a load instruction loading into the DestinationReg.
static bool generateEnqueueInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
static bool buildSelectInst(MachineIRBuilder &MIRBuilder, Register ReturnRegister, Register SourceRegister, const SPIRVType *ReturnType, SPIRVGlobalRegistry *GR)
Helper function for building either a vector or scalar select instruction depending on the expected R...
static const Type * getMachineInstrType(MachineInstr *MI)
static SPIRV::SamplerAddressingMode::SamplerAddressingMode getSamplerAddressingModeFromBitmask(unsigned Bitmask)
static bool generateAtomicInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConvertInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildMemSemanticsReg(Register SemanticsRegister, Register PtrRegister, unsigned &Semantics, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI)
static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSelectInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic load instruction.
static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx)
Definition: SPIRVUtils.cpp:404
static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, uint64_t DefaultValue)
static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static std::unique_ptr< const SPIRV::IncomingCall > lookupBuiltin(StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
Looks up the demangled builtin call in the SPIRVBuiltins.td records using the provided DemangledCall ...
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic floating-type instruction.
MachineInstr * getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI)
Definition: SPIRVUtils.cpp:254
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
const MachineInstr SPIRVType
static bool generateReadImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
bool hasBuiltinTypePrefix(StringRef Name)
Definition: SPIRVUtils.cpp:376
static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * getMDOperandAsType(const MDNode *N, unsigned I)
Definition: SPIRVUtils.cpp:285
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic instructions.
static SPIRV::MemorySemantics::MemorySemantics getSPIRVMemSemantics(std::memory_order MemOrder)
static bool generateRelationalInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
Helper function for translating atomic init to OpStore.
static bool generateWriteImageInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getPipeType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Type * parseTypeString(const StringRef Name, LLVMContext &Context)
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
Definition: SPIRVUtils.cpp:279
static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildNDRange(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getNonParameterizedType(const TargetExtType *ExtensionType, const SPIRV::BuiltinType *TypeRecord, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static MachineInstr * getBlockStructInstr(Register ParamReg, MachineRegisterInfo *MRI)
static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode, const SPIRV::IncomingCall *Call, Register TypeReg, ArrayRef< uint32_t > ImmArgs={})
static unsigned getSamplerParamFromBitmask(unsigned Bitmask)
static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic compare-exchange instruction.
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
static bool generateBuiltinVar(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:281
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
This class contains a discriminated union of information about pointers in memory operands,...
FPRoundingMode::FPRoundingMode RoundingMode
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
BuiltIn::BuiltIn Value
InstructionSet::InstructionSet Set
const SmallVectorImpl< Register > & Arguments
const std::string BuiltinName
const SPIRVType * ReturnType
const Register ReturnRegister
const DemangledBuiltin * Builtin
IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin, const Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
FPRoundingMode::FPRoundingMode RoundingMode