LLVM 19.0.0git
SPIRVBuiltins.cpp
Go to the documentation of this file.
1//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements lowering builtin function calls and types using their
10// demangled names and TableGen records.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SPIRVBuiltins.h"
15#include "SPIRV.h"
16#include "SPIRVSubtarget.h"
17#include "SPIRVUtils.h"
20#include "llvm/IR/IntrinsicsSPIRV.h"
21#include <string>
22#include <tuple>
23
24#define DEBUG_TYPE "spirv-builtins"
25
26namespace llvm {
27namespace SPIRV {
28#define GET_BuiltinGroup_DECL
29#include "SPIRVGenTables.inc"
30
33 InstructionSet::InstructionSet Set;
34 BuiltinGroup Group;
35 uint8_t MinNumArgs;
36 uint8_t MaxNumArgs;
37};
38
39#define GET_DemangledBuiltins_DECL
40#define GET_DemangledBuiltins_IMPL
41
43 const std::string BuiltinName;
45
49
56
57 bool isSpirvOp() const { return BuiltinName.rfind("__spirv_", 0) == 0; }
58};
59
62 InstructionSet::InstructionSet Set;
64};
65
66#define GET_NativeBuiltins_DECL
67#define GET_NativeBuiltins_IMPL
68
73 bool IsElect;
83};
84
85#define GET_GroupBuiltins_DECL
86#define GET_GroupBuiltins_IMPL
87
91 bool IsBlock;
92 bool IsWrite;
93};
94
95#define GET_IntelSubgroupsBuiltins_DECL
96#define GET_IntelSubgroupsBuiltins_IMPL
97
101};
102
103#define GET_AtomicFloatingBuiltins_DECL
104#define GET_AtomicFloatingBuiltins_IMPL
109};
110
111#define GET_GroupUniformBuiltins_DECL
112#define GET_GroupUniformBuiltins_IMPL
113
116 InstructionSet::InstructionSet Set;
117 BuiltIn::BuiltIn Value;
118};
119
120using namespace BuiltIn;
121#define GET_GetBuiltins_DECL
122#define GET_GetBuiltins_IMPL
123
126 InstructionSet::InstructionSet Set;
128};
129
130#define GET_ImageQueryBuiltins_DECL
131#define GET_ImageQueryBuiltins_IMPL
132
135 InstructionSet::InstructionSet Set;
140 FPRoundingMode::FPRoundingMode RoundingMode;
141};
142
145 InstructionSet::InstructionSet Set;
149 FPRoundingMode::FPRoundingMode RoundingMode;
150};
151
152using namespace FPRoundingMode;
153#define GET_ConvertBuiltins_DECL
154#define GET_ConvertBuiltins_IMPL
155
156using namespace InstructionSet;
157#define GET_VectorLoadStoreBuiltins_DECL
158#define GET_VectorLoadStoreBuiltins_IMPL
159
160#define GET_CLMemoryScope_DECL
161#define GET_CLSamplerAddressingMode_DECL
162#define GET_CLMemoryFenceFlags_DECL
163#define GET_ExtendedBuiltins_DECL
164#include "SPIRVGenTables.inc"
165} // namespace SPIRV
166
167//===----------------------------------------------------------------------===//
168// Misc functions for looking up builtins and veryfying requirements using
169// TableGen records
170//===----------------------------------------------------------------------===//
171
172/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
173/// the provided \p DemangledCall and specified \p Set.
174///
175/// The lookup follows the following algorithm, returning the first successful
176/// match:
177/// 1. Search with the plain demangled name (expecting a 1:1 match).
178/// 2. Search with the prefix before or suffix after the demangled name
179/// signyfying the type of the first argument.
180///
181/// \returns Wrapper around the demangled call and found builtin definition.
182static std::unique_ptr<const SPIRV::IncomingCall>
184 SPIRV::InstructionSet::InstructionSet Set,
185 Register ReturnRegister, const SPIRVType *ReturnType,
187 // Extract the builtin function name and types of arguments from the call
188 // skeleton.
189 std::string BuiltinName =
190 DemangledCall.substr(0, DemangledCall.find('(')).str();
191
192 // Account for possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR
193 if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
194 BuiltinName = BuiltinName.substr(12);
195
196 // Check if the extracted name contains type information between angle
197 // brackets. If so, the builtin is an instantiated template - needs to have
198 // the information after angle brackets and return type removed.
199 if (BuiltinName.find('<') && BuiltinName.back() == '>') {
200 BuiltinName = BuiltinName.substr(0, BuiltinName.find('<'));
201 BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
202 }
203
204 // Check if the extracted name begins with "__spirv_ImageSampleExplicitLod"
205 // contains return type information at the end "_R<type>", if so extract the
206 // plain builtin name without the type information.
207 if (StringRef(BuiltinName).contains("__spirv_ImageSampleExplicitLod") &&
208 StringRef(BuiltinName).contains("_R")) {
209 BuiltinName = BuiltinName.substr(0, BuiltinName.find("_R"));
210 }
211
212 SmallVector<StringRef, 10> BuiltinArgumentTypes;
213 StringRef BuiltinArgs =
214 DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
215 BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);
216
217 // Look up the builtin in the defined set. Start with the plain demangled
218 // name, expecting a 1:1 match in the defined builtin set.
219 const SPIRV::DemangledBuiltin *Builtin;
220 if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
221 return std::make_unique<SPIRV::IncomingCall>(
222 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
223
224 // If the initial look up was unsuccessful and the demangled call takes at
225 // least 1 argument, add a prefix or suffix signifying the type of the first
226 // argument and repeat the search.
227 if (BuiltinArgumentTypes.size() >= 1) {
228 char FirstArgumentType = BuiltinArgumentTypes[0][0];
229 // Prefix to be added to the builtin's name for lookup.
230 // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
231 std::string Prefix;
232
233 switch (FirstArgumentType) {
234 // Unsigned:
235 case 'u':
236 if (Set == SPIRV::InstructionSet::OpenCL_std)
237 Prefix = "u_";
238 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
239 Prefix = "u";
240 break;
241 // Signed:
242 case 'c':
243 case 's':
244 case 'i':
245 case 'l':
246 if (Set == SPIRV::InstructionSet::OpenCL_std)
247 Prefix = "s_";
248 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
249 Prefix = "s";
250 break;
251 // Floating-point:
252 case 'f':
253 case 'd':
254 case 'h':
255 if (Set == SPIRV::InstructionSet::OpenCL_std ||
256 Set == SPIRV::InstructionSet::GLSL_std_450)
257 Prefix = "f";
258 break;
259 }
260
261 // If argument-type name prefix was added, look up the builtin again.
262 if (!Prefix.empty() &&
263 (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
264 return std::make_unique<SPIRV::IncomingCall>(
265 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
266
267 // If lookup with a prefix failed, find a suffix to be added to the
268 // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
269 // an unsigned value has a suffix "u".
270 std::string Suffix;
271
272 switch (FirstArgumentType) {
273 // Unsigned:
274 case 'u':
275 Suffix = "u";
276 break;
277 // Signed:
278 case 'c':
279 case 's':
280 case 'i':
281 case 'l':
282 Suffix = "s";
283 break;
284 // Floating-point:
285 case 'f':
286 case 'd':
287 case 'h':
288 Suffix = "f";
289 break;
290 }
291
292 // If argument-type name suffix was added, look up the builtin again.
293 if (!Suffix.empty() &&
294 (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
295 return std::make_unique<SPIRV::IncomingCall>(
296 BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
297 }
298
299 // No builtin with such name was found in the set.
300 return nullptr;
301}
302
305 // We expect the following sequence of instructions:
306 // %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
307 // or = G_GLOBAL_VALUE @block_literal_global
308 // %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
309 // %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
310 MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
311 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
312 MI->getOperand(1).isReg());
313 Register BitcastReg = MI->getOperand(1).getReg();
314 MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
315 assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
316 BitcastMI->getOperand(2).isReg());
317 Register ValueReg = BitcastMI->getOperand(2).getReg();
318 MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
319 return ValueMI;
320}
321
322// Return an integer constant corresponding to the given register and
323// defined in spv_track_constant.
324// TODO: maybe unify with prelegalizer pass.
326 MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
327 assert(isSpvIntrinsic(*DefMI, Intrinsic::spv_track_constant) &&
328 DefMI->getOperand(2).isReg());
329 MachineInstr *DefMI2 = MRI->getUniqueVRegDef(DefMI->getOperand(2).getReg());
330 assert(DefMI2->getOpcode() == TargetOpcode::G_CONSTANT &&
331 DefMI2->getOperand(1).isCImm());
332 return DefMI2->getOperand(1).getCImm()->getValue().getZExtValue();
333}
334
335// Return type of the instruction result from spv_assign_type intrinsic.
336// TODO: maybe unify with prelegalizer pass.
338 MachineInstr *NextMI = MI->getNextNode();
339 if (!NextMI)
340 return nullptr;
341 if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
342 if ((NextMI = NextMI->getNextNode()) == nullptr)
343 return nullptr;
344 Register ValueReg = MI->getOperand(0).getReg();
345 if ((!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) &&
346 !isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_ptr_type)) ||
347 NextMI->getOperand(1).getReg() != ValueReg)
348 return nullptr;
349 Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
350 assert(Ty && "Type is expected");
351 return Ty;
352}
353
354static const Type *getBlockStructType(Register ParamReg,
356 // In principle, this information should be passed to us from Clang via
357 // an elementtype attribute. However, said attribute requires that
358 // the function call be an intrinsic, which is not. Instead, we rely on being
359 // able to trace this to the declaration of a variable: OpenCL C specification
360 // section 6.12.5 should guarantee that we can do this.
362 if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
363 return MI->getOperand(1).getGlobal()->getType();
364 assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
365 "Blocks in OpenCL C must be traceable to allocation site");
366 return getMachineInstrType(MI);
367}
368
369//===----------------------------------------------------------------------===//
370// Helper functions for building misc instructions
371//===----------------------------------------------------------------------===//
372
373/// Helper function building either a resulting scalar or vector bool register
374/// depending on the expected \p ResultType.
375///
376/// \returns Tuple of the resulting register and its type.
377static std::tuple<Register, SPIRVType *>
378buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
380 LLT Type;
381 SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);
382
383 if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
384 unsigned VectorElements = ResultType->getOperand(2).getImm();
385 BoolType =
386 GR->getOrCreateSPIRVVectorType(BoolType, VectorElements, MIRBuilder);
388 cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
389 Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
390 } else {
391 Type = LLT::scalar(1);
392 }
393
394 Register ResultRegister =
396 MIRBuilder.getMRI()->setRegClass(ResultRegister, &SPIRV::IDRegClass);
397 GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
398 return std::make_tuple(ResultRegister, BoolType);
399}
400
401/// Helper function for building either a vector or scalar select instruction
402/// depending on the expected \p ResultType.
403static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
404 Register ReturnRegister, Register SourceRegister,
405 const SPIRVType *ReturnType,
407 Register TrueConst, FalseConst;
408
409 if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
410 unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
412 TrueConst = GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType);
413 FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType);
414 } else {
415 TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType);
416 FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType);
417 }
418 return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
419 FalseConst);
420}
421
422/// Helper function for building a load instruction loading into the
423/// \p DestinationReg.
425 MachineIRBuilder &MIRBuilder,
426 SPIRVGlobalRegistry *GR, LLT LowLevelType,
427 Register DestinationReg = Register(0)) {
428 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
429 if (!DestinationReg.isValid()) {
430 DestinationReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
431 MRI->setType(DestinationReg, LLT::scalar(32));
432 GR->assignSPIRVTypeToVReg(BaseType, DestinationReg, MIRBuilder.getMF());
433 }
434 // TODO: consider using correct address space and alignment (p0 is canonical
435 // type for selection though).
437 MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
438 return DestinationReg;
439}
440
441/// Helper function for building a load instruction for loading a builtin global
442/// variable of \p BuiltinValue value.
444 MachineIRBuilder &MIRBuilder, SPIRVType *VariableType,
445 SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType,
446 Register Reg = Register(0), bool isConst = true, bool hasLinkageTy = true) {
447 Register NewRegister =
448 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
449 MIRBuilder.getMRI()->setType(NewRegister,
450 LLT::pointer(0, GR->getPointerSize()));
452 VariableType, MIRBuilder, SPIRV::StorageClass::Input);
453 GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
454
455 // Set up the global OpVariable with the necessary builtin decorations.
456 Register Variable = GR->buildGlobalVariable(
457 NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
458 SPIRV::StorageClass::Input, nullptr, /* isConst= */ isConst,
459 /* HasLinkageTy */ hasLinkageTy, SPIRV::LinkageType::Import, MIRBuilder,
460 false);
461
462 // Load the value from the global variable.
463 Register LoadedRegister =
464 buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
465 MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
466 return LoadedRegister;
467}
468
469/// Helper external function for inserting ASSIGN_TYPE instuction between \p Reg
470/// and its definition, set the new register as a destination of the definition,
471/// assign SPIRVType to both registers. If SpirvTy is provided, use it as
472/// SPIRVType in ASSIGN_TYPE, otherwise create it from \p Ty. Defined in
473/// SPIRVPreLegalizer.cpp.
474extern Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
475 SPIRVGlobalRegistry *GR,
476 MachineIRBuilder &MIB,
477 MachineRegisterInfo &MRI);
478
479// TODO: Move to TableGen.
480static SPIRV::MemorySemantics::MemorySemantics
481getSPIRVMemSemantics(std::memory_order MemOrder) {
482 switch (MemOrder) {
483 case std::memory_order::memory_order_relaxed:
484 return SPIRV::MemorySemantics::None;
485 case std::memory_order::memory_order_acquire:
486 return SPIRV::MemorySemantics::Acquire;
487 case std::memory_order::memory_order_release:
488 return SPIRV::MemorySemantics::Release;
489 case std::memory_order::memory_order_acq_rel:
490 return SPIRV::MemorySemantics::AcquireRelease;
491 case std::memory_order::memory_order_seq_cst:
492 return SPIRV::MemorySemantics::SequentiallyConsistent;
493 default:
494 report_fatal_error("Unknown CL memory scope");
495 }
496}
497
498static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
499 switch (ClScope) {
500 case SPIRV::CLMemoryScope::memory_scope_work_item:
501 return SPIRV::Scope::Invocation;
502 case SPIRV::CLMemoryScope::memory_scope_work_group:
503 return SPIRV::Scope::Workgroup;
504 case SPIRV::CLMemoryScope::memory_scope_device:
505 return SPIRV::Scope::Device;
506 case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
507 return SPIRV::Scope::CrossDevice;
508 case SPIRV::CLMemoryScope::memory_scope_sub_group:
509 return SPIRV::Scope::Subgroup;
510 }
511 report_fatal_error("Unknown CL memory scope");
512}
513
516 unsigned BitWidth = 32) {
517 SPIRVType *IntType = GR->getOrCreateSPIRVIntegerType(BitWidth, MIRBuilder);
518 return GR->buildConstantInt(Val, MIRBuilder, IntType);
519}
520
521static Register buildScopeReg(Register CLScopeRegister,
522 SPIRV::Scope::Scope Scope,
523 MachineIRBuilder &MIRBuilder,
526 if (CLScopeRegister.isValid()) {
527 auto CLScope =
528 static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
529 Scope = getSPIRVScope(CLScope);
530
531 if (CLScope == static_cast<unsigned>(Scope)) {
532 MRI->setRegClass(CLScopeRegister, &SPIRV::IDRegClass);
533 return CLScopeRegister;
534 }
535 }
536 return buildConstantIntReg(Scope, MIRBuilder, GR);
537}
538
539static Register buildMemSemanticsReg(Register SemanticsRegister,
540 Register PtrRegister, unsigned &Semantics,
541 MachineIRBuilder &MIRBuilder,
543 if (SemanticsRegister.isValid()) {
544 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
545 std::memory_order Order =
546 static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
547 Semantics =
548 getSPIRVMemSemantics(Order) |
550
551 if (Order == Semantics) {
552 MRI->setRegClass(SemanticsRegister, &SPIRV::IDRegClass);
553 return SemanticsRegister;
554 }
555 }
556 return buildConstantIntReg(Semantics, MIRBuilder, GR);
557}
558
559static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode,
560 const SPIRV::IncomingCall *Call,
561 Register TypeReg,
562 ArrayRef<uint32_t> ImmArgs = {}) {
563 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
564 auto MIB = MIRBuilder.buildInstr(Opcode);
565 if (TypeReg.isValid())
566 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
567 unsigned Sz = Call->Arguments.size() - ImmArgs.size();
568 for (unsigned i = 0; i < Sz; ++i) {
569 Register ArgReg = Call->Arguments[i];
570 if (!MRI->getRegClassOrNull(ArgReg))
571 MRI->setRegClass(ArgReg, &SPIRV::IDRegClass);
572 MIB.addUse(ArgReg);
573 }
574 for (uint32_t ImmArg : ImmArgs)
575 MIB.addImm(ImmArg);
576 return true;
577}
578
579/// Helper function for translating atomic init to OpStore.
581 MachineIRBuilder &MIRBuilder) {
582 if (Call->isSpirvOp())
583 return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));
584
585 assert(Call->Arguments.size() == 2 &&
586 "Need 2 arguments for atomic init translation");
587 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
588 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
589 MIRBuilder.buildInstr(SPIRV::OpStore)
590 .addUse(Call->Arguments[0])
591 .addUse(Call->Arguments[1]);
592 return true;
593}
594
595/// Helper function for building an atomic load instruction.
597 MachineIRBuilder &MIRBuilder,
599 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
600 if (Call->isSpirvOp())
601 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);
602
603 Register PtrRegister = Call->Arguments[0];
604 MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass);
605 // TODO: if true insert call to __translate_ocl_memory_sccope before
606 // OpAtomicLoad and the function implementation. We can use Translator's
607 // output for transcoding/atomic_explicit_arguments.cl as an example.
608 Register ScopeRegister;
609 if (Call->Arguments.size() > 1) {
610 ScopeRegister = Call->Arguments[1];
611 MIRBuilder.getMRI()->setRegClass(ScopeRegister, &SPIRV::IDRegClass);
612 } else
613 ScopeRegister = buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);
614
615 Register MemSemanticsReg;
616 if (Call->Arguments.size() > 2) {
617 // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
618 MemSemanticsReg = Call->Arguments[2];
619 MIRBuilder.getMRI()->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
620 } else {
621 int Semantics =
622 SPIRV::MemorySemantics::SequentiallyConsistent |
624 MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GR);
625 }
626
627 MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
628 .addDef(Call->ReturnRegister)
629 .addUse(TypeReg)
630 .addUse(PtrRegister)
631 .addUse(ScopeRegister)
632 .addUse(MemSemanticsReg);
633 return true;
634}
635
636/// Helper function for building an atomic store instruction.
638 MachineIRBuilder &MIRBuilder,
640 if (Call->isSpirvOp())
641 return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call, Register(0));
642
643 Register ScopeRegister =
644 buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);
645 Register PtrRegister = Call->Arguments[0];
646 MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass);
647 int Semantics =
648 SPIRV::MemorySemantics::SequentiallyConsistent |
650 Register MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GR);
651 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
652 MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
653 .addUse(PtrRegister)
654 .addUse(ScopeRegister)
655 .addUse(MemSemanticsReg)
656 .addUse(Call->Arguments[1]);
657 return true;
658}
659
660/// Helper function for building an atomic compare-exchange instruction.
662 const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin,
663 unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
664 if (Call->isSpirvOp())
665 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
666 GR->getSPIRVTypeID(Call->ReturnType));
667
668 bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
669 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
670
671 Register ObjectPtr = Call->Arguments[0]; // Pointer (volatile A *object.)
672 Register ExpectedArg = Call->Arguments[1]; // Comparator (C* expected).
673 Register Desired = Call->Arguments[2]; // Value (C Desired).
674 MRI->setRegClass(ObjectPtr, &SPIRV::IDRegClass);
675 MRI->setRegClass(ExpectedArg, &SPIRV::IDRegClass);
676 MRI->setRegClass(Desired, &SPIRV::IDRegClass);
677 SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
678 LLT DesiredLLT = MRI->getType(Desired);
679
680 assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
681 SPIRV::OpTypePointer);
682 unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
683 (void)ExpectedType;
684 assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
685 : ExpectedType == SPIRV::OpTypePointer);
686 assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));
687
688 SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
689 assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
690 auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
691 SpvObjectPtrTy->getOperand(1).getImm());
692 auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);
693
694 Register MemSemEqualReg;
695 Register MemSemUnequalReg;
696 uint64_t MemSemEqual =
697 IsCmpxchg
698 ? SPIRV::MemorySemantics::None
699 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
700 uint64_t MemSemUnequal =
701 IsCmpxchg
702 ? SPIRV::MemorySemantics::None
703 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
704 if (Call->Arguments.size() >= 4) {
705 assert(Call->Arguments.size() >= 5 &&
706 "Need 5+ args for explicit atomic cmpxchg");
707 auto MemOrdEq =
708 static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
709 auto MemOrdNeq =
710 static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
711 MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
712 MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
713 if (MemOrdEq == MemSemEqual)
714 MemSemEqualReg = Call->Arguments[3];
715 if (MemOrdNeq == MemSemEqual)
716 MemSemUnequalReg = Call->Arguments[4];
717 MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass);
718 MRI->setRegClass(Call->Arguments[4], &SPIRV::IDRegClass);
719 }
720 if (!MemSemEqualReg.isValid())
721 MemSemEqualReg = buildConstantIntReg(MemSemEqual, MIRBuilder, GR);
722 if (!MemSemUnequalReg.isValid())
723 MemSemUnequalReg = buildConstantIntReg(MemSemUnequal, MIRBuilder, GR);
724
725 Register ScopeReg;
726 auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
727 if (Call->Arguments.size() >= 6) {
728 assert(Call->Arguments.size() == 6 &&
729 "Extra args for explicit atomic cmpxchg");
730 auto ClScope = static_cast<SPIRV::CLMemoryScope>(
731 getIConstVal(Call->Arguments[5], MRI));
732 Scope = getSPIRVScope(ClScope);
733 if (ClScope == static_cast<unsigned>(Scope))
734 ScopeReg = Call->Arguments[5];
735 MRI->setRegClass(Call->Arguments[5], &SPIRV::IDRegClass);
736 }
737 if (!ScopeReg.isValid())
738 ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GR);
739
740 Register Expected = IsCmpxchg
741 ? ExpectedArg
742 : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
743 GR, LLT::scalar(32));
744 MRI->setType(Expected, DesiredLLT);
745 Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
746 : Call->ReturnRegister;
747 if (!MRI->getRegClassOrNull(Tmp))
748 MRI->setRegClass(Tmp, &SPIRV::IDRegClass);
749 GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());
750
751 SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
752 MIRBuilder.buildInstr(Opcode)
753 .addDef(Tmp)
754 .addUse(GR->getSPIRVTypeID(IntTy))
755 .addUse(ObjectPtr)
756 .addUse(ScopeReg)
757 .addUse(MemSemEqualReg)
758 .addUse(MemSemUnequalReg)
759 .addUse(Desired)
761 if (!IsCmpxchg) {
762 MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
763 MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
764 }
765 return true;
766}
767
768/// Helper function for building atomic instructions.
769static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
770 MachineIRBuilder &MIRBuilder,
772 if (Call->isSpirvOp())
773 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
774 GR->getSPIRVTypeID(Call->ReturnType));
775
776 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
777 Register ScopeRegister =
778 Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();
779
780 assert(Call->Arguments.size() <= 4 &&
781 "Too many args for explicit atomic RMW");
782 ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
783 MIRBuilder, GR, MRI);
784
785 Register PtrRegister = Call->Arguments[0];
786 unsigned Semantics = SPIRV::MemorySemantics::None;
787 MRI->setRegClass(PtrRegister, &SPIRV::IDRegClass);
788 Register MemSemanticsReg =
789 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
790 MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
791 Semantics, MIRBuilder, GR);
792 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
793 Register ValueReg = Call->Arguments[1];
794 Register ValueTypeReg = GR->getSPIRVTypeID(Call->ReturnType);
795 // support cl_ext_float_atomics
796 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
797 if (Opcode == SPIRV::OpAtomicIAdd) {
798 Opcode = SPIRV::OpAtomicFAddEXT;
799 } else if (Opcode == SPIRV::OpAtomicISub) {
800 // Translate OpAtomicISub applied to a floating type argument to
801 // OpAtomicFAddEXT with the negative value operand
802 Opcode = SPIRV::OpAtomicFAddEXT;
803 Register NegValueReg =
804 MRI->createGenericVirtualRegister(MRI->getType(ValueReg));
805 MRI->setRegClass(NegValueReg, &SPIRV::IDRegClass);
806 GR->assignSPIRVTypeToVReg(Call->ReturnType, NegValueReg,
807 MIRBuilder.getMF());
808 MIRBuilder.buildInstr(TargetOpcode::G_FNEG)
809 .addDef(NegValueReg)
810 .addUse(ValueReg);
811 insertAssignInstr(NegValueReg, nullptr, Call->ReturnType, GR, MIRBuilder,
812 MIRBuilder.getMF().getRegInfo());
813 ValueReg = NegValueReg;
814 }
815 }
816 MIRBuilder.buildInstr(Opcode)
817 .addDef(Call->ReturnRegister)
818 .addUse(ValueTypeReg)
819 .addUse(PtrRegister)
820 .addUse(ScopeRegister)
821 .addUse(MemSemanticsReg)
822 .addUse(ValueReg);
823 return true;
824}
825
826/// Helper function for building an atomic floating-type instruction.
828 unsigned Opcode,
829 MachineIRBuilder &MIRBuilder,
831 assert(Call->Arguments.size() == 4 &&
832 "Wrong number of atomic floating-type builtin");
833
834 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
835
836 Register PtrReg = Call->Arguments[0];
837 MRI->setRegClass(PtrReg, &SPIRV::IDRegClass);
838
839 Register ScopeReg = Call->Arguments[1];
840 MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
841
842 Register MemSemanticsReg = Call->Arguments[2];
843 MRI->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
844
845 Register ValueReg = Call->Arguments[3];
846 MRI->setRegClass(ValueReg, &SPIRV::IDRegClass);
847
848 MIRBuilder.buildInstr(Opcode)
849 .addDef(Call->ReturnRegister)
850 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
851 .addUse(PtrReg)
852 .addUse(ScopeReg)
853 .addUse(MemSemanticsReg)
854 .addUse(ValueReg);
855 return true;
856}
857
858/// Helper function for building atomic flag instructions (e.g.
859/// OpAtomicFlagTestAndSet).
861 unsigned Opcode, MachineIRBuilder &MIRBuilder,
863 bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;
864 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
865 if (Call->isSpirvOp())
866 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
867 IsSet ? TypeReg : Register(0));
868
869 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
870 Register PtrRegister = Call->Arguments[0];
871 unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
872 Register MemSemanticsReg =
873 Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
874 MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
875 Semantics, MIRBuilder, GR);
876
877 assert((Opcode != SPIRV::OpAtomicFlagClear ||
878 (Semantics != SPIRV::MemorySemantics::Acquire &&
879 Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
880 "Invalid memory order argument!");
881
882 Register ScopeRegister =
883 Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
884 ScopeRegister =
885 buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);
886
887 auto MIB = MIRBuilder.buildInstr(Opcode);
888 if (IsSet)
889 MIB.addDef(Call->ReturnRegister).addUse(TypeReg);
890
891 MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
892 return true;
893}
894
895/// Helper function for building barriers, i.e., memory/control ordering
896/// operations.
897static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
898 MachineIRBuilder &MIRBuilder,
900 if (Call->isSpirvOp())
901 return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));
902
903 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
904 unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
905 unsigned MemSemantics = SPIRV::MemorySemantics::None;
906
907 if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
908 MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;
909
910 if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
911 MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;
912
913 if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
914 MemSemantics |= SPIRV::MemorySemantics::ImageMemory;
915
916 if (Opcode == SPIRV::OpMemoryBarrier) {
917 std::memory_order MemOrder =
918 static_cast<std::memory_order>(getIConstVal(Call->Arguments[1], MRI));
919 MemSemantics = getSPIRVMemSemantics(MemOrder) | MemSemantics;
920 } else {
921 MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
922 }
923
924 Register MemSemanticsReg;
925 if (MemFlags == MemSemantics) {
926 MemSemanticsReg = Call->Arguments[0];
927 MRI->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
928 } else
929 MemSemanticsReg = buildConstantIntReg(MemSemantics, MIRBuilder, GR);
930
931 Register ScopeReg;
932 SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
933 SPIRV::Scope::Scope MemScope = Scope;
934 if (Call->Arguments.size() >= 2) {
935 assert(
936 ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
937 (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
938 "Extra args for explicitly scoped barrier");
939 Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
940 : Call->Arguments[1];
941 SPIRV::CLMemoryScope CLScope =
942 static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
943 MemScope = getSPIRVScope(CLScope);
944 if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
945 (Opcode == SPIRV::OpMemoryBarrier))
946 Scope = MemScope;
947
948 if (CLScope == static_cast<unsigned>(Scope)) {
949 ScopeReg = Call->Arguments[1];
950 MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
951 }
952 }
953
954 if (!ScopeReg.isValid())
955 ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GR);
956
957 auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
958 if (Opcode != SPIRV::OpMemoryBarrier)
959 MIB.addUse(buildConstantIntReg(MemScope, MIRBuilder, GR));
960 MIB.addUse(MemSemanticsReg);
961 return true;
962}
963
964static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
965 switch (dim) {
966 case SPIRV::Dim::DIM_1D:
967 case SPIRV::Dim::DIM_Buffer:
968 return 1;
969 case SPIRV::Dim::DIM_2D:
970 case SPIRV::Dim::DIM_Cube:
971 case SPIRV::Dim::DIM_Rect:
972 return 2;
973 case SPIRV::Dim::DIM_3D:
974 return 3;
975 default:
976 report_fatal_error("Cannot get num components for given Dim");
977 }
978}
979
980/// Helper function for obtaining the number of size components.
981static unsigned getNumSizeComponents(SPIRVType *imgType) {
982 assert(imgType->getOpcode() == SPIRV::OpTypeImage);
983 auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
984 unsigned numComps = getNumComponentsForDim(dim);
985 bool arrayed = imgType->getOperand(4).getImm() == 1;
986 return arrayed ? numComps + 1 : numComps;
987}
988
989//===----------------------------------------------------------------------===//
990// Implementation functions for each builtin group
991//===----------------------------------------------------------------------===//
992
993static bool generateExtInst(const SPIRV::IncomingCall *Call,
994 MachineIRBuilder &MIRBuilder,
996 // Lookup the extended instruction number in the TableGen records.
997 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
999 SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
1000
1001 // Build extended instruction.
1002 auto MIB =
1003 MIRBuilder.buildInstr(SPIRV::OpExtInst)
1004 .addDef(Call->ReturnRegister)
1005 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1006 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
1007 .addImm(Number);
1008
1009 for (auto Argument : Call->Arguments)
1010 MIB.addUse(Argument);
1011 return true;
1012}
1013
1015 MachineIRBuilder &MIRBuilder,
1016 SPIRVGlobalRegistry *GR) {
1017 // Lookup the instruction opcode in the TableGen records.
1018 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1019 unsigned Opcode =
1020 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1021
1022 Register CompareRegister;
1023 SPIRVType *RelationType;
1024 std::tie(CompareRegister, RelationType) =
1025 buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
1026
1027 // Build relational instruction.
1028 auto MIB = MIRBuilder.buildInstr(Opcode)
1029 .addDef(CompareRegister)
1030 .addUse(GR->getSPIRVTypeID(RelationType));
1031
1032 for (auto Argument : Call->Arguments)
1033 MIB.addUse(Argument);
1034
1035 // Build select instruction.
1036 return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
1037 Call->ReturnType, GR);
1038}
1039
1041 MachineIRBuilder &MIRBuilder,
1042 SPIRVGlobalRegistry *GR) {
1043 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1044 const SPIRV::GroupBuiltin *GroupBuiltin =
1045 SPIRV::lookupGroupBuiltin(Builtin->Name);
1046
1047 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1048 if (Call->isSpirvOp()) {
1049 if (GroupBuiltin->NoGroupOperation)
1050 return buildOpFromWrapper(MIRBuilder, GroupBuiltin->Opcode, Call,
1051 GR->getSPIRVTypeID(Call->ReturnType));
1052
1053 // Group Operation is a literal
1054 Register GroupOpReg = Call->Arguments[1];
1055 const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
1056 if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
1058 "Group Operation parameter must be an integer constant");
1059 uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
1060 Register ScopeReg = Call->Arguments[0];
1061 if (!MRI->getRegClassOrNull(ScopeReg))
1062 MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
1063 Register ValueReg = Call->Arguments[2];
1064 if (!MRI->getRegClassOrNull(ValueReg))
1065 MRI->setRegClass(ValueReg, &SPIRV::IDRegClass);
1066 MIRBuilder.buildInstr(GroupBuiltin->Opcode)
1067 .addDef(Call->ReturnRegister)
1068 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1069 .addUse(ScopeReg)
1070 .addImm(GrpOp)
1071 .addUse(ValueReg);
1072 return true;
1073 }
1074
1075 Register Arg0;
1076 if (GroupBuiltin->HasBoolArg) {
1077 Register ConstRegister = Call->Arguments[0];
1078 auto ArgInstruction = getDefInstrMaybeConstant(ConstRegister, MRI);
1079 (void)ArgInstruction;
1080 // TODO: support non-constant bool values.
1081 assert(ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT &&
1082 "Only constant bool value args are supported");
1083 if (GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() !=
1084 SPIRV::OpTypeBool)
1085 Arg0 = GR->buildConstantInt(getIConstVal(ConstRegister, MRI), MIRBuilder,
1086 GR->getOrCreateSPIRVBoolType(MIRBuilder));
1087 }
1088
1089 Register GroupResultRegister = Call->ReturnRegister;
1090 SPIRVType *GroupResultType = Call->ReturnType;
1091
1092 // TODO: maybe we need to check whether the result type is already boolean
1093 // and in this case do not insert select instruction.
1094 const bool HasBoolReturnTy =
1095 GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
1096 GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
1097 GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;
1098
1099 if (HasBoolReturnTy)
1100 std::tie(GroupResultRegister, GroupResultType) =
1101 buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
1102
1103 auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
1104 : SPIRV::Scope::Workgroup;
1105 Register ScopeRegister = buildConstantIntReg(Scope, MIRBuilder, GR);
1106
1107 // Build work/sub group instruction.
1108 auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
1109 .addDef(GroupResultRegister)
1110 .addUse(GR->getSPIRVTypeID(GroupResultType))
1111 .addUse(ScopeRegister);
1112
1113 if (!GroupBuiltin->NoGroupOperation)
1114 MIB.addImm(GroupBuiltin->GroupOperation);
1115 if (Call->Arguments.size() > 0) {
1116 MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
1117 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1118 for (unsigned i = 1; i < Call->Arguments.size(); i++) {
1119 MIB.addUse(Call->Arguments[i]);
1120 MRI->setRegClass(Call->Arguments[i], &SPIRV::IDRegClass);
1121 }
1122 }
1123
1124 // Build select instruction.
1125 if (HasBoolReturnTy)
1126 buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
1127 Call->ReturnType, GR);
1128 return true;
1129}
1130
1132 MachineIRBuilder &MIRBuilder,
1133 SPIRVGlobalRegistry *GR) {
1134 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1135 MachineFunction &MF = MIRBuilder.getMF();
1136 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1137 if (!ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
1138 std::string DiagMsg = std::string(Builtin->Name) +
1139 ": the builtin requires the following SPIR-V "
1140 "extension: SPV_INTEL_subgroups";
1141 report_fatal_error(DiagMsg.c_str(), false);
1142 }
1143 const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
1144 SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);
1145
1146 uint32_t OpCode = IntelSubgroups->Opcode;
1147 if (Call->isSpirvOp()) {
1148 bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
1149 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL;
1150 return buildOpFromWrapper(MIRBuilder, OpCode, Call,
1151 IsSet ? GR->getSPIRVTypeID(Call->ReturnType)
1152 : Register(0));
1153 }
1154
1155 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1156 if (IntelSubgroups->IsBlock) {
1157 // Minimal number or arguments set in TableGen records is 1
1158 if (SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0])) {
1159 if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
1160 // TODO: add required validation from the specification:
1161 // "'Image' must be an object whose type is OpTypeImage with a 'Sampled'
1162 // operand of 0 or 2. If the 'Sampled' operand is 2, then some
1163 // dimensions require a capability."
1164 switch (OpCode) {
1165 case SPIRV::OpSubgroupBlockReadINTEL:
1166 OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
1167 break;
1168 case SPIRV::OpSubgroupBlockWriteINTEL:
1169 OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
1170 break;
1171 }
1172 }
1173 }
1174 }
1175
1176 // TODO: opaque pointers types should be eventually resolved in such a way
1177 // that validation of block read is enabled with respect to the following
1178 // specification requirement:
1179 // "'Result Type' may be a scalar or vector type, and its component type must
1180 // be equal to the type pointed to by 'Ptr'."
1181 // For example, function parameter type should not be default i8 pointer, but
1182 // depend on the result type of the instruction where it is used as a pointer
1183 // argument of OpSubgroupBlockReadINTEL
1184
1185 // Build Intel subgroups instruction
1187 IntelSubgroups->IsWrite
1188 ? MIRBuilder.buildInstr(OpCode)
1189 : MIRBuilder.buildInstr(OpCode)
1190 .addDef(Call->ReturnRegister)
1191 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1192 for (size_t i = 0; i < Call->Arguments.size(); ++i) {
1193 MIB.addUse(Call->Arguments[i]);
1194 MRI->setRegClass(Call->Arguments[i], &SPIRV::IDRegClass);
1195 }
1196
1197 return true;
1198}
1199
1201 MachineIRBuilder &MIRBuilder,
1202 SPIRVGlobalRegistry *GR) {
1203 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1204 MachineFunction &MF = MIRBuilder.getMF();
1205 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1206 if (!ST->canUseExtension(
1207 SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
1208 std::string DiagMsg = std::string(Builtin->Name) +
1209 ": the builtin requires the following SPIR-V "
1210 "extension: SPV_KHR_uniform_group_instructions";
1211 report_fatal_error(DiagMsg.c_str(), false);
1212 }
1213 const SPIRV::GroupUniformBuiltin *GroupUniform =
1214 SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
1215 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1216
1217 Register GroupResultReg = Call->ReturnRegister;
1218 MRI->setRegClass(GroupResultReg, &SPIRV::IDRegClass);
1219
1220 // Scope
1221 Register ScopeReg = Call->Arguments[0];
1222 MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
1223
1224 // Group Operation
1225 Register ConstGroupOpReg = Call->Arguments[1];
1226 const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
1227 if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
1229 "expect a constant group operation for a uniform group instruction",
1230 false);
1231 const MachineOperand &ConstOperand = Const->getOperand(1);
1232 if (!ConstOperand.isCImm())
1233 report_fatal_error("uniform group instructions: group operation must be an "
1234 "integer constant",
1235 false);
1236
1237 // Value
1238 Register ValueReg = Call->Arguments[2];
1239 MRI->setRegClass(ValueReg, &SPIRV::IDRegClass);
1240
1241 auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
1242 .addDef(GroupResultReg)
1243 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1244 .addUse(ScopeReg);
1245 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
1246 MIB.addUse(ValueReg);
1247
1248 return true;
1249}
1250
1252 MachineIRBuilder &MIRBuilder,
1253 SPIRVGlobalRegistry *GR) {
1254 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1255 MachineFunction &MF = MIRBuilder.getMF();
1256 const auto *ST = static_cast<const SPIRVSubtarget *>(&MF.getSubtarget());
1257 if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
1258 std::string DiagMsg = std::string(Builtin->Name) +
1259 ": the builtin requires the following SPIR-V "
1260 "extension: SPV_KHR_shader_clock";
1261 report_fatal_error(DiagMsg.c_str(), false);
1262 }
1263
1264 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1265 Register ResultReg = Call->ReturnRegister;
1266 MRI->setRegClass(ResultReg, &SPIRV::IDRegClass);
1267
1268 // Deduce the `Scope` operand from the builtin function name.
1269 SPIRV::Scope::Scope ScopeArg =
1271 .EndsWith("device", SPIRV::Scope::Scope::Device)
1272 .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
1273 .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
1274 Register ScopeReg = buildConstantIntReg(ScopeArg, MIRBuilder, GR);
1275
1276 MIRBuilder.buildInstr(SPIRV::OpReadClockKHR)
1277 .addDef(ResultReg)
1278 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1279 .addUse(ScopeReg);
1280
1281 return true;
1282}
1283
1284// These queries ask for a single size_t result for a given dimension index, e.g
1285// size_t get_global_id(uint dimindex). In SPIR-V, the builtins corresonding to
1286// these values are all vec3 types, so we need to extract the correct index or
1287// return defaultVal (0 or 1 depending on the query). We also handle extending
1288// or tuncating in case size_t does not match the expected result type's
1289// bitwidth.
1290//
1291// For a constant index >= 3 we generate:
1292// %res = OpConstant %SizeT 0
1293//
1294// For other indices we generate:
1295// %g = OpVariable %ptr_V3_SizeT Input
1296// OpDecorate %g BuiltIn XXX
1297// OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
1298// OpDecorate %g Constant
1299// %loadedVec = OpLoad %V3_SizeT %g
1300//
1301// Then, if the index is constant < 3, we generate:
1302// %res = OpCompositeExtract %SizeT %loadedVec idx
1303// If the index is dynamic, we generate:
1304// %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
1305// %cmp = OpULessThan %bool %idx %const_3
1306// %res = OpSelect %SizeT %cmp %tmp %const_0
1307//
1308// If the bitwidth of %res does not match the expected return type, we add an
1309// extend or truncate.
1311 MachineIRBuilder &MIRBuilder,
1313 SPIRV::BuiltIn::BuiltIn BuiltinValue,
1314 uint64_t DefaultValue) {
1315 Register IndexRegister = Call->Arguments[0];
1316 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
1317 const unsigned PointerSize = GR->getPointerSize();
1318 const SPIRVType *PointerSizeType =
1319 GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
1320 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1321 auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);
1322
1323 // Set up the final register to do truncation or extension on at the end.
1324 Register ToTruncate = Call->ReturnRegister;
1325
1326 // If the index is constant, we can statically determine if it is in range.
1327 bool IsConstantIndex =
1328 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
1329
1330 // If it's out of range (max dimension is 3), we can just return the constant
1331 // default value (0 or 1 depending on which query function).
1332 if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
1333 Register DefaultReg = Call->ReturnRegister;
1334 if (PointerSize != ResultWidth) {
1335 DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1336 MRI->setRegClass(DefaultReg, &SPIRV::IDRegClass);
1337 GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
1338 MIRBuilder.getMF());
1339 ToTruncate = DefaultReg;
1340 }
1341 auto NewRegister =
1342 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);
1343 MIRBuilder.buildCopy(DefaultReg, NewRegister);
1344 } else { // If it could be in range, we need to load from the given builtin.
1345 auto Vec3Ty =
1346 GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder);
1347 Register LoadedVector =
1348 buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
1349 LLT::fixed_vector(3, PointerSize));
1350 // Set up the vreg to extract the result to (possibly a new temporary one).
1351 Register Extracted = Call->ReturnRegister;
1352 if (!IsConstantIndex || PointerSize != ResultWidth) {
1353 Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1354 MRI->setRegClass(Extracted, &SPIRV::IDRegClass);
1355 GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
1356 }
1357 // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
1358 // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
1359 MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
1360 Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
1361 ExtractInst.addUse(LoadedVector).addUse(IndexRegister);
1362
1363 // If the index is dynamic, need check if it's < 3, and then use a select.
1364 if (!IsConstantIndex) {
1365 insertAssignInstr(Extracted, nullptr, PointerSizeType, GR, MIRBuilder,
1366 *MRI);
1367
1368 auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
1369 auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);
1370
1371 Register CompareRegister =
1372 MRI->createGenericVirtualRegister(LLT::scalar(1));
1373 MRI->setRegClass(CompareRegister, &SPIRV::IDRegClass);
1374 GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());
1375
1376 // Use G_ICMP to check if idxVReg < 3.
1377 MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
1378 GR->buildConstantInt(3, MIRBuilder, IndexType));
1379
1380 // Get constant for the default value (0 or 1 depending on which
1381 // function).
1382 Register DefaultRegister =
1383 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);
1384
1385 // Get a register for the selection result (possibly a new temporary one).
1386 Register SelectionResult = Call->ReturnRegister;
1387 if (PointerSize != ResultWidth) {
1388 SelectionResult =
1389 MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1390 MRI->setRegClass(SelectionResult, &SPIRV::IDRegClass);
1391 GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
1392 MIRBuilder.getMF());
1393 }
1394 // Create the final G_SELECT to return the extracted value or the default.
1395 MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
1396 DefaultRegister);
1397 ToTruncate = SelectionResult;
1398 } else {
1399 ToTruncate = Extracted;
1400 }
1401 }
1402 // Alter the result's bitwidth if it does not match the SizeT value extracted.
1403 if (PointerSize != ResultWidth)
1404 MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
1405 return true;
1406}
1407
1409 MachineIRBuilder &MIRBuilder,
1410 SPIRVGlobalRegistry *GR) {
1411 // Lookup the builtin variable record.
1412 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1413 SPIRV::BuiltIn::BuiltIn Value =
1414 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1415
1416 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1417 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1418
1419 // Build a load instruction for the builtin variable.
1420 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1421 LLT LLType;
1422 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1423 LLType =
1424 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
1425 else
1426 LLType = LLT::scalar(BitWidth);
1427
1428 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1429 LLType, Call->ReturnRegister);
1430}
1431
1433 MachineIRBuilder &MIRBuilder,
1434 SPIRVGlobalRegistry *GR) {
1435 // Lookup the instruction opcode in the TableGen records.
1436 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1437 unsigned Opcode =
1438 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1439
1440 switch (Opcode) {
1441 case SPIRV::OpStore:
1442 return buildAtomicInitInst(Call, MIRBuilder);
1443 case SPIRV::OpAtomicLoad:
1444 return buildAtomicLoadInst(Call, MIRBuilder, GR);
1445 case SPIRV::OpAtomicStore:
1446 return buildAtomicStoreInst(Call, MIRBuilder, GR);
1447 case SPIRV::OpAtomicCompareExchange:
1448 case SPIRV::OpAtomicCompareExchangeWeak:
1449 return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
1450 GR);
1451 case SPIRV::OpAtomicIAdd:
1452 case SPIRV::OpAtomicISub:
1453 case SPIRV::OpAtomicOr:
1454 case SPIRV::OpAtomicXor:
1455 case SPIRV::OpAtomicAnd:
1456 case SPIRV::OpAtomicExchange:
1457 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1458 case SPIRV::OpMemoryBarrier:
1459 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1460 case SPIRV::OpAtomicFlagTestAndSet:
1461 case SPIRV::OpAtomicFlagClear:
1462 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1463 default:
1464 return false;
1465 }
1466}
1467
1469 MachineIRBuilder &MIRBuilder,
1470 SPIRVGlobalRegistry *GR) {
1471 // Lookup the instruction opcode in the TableGen records.
1472 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1473 unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
1474
1475 switch (Opcode) {
1476 case SPIRV::OpAtomicFAddEXT:
1477 case SPIRV::OpAtomicFMinEXT:
1478 case SPIRV::OpAtomicFMaxEXT:
1479 return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
1480 default:
1481 return false;
1482 }
1483}
1484
1486 MachineIRBuilder &MIRBuilder,
1487 SPIRVGlobalRegistry *GR) {
1488 // Lookup the instruction opcode in the TableGen records.
1489 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1490 unsigned Opcode =
1491 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1492
1493 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1494}
1495
1497 MachineIRBuilder &MIRBuilder) {
1498 MIRBuilder.buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
1499 .addDef(Call->ReturnRegister)
1500 .addUse(Call->Arguments[0]);
1501 return true;
1502}
1503
1505 MachineIRBuilder &MIRBuilder,
1506 SPIRVGlobalRegistry *GR) {
1507 unsigned Opcode = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode();
1508 bool IsVec = Opcode == SPIRV::OpTypeVector;
1509 // Use OpDot only in case of vector args and OpFMul in case of scalar args.
1510 MIRBuilder.buildInstr(IsVec ? SPIRV::OpDot : SPIRV::OpFMulS)
1511 .addDef(Call->ReturnRegister)
1512 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1513 .addUse(Call->Arguments[0])
1514 .addUse(Call->Arguments[1]);
1515 return true;
1516}
1517
1519 MachineIRBuilder &MIRBuilder,
1520 SPIRVGlobalRegistry *GR) {
1521 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1522 SPIRV::BuiltIn::BuiltIn Value =
1523 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1524
1525 // For now, we only support a single Wave intrinsic with a single return type.
1526 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
1527 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));
1528
1530 MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
1531 /* isConst= */ false, /* hasLinkageTy= */ false);
1532}
1533
1535 MachineIRBuilder &MIRBuilder,
1536 SPIRVGlobalRegistry *GR) {
1537 // Lookup the builtin record.
1538 SPIRV::BuiltIn::BuiltIn Value =
1539 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1540 uint64_t IsDefault = (Value == SPIRV::BuiltIn::GlobalSize ||
1541 Value == SPIRV::BuiltIn::WorkgroupSize ||
1542 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
1543 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefault ? 1 : 0);
1544}
1545
1547 MachineIRBuilder &MIRBuilder,
1548 SPIRVGlobalRegistry *GR) {
1549 // Lookup the image size query component number in the TableGen records.
1550 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1551 uint32_t Component =
1552 SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
1553 // Query result may either be a vector or a scalar. If return type is not a
1554 // vector, expect only a single size component. Otherwise get the number of
1555 // expected components.
1556 SPIRVType *RetTy = Call->ReturnType;
1557 unsigned NumExpectedRetComponents = RetTy->getOpcode() == SPIRV::OpTypeVector
1558 ? RetTy->getOperand(2).getImm()
1559 : 1;
1560 // Get the actual number of query result/size components.
1561 SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1562 unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
1563 Register QueryResult = Call->ReturnRegister;
1564 SPIRVType *QueryResultType = Call->ReturnType;
1565 if (NumExpectedRetComponents != NumActualRetComponents) {
1566 QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
1567 LLT::fixed_vector(NumActualRetComponents, 32));
1568 MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::IDRegClass);
1569 SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
1570 QueryResultType = GR->getOrCreateSPIRVVectorType(
1571 IntTy, NumActualRetComponents, MIRBuilder);
1572 GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
1573 }
1574 bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
1575 unsigned Opcode =
1576 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
1577 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1578 auto MIB = MIRBuilder.buildInstr(Opcode)
1579 .addDef(QueryResult)
1580 .addUse(GR->getSPIRVTypeID(QueryResultType))
1581 .addUse(Call->Arguments[0]);
1582 if (!IsDimBuf)
1583 MIB.addUse(buildConstantIntReg(0, MIRBuilder, GR)); // Lod id.
1584 if (NumExpectedRetComponents == NumActualRetComponents)
1585 return true;
1586 if (NumExpectedRetComponents == 1) {
1587 // Only 1 component is expected, build OpCompositeExtract instruction.
1588 unsigned ExtractedComposite =
1589 Component == 3 ? NumActualRetComponents - 1 : Component;
1590 assert(ExtractedComposite < NumActualRetComponents &&
1591 "Invalid composite index!");
1592 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
1593 SPIRVType *NewType = nullptr;
1594 if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
1595 Register NewTypeReg = QueryResultType->getOperand(1).getReg();
1596 if (TypeReg != NewTypeReg &&
1597 (NewType = GR->getSPIRVTypeForVReg(NewTypeReg)) != nullptr)
1598 TypeReg = NewTypeReg;
1599 }
1600 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1601 .addDef(Call->ReturnRegister)
1602 .addUse(TypeReg)
1603 .addUse(QueryResult)
1604 .addImm(ExtractedComposite);
1605 if (NewType != nullptr)
1606 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
1607 MIRBuilder.getMF().getRegInfo());
1608 } else {
1609 // More than 1 component is expected, fill a new vector.
1610 auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
1611 .addDef(Call->ReturnRegister)
1612 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1613 .addUse(QueryResult)
1614 .addUse(QueryResult);
1615 for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
1616 MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
1617 }
1618 return true;
1619}
1620
1622 MachineIRBuilder &MIRBuilder,
1623 SPIRVGlobalRegistry *GR) {
1624 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
1625 "Image samples query result must be of int type!");
1626
1627 // Lookup the instruction opcode in the TableGen records.
1628 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1629 unsigned Opcode =
1630 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1631
1632 Register Image = Call->Arguments[0];
1633 MIRBuilder.getMRI()->setRegClass(Image, &SPIRV::IDRegClass);
1634 SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
1635 GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
1636 (void)ImageDimensionality;
1637
1638 switch (Opcode) {
1639 case SPIRV::OpImageQuerySamples:
1640 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
1641 "Image must be of 2D dimensionality");
1642 break;
1643 case SPIRV::OpImageQueryLevels:
1644 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
1645 ImageDimensionality == SPIRV::Dim::DIM_2D ||
1646 ImageDimensionality == SPIRV::Dim::DIM_3D ||
1647 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
1648 "Image must be of 1D/2D/3D/Cube dimensionality");
1649 break;
1650 }
1651
1652 MIRBuilder.buildInstr(Opcode)
1653 .addDef(Call->ReturnRegister)
1654 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1655 .addUse(Image);
1656 return true;
1657}
1658
1659// TODO: Move to TableGen.
1660static SPIRV::SamplerAddressingMode::SamplerAddressingMode
1662 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
1663 case SPIRV::CLK_ADDRESS_CLAMP:
1664 return SPIRV::SamplerAddressingMode::Clamp;
1665 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
1666 return SPIRV::SamplerAddressingMode::ClampToEdge;
1667 case SPIRV::CLK_ADDRESS_REPEAT:
1668 return SPIRV::SamplerAddressingMode::Repeat;
1669 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
1670 return SPIRV::SamplerAddressingMode::RepeatMirrored;
1671 case SPIRV::CLK_ADDRESS_NONE:
1672 return SPIRV::SamplerAddressingMode::None;
1673 default:
1674 report_fatal_error("Unknown CL address mode");
1675 }
1676}
1677
1678static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
1679 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
1680}
1681
1682static SPIRV::SamplerFilterMode::SamplerFilterMode
1684 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
1685 return SPIRV::SamplerFilterMode::Linear;
1686 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
1687 return SPIRV::SamplerFilterMode::Nearest;
1688 return SPIRV::SamplerFilterMode::Nearest;
1689}
1690
1691static bool generateReadImageInst(const StringRef DemangledCall,
1692 const SPIRV::IncomingCall *Call,
1693 MachineIRBuilder &MIRBuilder,
1694 SPIRVGlobalRegistry *GR) {
1695 Register Image = Call->Arguments[0];
1696 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1697 MRI->setRegClass(Image, &SPIRV::IDRegClass);
1698 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1699 bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
1700 bool HasMsaa = DemangledCall.contains_insensitive("msaa");
1701 if (HasOclSampler || HasMsaa)
1702 MRI->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
1703 if (HasOclSampler) {
1704 Register Sampler = Call->Arguments[1];
1705
1706 if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
1707 getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
1708 uint64_t SamplerMask = getIConstVal(Sampler, MRI);
1709 Sampler = GR->buildConstantSampler(
1711 getSamplerParamFromBitmask(SamplerMask),
1712 getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder,
1713 GR->getSPIRVTypeForVReg(Sampler));
1714 }
1715 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
1716 SPIRVType *SampledImageType =
1717 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
1718 Register SampledImage = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1719
1720 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
1721 .addDef(SampledImage)
1722 .addUse(GR->getSPIRVTypeID(SampledImageType))
1723 .addUse(Image)
1724 .addUse(Sampler);
1725
1727 MIRBuilder);
1728 SPIRVType *TempType = Call->ReturnType;
1729 bool NeedsExtraction = false;
1730 if (TempType->getOpcode() != SPIRV::OpTypeVector) {
1731 TempType =
1732 GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder);
1733 NeedsExtraction = true;
1734 }
1735 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(TempType));
1736 Register TempRegister = MRI->createGenericVirtualRegister(LLType);
1737 MRI->setRegClass(TempRegister, &SPIRV::IDRegClass);
1738 GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
1739
1740 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1741 .addDef(NeedsExtraction ? TempRegister : Call->ReturnRegister)
1742 .addUse(GR->getSPIRVTypeID(TempType))
1743 .addUse(SampledImage)
1744 .addUse(Call->Arguments[2]) // Coordinate.
1745 .addImm(SPIRV::ImageOperand::Lod)
1746 .addUse(Lod);
1747
1748 if (NeedsExtraction)
1749 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1750 .addDef(Call->ReturnRegister)
1751 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1752 .addUse(TempRegister)
1753 .addImm(0);
1754 } else if (HasMsaa) {
1755 MIRBuilder.buildInstr(SPIRV::OpImageRead)
1756 .addDef(Call->ReturnRegister)
1757 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1758 .addUse(Image)
1759 .addUse(Call->Arguments[1]) // Coordinate.
1760 .addImm(SPIRV::ImageOperand::Sample)
1761 .addUse(Call->Arguments[2]);
1762 } else {
1763 MIRBuilder.buildInstr(SPIRV::OpImageRead)
1764 .addDef(Call->ReturnRegister)
1765 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1766 .addUse(Image)
1767 .addUse(Call->Arguments[1]); // Coordinate.
1768 }
1769 return true;
1770}
1771
1773 MachineIRBuilder &MIRBuilder,
1774 SPIRVGlobalRegistry *GR) {
1775 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1776 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1777 MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
1778 MIRBuilder.buildInstr(SPIRV::OpImageWrite)
1779 .addUse(Call->Arguments[0]) // Image.
1780 .addUse(Call->Arguments[1]) // Coordinate.
1781 .addUse(Call->Arguments[2]); // Texel.
1782 return true;
1783}
1784
1785static bool generateSampleImageInst(const StringRef DemangledCall,
1786 const SPIRV::IncomingCall *Call,
1787 MachineIRBuilder &MIRBuilder,
1788 SPIRVGlobalRegistry *GR) {
1789 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1790 if (Call->Builtin->Name.contains_insensitive(
1791 "__translate_sampler_initializer")) {
1792 // Build sampler literal.
1793 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
1794 Register Sampler = GR->buildConstantSampler(
1795 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
1797 getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder, Call->ReturnType);
1798 return Sampler.isValid();
1799 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
1800 // Create OpSampledImage.
1801 Register Image = Call->Arguments[0];
1802 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
1803 SPIRVType *SampledImageType =
1804 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
1805 Register SampledImage =
1806 Call->ReturnRegister.isValid()
1807 ? Call->ReturnRegister
1808 : MRI->createVirtualRegister(&SPIRV::IDRegClass);
1809 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
1810 .addDef(SampledImage)
1811 .addUse(GR->getSPIRVTypeID(SampledImageType))
1812 .addUse(Image)
1813 .addUse(Call->Arguments[1]); // Sampler.
1814 return true;
1815 } else if (Call->Builtin->Name.contains_insensitive(
1816 "__spirv_ImageSampleExplicitLod")) {
1817 // Sample an image using an explicit level of detail.
1818 std::string ReturnType = DemangledCall.str();
1819 if (DemangledCall.contains("_R")) {
1820 ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
1821 ReturnType = ReturnType.substr(0, ReturnType.find('('));
1822 }
1823 SPIRVType *Type =
1824 Call->ReturnType
1825 ? Call->ReturnType
1826 : GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder);
1827 if (!Type) {
1828 std::string DiagMsg =
1829 "Unable to recognize SPIRV type name: " + ReturnType;
1830 report_fatal_error(DiagMsg.c_str());
1831 }
1832 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1833 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1834 MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass);
1835
1836 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1837 .addDef(Call->ReturnRegister)
1839 .addUse(Call->Arguments[0]) // Image.
1840 .addUse(Call->Arguments[1]) // Coordinate.
1841 .addImm(SPIRV::ImageOperand::Lod)
1842 .addUse(Call->Arguments[3]);
1843 return true;
1844 }
1845 return false;
1846}
1847
1849 MachineIRBuilder &MIRBuilder) {
1850 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
1851 Call->Arguments[1], Call->Arguments[2]);
1852 return true;
1853}
1854
1856 MachineIRBuilder &MIRBuilder,
1857 SPIRVGlobalRegistry *GR) {
1858 return buildOpFromWrapper(MIRBuilder, SPIRV::OpCompositeConstruct, Call,
1859 GR->getSPIRVTypeID(Call->ReturnType));
1860}
1861
1863 MachineIRBuilder &MIRBuilder,
1864 SPIRVGlobalRegistry *GR) {
1865 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1866 unsigned Opcode =
1867 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1868 bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR;
1869 unsigned ArgSz = Call->Arguments.size();
1870 unsigned LiteralIdx = 0;
1871 if (Opcode == SPIRV::OpCooperativeMatrixLoadKHR && ArgSz > 3)
1872 LiteralIdx = 3;
1873 else if (Opcode == SPIRV::OpCooperativeMatrixStoreKHR && ArgSz > 4)
1874 LiteralIdx = 4;
1876 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1877 if (LiteralIdx > 0)
1878 ImmArgs.push_back(getConstFromIntrinsic(Call->Arguments[LiteralIdx], MRI));
1879 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
1880 if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {
1881 SPIRVType *CoopMatrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1882 if (!CoopMatrType)
1883 report_fatal_error("Can't find a register's type definition");
1884 MIRBuilder.buildInstr(Opcode)
1885 .addDef(Call->ReturnRegister)
1886 .addUse(TypeReg)
1887 .addUse(CoopMatrType->getOperand(0).getReg());
1888 return true;
1889 }
1890 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
1891 IsSet ? TypeReg : Register(0), ImmArgs);
1892}
1893
1895 MachineIRBuilder &MIRBuilder,
1896 SPIRVGlobalRegistry *GR) {
1897 // Lookup the instruction opcode in the TableGen records.
1898 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1899 unsigned Opcode =
1900 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1901 const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1902
1903 switch (Opcode) {
1904 case SPIRV::OpSpecConstant: {
1905 // Build the SpecID decoration.
1906 unsigned SpecId =
1907 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
1908 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
1909 {SpecId});
1910 // Determine the constant MI.
1911 Register ConstRegister = Call->Arguments[1];
1912 const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
1913 assert(Const &&
1914 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
1915 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
1916 "Argument should be either an int or floating-point constant");
1917 // Determine the opcode and built the OpSpec MI.
1918 const MachineOperand &ConstOperand = Const->getOperand(1);
1919 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
1920 assert(ConstOperand.isCImm() && "Int constant operand is expected");
1921 Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
1922 ? SPIRV::OpSpecConstantTrue
1923 : SPIRV::OpSpecConstantFalse;
1924 }
1925 auto MIB = MIRBuilder.buildInstr(Opcode)
1926 .addDef(Call->ReturnRegister)
1927 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1928
1929 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
1930 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
1931 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
1932 else
1933 addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
1934 }
1935 return true;
1936 }
1937 case SPIRV::OpSpecConstantComposite: {
1938 auto MIB = MIRBuilder.buildInstr(Opcode)
1939 .addDef(Call->ReturnRegister)
1940 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1941 for (unsigned i = 0; i < Call->Arguments.size(); i++)
1942 MIB.addUse(Call->Arguments[i]);
1943 return true;
1944 }
1945 default:
1946 return false;
1947 }
1948}
1949
1950static bool buildNDRange(const SPIRV::IncomingCall *Call,
1951 MachineIRBuilder &MIRBuilder,
1952 SPIRVGlobalRegistry *GR) {
1953 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1954 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1955 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1956 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
1957 PtrType->getOperand(2).isReg());
1958 Register TypeReg = PtrType->getOperand(2).getReg();
1960 MachineFunction &MF = MIRBuilder.getMF();
1961 Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1962 GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
1963 // Skip the first arg, it's the destination pointer. OpBuildNDRange takes
1964 // three other arguments, so pass zero constant on absence.
1965 unsigned NumArgs = Call->Arguments.size();
1966 assert(NumArgs >= 2);
1967 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
1968 MRI->setRegClass(GlobalWorkSize, &SPIRV::IDRegClass);
1969 Register LocalWorkSize =
1970 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
1971 if (LocalWorkSize.isValid())
1972 MRI->setRegClass(LocalWorkSize, &SPIRV::IDRegClass);
1973 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
1974 if (GlobalWorkOffset.isValid())
1975 MRI->setRegClass(GlobalWorkOffset, &SPIRV::IDRegClass);
1976 if (NumArgs < 4) {
1977 Register Const;
1978 SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
1979 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
1980 MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
1981 assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
1982 DefInstr->getOperand(3).isReg());
1983 Register GWSPtr = DefInstr->getOperand(3).getReg();
1984 if (!MRI->getRegClassOrNull(GWSPtr))
1985 MRI->setRegClass(GWSPtr, &SPIRV::IDRegClass);
1986 // TODO: Maybe simplify generation of the type of the fields.
1987 unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;
1988 unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
1990 Type *FieldTy = ArrayType::get(BaseTy, Size);
1991 SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(FieldTy, MIRBuilder);
1992 GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1993 GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
1994 MIRBuilder.buildInstr(SPIRV::OpLoad)
1995 .addDef(GlobalWorkSize)
1996 .addUse(GR->getSPIRVTypeID(SpvFieldTy))
1997 .addUse(GWSPtr);
1998 const SPIRVSubtarget &ST =
1999 cast<SPIRVSubtarget>(MIRBuilder.getMF().getSubtarget());
2000 Const = GR->getOrCreateConstIntArray(0, Size, *MIRBuilder.getInsertPt(),
2001 SpvFieldTy, *ST.getInstrInfo());
2002 } else {
2003 Const = GR->buildConstantInt(0, MIRBuilder, SpvTy);
2004 }
2005 if (!LocalWorkSize.isValid())
2006 LocalWorkSize = Const;
2007 if (!GlobalWorkOffset.isValid())
2008 GlobalWorkOffset = Const;
2009 }
2010 assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
2011 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
2012 .addDef(TmpReg)
2013 .addUse(TypeReg)
2014 .addUse(GlobalWorkSize)
2015 .addUse(LocalWorkSize)
2016 .addUse(GlobalWorkOffset);
2017 return MIRBuilder.buildInstr(SPIRV::OpStore)
2018 .addUse(Call->Arguments[0])
2019 .addUse(TmpReg);
2020}
2021
2022// TODO: maybe move to the global register.
2023static SPIRVType *
2025 SPIRVGlobalRegistry *GR) {
2026 LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
2027 Type *OpaqueType = StructType::getTypeByName(Context, "spirv.DeviceEvent");
2028 if (!OpaqueType)
2029 OpaqueType = StructType::getTypeByName(Context, "opencl.clk_event_t");
2030 if (!OpaqueType)
2031 OpaqueType = StructType::create(Context, "spirv.DeviceEvent");
2032 unsigned SC0 = storageClassToAddressSpace(SPIRV::StorageClass::Function);
2033 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2034 Type *PtrType = PointerType::get(PointerType::get(OpaqueType, SC0), SC1);
2035 return GR->getOrCreateSPIRVType(PtrType, MIRBuilder);
2036}
2037
2039 MachineIRBuilder &MIRBuilder,
2040 SPIRVGlobalRegistry *GR) {
2041 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2042 const DataLayout &DL = MIRBuilder.getDataLayout();
2043 bool IsSpirvOp = Call->isSpirvOp();
2044 bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;
2045 const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
2046
2047 // Make vararg instructions before OpEnqueueKernel.
2048 // Local sizes arguments: Sizes of block invoke arguments. Clang generates
2049 // local size operands as an array, so we need to unpack them.
2050 SmallVector<Register, 16> LocalSizes;
2051 if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
2052 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
2053 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
2054 MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
2055 assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
2056 GepMI->getOperand(3).isReg());
2057 Register ArrayReg = GepMI->getOperand(3).getReg();
2058 MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
2059 const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
2060 assert(LocalSizeTy && "Local size type is expected");
2061 const uint64_t LocalSizeNum =
2062 cast<ArrayType>(LocalSizeTy)->getNumElements();
2063 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
2064 const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
2065 const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
2066 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
2067 for (unsigned I = 0; I < LocalSizeNum; ++I) {
2068 Register Reg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
2069 MRI->setType(Reg, LLType);
2070 GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
2071 auto GEPInst = MIRBuilder.buildIntrinsic(
2072 Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
2073 GEPInst
2074 .addImm(GepMI->getOperand(2).getImm()) // In bound.
2075 .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
2076 .addUse(buildConstantIntReg(0, MIRBuilder, GR)) // Indices.
2077 .addUse(buildConstantIntReg(I, MIRBuilder, GR));
2078 LocalSizes.push_back(Reg);
2079 }
2080 }
2081
2082 // SPIRV OpEnqueueKernel instruction has 10+ arguments.
2083 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
2084 .addDef(Call->ReturnRegister)
2085 .addUse(GR->getSPIRVTypeID(Int32Ty));
2086
2087 // Copy all arguments before block invoke function pointer.
2088 const unsigned BlockFIdx = HasEvents ? 6 : 3;
2089 for (unsigned i = 0; i < BlockFIdx; i++)
2090 MIB.addUse(Call->Arguments[i]);
2091
2092 // If there are no event arguments in the original call, add dummy ones.
2093 if (!HasEvents) {
2094 MIB.addUse(buildConstantIntReg(0, MIRBuilder, GR)); // Dummy num events.
2095 Register NullPtr = GR->getOrCreateConstNullPtr(
2096 MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
2097 MIB.addUse(NullPtr); // Dummy wait events.
2098 MIB.addUse(NullPtr); // Dummy ret event.
2099 }
2100
2101 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
2102 assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
2103 // Invoke: Pointer to invoke function.
2104 MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
2105
2106 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
2107 // Param: Pointer to block literal.
2108 MIB.addUse(BlockLiteralReg);
2109
2110 Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
2111 // TODO: these numbers should be obtained from block literal structure.
2112 // Param Size: Size of block literal structure.
2113 MIB.addUse(buildConstantIntReg(DL.getTypeStoreSize(PType), MIRBuilder, GR));
2114 // Param Aligment: Aligment of block literal structure.
2115 MIB.addUse(
2116 buildConstantIntReg(DL.getPrefTypeAlign(PType).value(), MIRBuilder, GR));
2117
2118 for (unsigned i = 0; i < LocalSizes.size(); i++)
2119 MIB.addUse(LocalSizes[i]);
2120 return true;
2121}
2122
2124 MachineIRBuilder &MIRBuilder,
2125 SPIRVGlobalRegistry *GR) {
2126 // Lookup the instruction opcode in the TableGen records.
2127 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2128 unsigned Opcode =
2129 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2130
2131 switch (Opcode) {
2132 case SPIRV::OpRetainEvent:
2133 case SPIRV::OpReleaseEvent:
2134 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2135 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
2136 case SPIRV::OpCreateUserEvent:
2137 case SPIRV::OpGetDefaultQueue:
2138 return MIRBuilder.buildInstr(Opcode)
2139 .addDef(Call->ReturnRegister)
2140 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
2141 case SPIRV::OpIsValidEvent:
2142 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2143 return MIRBuilder.buildInstr(Opcode)
2144 .addDef(Call->ReturnRegister)
2145 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2146 .addUse(Call->Arguments[0]);
2147 case SPIRV::OpSetUserEventStatus:
2148 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2149 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
2150 return MIRBuilder.buildInstr(Opcode)
2151 .addUse(Call->Arguments[0])
2152 .addUse(Call->Arguments[1]);
2153 case SPIRV::OpCaptureEventProfilingInfo:
2154 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2155 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
2156 MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
2157 return MIRBuilder.buildInstr(Opcode)
2158 .addUse(Call->Arguments[0])
2159 .addUse(Call->Arguments[1])
2160 .addUse(Call->Arguments[2]);
2161 case SPIRV::OpBuildNDRange:
2162 return buildNDRange(Call, MIRBuilder, GR);
2163 case SPIRV::OpEnqueueKernel:
2164 return buildEnqueueKernel(Call, MIRBuilder, GR);
2165 default:
2166 return false;
2167 }
2168}
2169
2171 MachineIRBuilder &MIRBuilder,
2172 SPIRVGlobalRegistry *GR) {
2173 // Lookup the instruction opcode in the TableGen records.
2174 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2175 unsigned Opcode =
2176 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2177
2178 bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;
2179 Register TypeReg = GR->getSPIRVTypeID(Call->ReturnType);
2180 if (Call->isSpirvOp())
2181 return buildOpFromWrapper(MIRBuilder, Opcode, Call,
2182 IsSet ? TypeReg : Register(0));
2183
2184 auto Scope = buildConstantIntReg(SPIRV::Scope::Workgroup, MIRBuilder, GR);
2185
2186 switch (Opcode) {
2187 case SPIRV::OpGroupAsyncCopy: {
2188 SPIRVType *NewType =
2189 Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
2190 ? nullptr
2191 : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder);
2192 Register TypeReg = GR->getSPIRVTypeID(NewType ? NewType : Call->ReturnType);
2193 unsigned NumArgs = Call->Arguments.size();
2194 Register EventReg = Call->Arguments[NumArgs - 1];
2195 bool Res = MIRBuilder.buildInstr(Opcode)
2196 .addDef(Call->ReturnRegister)
2197 .addUse(TypeReg)
2198 .addUse(Scope)
2199 .addUse(Call->Arguments[0])
2200 .addUse(Call->Arguments[1])
2201 .addUse(Call->Arguments[2])
2202 .addUse(Call->Arguments.size() > 4
2203 ? Call->Arguments[3]
2204 : buildConstantIntReg(1, MIRBuilder, GR))
2205 .addUse(EventReg);
2206 if (NewType != nullptr)
2207 insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
2208 MIRBuilder.getMF().getRegInfo());
2209 return Res;
2210 }
2211 case SPIRV::OpGroupWaitEvents:
2212 return MIRBuilder.buildInstr(Opcode)
2213 .addUse(Scope)
2214 .addUse(Call->Arguments[0])
2215 .addUse(Call->Arguments[1]);
2216 default:
2217 return false;
2218 }
2219}
2220
2221static bool generateConvertInst(const StringRef DemangledCall,
2222 const SPIRV::IncomingCall *Call,
2223 MachineIRBuilder &MIRBuilder,
2224 SPIRVGlobalRegistry *GR) {
2225 // Lookup the conversion builtin in the TableGen records.
2226 const SPIRV::ConvertBuiltin *Builtin =
2227 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
2228
2229 if (Builtin->IsSaturated)
2230 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2231 SPIRV::Decoration::SaturatedConversion, {});
2232 if (Builtin->IsRounded)
2233 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
2234 SPIRV::Decoration::FPRoundingMode,
2235 {(unsigned)Builtin->RoundingMode});
2236
2237 std::string NeedExtMsg; // no errors if empty
2238 bool IsRightComponentsNumber = true; // check if input/output accepts vectors
2239 unsigned Opcode = SPIRV::OpNop;
2240 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
2241 // Int -> ...
2242 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2243 // Int -> Int
2244 if (Builtin->IsSaturated)
2245 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
2246 : SPIRV::OpSatConvertSToU;
2247 else
2248 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
2249 : SPIRV::OpSConvert;
2250 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2251 SPIRV::OpTypeFloat)) {
2252 // Int -> Float
2253 if (Builtin->IsBfloat16) {
2254 const auto *ST = static_cast<const SPIRVSubtarget *>(
2255 &MIRBuilder.getMF().getSubtarget());
2256 if (!ST->canUseExtension(
2257 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2258 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2259 IsRightComponentsNumber =
2260 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2261 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2262 Opcode = SPIRV::OpConvertBF16ToFINTEL;
2263 } else {
2264 bool IsSourceSigned =
2265 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
2266 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
2267 }
2268 }
2269 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
2270 SPIRV::OpTypeFloat)) {
2271 // Float -> ...
2272 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
2273 // Float -> Int
2274 if (Builtin->IsBfloat16) {
2275 const auto *ST = static_cast<const SPIRVSubtarget *>(
2276 &MIRBuilder.getMF().getSubtarget());
2277 if (!ST->canUseExtension(
2278 SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
2279 NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
2280 IsRightComponentsNumber =
2281 GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
2282 GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
2283 Opcode = SPIRV::OpConvertFToBF16INTEL;
2284 } else {
2285 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
2286 : SPIRV::OpConvertFToU;
2287 }
2288 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
2289 SPIRV::OpTypeFloat)) {
2290 // Float -> Float
2291 Opcode = SPIRV::OpFConvert;
2292 }
2293 }
2294
2295 if (!NeedExtMsg.empty()) {
2296 std::string DiagMsg = std::string(Builtin->Name) +
2297 ": the builtin requires the following SPIR-V "
2298 "extension: " +
2299 NeedExtMsg;
2300 report_fatal_error(DiagMsg.c_str(), false);
2301 }
2302 if (!IsRightComponentsNumber) {
2303 std::string DiagMsg =
2304 std::string(Builtin->Name) +
2305 ": result and argument must have the same number of components";
2306 report_fatal_error(DiagMsg.c_str(), false);
2307 }
2308 assert(Opcode != SPIRV::OpNop &&
2309 "Conversion between the types not implemented!");
2310
2311 MIRBuilder.buildInstr(Opcode)
2312 .addDef(Call->ReturnRegister)
2313 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2314 .addUse(Call->Arguments[0]);
2315 return true;
2316}
2317
2319 MachineIRBuilder &MIRBuilder,
2320 SPIRVGlobalRegistry *GR) {
2321 // Lookup the vector load/store builtin in the TableGen records.
2322 const SPIRV::VectorLoadStoreBuiltin *Builtin =
2323 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
2324 Call->Builtin->Set);
2325 // Build extended instruction.
2326 auto MIB =
2327 MIRBuilder.buildInstr(SPIRV::OpExtInst)
2328 .addDef(Call->ReturnRegister)
2329 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
2330 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
2331 .addImm(Builtin->Number);
2332 for (auto Argument : Call->Arguments)
2333 MIB.addUse(Argument);
2334 if (Builtin->Name.contains("load") && Builtin->ElementCount > 1)
2335 MIB.addImm(Builtin->ElementCount);
2336
2337 // Rounding mode should be passed as a last argument in the MI for builtins
2338 // like "vstorea_halfn_r".
2339 if (Builtin->IsRounded)
2340 MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
2341 return true;
2342}
2343
2345 MachineIRBuilder &MIRBuilder,
2346 SPIRVGlobalRegistry *GR) {
2347 // Lookup the instruction opcode in the TableGen records.
2348 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
2349 unsigned Opcode =
2350 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
2351 bool IsLoad = Opcode == SPIRV::OpLoad;
2352 // Build the instruction.
2353 auto MIB = MIRBuilder.buildInstr(Opcode);
2354 if (IsLoad) {
2355 MIB.addDef(Call->ReturnRegister);
2356 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
2357 }
2358 // Add a pointer to the value to load/store.
2359 MIB.addUse(Call->Arguments[0]);
2360 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
2361 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
2362 // Add a value to store.
2363 if (!IsLoad) {
2364 MIB.addUse(Call->Arguments[1]);
2365 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
2366 }
2367 // Add optional memory attributes and an alignment.
2368 unsigned NumArgs = Call->Arguments.size();
2369 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3) {
2370 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
2371 MRI->setRegClass(Call->Arguments[IsLoad ? 1 : 2], &SPIRV::IDRegClass);
2372 }
2373 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4) {
2374 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
2375 MRI->setRegClass(Call->Arguments[IsLoad ? 2 : 3], &SPIRV::IDRegClass);
2376 }
2377 return true;
2378}
2379
2380/// Lowers a builtin funtion call using the provided \p DemangledCall skeleton
2381/// and external instruction \p Set.
2382namespace SPIRV {
2383std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
2384 SPIRV::InstructionSet::InstructionSet Set,
2385 MachineIRBuilder &MIRBuilder,
2386 const Register OrigRet, const Type *OrigRetTy,
2387 const SmallVectorImpl<Register> &Args,
2388 SPIRVGlobalRegistry *GR) {
2389 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
2390
2391 // SPIR-V type and return register.
2392 Register ReturnRegister = OrigRet;
2393 SPIRVType *ReturnType = nullptr;
2394 if (OrigRetTy && !OrigRetTy->isVoidTy()) {
2395 ReturnType = GR->assignTypeToVReg(OrigRetTy, OrigRet, MIRBuilder);
2396 if (!MIRBuilder.getMRI()->getRegClassOrNull(ReturnRegister))
2397 MIRBuilder.getMRI()->setRegClass(ReturnRegister, &SPIRV::IDRegClass);
2398 } else if (OrigRetTy && OrigRetTy->isVoidTy()) {
2399 ReturnRegister = MIRBuilder.getMRI()->createVirtualRegister(&IDRegClass);
2400 MIRBuilder.getMRI()->setType(ReturnRegister, LLT::scalar(32));
2401 ReturnType = GR->assignTypeToVReg(OrigRetTy, ReturnRegister, MIRBuilder);
2402 }
2403
2404 // Lookup the builtin in the TableGen records.
2405 std::unique_ptr<const IncomingCall> Call =
2406 lookupBuiltin(DemangledCall, Set, ReturnRegister, ReturnType, Args);
2407
2408 if (!Call) {
2409 LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
2410 return std::nullopt;
2411 }
2412
2413 // TODO: check if the provided args meet the builtin requirments.
2414 assert(Args.size() >= Call->Builtin->MinNumArgs &&
2415 "Too few arguments to generate the builtin");
2416 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
2417 LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");
2418
2419 // Match the builtin with implementation based on the grouping.
2420 switch (Call->Builtin->Group) {
2421 case SPIRV::Extended:
2422 return generateExtInst(Call.get(), MIRBuilder, GR);
2423 case SPIRV::Relational:
2424 return generateRelationalInst(Call.get(), MIRBuilder, GR);
2425 case SPIRV::Group:
2426 return generateGroupInst(Call.get(), MIRBuilder, GR);
2427 case SPIRV::Variable:
2428 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
2429 case SPIRV::Atomic:
2430 return generateAtomicInst(Call.get(), MIRBuilder, GR);
2431 case SPIRV::AtomicFloating:
2432 return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
2433 case SPIRV::Barrier:
2434 return generateBarrierInst(Call.get(), MIRBuilder, GR);
2435 case SPIRV::CastToPtr:
2436 return generateCastToPtrInst(Call.get(), MIRBuilder);
2437 case SPIRV::Dot:
2438 return generateDotOrFMulInst(Call.get(), MIRBuilder, GR);
2439 case SPIRV::Wave:
2440 return generateWaveInst(Call.get(), MIRBuilder, GR);
2441 case SPIRV::GetQuery:
2442 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
2443 case SPIRV::ImageSizeQuery:
2444 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
2445 case SPIRV::ImageMiscQuery:
2446 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
2447 case SPIRV::ReadImage:
2448 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
2449 case SPIRV::WriteImage:
2450 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
2451 case SPIRV::SampleImage:
2452 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
2453 case SPIRV::Select:
2454 return generateSelectInst(Call.get(), MIRBuilder);
2455 case SPIRV::Construct:
2456 return generateConstructInst(Call.get(), MIRBuilder, GR);
2457 case SPIRV::SpecConstant:
2458 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
2459 case SPIRV::Enqueue:
2460 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
2461 case SPIRV::AsyncCopy:
2462 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
2463 case SPIRV::Convert:
2464 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
2465 case SPIRV::VectorLoadStore:
2466 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
2467 case SPIRV::LoadStore:
2468 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
2469 case SPIRV::IntelSubgroups:
2470 return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
2471 case SPIRV::GroupUniform:
2472 return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
2473 case SPIRV::KernelClock:
2474 return generateKernelClockInst(Call.get(), MIRBuilder, GR);
2475 case SPIRV::CoopMatr:
2476 return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
2477 }
2478 return false;
2479}
2480
2482 unsigned ArgIdx, LLVMContext &Ctx) {
2483 SmallVector<StringRef, 10> BuiltinArgsTypeStrs;
2484 StringRef BuiltinArgs =
2485 DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
2486 BuiltinArgs.split(BuiltinArgsTypeStrs, ',', -1, false);
2487 if (ArgIdx >= BuiltinArgsTypeStrs.size())
2488 return nullptr;
2489 StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
2490
2491 // Parse strings representing OpenCL builtin types.
2492 if (hasBuiltinTypePrefix(TypeStr)) {
2493 // OpenCL builtin types in demangled call strings have the following format:
2494 // e.g. ocl_image2d_ro
2495 [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
2496 assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");
2497
2498 // Check if this is pointer to a builtin type and not just pointer
2499 // representing a builtin type. In case it is a pointer to builtin type,
2500 // this will require additional handling in the method calling
2501 // parseBuiltinCallArgumentBaseType(...) as this function only retrieves the
2502 // base types.
2503 if (TypeStr.ends_with("*"))
2504 TypeStr = TypeStr.slice(0, TypeStr.find_first_of(" *"));
2505
2506 return parseBuiltinTypeNameToTargetExtType("opencl." + TypeStr.str() + "_t",
2507 Ctx);
2508 }
2509
2510 // Parse type name in either "typeN" or "type vector[N]" format, where
2511 // N is the number of elements of the vector.
2512 Type *BaseType;
2513 unsigned VecElts = 0;
2514
2515 BaseType = parseBasicTypeName(TypeStr, Ctx);
2516 if (!BaseType)
2517 // Unable to recognize SPIRV type name.
2518 return nullptr;
2519
2520 if (BaseType->isVoidTy())
2522
2523 // Handle "typeN*" or "type vector[N]*".
2524 TypeStr.consume_back("*");
2525
2526 if (TypeStr.consume_front(" vector["))
2527 TypeStr = TypeStr.substr(0, TypeStr.find(']'));
2528
2529 TypeStr.getAsInteger(10, VecElts);
2530 if (VecElts > 0)
2531 BaseType = VectorType::get(BaseType, VecElts, false);
2532
2533 return BaseType;
2534}
2535
2539};
2540
2541#define GET_BuiltinTypes_DECL
2542#define GET_BuiltinTypes_IMPL
2543
2547};
2548
2549#define GET_OpenCLTypes_DECL
2550#define GET_OpenCLTypes_IMPL
2551
2552#include "SPIRVGenTables.inc"
2553} // namespace SPIRV
2554
2555//===----------------------------------------------------------------------===//
2556// Misc functions for parsing builtin types.
2557//===----------------------------------------------------------------------===//
2558
2560 if (Name.starts_with("void"))
2561 return Type::getVoidTy(Context);
2562 else if (Name.starts_with("int") || Name.starts_with("uint"))
2563 return Type::getInt32Ty(Context);
2564 else if (Name.starts_with("float"))
2565 return Type::getFloatTy(Context);
2566 else if (Name.starts_with("half"))
2567 return Type::getHalfTy(Context);
2568 report_fatal_error("Unable to recognize type!");
2569}
2570
2571//===----------------------------------------------------------------------===//
2572// Implementation functions for builtin types.
2573//===----------------------------------------------------------------------===//
2574
2576 const SPIRV::BuiltinType *TypeRecord,
2577 MachineIRBuilder &MIRBuilder,
2578 SPIRVGlobalRegistry *GR) {
2579 unsigned Opcode = TypeRecord->Opcode;
2580 // Create or get an existing type from GlobalRegistry.
2581 return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
2582}
2583
2585 SPIRVGlobalRegistry *GR) {
2586 // Create or get an existing type from GlobalRegistry.
2587 return GR->getOrCreateOpTypeSampler(MIRBuilder);
2588}
2589
2590static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
2591 MachineIRBuilder &MIRBuilder,
2592 SPIRVGlobalRegistry *GR) {
2593 assert(ExtensionType->getNumIntParameters() == 1 &&
2594 "Invalid number of parameters for SPIR-V pipe builtin!");
2595 // Create or get an existing type from GlobalRegistry.
2596 return GR->getOrCreateOpTypePipe(MIRBuilder,
2597 SPIRV::AccessQualifier::AccessQualifier(
2598 ExtensionType->getIntParameter(0)));
2599}
2600
2601static SPIRVType *getCoopMatrType(const TargetExtType *ExtensionType,
2602 MachineIRBuilder &MIRBuilder,
2603 SPIRVGlobalRegistry *GR) {
2604 assert(ExtensionType->getNumIntParameters() == 4 &&
2605 "Invalid number of parameters for SPIR-V coop matrices builtin!");
2606 assert(ExtensionType->getNumTypeParameters() == 1 &&
2607 "SPIR-V coop matrices builtin type must have a type parameter!");
2608 const SPIRVType *ElemType =
2609 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder);
2610 // Create or get an existing type from GlobalRegistry.
2611 return GR->getOrCreateOpTypeCoopMatr(
2612 MIRBuilder, ExtensionType, ElemType, ExtensionType->getIntParameter(0),
2613 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
2614 ExtensionType->getIntParameter(3));
2615}
2616
2617static SPIRVType *
2618getImageType(const TargetExtType *ExtensionType,
2619 const SPIRV::AccessQualifier::AccessQualifier Qualifier,
2620 MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
2621 assert(ExtensionType->getNumTypeParameters() == 1 &&
2622 "SPIR-V image builtin type must have sampled type parameter!");
2623 const SPIRVType *SampledType =
2624 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder);
2625 assert(ExtensionType->getNumIntParameters() == 7 &&
2626 "Invalid number of parameters for SPIR-V image builtin!");
2627 // Create or get an existing type from GlobalRegistry.
2628 return GR->getOrCreateOpTypeImage(
2629 MIRBuilder, SampledType,
2630 SPIRV::Dim::Dim(ExtensionType->getIntParameter(0)),
2631 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
2632 ExtensionType->getIntParameter(3), ExtensionType->getIntParameter(4),
2633 SPIRV::ImageFormat::ImageFormat(ExtensionType->getIntParameter(5)),
2634 Qualifier == SPIRV::AccessQualifier::WriteOnly
2635 ? SPIRV::AccessQualifier::WriteOnly
2636 : SPIRV::AccessQualifier::AccessQualifier(
2637 ExtensionType->getIntParameter(6)));
2638}
2639
2641 MachineIRBuilder &MIRBuilder,
2642 SPIRVGlobalRegistry *GR) {
2643 SPIRVType *OpaqueImageType = getImageType(
2644 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder, GR);
2645 // Create or get an existing type from GlobalRegistry.
2646 return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
2647}
2648
2649namespace SPIRV {
2651 LLVMContext &Context) {
2652 StringRef NameWithParameters = TypeName;
2653
2654 // Pointers-to-opaque-structs representing OpenCL types are first translated
2655 // to equivalent SPIR-V types. OpenCL builtin type names should have the
2656 // following format: e.g. %opencl.event_t
2657 if (NameWithParameters.starts_with("opencl.")) {
2658 const SPIRV::OpenCLType *OCLTypeRecord =
2659 SPIRV::lookupOpenCLType(NameWithParameters);
2660 if (!OCLTypeRecord)
2661 report_fatal_error("Missing TableGen record for OpenCL type: " +
2662 NameWithParameters);
2663 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
2664 // Continue with the SPIR-V builtin type...
2665 }
2666
2667 // Names of the opaque structs representing a SPIR-V builtins without
2668 // parameters should have the following format: e.g. %spirv.Event
2669 assert(NameWithParameters.starts_with("spirv.") &&
2670 "Unknown builtin opaque type!");
2671
2672 // Parameterized SPIR-V builtins names follow this format:
2673 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
2674 if (!NameWithParameters.contains('_'))
2675 return TargetExtType::get(Context, NameWithParameters);
2676
2677 SmallVector<StringRef> Parameters;
2678 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
2679 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
2680
2681 SmallVector<Type *, 1> TypeParameters;
2682 bool HasTypeParameter = !isDigit(Parameters[0][0]);
2683 if (HasTypeParameter)
2684 TypeParameters.push_back(parseTypeString(Parameters[0], Context));
2685 SmallVector<unsigned> IntParameters;
2686 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
2687 unsigned IntParameter = 0;
2688 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
2689 (void)ValidLiteral;
2690 assert(ValidLiteral &&
2691 "Invalid format of SPIR-V builtin parameter literal!");
2692 IntParameters.push_back(IntParameter);
2693 }
2694 return TargetExtType::get(Context,
2695 NameWithParameters.substr(0, BaseNameLength),
2696 TypeParameters, IntParameters);
2697}
2698
2700 SPIRV::AccessQualifier::AccessQualifier AccessQual,
2701 MachineIRBuilder &MIRBuilder,
2702 SPIRVGlobalRegistry *GR) {
2703 // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
2704 // target(...) target extension types or pointers-to-opaque-structs. The
2705 // approach relying on structs is deprecated and works only in the non-opaque
2706 // pointer mode (-opaque-pointers=0).
2707 // In order to maintain compatibility with LLVM IR generated by older versions
2708 // of Clang and LLVM/SPIR-V Translator, the pointers-to-opaque-structs are
2709 // "translated" to target extension types. This translation is temporary and
2710 // will be removed in the future release of LLVM.
2711 const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
2712 if (!BuiltinType)
2714 OpaqueType->getStructName().str(), MIRBuilder.getContext());
2715
2716 unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
2717
2718 const StringRef Name = BuiltinType->getName();
2719 LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
2720
2721 // Lookup the demangled builtin type in the TableGen records.
2722 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
2723 if (!TypeRecord)
2724 report_fatal_error("Missing TableGen record for builtin type: " + Name);
2725
2726 // "Lower" the BuiltinType into TargetType. The following get<...>Type methods
2727 // use the implementation details from TableGen records or TargetExtType
2728 // parameters to either create a new OpType<...> machine instruction or get an
2729 // existing equivalent SPIRVType from GlobalRegistry.
2730 SPIRVType *TargetType;
2731 switch (TypeRecord->Opcode) {
2732 case SPIRV::OpTypeImage:
2733 TargetType = getImageType(BuiltinType, AccessQual, MIRBuilder, GR);
2734 break;
2735 case SPIRV::OpTypePipe:
2736 TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
2737 break;
2738 case SPIRV::OpTypeDeviceEvent:
2739 TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
2740 break;
2741 case SPIRV::OpTypeSampler:
2742 TargetType = getSamplerType(MIRBuilder, GR);
2743 break;
2744 case SPIRV::OpTypeSampledImage:
2745 TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
2746 break;
2747 case SPIRV::OpTypeCooperativeMatrixKHR:
2748 TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
2749 break;
2750 default:
2751 TargetType =
2752 getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
2753 break;
2754 }
2755
2756 // Emit OpName instruction if a new OpType<...> instruction was added
2757 // (equivalent type was not found in GlobalRegistry).
2758 if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
2759 buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
2760
2761 return TargetType;
2762}
2763} // namespace SPIRV
2764} // namespace llvm
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
AMDGPU Lower Kernel Arguments
return RetTy
#define LLVM_DEBUG(X)
Definition: Debug.h:101
std::string Name
uint64_t Size
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned Reg
static bool isDigit(const char C)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
This file contains some functions that are useful when dealing with strings.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
APInt bitcastToAPInt() const
Definition: APFloat.h:1254
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Definition: APFloat.h:982
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:213
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1499
This class represents an incoming formal argument to a Function.
Definition: Argument.h:31
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Definition: Type.cpp:647
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:782
@ ICMP_EQ
equal
Definition: InstrTypes.h:778
const APFloat & getValueAPF() const
Definition: Constants.h:312
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:146
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
Tagged union holding either a T or a Error.
Definition: Error.h:481
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:539
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:358
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:278
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelType.h:64
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
Definition: LowLevelType.h:57
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelType.h:100
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
LLVMContext & getContext() const
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
const DataLayout & getDataLayout() const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
Definition: MachineInstr.h:69
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:569
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:579
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
const MDNode * getMetadata() const
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
const TargetRegisterClass * getRegClassOrNull(Register Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet.
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isValid() const
Definition: Register.h:116
SPIRVType * getOrCreateOpTypePipe(MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder)
Register getOrCreateConsIntVector(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR=true)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
Register buildConstantSampler(Register Res, unsigned AddrMode, unsigned Param, unsigned FilerMode, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorComponentCount(Register VReg) const
SPIRVType * getOrCreateOpTypeImage(MachineIRBuilder &MIRBuilder, SPIRVType *SampledType, SPIRV::Dim::Dim Dim, uint32_t Depth, uint32_t Arrayed, uint32_t Multisampled, uint32_t Sampled, SPIRV::ImageFormat::ImageFormat ImageFormat, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVType * getOrCreateOpTypeByOpcode(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite, bool EmitIR=true)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, MachineFunction &MF)
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, bool HasLinkageTy, SPIRV::LinkageType::LinkageType LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
SPIRVType * getOrCreateOpTypeSampledImage(SPIRVType *ImageType, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVTypeByName(StringRef TypeStr, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC=SPIRV::StorageClass::Function, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite)
SPIRVType * assignTypeToVReg(const Type *Type, Register VReg, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite, bool EmitIR=true)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRVType * getOrCreateOpTypeDeviceEvent(MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVPointerType(SPIRVType *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SClass=SPIRV::StorageClass::Function)
SPIRVType * getOrCreateOpTypeCoopMatr(MachineIRBuilder &MIRBuilder, const TargetExtType *ExtensionType, const SPIRVType *ElemType, uint32_t Scope, uint32_t Rows, uint32_t Columns, uint32_t Use)
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
SPIRVType * getOrCreateOpTypeSampler(MachineIRBuilder &MIRBuilder)
Register buildConstantInt(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr, bool EmitIR=true)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:586
void push_back(const T &Elt)
Definition: SmallVector.h:426
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1209
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:693
bool consume_back(StringRef Suffix)
Returns true if this StringRef has the given suffix and removes that suffix.
Definition: StringRef.h:648
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
Definition: StringRef.h:463
std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:223
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition: StringRef.h:564
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
Definition: StringRef.h:258
bool contains_insensitive(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:429
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Definition: StringRef.h:677
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:417
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
Definition: StringRef.h:628
size_t find_first_of(char C, size_t From=0) const
Find the first character in the string that is C, or npos if not found.
Definition: StringRef.h:370
size_t find(char C, size_t From=0) const
Search for the first character C in the string.
Definition: StringRef.h:290
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
Definition: StringRef.h:270
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:44
StringSwitch & EndsWith(StringLiteral S, T Value)
Definition: StringSwitch.h:76
Class to represent struct types.
Definition: DerivedTypes.h:216
static StructType * getTypeByName(LLVMContext &C, StringRef Name)
Return the type with the specified name, or null if there is none by that name.
Definition: Type.cpp:632
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition: Type.cpp:513
Class to represent target extensions types, which are generally unintrospectable from target-independ...
Definition: DerivedTypes.h:720
unsigned getNumIntParameters() const
Definition: DerivedTypes.h:765
static TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types=std::nullopt, ArrayRef< unsigned > Ints=std::nullopt)
Return a target extension type having the specified name and optional type and integer parameters.
Definition: Type.cpp:796
Type * getTypeParameter(unsigned i) const
Definition: DerivedTypes.h:755
unsigned getNumTypeParameters() const
Definition: DerivedTypes.h:756
unsigned getIntParameter(unsigned i) const
Definition: DerivedTypes.h:764
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static Type * getHalfTy(LLVMContext &C)
StringRef getStructName() const
static Type * getVoidTy(LLVMContext &C)
static IntegerType * getInt8Ty(LLVMContext &C)
static IntegerType * getInt32Ty(LLVMContext &C)
static Type * getFloatTy(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
LLVM Value Representation.
Definition: Value.h:74
Value(Type *Ty, unsigned scid)
Definition: Value.cpp:53
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
Definition: Type.cpp:676
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:353
LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount)
Create a vector type that contains a defined type and has a specific number of elements.
Definition: Core.cpp:884
Type * parseBuiltinCallArgumentBaseType(const StringRef DemangledCall, unsigned ArgIdx, LLVMContext &Ctx)
Parses the provided ArgIdx argument base type in the DemangledCall skeleton.
TargetExtType * parseBuiltinTypeNameToTargetExtType(std::string TypeName, LLVMContext &Context)
Translates a string representing a SPIR-V or OpenCL builtin type to a TargetExtType that can be furth...
std::optional< bool > lowerBuiltin(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, MachineIRBuilder &MIRBuilder, const Register OrigRet, const Type *OrigRetTy, const SmallVectorImpl< Register > &Args, SPIRVGlobalRegistry *GR)
SPIRVType * lowerBuiltinType(const Type *OpaqueType, SPIRV::AccessQualifier::AccessQualifier AccessQual, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
StorageClass
Definition: XCOFF.h:170
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
Definition: SPIRVUtils.cpp:100
unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
Definition: SPIRVUtils.cpp:166
static bool generateGetQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildConstantIntReg(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, unsigned BitWidth=32)
static bool generateConstructInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic flag instructions (e.g.
static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder, SPIRVType *VariableType, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType, Register Reg=Register(0), bool isConst=true, bool hasLinkageTy=true)
Helper function for building a load instruction for loading a builtin global variable of BuiltinValue...
static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::SamplerFilterMode::SamplerFilterMode getSamplerFilterModeFromBitmask(unsigned Bitmask)
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic store instruction.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
Definition: SPIRVUtils.cpp:80
static const Type * getBlockStructType(Register ParamReg, MachineRegisterInfo *MRI)
static bool generateGroupInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim)
Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB, MachineRegisterInfo &MRI)
Helper external function for inserting ASSIGN_TYPE instuction between Reg and its definition,...
static Register buildScopeReg(Register CLScopeRegister, SPIRV::Scope::Scope Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI)
static std::tuple< Register, SPIRVType * > buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType, SPIRVGlobalRegistry *GR)
Helper function building either a resulting scalar or vector bool register depending on the expected ...
static unsigned getNumSizeComponents(SPIRVType *imgType)
Helper function for obtaining the number of size components.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
Definition: SPIRVUtils.cpp:273
static SPIRVType * getSampledImageType(const TargetExtType *OpaqueType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
Definition: SPIRVUtils.cpp:218
static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSampleImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBarrierInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getCoopMatrType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateKernelClockInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateWaveInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getImageType(const TargetExtType *ExtensionType, const SPIRV::AccessQualifier::AccessQualifier Qualifier, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
Definition: SPIRVUtils.cpp:117
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building barriers, i.e., memory/control ordering operations.
static bool generateAsyncCopy(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope)
static SPIRVType * getSamplerType(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, LLT LowLevelType, Register DestinationReg=Register(0))
Helper function for building a load instruction loading into the DestinationReg.
static bool generateEnqueueInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:167
static bool buildSelectInst(MachineIRBuilder &MIRBuilder, Register ReturnRegister, Register SourceRegister, const SPIRVType *ReturnType, SPIRVGlobalRegistry *GR)
Helper function for building either a vector or scalar select instruction depending on the expected R...
static const Type * getMachineInstrType(MachineInstr *MI)
static SPIRV::SamplerAddressingMode::SamplerAddressingMode getSamplerAddressingModeFromBitmask(unsigned Bitmask)
static bool generateAtomicInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConvertInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildMemSemanticsReg(Register SemanticsRegister, Register PtrRegister, unsigned &Semantics, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI)
static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSelectInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic load instruction.
static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx)
Definition: SPIRVUtils.cpp:404
static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, uint64_t DefaultValue)
static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static std::unique_ptr< const SPIRV::IncomingCall > lookupBuiltin(StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
Looks up the demangled builtin call in the SPIRVBuiltins.td records using the provided DemangledCall ...
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic floating-type instruction.
MachineInstr * getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI)
Definition: SPIRVUtils.cpp:254
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:191
const MachineInstr SPIRVType
static bool generateReadImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
bool hasBuiltinTypePrefix(StringRef Name)
Definition: SPIRVUtils.cpp:376
static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * getMDOperandAsType(const MDNode *N, unsigned I)
Definition: SPIRVUtils.cpp:285
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic instructions.
static SPIRV::MemorySemantics::MemorySemantics getSPIRVMemSemantics(std::memory_order MemOrder)
static bool generateRelationalInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
Helper function for translating atomic init to OpStore.
static bool generateWriteImageInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getPipeType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Type * parseTypeString(const StringRef Name, LLVMContext &Context)
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
Definition: SPIRVUtils.cpp:279
static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildNDRange(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getNonParameterizedType(const TargetExtType *ExtensionType, const SPIRV::BuiltinType *TypeRecord, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static MachineInstr * getBlockStructInstr(Register ParamReg, MachineRegisterInfo *MRI)
static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode, const SPIRV::IncomingCall *Call, Register TypeReg, ArrayRef< uint32_t > ImmArgs={})
static unsigned getSamplerParamFromBitmask(unsigned Bitmask)
static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic compare-exchange instruction.
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
static bool generateBuiltinVar(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:271
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
This class contains a discriminated union of information about pointers in memory operands,...
FPRoundingMode::FPRoundingMode RoundingMode
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
BuiltIn::BuiltIn Value
InstructionSet::InstructionSet Set
const SmallVectorImpl< Register > & Arguments
const std::string BuiltinName
const SPIRVType * ReturnType
const Register ReturnRegister
const DemangledBuiltin * Builtin
IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin, const Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
FPRoundingMode::FPRoundingMode RoundingMode