//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements lowering builtin function calls and types using their
// demangled names and TableGen records.
//
//===----------------------------------------------------------------------===//

#include "SPIRVBuiltins.h"
#include "SPIRV.h"
#include "SPIRVUtils.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IntrinsicsSPIRV.h"
#include <string>
#include <tuple>

#define DEBUG_TYPE "spirv-builtins"

namespace llvm {
namespace SPIRV {
#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"

struct DemangledBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltinGroup Group;
  uint8_t MinNumArgs;
  uint8_t MaxNumArgs;
};

#define GET_DemangledBuiltins_DECL
#define GET_DemangledBuiltins_IMPL

struct IncomingCall {
  const std::string BuiltinName;
  const DemangledBuiltin *Builtin;

  const Register ReturnRegister;
  const SPIRVType *ReturnType;
  const SmallVectorImpl<Register> &Arguments;

  IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin,
               const Register ReturnRegister, const SPIRVType *ReturnType,
               const SmallVectorImpl<Register> &Arguments)
      : BuiltinName(BuiltinName), Builtin(Builtin),
        ReturnRegister(ReturnRegister), ReturnType(ReturnType),
        Arguments(Arguments) {}
};

struct NativeBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Opcode;
};

#define GET_NativeBuiltins_DECL
#define GET_NativeBuiltins_IMPL

struct GroupBuiltin {
  StringRef Name;
  uint32_t Opcode;
  uint32_t GroupOperation;
  bool IsElect;
  bool IsAllOrAny;
  bool IsAllEqual;
  bool IsBallot;
  bool IsInverseBallot;
  bool IsBallotBitExtract;
  bool IsBallotFindBit;
  bool IsLogical;
  bool NoGroupOperation;
  bool HasBoolArg;
};

#define GET_GroupBuiltins_DECL
#define GET_GroupBuiltins_IMPL

struct GetBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  BuiltIn::BuiltIn Value;
};

using namespace BuiltIn;
#define GET_GetBuiltins_DECL
#define GET_GetBuiltins_IMPL

struct ImageQueryBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Component;
};

#define GET_ImageQueryBuiltins_DECL
#define GET_ImageQueryBuiltins_IMPL

struct ConvertBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  bool IsDestinationSigned;
  bool IsSaturated;
  bool IsRounded;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

struct VectorLoadStoreBuiltin {
  StringRef Name;
  InstructionSet::InstructionSet Set;
  uint32_t Number;
  bool IsRounded;
  FPRoundingMode::FPRoundingMode RoundingMode;
};

using namespace FPRoundingMode;
#define GET_ConvertBuiltins_DECL
#define GET_ConvertBuiltins_IMPL

using namespace InstructionSet;
#define GET_VectorLoadStoreBuiltins_DECL
#define GET_VectorLoadStoreBuiltins_IMPL

#define GET_CLMemoryScope_DECL
#define GET_CLSamplerAddressingMode_DECL
#define GET_CLMemoryFenceFlags_DECL
#define GET_ExtendedBuiltins_DECL
#include "SPIRVGenTables.inc"
} // namespace SPIRV

//===----------------------------------------------------------------------===//
// Misc functions for looking up builtins and verifying requirements using
// TableGen records
//===----------------------------------------------------------------------===//

/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
/// the provided \p DemangledCall and specified \p Set.
///
/// The lookup follows the following algorithm, returning the first successful
/// match:
/// 1. Search with the plain demangled name (expecting a 1:1 match).
/// 2. Search with the prefix before or suffix after the demangled name
///    signifying the type of the first argument.
///
/// \returns Wrapper around the demangled call and found builtin definition.
static std::unique_ptr<const SPIRV::IncomingCall>
lookupBuiltin(StringRef DemangledCall,
              SPIRV::InstructionSet::InstructionSet Set,
              Register ReturnRegister, const SPIRVType *ReturnType,
              const SmallVectorImpl<Register> &Arguments) {
  // Extract the builtin function name and types of arguments from the call
  // skeleton.
  std::string BuiltinName =
      DemangledCall.substr(0, DemangledCall.find('(')).str();

  // Check if the extracted name contains type information between angle
  // brackets. If so, the builtin is an instantiated template and the
  // information after the angle brackets and the return type need to be
  // removed.
  if (BuiltinName.find('<') && BuiltinName.back() == '>') {
    BuiltinName = BuiltinName.substr(0, BuiltinName.find('<'));
    BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(" ") + 1);
  }

  // Check if the extracted name begins with "__spirv_ImageSampleExplicitLod"
  // and contains return type information at the end "_R<type>". If so, extract
  // the plain builtin name without the type information.
  if (StringRef(BuiltinName).contains("__spirv_ImageSampleExplicitLod") &&
      StringRef(BuiltinName).contains("_R")) {
    BuiltinName = BuiltinName.substr(0, BuiltinName.find("_R"));
  }

  SmallVector<StringRef, 10> BuiltinArgumentTypes;
  StringRef BuiltinArgs =
      DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
  BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);

  // Look up the builtin in the defined set. Start with the plain demangled
  // name, expecting a 1:1 match in the defined builtin set.
  const SPIRV::DemangledBuiltin *Builtin;
  if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
    return std::make_unique<SPIRV::IncomingCall>(
        BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

  // If the initial look up was unsuccessful and the demangled call takes at
  // least 1 argument, add a prefix or suffix signifying the type of the first
  // argument and repeat the search.
  if (BuiltinArgumentTypes.size() >= 1) {
    char FirstArgumentType = BuiltinArgumentTypes[0][0];
    // Prefix to be added to the builtin's name for lookup.
    // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
    std::string Prefix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "u_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "s_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      if (Set == SPIRV::InstructionSet::OpenCL_std ||
          Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "f";
      break;
    }

    // If argument-type name prefix was added, look up the builtin again.
    if (!Prefix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

    // If lookup with a prefix failed, find a suffix to be added to the
    // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
    // an unsigned value has a suffix "u".
    std::string Suffix;

    switch (FirstArgumentType) {
    // Unsigned:
    case 'u':
      Suffix = "u";
      break;
    // Signed:
    case 'c':
    case 's':
    case 'i':
    case 'l':
      Suffix = "s";
      break;
    // Floating-point:
    case 'f':
    case 'd':
    case 'h':
      Suffix = "f";
      break;
    }

    // If argument-type name suffix was added, look up the builtin again.
    if (!Suffix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
  }

  // No builtin with such name was found in the set.
  return nullptr;
}
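
// Example (illustrative, not part of the upstream file): for an OpenCL call
// demangled as "abs(unsigned int)", the first lookup tries the plain name
// "abs"; if SPIRVBuiltins.td only records the builtin as "u_abs", the retry
// with the "u_" prefix derived from the first argument type ('u') finds it.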

//===----------------------------------------------------------------------===//
// Helper functions for building misc instructions
//===----------------------------------------------------------------------===//

/// Helper function building either a resulting scalar or vector bool register
/// depending on the expected \p ResultType.
///
/// \returns Tuple of the resulting register and its type.
static std::tuple<Register, SPIRVType *>
buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
                  SPIRVGlobalRegistry *GR) {
  LLT Type;
  SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);

  if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned VectorElements = ResultType->getOperand(2).getImm();
    BoolType =
        GR->getOrCreateSPIRVVectorType(BoolType, VectorElements, MIRBuilder);
    const FixedVectorType *LLVMVectorType =
        cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
    Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
  } else {
    Type = LLT::scalar(1);
  }

  Register ResultRegister =
      MIRBuilder.getMRI()->createGenericVirtualRegister(Type);
  MIRBuilder.getMRI()->setRegClass(ResultRegister, &SPIRV::IDRegClass);
  GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
  return std::make_tuple(ResultRegister, BoolType);
}

/// Helper function for building either a vector or scalar select instruction
/// depending on the expected \p ResultType.
static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
                            Register ReturnRegister, Register SourceRegister,
                            const SPIRVType *ReturnType,
                            SPIRVGlobalRegistry *GR) {
  Register TrueConst, FalseConst;

  if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
    unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
    uint64_t AllOnes = APInt::getAllOnes(Bits).getZExtValue();
    TrueConst = GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType);
    FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType);
  } else {
    TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType);
    FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType);
  }
  return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
                                FalseConst);
}
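
// Illustrative MIR shape for a vector relational builtin such as "isequal"
// (register names and opcodes assumed, not taken from a real test): the
// comparison yields an i1 vector, and the select expands it to OpenCL's
// -1/0 per-lane encoding:
//   %cmp:_(<4 x s1>)  = OpFOrdEqual %a, %b
//   %res:_(<4 x s32>) = G_SELECT %cmp, %all_ones, %zeroes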

/// Helper function for building a load instruction loading into the
/// \p DestinationReg.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR, LLT LowLevelType,
                              Register DestinationReg = Register(0)) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (!DestinationReg.isValid()) {
    DestinationReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    MRI->setType(DestinationReg, LLT::scalar(32));
    GR->assignSPIRVTypeToVReg(BaseType, DestinationReg, MIRBuilder.getMF());
  }
  // TODO: consider using correct address space and alignment (p0 is canonical
  // type for selection though).
  MachinePointerInfo PtrInfo = MachinePointerInfo();
  MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
  return DestinationReg;
}

/// Helper function for building a load instruction for loading a builtin
/// global variable of \p BuiltinValue value.
static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder,
                                         SPIRVType *VariableType,
                                         SPIRVGlobalRegistry *GR,
                                         SPIRV::BuiltIn::BuiltIn BuiltinValue,
                                         LLT LLType,
                                         Register Reg = Register(0)) {
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
  MIRBuilder.getMRI()->setType(NewRegister,
                               LLT::pointer(0, GR->getPointerSize()));
  SPIRVType *PtrType = GR->getOrCreateSPIRVPointerType(
      VariableType, MIRBuilder, SPIRV::StorageClass::Input);
  GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Set up the global OpVariable with the necessary builtin decorations.
  Register Variable = GR->buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, true, true,
      SPIRV::LinkageType::Import, MIRBuilder, false);

  // Load the value from the global variable.
  Register LoadedRegister =
      buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
  return LoadedRegister;
}

/// Helper external function for inserting ASSIGN_TYPE instruction between
/// \p Reg and its definition, set the new register as a destination of the
/// definition, assign SPIRVType to both registers. If SpirvTy is provided,
/// use it as SPIRVType in ASSIGN_TYPE, otherwise create it from \p Ty.
/// Defined in SPIRVPreLegalizer.cpp.
extern Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
                                  SPIRVGlobalRegistry *GR,
                                  MachineIRBuilder &MIB,
                                  MachineRegisterInfo &MRI);

// TODO: Move to TableGen.
static SPIRV::MemorySemantics::MemorySemantics
getSPIRVMemSemantics(std::memory_order MemOrder) {
  switch (MemOrder) {
  case std::memory_order::memory_order_relaxed:
    return SPIRV::MemorySemantics::None;
  case std::memory_order::memory_order_acquire:
    return SPIRV::MemorySemantics::Acquire;
  case std::memory_order::memory_order_release:
    return SPIRV::MemorySemantics::Release;
  case std::memory_order::memory_order_acq_rel:
    return SPIRV::MemorySemantics::AcquireRelease;
  case std::memory_order::memory_order_seq_cst:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  default:
    llvm_unreachable("Unknown CL memory order");
  }
}
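
// For reference (bit values from the SPIR-V spec, listed here only as an
// illustration): relaxed -> None (0x0), acquire -> Acquire (0x2),
// release -> Release (0x4), acq_rel -> AcquireRelease (0x8), and
// seq_cst -> SequentiallyConsistent (0x10). Storage-class bits such as
// WorkgroupMemory are OR-ed in separately by the callers below.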

static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
  switch (ClScope) {
  case SPIRV::CLMemoryScope::memory_scope_work_item:
    return SPIRV::Scope::Invocation;
  case SPIRV::CLMemoryScope::memory_scope_work_group:
    return SPIRV::Scope::Workgroup;
  case SPIRV::CLMemoryScope::memory_scope_device:
    return SPIRV::Scope::Device;
  case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
    return SPIRV::Scope::CrossDevice;
  case SPIRV::CLMemoryScope::memory_scope_sub_group:
    return SPIRV::Scope::Subgroup;
  }
  llvm_unreachable("Unknown CL memory scope");
}

static Register buildConstantIntReg(uint64_t Val, MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR,
                                    unsigned BitWidth = 32) {
  SPIRVType *IntType = GR->getOrCreateSPIRVIntegerType(BitWidth, MIRBuilder);
  return GR->buildConstantInt(Val, MIRBuilder, IntType);
}

static Register buildScopeReg(Register CLScopeRegister,
                              SPIRV::Scope::Scope Scope,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              MachineRegisterInfo *MRI) {
  if (CLScopeRegister.isValid()) {
    auto CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
    Scope = getSPIRVScope(CLScope);

    if (CLScope == static_cast<unsigned>(Scope)) {
      MRI->setRegClass(CLScopeRegister, &SPIRV::IDRegClass);
      return CLScopeRegister;
    }
  }
  return buildConstantIntReg(Scope, MIRBuilder, GR);
}

static Register buildMemSemanticsReg(Register SemanticsRegister,
                                     Register PtrRegister, unsigned &Semantics,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  if (SemanticsRegister.isValid()) {
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    std::memory_order Order =
        static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
    Semantics =
        getSPIRVMemSemantics(Order) |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));

    if (Order == Semantics) {
      MRI->setRegClass(SemanticsRegister, &SPIRV::IDRegClass);
      return SemanticsRegister;
    }
  }
  return buildConstantIntReg(Semantics, MIRBuilder, GR);
}

/// Helper function for translating atomic init to OpStore.
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder) {
  assert(Call->Arguments.size() == 2 &&
         "Need 2 arguments for atomic init translation");
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
  MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
}
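
// Illustrative lowering (assumed, not from a test): "atomic_init(&obj, 42)"
// becomes a plain store, since C11/OpenCL atomic_init carries no ordering:
//   OpStore %obj_ptr %const_42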

/// Helper function for building an atomic load instruction.
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  Register PtrRegister = Call->Arguments[0];
  MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass);
  // TODO: if true, insert a call to __translate_ocl_memory_scope before
  // OpAtomicLoad and the function implementation. We can use Translator's
  // output for transcoding/atomic_explicit_arguments.cl as an example.
  Register ScopeRegister;
  if (Call->Arguments.size() > 1) {
    ScopeRegister = Call->Arguments[1];
    MIRBuilder.getMRI()->setRegClass(ScopeRegister, &SPIRV::IDRegClass);
  } else
    ScopeRegister = buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);

  Register MemSemanticsReg;
  if (Call->Arguments.size() > 2) {
    // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
    MemSemanticsReg = Call->Arguments[2];
    MIRBuilder.getMRI()->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
  } else {
    int Semantics =
        SPIRV::MemorySemantics::SequentiallyConsistent |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
    MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GR);
  }

  MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg);
  return true;
}
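
// Illustrative result for "atomic_load(&x)" with no explicit scope or order
// arguments (register names assumed): Device scope and SequentiallyConsistent
// semantics, plus the pointer's storage-class bits, become constants:
//   %scope = OpConstant %uint 1    ; Device
//   %sem   = OpConstant %uint ...  ; SeqCst | storage-class memory bits
//   %val   = OpAtomicLoad %int %x_ptr %scope %sem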

/// Helper function for building an atomic store instruction.
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  Register ScopeRegister =
      buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);
  Register PtrRegister = Call->Arguments[0];
  MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass);
  int Semantics =
      SPIRV::MemorySemantics::SequentiallyConsistent |
      getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
  Register MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GR);
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
  MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(Call->Arguments[1]);
  return true;
}

/// Helper function for building an atomic compare-exchange instruction.
static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call,
                                           MachineIRBuilder &MIRBuilder,
                                           SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
  bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register ObjectPtr = Call->Arguments[0];   // Pointer (volatile A *object).
  Register ExpectedArg = Call->Arguments[1]; // Comparator (C *expected).
  Register Desired = Call->Arguments[2];     // Value (C desired).
  MRI->setRegClass(ObjectPtr, &SPIRV::IDRegClass);
  MRI->setRegClass(ExpectedArg, &SPIRV::IDRegClass);
  MRI->setRegClass(Desired, &SPIRV::IDRegClass);
  SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
  LLT DesiredLLT = MRI->getType(Desired);

  assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
         SPIRV::OpTypePointer);
  unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
  assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
                   : ExpectedType == SPIRV::OpTypePointer);
  assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));

  SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
  assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
      SpvObjectPtrTy->getOperand(1).getImm());
  auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);

  Register MemSemEqualReg;
  Register MemSemUnequalReg;
  uint64_t MemSemEqual =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  uint64_t MemSemUnequal =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  if (Call->Arguments.size() >= 4) {
    assert(Call->Arguments.size() >= 5 &&
           "Need 5+ args for explicit atomic cmpxchg");
    auto MemOrdEq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
    auto MemOrdNeq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
    MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
    MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
    if (MemOrdEq == MemSemEqual)
      MemSemEqualReg = Call->Arguments[3];
    if (MemOrdNeq == MemSemUnequal)
      MemSemUnequalReg = Call->Arguments[4];
    MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass);
    MRI->setRegClass(Call->Arguments[4], &SPIRV::IDRegClass);
  }
  if (!MemSemEqualReg.isValid())
    MemSemEqualReg = buildConstantIntReg(MemSemEqual, MIRBuilder, GR);
  if (!MemSemUnequalReg.isValid())
    MemSemUnequalReg = buildConstantIntReg(MemSemUnequal, MIRBuilder, GR);

  Register ScopeReg;
  auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
  if (Call->Arguments.size() >= 6) {
    assert(Call->Arguments.size() == 6 &&
           "Extra args for explicit atomic cmpxchg");
    auto ClScope = static_cast<SPIRV::CLMemoryScope>(
        getIConstVal(Call->Arguments[5], MRI));
    Scope = getSPIRVScope(ClScope);
    if (ClScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[5];
    MRI->setRegClass(Call->Arguments[5], &SPIRV::IDRegClass);
  }
  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GR);

  Register Expected = IsCmpxchg
                          ? ExpectedArg
                          : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
                                          GR, LLT::scalar(32));
  MRI->setType(Expected, DesiredLLT);
  Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
                            : Call->ReturnRegister;
  if (!MRI->getRegClassOrNull(Tmp))
    MRI->setRegClass(Tmp, &SPIRV::IDRegClass);
  GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());

  SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
  MIRBuilder.buildInstr(Opcode)
      .addDef(Tmp)
      .addUse(GR->getSPIRVTypeID(IntTy))
      .addUse(ObjectPtr)
      .addUse(ScopeReg)
      .addUse(MemSemEqualReg)
      .addUse(MemSemUnequalReg)
      .addUse(Desired)
      .addUse(Expected);
  if (!IsCmpxchg) {
    MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
  }
  return true;
}
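
// Illustrative shape of the code emitted for the C11-style form (register
// names assumed): the comparator is loaded from the "expected" pointer first,
// and the boolean result is recovered by comparing the returned original
// value:
//   %exp  = OpLoad %int %expected_ptr
//   %orig = OpAtomicCompareExchange %int %obj %scope %eq %neq %desired %exp
//   OpStore %expected_ptr %orig
//   %res  = OpIEqual %bool %orig %exp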

/// Helper function for building an atomic read-modify-write instruction.
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register ScopeRegister =
      Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();

  assert(Call->Arguments.size() <= 4 &&
         "Too many args for explicit atomic RMW");
  ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
                                MIRBuilder, GR, MRI);

  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::None;
  MRI->setRegClass(PtrRegister, &SPIRV::IDRegClass);
  Register MemSemanticsReg =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);
  MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(PtrRegister)
      .addUse(ScopeRegister)
      .addUse(MemSemanticsReg)
      .addUse(Call->Arguments[1]);
  return true;
}
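
// Illustrative result for "atomic_fetch_add(&x, 1)" with defaulted arguments
// (names assumed): the scope and memory-semantics operands are materialized
// as integer constants when the call does not pass them explicitly:
//   %res = OpAtomicIAdd %int %x_ptr %scope %sem %const_1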

/// Helper function for building atomic flag instructions (e.g.
/// OpAtomicFlagTestAndSet).
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call,
                                unsigned Opcode, MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);

  assert((Opcode != SPIRV::OpAtomicFlagClear ||
          (Semantics != SPIRV::MemorySemantics::Acquire &&
           Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
         "Invalid memory order argument!");

  Register ScopeRegister =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  ScopeRegister =
      buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);

  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (Opcode == SPIRV::OpAtomicFlagTestAndSet)
    MIB.addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType));

  MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
  return true;
}

/// Helper function for building barriers, i.e., memory/control ordering
/// operations.
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
                             MachineIRBuilder &MIRBuilder,
                             SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
  unsigned MemSemantics = SPIRV::MemorySemantics::None;

  if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;

  if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;

  if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::ImageMemory;

  if (Opcode == SPIRV::OpMemoryBarrier) {
    std::memory_order MemOrder =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[1], MRI));
    MemSemantics = getSPIRVMemSemantics(MemOrder) | MemSemantics;
  } else {
    MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
  }

  Register MemSemanticsReg;
  if (MemFlags == MemSemantics) {
    MemSemanticsReg = Call->Arguments[0];
    MRI->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
  } else
    MemSemanticsReg = buildConstantIntReg(MemSemantics, MIRBuilder, GR);

  Register ScopeReg;
  SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
  SPIRV::Scope::Scope MemScope = Scope;
  if (Call->Arguments.size() >= 2) {
    assert(
        ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
         (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
        "Extra args for explicitly scoped barrier");
    Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
                                                           : Call->Arguments[1];
    SPIRV::CLMemoryScope CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
    MemScope = getSPIRVScope(CLScope);
    if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
        (Opcode == SPIRV::OpMemoryBarrier))
      Scope = MemScope;

    if (CLScope == static_cast<unsigned>(Scope)) {
      ScopeReg = Call->Arguments[1];
      MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
    }
  }

  if (!ScopeReg.isValid())
    ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GR);

  auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
  if (Opcode != SPIRV::OpMemoryBarrier)
    MIB.addUse(buildConstantIntReg(MemScope, MIRBuilder, GR));
  MIB.addUse(MemSemanticsReg);
  return true;
}
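
// Illustrative lowering for "barrier(CLK_LOCAL_MEM_FENCE)" (encodings from
// the SPIR-V spec, register names assumed): both scopes default to Workgroup
// and the fence flag adds WorkgroupMemory to the semantics:
//   %scope = OpConstant %uint 2       ; Workgroup
//   %sem   = OpConstant %uint 0x110   ; SeqCst | WorkgroupMemory
//   OpControlBarrier %scope %scope %sem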

static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
  switch (dim) {
  case SPIRV::Dim::DIM_1D:
  case SPIRV::Dim::DIM_Buffer:
    return 1;
  case SPIRV::Dim::DIM_2D:
  case SPIRV::Dim::DIM_Cube:
  case SPIRV::Dim::DIM_Rect:
    return 2;
  case SPIRV::Dim::DIM_3D:
    return 3;
  default:
    llvm_unreachable("Cannot get num components for given Dim");
  }
}

/// Helper function for obtaining the number of size components.
static unsigned getNumSizeComponents(SPIRVType *imgType) {
  assert(imgType->getOpcode() == SPIRV::OpTypeImage);
  auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
  unsigned numComps = getNumComponentsForDim(dim);
  bool arrayed = imgType->getOperand(4).getImm() == 1;
  return arrayed ? numComps + 1 : numComps;
}
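
// For example, a 2D arrayed image reports 3 size components (width, height,
// and the number of array layers), while a buffer image reports just 1.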

//===----------------------------------------------------------------------===//
// Implementation functions for each builtin group
//===----------------------------------------------------------------------===//

static bool generateExtInst(const SPIRV::IncomingCall *Call,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR) {
  // Lookup the extended instruction number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Number =
      SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;

  // Build extended instruction.
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Number);

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);
  return true;
}

static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register CompareRegister;
  SPIRVType *RelationType;
  std::tie(CompareRegister, RelationType) =
      buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  // Build relational instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(CompareRegister)
                 .addUse(GR->getSPIRVTypeID(RelationType));

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);

  // Build select instruction.
  return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
                         Call->ReturnType, GR);
}

static bool generateGroupInst(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  const SPIRV::GroupBuiltin *GroupBuiltin =
      SPIRV::lookupGroupBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  Register Arg0;
  if (GroupBuiltin->HasBoolArg) {
    Register ConstRegister = Call->Arguments[0];
    auto ArgInstruction = getDefInstrMaybeConstant(ConstRegister, MRI);
    // TODO: support non-constant bool values.
    assert(ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT &&
           "Only constant bool value args are supported");
    if (GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() !=
        SPIRV::OpTypeBool)
      Arg0 = GR->buildConstantInt(getIConstVal(ConstRegister, MRI), MIRBuilder,
                                  GR->getOrCreateSPIRVBoolType(MIRBuilder));
  }

  Register GroupResultRegister = Call->ReturnRegister;
  SPIRVType *GroupResultType = Call->ReturnType;

  // TODO: maybe we need to check whether the result type is already boolean
  // and in this case do not insert select instruction.
  const bool HasBoolReturnTy =
      GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
      GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
      GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;

  if (HasBoolReturnTy)
    std::tie(GroupResultRegister, GroupResultType) =
        buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  auto Scope = Builtin->Name.startswith("sub_group") ? SPIRV::Scope::Subgroup
                                                     : SPIRV::Scope::Workgroup;
  Register ScopeRegister = buildConstantIntReg(Scope, MIRBuilder, GR);

  // Build work/sub group instruction.
  auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                 .addDef(GroupResultRegister)
                 .addUse(GR->getSPIRVTypeID(GroupResultType))
                 .addUse(ScopeRegister);

  if (!GroupBuiltin->NoGroupOperation)
    MIB.addImm(GroupBuiltin->GroupOperation);
  if (Call->Arguments.size() > 0) {
    MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
    MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
    for (unsigned i = 1; i < Call->Arguments.size(); i++) {
      MIB.addUse(Call->Arguments[i]);
      MRI->setRegClass(Call->Arguments[i], &SPIRV::IDRegClass);
    }
  }

  // Build select instruction.
  if (HasBoolReturnTy)
    buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
                    Call->ReturnType, GR);
  return true;
}

// These queries ask for a single size_t result for a given dimension index,
// e.g. size_t get_global_id(uint dimindex). In SPIR-V, the builtins
// corresponding to these values are all vec3 types, so we need to extract the
// correct index or return defaultVal (0 or 1 depending on the query). We also
// handle extending or truncating in case size_t does not match the expected
// result type's bitwidth.
//
// For a constant index >= 3 we generate:
// %res = OpConstant %SizeT 0
//
// For other indices we generate:
// %g = OpVariable %ptr_V3_SizeT Input
// OpDecorate %g BuiltIn XXX
// OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
// OpDecorate %g Constant
// %loadedVec = OpLoad %V3_SizeT %g
//
// Then, if the index is constant < 3, we generate:
// %res = OpCompositeExtract %SizeT %loadedVec idx
// If the index is dynamic, we generate:
// %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
// %cmp = OpULessThan %bool %idx %const_3
// %res = OpSelect %SizeT %cmp %tmp %const_0
//
// If the bitwidth of %res does not match the expected return type, we add an
// extend or truncate.
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              SPIRV::BuiltIn::BuiltIn BuiltinValue,
                              uint64_t DefaultValue) {
  Register IndexRegister = Call->Arguments[0];
  const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
  const unsigned PointerSize = GR->getPointerSize();
  const SPIRVType *PointerSizeType =
      GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);

  // Set up the final register to do truncation or extension on at the end.
  Register ToTruncate = Call->ReturnRegister;

  // If the index is constant, we can statically determine if it is in range.
  bool IsConstantIndex =
      IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;

  // If it's out of range (max dimension is 3), we can just return the constant
  // default value (0 or 1 depending on which query function).
  if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
    Register DefaultReg = Call->ReturnRegister;
    if (PointerSize != ResultWidth) {
      DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
      MRI->setRegClass(DefaultReg, &SPIRV::IDRegClass);
      GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
                                MIRBuilder.getMF());
      ToTruncate = DefaultReg;
    }
    auto NewRegister =
        GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);
    MIRBuilder.buildCopy(DefaultReg, NewRegister);
  } else { // If it could be in range, we need to load from the given builtin.
    auto Vec3Ty =
        GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder);
    Register LoadedVector =
        buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
                                 LLT::fixed_vector(3, PointerSize));
    // Set up the vreg to extract the result to (possibly a new temporary one).
    Register Extracted = Call->ReturnRegister;
    if (!IsConstantIndex || PointerSize != ResultWidth) {
      Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
      MRI->setRegClass(Extracted, &SPIRV::IDRegClass);
      GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
    }
    // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
    // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
    MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
        Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true);
    ExtractInst.addUse(LoadedVector).addUse(IndexRegister);

    // If the index is dynamic, we need to check if it's < 3, and then use a
    // select.
    if (!IsConstantIndex) {
      insertAssignInstr(Extracted, nullptr, PointerSizeType, GR, MIRBuilder,
                        *MRI);

      auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
      auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);

      Register CompareRegister =
          MRI->createGenericVirtualRegister(LLT::scalar(1));
      MRI->setRegClass(CompareRegister, &SPIRV::IDRegClass);
      GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());

      // Use G_ICMP to check if idxVReg < 3.
      MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
                           GR->buildConstantInt(3, MIRBuilder, IndexType));

      // Get constant for the default value (0 or 1 depending on which
      // function).
      Register DefaultRegister =
          GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);

      // Get a register for the selection result (possibly a new temporary
      // one).
      Register SelectionResult = Call->ReturnRegister;
      if (PointerSize != ResultWidth) {
        SelectionResult =
            MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
        MRI->setRegClass(SelectionResult, &SPIRV::IDRegClass);
        GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
                                  MIRBuilder.getMF());
      }
      // Create the final G_SELECT to return the extracted value or the
      // default.
      MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
                             DefaultRegister);
      ToTruncate = SelectionResult;
    } else {
      ToTruncate = Extracted;
    }
  }
  // Alter the result's bitwidth if it does not match the SizeT value
  // extracted.
  if (PointerSize != ResultWidth)
    MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
  return true;
}

static bool generateBuiltinVar(const SPIRV::IncomingCall *Call,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  // Lookup the builtin variable record.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  SPIRV::BuiltIn::BuiltIn Value =
      SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;

  if (Value == SPIRV::BuiltIn::GlobalInvocationId)
    return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);

  // Build a load instruction for the builtin variable.
  unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
  LLT LLType;
  if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
    LLType =
        LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
  else
    LLType = LLT::scalar(BitWidth);

  return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
                                  LLType, Call->ReturnRegister);
}

static bool generateAtomicInst(const SPIRV::IncomingCall *Call,
                               MachineIRBuilder &MIRBuilder,
                               SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  switch (Opcode) {
  case SPIRV::OpStore:
    return buildAtomicInitInst(Call, MIRBuilder);
  case SPIRV::OpAtomicLoad:
    return buildAtomicLoadInst(Call, MIRBuilder, GR);
  case SPIRV::OpAtomicStore:
    return buildAtomicStoreInst(Call, MIRBuilder, GR);
  case SPIRV::OpAtomicCompareExchange:
  case SPIRV::OpAtomicCompareExchangeWeak:
    return buildAtomicCompareExchangeInst(Call, MIRBuilder, GR);
  case SPIRV::OpAtomicIAdd:
  case SPIRV::OpAtomicISub:
  case SPIRV::OpAtomicOr:
  case SPIRV::OpAtomicXor:
  case SPIRV::OpAtomicAnd:
  case SPIRV::OpAtomicExchange:
    return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
  case SPIRV::OpMemoryBarrier:
    return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
  case SPIRV::OpAtomicFlagTestAndSet:
  case SPIRV::OpAtomicFlagClear:
    return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
  default:
    return false;
  }
}

static bool generateBarrierInst(const SPIRV::IncomingCall *Call,
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
}

static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call,
                                  MachineIRBuilder &MIRBuilder,
                                  SPIRVGlobalRegistry *GR) {
  unsigned Opcode = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode();
  bool IsVec = Opcode == SPIRV::OpTypeVector;
  // Use OpDot for vector args and OpFMulS for scalar args.
  MIRBuilder.buildInstr(IsVec ? SPIRV::OpDot : SPIRV::OpFMulS)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
}
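
// Illustrative: "dot(float4 a, float4 b)" maps to OpDot, while the scalar
// overload "dot(float a, float b)" degenerates to a scalar multiply, since a
// one-element dot product is a plain multiplication.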

static bool generateGetQueryInst(const SPIRV::IncomingCall *Call,
                                 MachineIRBuilder &MIRBuilder,
                                 SPIRVGlobalRegistry *GR) {
  // Lookup the builtin record.
  SPIRV::BuiltIn::BuiltIn Value =
      SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
  uint64_t IsDefault = (Value == SPIRV::BuiltIn::GlobalSize ||
                        Value == SPIRV::BuiltIn::WorkgroupSize ||
                        Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
  return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefault ? 1 : 0);
}

static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  // Lookup the image size query component number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Component =
      SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
  // Query result may either be a vector or a scalar. If return type is not a
  // vector, expect only a single size component. Otherwise get the number of
  // expected components.
  SPIRVType *RetTy = Call->ReturnType;
  unsigned NumExpectedRetComponents = RetTy->getOpcode() == SPIRV::OpTypeVector
                                          ? RetTy->getOperand(2).getImm()
                                          : 1;
  // Get the actual number of query result/size components.
  SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
  unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
  Register QueryResult = Call->ReturnRegister;
  SPIRVType *QueryResultType = Call->ReturnType;
  if (NumExpectedRetComponents != NumActualRetComponents) {
    QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
        LLT::fixed_vector(NumActualRetComponents, 32));
    MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::IDRegClass);
    SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
    QueryResultType = GR->getOrCreateSPIRVVectorType(
        IntTy, NumActualRetComponents, MIRBuilder);
    GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
  }
  bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
  unsigned Opcode =
      IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(QueryResult)
                 .addUse(GR->getSPIRVTypeID(QueryResultType))
                 .addUse(Call->Arguments[0]);
  if (!IsDimBuf)
    MIB.addUse(buildConstantIntReg(0, MIRBuilder, GR)); // Lod id.
  if (NumExpectedRetComponents == NumActualRetComponents)
    return true;
  if (NumExpectedRetComponents == 1) {
    // Only 1 component is expected, build OpCompositeExtract instruction.
    unsigned ExtractedComposite =
        Component == 3 ? NumActualRetComponents - 1 : Component;
    assert(ExtractedComposite < NumActualRetComponents &&
           "Invalid composite index!");
    MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(QueryResult)
        .addImm(ExtractedComposite);
  } else {
    // More than 1 component is expected, fill a new vector.
    auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType))
                   .addUse(QueryResult)
                   .addUse(QueryResult);
    for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
      MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
  }
  return true;
}
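
// Illustrative (shapes assumed): "get_image_dim(image2d_t)" expects an int2
// and a 2D image yields exactly 2 size components, so the result of
// OpImageQuerySizeLod is used directly; "get_image_width(image3d_t)" expects
// a scalar, so component 0 is pulled out with an OpCompositeExtract.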

static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call,
                                       MachineIRBuilder &MIRBuilder,
                                       SPIRVGlobalRegistry *GR) {
  assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
         "Image samples query result must be of int type!");

  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register Image = Call->Arguments[0];
  MIRBuilder.getMRI()->setRegClass(Image, &SPIRV::IDRegClass);
  SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
      GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());

  switch (Opcode) {
  case SPIRV::OpImageQuerySamples:
    assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
           "Image must be of 2D dimensionality");
    break;
  case SPIRV::OpImageQueryLevels:
    assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
            ImageDimensionality == SPIRV::Dim::DIM_2D ||
            ImageDimensionality == SPIRV::Dim::DIM_3D ||
            ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
           "Image must be of 1D/2D/3D/Cube dimensionality");
    break;
  }

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Image);
  return true;
}

// TODO: Move to TableGen.
static SPIRV::SamplerAddressingMode::SamplerAddressingMode
getSamplerAddressingModeFromBitmask(unsigned Bitmask) {
  switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
  case SPIRV::CLK_ADDRESS_CLAMP:
    return SPIRV::SamplerAddressingMode::Clamp;
  case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
    return SPIRV::SamplerAddressingMode::ClampToEdge;
  case SPIRV::CLK_ADDRESS_REPEAT:
    return SPIRV::SamplerAddressingMode::Repeat;
  case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
    return SPIRV::SamplerAddressingMode::RepeatMirrored;
  case SPIRV::CLK_ADDRESS_NONE:
    return SPIRV::SamplerAddressingMode::None;
  default:
    llvm_unreachable("Unknown CL address mode");
  }
}

static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
  return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
}

static SPIRV::SamplerFilterMode::SamplerFilterMode
getSamplerFilterModeFromBitmask(unsigned Bitmask) {
  if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
    return SPIRV::SamplerFilterMode::Linear;
  if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
    return SPIRV::SamplerFilterMode::Nearest;
  return SPIRV::SamplerFilterMode::Nearest;
}
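
// Illustrative decoding (flag names from the OpenCL headers): the literal
// sampler "CLK_NORMALIZED_COORDS_TRUE | CLK_ADDRESS_CLAMP | CLK_FILTER_NEAREST"
// becomes an OpConstantSampler with addressing mode Clamp, param 1
// (normalized coordinates), and filter mode Nearest.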

static bool generateReadImageInst(const StringRef DemangledCall,
                                  const SPIRV::IncomingCall *Call,
                                  MachineIRBuilder &MIRBuilder,
                                  SPIRVGlobalRegistry *GR) {
  Register Image = Call->Arguments[0];
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  MRI->setRegClass(Image, &SPIRV::IDRegClass);
  MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
  bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
  bool HasMsaa = DemangledCall.contains_insensitive("msaa");
  if (HasOclSampler || HasMsaa)
    MRI->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
  if (HasOclSampler) {
    Register Sampler = Call->Arguments[1];

    if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
        getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
      uint64_t SamplerMask = getIConstVal(Sampler, MRI);
      Sampler = GR->buildConstantSampler(
          Register(), getSamplerAddressingModeFromBitmask(SamplerMask),
          getSamplerParamFromBitmask(SamplerMask),
          getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder,
          GR->getSPIRVTypeForVReg(Sampler));
    }
    SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
    SPIRVType *SampledImageType =
        GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
    Register SampledImage = MRI->createVirtualRegister(&SPIRV::IDRegClass);

    MIRBuilder.buildInstr(SPIRV::OpSampledImage)
        .addDef(SampledImage)
        .addUse(GR->getSPIRVTypeID(SampledImageType))
        .addUse(Image)
        .addUse(Sampler);

    Register Lod = GR->buildConstantFP(APFloat::getZero(APFloat::IEEEsingle()),
                                       MIRBuilder);
    SPIRVType *TempType = Call->ReturnType;
    bool NeedsExtraction = false;
    if (TempType->getOpcode() != SPIRV::OpTypeVector) {
      TempType =
          GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder);
      NeedsExtraction = true;
    }
    LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(TempType));
    Register TempRegister = MRI->createGenericVirtualRegister(LLType);
    MRI->setRegClass(TempRegister, &SPIRV::IDRegClass);
    GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());

    MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
        .addDef(NeedsExtraction ? TempRegister : Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(TempType))
        .addUse(SampledImage)
        .addUse(Call->Arguments[2]) // Coordinate.
        .addImm(SPIRV::ImageOperand::Lod)
        .addUse(Lod);

    if (NeedsExtraction)
      MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
          .addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
          .addUse(TempRegister)
          .addImm(0);
  } else if (HasMsaa) {
    MIRBuilder.buildInstr(SPIRV::OpImageRead)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Image)
        .addUse(Call->Arguments[1]) // Coordinate.
        .addImm(SPIRV::ImageOperand::Sample)
        .addUse(Call->Arguments[2]);
  } else {
    MIRBuilder.buildInstr(SPIRV::OpImageRead)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Image)
        .addUse(Call->Arguments[1]); // Coordinate.
  }
  return true;
}
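
// Illustrative flow for "read_imagef(img, sampler, coord)" (register names
// assumed): the image and sampler are first combined, then sampled at an
// explicit LOD of 0.0:
//   %sampled = OpSampledImage %sampled_img_ty %img %sampler
//   %texel   = OpImageSampleExplicitLod %v4float %sampled %coord Lod %lod0
// with an OpCompositeExtract appended when the builtin returns a scalar.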

static bool generateWriteImageInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
  MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
  MIRBuilder.buildInstr(SPIRV::OpImageWrite)
      .addUse(Call->Arguments[0])  // Image.
      .addUse(Call->Arguments[1])  // Coordinate.
      .addUse(Call->Arguments[2]); // Texel.
  return true;
}

static bool generateSampleImageInst(const StringRef DemangledCall,
                                    const SPIRV::IncomingCall *Call,
                                    MachineIRBuilder &MIRBuilder,
                                    SPIRVGlobalRegistry *GR) {
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (Call->Builtin->Name.contains_insensitive(
          "__translate_sampler_initializer")) {
    // Build sampler literal.
    uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
    Register Sampler = GR->buildConstantSampler(
        Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
        getSamplerParamFromBitmask(Bitmask),
        getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder, Call->ReturnType);
    return Sampler.isValid();
  } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
    // Create OpSampledImage.
    Register Image = Call->Arguments[0];
    SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
    SPIRVType *SampledImageType =
        GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
    Register SampledImage =
        Call->ReturnRegister.isValid()
            ? Call->ReturnRegister
            : MRI->createVirtualRegister(&SPIRV::IDRegClass);
    MIRBuilder.buildInstr(SPIRV::OpSampledImage)
        .addDef(SampledImage)
        .addUse(GR->getSPIRVTypeID(SampledImageType))
        .addUse(Image)
        .addUse(Call->Arguments[1]); // Sampler.
    return true;
  } else if (Call->Builtin->Name.contains_insensitive(
                 "__spirv_ImageSampleExplicitLod")) {
    // Sample an image using an explicit level of detail.
    std::string ReturnType = DemangledCall.str();
    if (DemangledCall.contains("_R")) {
      ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
      ReturnType = ReturnType.substr(0, ReturnType.find('('));
    }
    SPIRVType *Type = GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder);
    MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
    MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
    MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass);

    MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Type))
        .addUse(Call->Arguments[0]) // Image.
        .addUse(Call->Arguments[1]) // Coordinate.
        .addImm(SPIRV::ImageOperand::Lod)
        .addUse(Call->Arguments[3]);
    return true;
  }
  return false;
}

static bool generateSelectInst(const SPIRV::IncomingCall *Call,
                               MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
                         Call->Arguments[1], Call->Arguments[2]);
  return true;
}

static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
  const MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  switch (Opcode) {
  case SPIRV::OpSpecConstant: {
    // Build the SpecID decoration.
    unsigned SpecId =
        static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
    buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
                    {SpecId});
    // Determine the constant MI.
    Register ConstRegister = Call->Arguments[1];
    const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
    assert(Const &&
           (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
            Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
           "Argument should be either an int or floating-point constant");
    // Determine the opcode and build the OpSpec MI.
    const MachineOperand &ConstOperand = Const->getOperand(1);
    if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
      assert(ConstOperand.isCImm() && "Int constant operand is expected");
      Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
                   ? SPIRV::OpSpecConstantTrue
                   : SPIRV::OpSpecConstantFalse;
    }
    auto MIB = MIRBuilder.buildInstr(Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType));

    if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
      if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
        addNumImm(ConstOperand.getCImm()->getValue(), MIB);
      else
        addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
    }
    return true;
  }
  case SPIRV::OpSpecConstantComposite: {
    auto MIB = MIRBuilder.buildInstr(Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(GR->getSPIRVTypeID(Call->ReturnType));
    for (unsigned i = 0; i < Call->Arguments.size(); i++)
      MIB.addUse(Call->Arguments[i]);
    return true;
  }
  default:
    return false;
  }
}
1466
1467static bool buildNDRange(const SPIRV::IncomingCall *Call,
1468 MachineIRBuilder &MIRBuilder,
1469 SPIRVGlobalRegistry *GR) {
1470 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1471 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1472 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1473 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
1474 PtrType->getOperand(2).isReg());
1475 Register TypeReg = PtrType->getOperand(2).getReg();
1477 MachineFunction &MF = MIRBuilder.getMF();
1478 Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1479 GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
1480 // Skip the first arg, it's the destination pointer. OpBuildNDRange takes
1481 // three other arguments, so pass zero constant on absence.
1482 unsigned NumArgs = Call->Arguments.size();
1483 assert(NumArgs >= 2);
1484 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
1485 MRI->setRegClass(GlobalWorkSize, &SPIRV::IDRegClass);
1486 Register LocalWorkSize =
1487 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
1488 if (LocalWorkSize.isValid())
1489 MRI->setRegClass(LocalWorkSize, &SPIRV::IDRegClass);
1490 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
1491 if (GlobalWorkOffset.isValid())
1492 MRI->setRegClass(GlobalWorkOffset, &SPIRV::IDRegClass);
1493 if (NumArgs < 4) {
1494 Register Const;
1495 SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
1496 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
1497 MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
1498 assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
1499 DefInstr->getOperand(3).isReg());
1500 Register GWSPtr = DefInstr->getOperand(3).getReg();
1501 if (!MRI->getRegClassOrNull(GWSPtr))
1502 MRI->setRegClass(GWSPtr, &SPIRV::IDRegClass);
1503 // TODO: Maybe simplify generation of the type of the fields.
1504 unsigned Size = Call->Builtin->Name.equals("ndrange_3D") ? 3 : 2;
1505 unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
1507 Type *FieldTy = ArrayType::get(BaseTy, Size);
1508 SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(FieldTy, MIRBuilder);
1509 GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1510 GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
1511 MIRBuilder.buildInstr(SPIRV::OpLoad)
1512 .addDef(GlobalWorkSize)
1513 .addUse(GR->getSPIRVTypeID(SpvFieldTy))
1514 .addUse(GWSPtr);
1515 Const = GR->getOrCreateConsIntArray(0, MIRBuilder, SpvFieldTy);
1516 } else {
1517 Const = GR->buildConstantInt(0, MIRBuilder, SpvTy);
1518 }
1519 if (!LocalWorkSize.isValid())
1520 LocalWorkSize = Const;
1521 if (!GlobalWorkOffset.isValid())
1522 GlobalWorkOffset = Const;
1523 }
1524 assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
1525 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
1526 .addDef(TmpReg)
1527 .addUse(TypeReg)
1528 .addUse(GlobalWorkSize)
1529 .addUse(LocalWorkSize)
1530 .addUse(GlobalWorkOffset);
1531 return MIRBuilder.buildInstr(SPIRV::OpStore)
1532 .addUse(Call->Arguments[0])
1533 .addUse(TmpReg);
1534}
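// Illustrative example, assuming the OpenCL C ndrange_t constructors: for
//   ndrange_t r = ndrange_1D(gws);
// NumArgs is 2 (destination pointer plus global work size), so both
// LocalWorkSize and GlobalWorkOffset are absent and are replaced by the zero
// constant built above, giving roughly:
//   %tmp = OpBuildNDRange %gws, %zero, %zero
//   OpStore %r, %tmp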
1535
1536 static MachineInstr *getBlockStructInstr(Register ParamReg,
1537                                          MachineRegisterInfo *MRI) {
1538 // We expect the following sequence of instructions:
1539 // %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
1540 // or = G_GLOBAL_VALUE @block_literal_global
1541 // %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
1542 // %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
1543 MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
1544 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
1545 MI->getOperand(1).isReg());
1546 Register BitcastReg = MI->getOperand(1).getReg();
1547 MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
1548 assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
1549 BitcastMI->getOperand(2).isReg());
1550 Register ValueReg = BitcastMI->getOperand(2).getReg();
1551 MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
1552 return ValueMI;
1553}
1554
1555 // Return the integer constant that corresponds to the given register and
1556 // is defined via the spv_track_constant intrinsic.
1557// TODO: maybe unify with prelegalizer pass.
1558 static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI) {
1559 MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
1560 assert(isSpvIntrinsic(*DefMI, Intrinsic::spv_track_constant) &&
1561 DefMI->getOperand(2).isReg());
1562 MachineInstr *DefMI2 = MRI->getUniqueVRegDef(DefMI->getOperand(2).getReg());
1563 assert(DefMI2->getOpcode() == TargetOpcode::G_CONSTANT &&
1564 DefMI2->getOperand(1).isCImm());
1565 return DefMI2->getOperand(1).getCImm()->getValue().getZExtValue();
1566}
1567
1568 // Return the type of the instruction result, as recorded by the spv_assign_type intrinsic.
1569// TODO: maybe unify with prelegalizer pass.
1570 static const Type *getMachineInstrType(MachineInstr *MI) {
1571 MachineInstr *NextMI = MI->getNextNode();
1572 if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
1573 NextMI = NextMI->getNextNode();
1574 Register ValueReg = MI->getOperand(0).getReg();
1575 if (!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) ||
1576 NextMI->getOperand(1).getReg() != ValueReg)
1577 return nullptr;
1578 Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
1579 assert(Ty && "Type is expected");
1580 return getTypedPtrEltType(Ty);
1581}
1582
1583static const Type *getBlockStructType(Register ParamReg,
1584                                       MachineRegisterInfo *MRI) {
1585 // In principle, this information should be passed to us from Clang via
1586 // an elementtype attribute. However, said attribute requires that
1587 // the function call be an intrinsic, which this call is not. Instead, we rely on being
1588 // able to trace this to the declaration of a variable: OpenCL C specification
1589 // section 6.12.5 should guarantee that we can do this.
1590 MachineInstr *MI = getBlockStructInstr(ParamReg, MRI);
1591 if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
1592 return getTypedPtrEltType(MI->getOperand(1).getGlobal()->getType());
1593 assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
1594 "Blocks in OpenCL C must be traceable to allocation site");
1595 return getMachineInstrType(MI);
1596}
1597
1598 // TODO: maybe move to the global registry.
1599static SPIRVType *
1600 getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder,
1601                                    SPIRVGlobalRegistry *GR) {
1602 LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
1603 Type *OpaqueType = StructType::getTypeByName(Context, "spirv.DeviceEvent");
1604 if (!OpaqueType)
1605 OpaqueType = StructType::getTypeByName(Context, "opencl.clk_event_t");
1606 if (!OpaqueType)
1607 OpaqueType = StructType::create(Context, "spirv.DeviceEvent");
1608 unsigned SC0 = storageClassToAddressSpace(SPIRV::StorageClass::Function);
1609 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
1610 Type *PtrType = PointerType::get(PointerType::get(OpaqueType, SC0), SC1);
1611 return GR->getOrCreateSPIRVType(PtrType, MIRBuilder);
1612}
1613
1614 static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call,
1615                                MachineIRBuilder &MIRBuilder,
1616 SPIRVGlobalRegistry *GR) {
1617 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1618 const DataLayout &DL = MIRBuilder.getDataLayout();
1619 bool HasEvents = Call->Builtin->Name.find("events") != StringRef::npos;
1620 const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
1621
1622 // Build the vararg operands before emitting OpEnqueueKernel.
1623 // Local size arguments: sizes of the block invoke arguments. Clang generates
1624 // the local size operands as an array, so we need to unpack them here.
1625 SmallVector<Register, 16> LocalSizes;
1626 if (Call->Builtin->Name.find("_varargs") != StringRef::npos) {
1627 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
1628 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
1629 MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
1630 assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
1631 GepMI->getOperand(3).isReg());
1632 Register ArrayReg = GepMI->getOperand(3).getReg();
1633 MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
1634 const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
1635 assert(LocalSizeTy && "Local size type is expected");
1636 const uint64_t LocalSizeNum =
1637 cast<ArrayType>(LocalSizeTy)->getNumElements();
1638 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
1639 const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
1640 const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
1641 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
1642 for (unsigned I = 0; I < LocalSizeNum; ++I) {
1643 Register Reg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1644 MRI->setType(Reg, LLType);
1645 GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
1646 auto GEPInst = MIRBuilder.buildIntrinsic(Intrinsic::spv_gep,
1647 ArrayRef<Register>{Reg}, true);
1648 GEPInst
1649 .addImm(GepMI->getOperand(2).getImm()) // In-bounds flag.
1650 .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
1651 .addUse(buildConstantIntReg(0, MIRBuilder, GR)) // Indices.
1652 .addUse(buildConstantIntReg(I, MIRBuilder, GR));
1653 LocalSizes.push_back(Reg);
1654 }
1655 }
1656
1657 // The SPIR-V OpEnqueueKernel instruction has 10 or more arguments.
1658 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
1659 .addDef(Call->ReturnRegister)
1660 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1661
1662 // Copy all arguments before block invoke function pointer.
1663 const unsigned BlockFIdx = HasEvents ? 6 : 3;
1664 for (unsigned i = 0; i < BlockFIdx; i++)
1665 MIB.addUse(Call->Arguments[i]);
1666
1667 // If there are no event arguments in the original call, add dummy ones.
1668 if (!HasEvents) {
1669 MIB.addUse(buildConstantIntReg(0, MIRBuilder, GR)); // Dummy num events.
1670 Register NullPtr = GR->getOrCreateConstNullPtr(
1671 MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
1672 MIB.addUse(NullPtr); // Dummy wait events.
1673 MIB.addUse(NullPtr); // Dummy ret event.
1674 }
1675
1676 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
1677 assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
1678 // Invoke: Pointer to invoke function.
1679 MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
1680
1681 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
1682 // Param: Pointer to block literal.
1683 MIB.addUse(BlockLiteralReg);
1684
1685 Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
1686 // TODO: these numbers should be obtained from the block literal structure.
1687 // Param Size: Size of the block literal structure.
1688 MIB.addUse(buildConstantIntReg(DL.getTypeStoreSize(PType), MIRBuilder, GR));
1689 // Param Alignment: Alignment of the block literal structure.
1690 MIB.addUse(
1691 buildConstantIntReg(DL.getPrefTypeAlign(PType).value(), MIRBuilder, GR));
1692
1693 for (unsigned i = 0; i < LocalSizes.size(); i++)
1694 MIB.addUse(LocalSizes[i]);
1695 return true;
1696}
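// Rough shape of the result (operand names are descriptive only): for an
// OpenCL C call without event arguments, e.g.
//   enqueue_kernel(queue, CLK_ENQUEUE_FLAGS_NO_WAIT, range, block);
// the code above emits approximately
//   %ret = OpEnqueueKernel %queue %flags %range %zeroEvents %null %null
//                          @invoke_fn %literal %size %align [local sizes...]
// where %zeroEvents and the two %null registers are the dummy event operands
// built above.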
1697
1698 static bool generateEnqueueInst(const SPIRV::IncomingCall *Call,
1699                                 MachineIRBuilder &MIRBuilder,
1700 SPIRVGlobalRegistry *GR) {
1701 // Lookup the instruction opcode in the TableGen records.
1702 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1703 unsigned Opcode =
1704 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1705
1706 switch (Opcode) {
1707 case SPIRV::OpRetainEvent:
1708 case SPIRV::OpReleaseEvent:
1709 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1710 return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
1711 case SPIRV::OpCreateUserEvent:
1712 case SPIRV::OpGetDefaultQueue:
1713 return MIRBuilder.buildInstr(Opcode)
1714 .addDef(Call->ReturnRegister)
1715 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1716 case SPIRV::OpIsValidEvent:
1717 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1718 return MIRBuilder.buildInstr(Opcode)
1719 .addDef(Call->ReturnRegister)
1720 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1721 .addUse(Call->Arguments[0]);
1722 case SPIRV::OpSetUserEventStatus:
1723 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1724 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1725 return MIRBuilder.buildInstr(Opcode)
1726 .addUse(Call->Arguments[0])
1727 .addUse(Call->Arguments[1]);
1728 case SPIRV::OpCaptureEventProfilingInfo:
1729 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1730 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1731 MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
1732 return MIRBuilder.buildInstr(Opcode)
1733 .addUse(Call->Arguments[0])
1734 .addUse(Call->Arguments[1])
1735 .addUse(Call->Arguments[2]);
1736 case SPIRV::OpBuildNDRange:
1737 return buildNDRange(Call, MIRBuilder, GR);
1738 case SPIRV::OpEnqueueKernel:
1739 return buildEnqueueKernel(Call, MIRBuilder, GR);
1740 default:
1741 return false;
1742 }
1743}
1744
1745 static bool generateAsyncCopy(const SPIRV::IncomingCall *Call,
1746                               MachineIRBuilder &MIRBuilder,
1747 SPIRVGlobalRegistry *GR) {
1748 // Lookup the instruction opcode in the TableGen records.
1749 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1750 unsigned Opcode =
1751 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1752 auto Scope = buildConstantIntReg(SPIRV::Scope::Workgroup, MIRBuilder, GR);
1753
1754 switch (Opcode) {
1755 case SPIRV::OpGroupAsyncCopy:
1756 return MIRBuilder.buildInstr(Opcode)
1757 .addDef(Call->ReturnRegister)
1758 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1759 .addUse(Scope)
1760 .addUse(Call->Arguments[0])
1761 .addUse(Call->Arguments[1])
1762 .addUse(Call->Arguments[2])
1763 .addUse(buildConstantIntReg(1, MIRBuilder, GR))
1764 .addUse(Call->Arguments[3]);
1765 case SPIRV::OpGroupWaitEvents:
1766 return MIRBuilder.buildInstr(Opcode)
1767 .addUse(Scope)
1768 .addUse(Call->Arguments[0])
1769 .addUse(Call->Arguments[1]);
1770 default:
1771 return false;
1772 }
1773}
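// For reference: OpenCL C's
//   async_work_group_copy(dst, src, num_elements, event)
// maps onto the OpGroupAsyncCopy case above, with the scope fixed to
// Workgroup and the stride operand hardcoded to the constant 1; the event
// argument is passed through as the last operand.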
1774
1775static bool generateConvertInst(const StringRef DemangledCall,
1776 const SPIRV::IncomingCall *Call,
1777 MachineIRBuilder &MIRBuilder,
1778 SPIRVGlobalRegistry *GR) {
1779 // Lookup the conversion builtin in the TableGen records.
1780 const SPIRV::ConvertBuiltin *Builtin =
1781 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
1782
1783 if (Builtin->IsSaturated)
1784 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
1785 SPIRV::Decoration::SaturatedConversion, {});
1786 if (Builtin->IsRounded)
1787 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
1788 SPIRV::Decoration::FPRoundingMode,
1789 {(unsigned)Builtin->RoundingMode});
1790
1791 unsigned Opcode = SPIRV::OpNop;
1792 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
1793 // Int -> ...
1794 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
1795 // Int -> Int
1796 if (Builtin->IsSaturated)
1797 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
1798 : SPIRV::OpSatConvertSToU;
1799 else
1800 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
1801 : SPIRV::OpSConvert;
1802 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
1803 SPIRV::OpTypeFloat)) {
1804 // Int -> Float
1805 bool IsSourceSigned =
1806 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
1807 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
1808 }
1809 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
1810 SPIRV::OpTypeFloat)) {
1811 // Float -> ...
1812 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt))
1813 // Float -> Int
1814 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
1815 : SPIRV::OpConvertFToU;
1816 else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
1817 SPIRV::OpTypeFloat))
1818 // Float -> Float
1819 Opcode = SPIRV::OpFConvert;
1820 }
1821
1822 assert(Opcode != SPIRV::OpNop &&
1823 "Conversion between the types not implemented!");
1824
1825 MIRBuilder.buildInstr(Opcode)
1826 .addDef(Call->ReturnRegister)
1827 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1828 .addUse(Call->Arguments[0]);
1829 return true;
1830}
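// Naming scheme this decodes, following the OpenCL 2.0 convert_* builtins:
// e.g. convert_int_sat_rte(float) arrives with IsSaturated and IsRounded set,
// so the return register is decorated with SaturatedConversion and
// FPRoundingMode(RTE), and the float-to-signed-int direction selects
// OpConvertFToS above.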
1831
1832 static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call,
1833                                         MachineIRBuilder &MIRBuilder,
1834 SPIRVGlobalRegistry *GR) {
1835 // Lookup the vector load/store builtin in the TableGen records.
1836 const SPIRV::VectorLoadStoreBuiltin *Builtin =
1837 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
1838 Call->Builtin->Set);
1839 // Build extended instruction.
1840 auto MIB =
1841 MIRBuilder.buildInstr(SPIRV::OpExtInst)
1842 .addDef(Call->ReturnRegister)
1843 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1844 .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
1845 .addImm(Builtin->Number);
1846 for (auto Argument : Call->Arguments)
1847 MIB.addUse(Argument);
1848
1849 // The rounding mode should be passed as the last argument in the MI for
1850 // builtins like "vstorea_halfn_r".
1851 if (Builtin->IsRounded)
1852 MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
1853 return true;
1854}
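// For example, a call to vstorea_half4_rte(...) would be emitted as an
// OpExtInst of the OpenCL_std set (the instruction number comes from the
// TableGen record) with the RTE rounding-mode literal appended after the
// regular arguments, as handled above.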
1855
1856 static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call,
1857                                   MachineIRBuilder &MIRBuilder,
1858 SPIRVGlobalRegistry *GR) {
1859 // Lookup the instruction opcode in the TableGen records.
1860 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1861 unsigned Opcode =
1862 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1863 bool IsLoad = Opcode == SPIRV::OpLoad;
1864 // Build the instruction.
1865 auto MIB = MIRBuilder.buildInstr(Opcode);
1866 if (IsLoad) {
1867 MIB.addDef(Call->ReturnRegister);
1868 MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
1869 }
1870 // Add a pointer to the value to load/store.
1871 MIB.addUse(Call->Arguments[0]);
1872 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1873 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1874 // Add a value to store.
1875 if (!IsLoad) {
1876 MIB.addUse(Call->Arguments[1]);
1877 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1878 }
1879 // Add optional memory attributes and an alignment.
1880 unsigned NumArgs = Call->Arguments.size();
1881 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3) {
1882 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
1883 MRI->setRegClass(Call->Arguments[IsLoad ? 1 : 2], &SPIRV::IDRegClass);
1884 }
1885 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4) {
1886 MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
1887 MRI->setRegClass(Call->Arguments[IsLoad ? 2 : 3], &SPIRV::IDRegClass);
1888 }
1889 return true;
1890}
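// Sketch of the expected demangled forms (SPIR-V friendly-IR style; the exact
// names are illustrative):
//   __spirv_Load(ptr)                     -> %res = OpLoad %ty %ptr [memop]
//   __spirv_Store(ptr, val, memop, align) -> OpStore %ptr %val memop align
// The optional memory-operand and alignment literals are extracted as
// constants via getConstFromIntrinsic above.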
1891
1892/// Lowers a builtin function call using the provided \p DemangledCall skeleton
1893/// and external instruction \p Set.
1894namespace SPIRV {
1895std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
1896 SPIRV::InstructionSet::InstructionSet Set,
1897 MachineIRBuilder &MIRBuilder,
1898 const Register OrigRet, const Type *OrigRetTy,
1899 const SmallVectorImpl<Register> &Args,
1900 SPIRVGlobalRegistry *GR) {
1901 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
1902
1903 // SPIR-V type and return register.
1904 Register ReturnRegister = OrigRet;
1905 SPIRVType *ReturnType = nullptr;
1906 if (OrigRetTy && !OrigRetTy->isVoidTy()) {
1907 ReturnType = GR->assignTypeToVReg(OrigRetTy, OrigRet, MIRBuilder);
1908 if (!MIRBuilder.getMRI()->getRegClassOrNull(ReturnRegister))
1909 MIRBuilder.getMRI()->setRegClass(ReturnRegister, &SPIRV::IDRegClass);
1910 } else if (OrigRetTy && OrigRetTy->isVoidTy()) {
1911 ReturnRegister = MIRBuilder.getMRI()->createVirtualRegister(&IDRegClass);
1912 MIRBuilder.getMRI()->setType(ReturnRegister, LLT::scalar(32));
1913 ReturnType = GR->assignTypeToVReg(OrigRetTy, ReturnRegister, MIRBuilder);
1914 }
1915
1916 // Lookup the builtin in the TableGen records.
1917 std::unique_ptr<const IncomingCall> Call =
1918 lookupBuiltin(DemangledCall, Set, ReturnRegister, ReturnType, Args);
1919
1920 if (!Call) {
1921 LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
1922 return std::nullopt;
1923 }
1924
1925 // TODO: check if the provided args meet the builtin requirements.
1926 assert(Args.size() >= Call->Builtin->MinNumArgs &&
1927 "Too few arguments to generate the builtin");
1928 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
1929 LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");
1930
1931 // Match the builtin against an implementation based on its group.
1932 switch (Call->Builtin->Group) {
1933 case SPIRV::Extended:
1934 return generateExtInst(Call.get(), MIRBuilder, GR);
1935 case SPIRV::Relational:
1936 return generateRelationalInst(Call.get(), MIRBuilder, GR);
1937 case SPIRV::Group:
1938 return generateGroupInst(Call.get(), MIRBuilder, GR);
1939 case SPIRV::Variable:
1940 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
1941 case SPIRV::Atomic:
1942 return generateAtomicInst(Call.get(), MIRBuilder, GR);
1943 case SPIRV::Barrier:
1944 return generateBarrierInst(Call.get(), MIRBuilder, GR);
1945 case SPIRV::Dot:
1946 return generateDotOrFMulInst(Call.get(), MIRBuilder, GR);
1947 case SPIRV::GetQuery:
1948 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
1949 case SPIRV::ImageSizeQuery:
1950 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
1951 case SPIRV::ImageMiscQuery:
1952 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
1953 case SPIRV::ReadImage:
1954 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
1955 case SPIRV::WriteImage:
1956 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
1957 case SPIRV::SampleImage:
1958 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
1959 case SPIRV::Select:
1960 return generateSelectInst(Call.get(), MIRBuilder);
1961 case SPIRV::SpecConstant:
1962 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
1963 case SPIRV::Enqueue:
1964 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
1965 case SPIRV::AsyncCopy:
1966 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
1967 case SPIRV::Convert:
1968 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
1969 case SPIRV::VectorLoadStore:
1970 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
1971 case SPIRV::LoadStore:
1972 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
1973 }
1974 return false;
1975}
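// Minimal illustrative call site (hypothetical registers and types; the real
// caller is the SPIR-V call lowering, which supplies the demangled callee
// name):
//   std::optional<bool> Lowered =
//       SPIRV::lowerBuiltin("get_global_id(unsigned int)",
//                           SPIRV::InstructionSet::OpenCL_std, MIRBuilder,
//                           ResVReg, RetTy, ArgVRegs, GR);
// std::nullopt means no TableGen record matched; false means a record matched
// but the generator could not lower the call.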
1976
1977 struct BuiltinType {
1978   StringRef Name;
1979   uint32_t Opcode;
1980 };
1981
1982#define GET_BuiltinTypes_DECL
1983#define GET_BuiltinTypes_IMPL
1984
1985 struct OpenCLType {
1986   StringRef Name;
1987   StringRef SpirvTypeLiteral;
1988 };
1989
1990#define GET_OpenCLTypes_DECL
1991#define GET_OpenCLTypes_IMPL
1992
1993#include "SPIRVGenTables.inc"
1994} // namespace SPIRV
1995
1996//===----------------------------------------------------------------------===//
1997// Misc functions for parsing builtin types.
1998//===----------------------------------------------------------------------===//
1999
2000 static Type *parseTypeString(const StringRef Name, LLVMContext &Context) {
2001 if (Name.startswith("void"))
2002 return Type::getVoidTy(Context);
2003 else if (Name.startswith("int") || Name.startswith("uint"))
2004 return Type::getInt32Ty(Context);
2005 else if (Name.startswith("float"))
2006 return Type::getFloatTy(Context);
2007 else if (Name.startswith("half"))
2008 return Type::getHalfTy(Context);
2009 llvm_unreachable("Unable to recognize type!");
2010}
2011
2012static const TargetExtType *parseToTargetExtType(const Type *OpaqueType,
2013 MachineIRBuilder &MIRBuilder) {
2014 assert(isSpecialOpaqueType(OpaqueType) &&
2015 "Not a SPIR-V/OpenCL special opaque type!");
2016 assert(!OpaqueType->isTargetExtTy() &&
2017 "This already is SPIR-V/OpenCL TargetExtType!");
2018
2019 StringRef NameWithParameters = OpaqueType->getStructName();
2020
2021 // Pointers-to-opaque-structs representing OpenCL types are first translated
2022 // to equivalent SPIR-V types. OpenCL builtin type names should have the
2023 // following format: e.g. %opencl.event_t
2024 if (NameWithParameters.startswith("opencl.")) {
2025 const SPIRV::OpenCLType *OCLTypeRecord =
2026 SPIRV::lookupOpenCLType(NameWithParameters);
2027 if (!OCLTypeRecord)
2028 report_fatal_error("Missing TableGen record for OpenCL type: " +
2029 NameWithParameters);
2030 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
2031 // Continue with the SPIR-V builtin type...
2032 }
2033
2034 // Names of the opaque structs representing SPIR-V builtins without
2035 // parameters should have the following format: e.g. %spirv.Event
2036 assert(NameWithParameters.startswith("spirv.") &&
2037 "Unknown builtin opaque type!");
2038
2039 // Parameterized SPIR-V builtin names follow this format:
2040 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
2041 if (NameWithParameters.find('_') == std::string::npos)
2042 return TargetExtType::get(OpaqueType->getContext(), NameWithParameters);
2043
2044 SmallVector<StringRef> Parameters;
2045 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
2046 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
2047
2048 SmallVector<Type *, 1> TypeParameters;
2049 bool HasTypeParameter = !isDigit(Parameters[0][0]);
2050 if (HasTypeParameter)
2051 TypeParameters.push_back(parseTypeString(
2052 Parameters[0], MIRBuilder.getMF().getFunction().getContext()));
2053 SmallVector<unsigned> IntParameters;
2054 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
2055 unsigned IntParameter = 0;
2056 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
2057 assert(ValidLiteral &&
2058 "Invalid format of SPIR-V builtin parameter literal!");
2059 IntParameters.push_back(IntParameter);
2060 }
2061 return TargetExtType::get(OpaqueType->getContext(),
2062 NameWithParameters.substr(0, BaseNameLength),
2063 TypeParameters, IntParameters);
2064}
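// Worked example: the struct name "spirv.Image._void_1_0_0_0_0_0_0" splits
// into the base name "spirv.Image" (BaseNameLength ends just before the '.'
// preceding the first '_'), one type parameter parsed from "void", and seven
// integer literals 1,0,0,0,0,0,0, which together form
//   target("spirv.Image", void, 1, 0, 0, 0, 0, 0, 0)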
2065
2066//===----------------------------------------------------------------------===//
2067// Implementation functions for builtin types.
2068//===----------------------------------------------------------------------===//
2069
2070 static SPIRVType *getNonParameterizedType(const TargetExtType *ExtensionType,
2071                                           const SPIRV::BuiltinType *TypeRecord,
2072 MachineIRBuilder &MIRBuilder,
2073 SPIRVGlobalRegistry *GR) {
2074 unsigned Opcode = TypeRecord->Opcode;
2075 // Create or get an existing type from GlobalRegistry.
2076 return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
2077}
2078
2079 static SPIRVType *getSamplerType(MachineIRBuilder &MIRBuilder,
2080                                  SPIRVGlobalRegistry *GR) {
2081 // Create or get an existing type from GlobalRegistry.
2082 return GR->getOrCreateOpTypeSampler(MIRBuilder);
2083}
2084
2085static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
2086 MachineIRBuilder &MIRBuilder,
2087 SPIRVGlobalRegistry *GR) {
2088 assert(ExtensionType->getNumIntParameters() == 1 &&
2089 "Invalid number of parameters for SPIR-V pipe builtin!");
2090 // Create or get an existing type from GlobalRegistry.
2091 return GR->getOrCreateOpTypePipe(MIRBuilder,
2092 SPIRV::AccessQualifier::AccessQualifier(
2093 ExtensionType->getIntParameter(0)));
2094}
2095
2096static SPIRVType *
2097getImageType(const TargetExtType *ExtensionType,
2098 const SPIRV::AccessQualifier::AccessQualifier Qualifier,
2099 MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
2100 assert(ExtensionType->getNumTypeParameters() == 1 &&
2101 "SPIR-V image builtin type must have sampled type parameter!");
2102 const SPIRVType *SampledType =
2103 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder);
2104 assert(ExtensionType->getNumIntParameters() == 7 &&
2105 "Invalid number of parameters for SPIR-V image builtin!");
2106 // Create or get an existing type from GlobalRegistry.
2107 return GR->getOrCreateOpTypeImage(
2108 MIRBuilder, SampledType,
2109 SPIRV::Dim::Dim(ExtensionType->getIntParameter(0)),
2110 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
2111 ExtensionType->getIntParameter(3), ExtensionType->getIntParameter(4),
2112 SPIRV::ImageFormat::ImageFormat(ExtensionType->getIntParameter(5)),
2113 Qualifier == SPIRV::AccessQualifier::WriteOnly
2114 ? SPIRV::AccessQualifier::WriteOnly
2115 : SPIRV::AccessQualifier::AccessQualifier(
2116 ExtensionType->getIntParameter(6)));
2117}
2118
2119 static SPIRVType *getSampledImageType(const TargetExtType *OpaqueType,
2120                                       MachineIRBuilder &MIRBuilder,
2121 SPIRVGlobalRegistry *GR) {
2122 SPIRVType *OpaqueImageType = getImageType(
2123 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder, GR);
2124 // Create or get an existing type from GlobalRegistry.
2125 return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
2126}
2127
2128namespace SPIRV {
2129 SPIRVType *lowerBuiltinType(const Type *OpaqueType,
2130                             SPIRV::AccessQualifier::AccessQualifier AccessQual,
2131 MachineIRBuilder &MIRBuilder,
2132 SPIRVGlobalRegistry *GR) {
2133 // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
2134 // target(...) target extension types or pointers-to-opaque-structs. The
2135 // approach relying on structs is deprecated and works only in the non-opaque
2136 // pointer mode (-opaque-pointers=0).
2137 // In order to maintain compatibility with LLVM IR generated by older versions
2138 // of Clang and LLVM/SPIR-V Translator, the pointers-to-opaque-structs are
2139 // "translated" to target extension types. This translation is temporary and
2140 // will be removed in a future release of LLVM.
2141 const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
2142 if (!BuiltinType)
2143 BuiltinType = parseToTargetExtType(OpaqueType, MIRBuilder);
2144
2145 unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();
2146
2147 const StringRef Name = BuiltinType->getName();
2148 LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");
2149
2150 // Lookup the demangled builtin type in the TableGen records.
2151 const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
2152 if (!TypeRecord)
2153 report_fatal_error("Missing TableGen record for builtin type: " + Name);
2154
2155 // "Lower" the BuiltinType into TargetType. The following get<...>Type methods
2156 // use the implementation details from TableGen records or TargetExtType
2157 // parameters to either create a new OpType<...> machine instruction or get an
2158 // existing equivalent SPIRVType from GlobalRegistry.
2159 SPIRVType *TargetType;
2160 switch (TypeRecord->Opcode) {
2161 case SPIRV::OpTypeImage:
2162 TargetType = getImageType(BuiltinType, AccessQual, MIRBuilder, GR);
2163 break;
2164 case SPIRV::OpTypePipe:
2165 TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
2166 break;
2167 case SPIRV::OpTypeDeviceEvent:
2168 TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
2169 break;
2170 case SPIRV::OpTypeSampler:
2171 TargetType = getSamplerType(MIRBuilder, GR);
2172 break;
2173 case SPIRV::OpTypeSampledImage:
2174 TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
2175 break;
2176 default:
2177 TargetType =
2178 getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
2179 break;
2180 }
2181
2182 // Emit OpName instruction if a new OpType<...> instruction was added
2183 // (equivalent type was not found in GlobalRegistry).
2184 if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
2185 buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);
2186
2187 return TargetType;
2188}
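// Example flow: target("spirv.Pipe", 0), or the legacy %opencl.pipe_ro_t
// struct pointer that parseToTargetExtType rewrites into that form (assuming
// the usual opencl.pipe_ro_t -> spirv.Pipe._0 TableGen mapping), hits the
// OpTypePipe case above and produces an OpTypePipe with the ReadOnly access
// qualifier; OpName is attached only when the OpType instruction is newly
// created rather than reused from the GlobalRegistry.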
2189} // namespace SPIRV
2190} // namespace llvm