LLVM 18.0.0git
SPIRVBuiltins.cpp
Go to the documentation of this file.
1//===- SPIRVBuiltins.cpp - SPIR-V Built-in Functions ------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file implements lowering builtin function calls and types using their
10// demangled names and TableGen records.
11//
12//===----------------------------------------------------------------------===//
13
14#include "SPIRVBuiltins.h"
15#include "SPIRV.h"
16#include "SPIRVUtils.h"
19#include "llvm/IR/IntrinsicsSPIRV.h"
20#include <string>
21#include <tuple>
22
23#define DEBUG_TYPE "spirv-builtins"
24
25namespace llvm {
26namespace SPIRV {
27#define GET_BuiltinGroup_DECL
28#include "SPIRVGenTables.inc"
29
32 InstructionSet::InstructionSet Set;
33 BuiltinGroup Group;
34 uint8_t MinNumArgs;
35 uint8_t MaxNumArgs;
36};
37
38#define GET_DemangledBuiltins_DECL
39#define GET_DemangledBuiltins_IMPL
40
42 const std::string BuiltinName;
44
48
55};
56
59 InstructionSet::InstructionSet Set;
61};
62
63#define GET_NativeBuiltins_DECL
64#define GET_NativeBuiltins_IMPL
65
70 bool IsElect;
80};
81
82#define GET_GroupBuiltins_DECL
83#define GET_GroupBuiltins_IMPL
84
85struct GetBuiltin {
87 InstructionSet::InstructionSet Set;
88 BuiltIn::BuiltIn Value;
89};
90
91using namespace BuiltIn;
92#define GET_GetBuiltins_DECL
93#define GET_GetBuiltins_IMPL
94
97 InstructionSet::InstructionSet Set;
99};
100
101#define GET_ImageQueryBuiltins_DECL
102#define GET_ImageQueryBuiltins_IMPL
103
106 InstructionSet::InstructionSet Set;
110 FPRoundingMode::FPRoundingMode RoundingMode;
111};
112
115 InstructionSet::InstructionSet Set;
118 FPRoundingMode::FPRoundingMode RoundingMode;
119};
120
121using namespace FPRoundingMode;
122#define GET_ConvertBuiltins_DECL
123#define GET_ConvertBuiltins_IMPL
124
125using namespace InstructionSet;
126#define GET_VectorLoadStoreBuiltins_DECL
127#define GET_VectorLoadStoreBuiltins_IMPL
128
129#define GET_CLMemoryScope_DECL
130#define GET_CLSamplerAddressingMode_DECL
131#define GET_CLMemoryFenceFlags_DECL
132#define GET_ExtendedBuiltins_DECL
133#include "SPIRVGenTables.inc"
134} // namespace SPIRV
135
136//===----------------------------------------------------------------------===//
137// Misc functions for looking up builtins and verifying requirements using
138// TableGen records
139//===----------------------------------------------------------------------===//
140
141/// Looks up the demangled builtin call in the SPIRVBuiltins.td records using
142/// the provided \p DemangledCall and specified \p Set.
143///
144/// The lookup follows the following algorithm, returning the first successful
145/// match:
146/// 1. Search with the plain demangled name (expecting a 1:1 match).
147/// 2. Search with the prefix before or suffix after the demangled name
148/// signifying the type of the first argument.
149///
150/// \returns Wrapper around the demangled call and found builtin definition.
151static std::unique_ptr<const SPIRV::IncomingCall>
// NOTE(review): extraction dropped original lines 152 and 155 here (the
// function-name line and the trailing arguments parameter). Confirm the exact
// signature against the upstream file before relying on this copy.
153                         SPIRV::InstructionSet::InstructionSet Set,
154                         Register ReturnRegister, const SPIRVType *ReturnType,
156  // Extract the builtin function name and types of arguments from the call
157  // skeleton.
158  std::string BuiltinName =
159      DemangledCall.substr(0, DemangledCall.find('(')).str();
160
161  // Check if the extracted name contains type information between angle
162  // brackets. If so, the builtin is an instantiated template - needs to have
163  // the information after angle brackets and return type removed.
// NOTE(review): find('<') is used directly as a boolean, so this condition is
// "true" when '<' is absent (npos is non-zero) and "false" when '<' is at
// index 0; the back() == '>' check is what actually gates the branch.
// Consider an explicit != std::string::npos comparison.
164  if (BuiltinName.find('<') && BuiltinName.back() == '>') {
165    BuiltinName = BuiltinName.substr(0, BuiltinName.find('<'));
166    BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(" ") + 1);
167  }
168
169  // Check if the extracted name begins with "__spirv_ImageSampleExplicitLod"
170  // contains return type information at the end "_R<type>", if so extract the
171  // plain builtin name without the type information.
172  if (StringRef(BuiltinName).contains("__spirv_ImageSampleExplicitLod") &&
173      StringRef(BuiltinName).contains("_R")) {
174    BuiltinName = BuiltinName.substr(0, BuiltinName.find("_R"));
175  }
176
  // Slice out the comma-separated argument-type list between the call's
  // parentheses; empty entries are not kept (KeepEmpty = false).
177  SmallVector<StringRef, 10> BuiltinArgumentTypes;
178  StringRef BuiltinArgs =
179      DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
180  BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);
181
182  // Look up the builtin in the defined set. Start with the plain demangled
183  // name, expecting a 1:1 match in the defined builtin set.
184  const SPIRV::DemangledBuiltin *Builtin;
185  if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
186    return std::make_unique<SPIRV::IncomingCall>(
187        BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
188
189  // If the initial look up was unsuccessful and the demangled call takes at
190  // least 1 argument, add a prefix or suffix signifying the type of the first
191  // argument and repeat the search.
192  if (BuiltinArgumentTypes.size() >= 1) {
    // Only the first character of the first mangled argument type is used to
    // classify it as unsigned / signed / floating-point.
193    char FirstArgumentType = BuiltinArgumentTypes[0][0];
194    // Prefix to be added to the builtin's name for lookup.
195    // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
196    std::string Prefix;
197
198    switch (FirstArgumentType) {
199    // Unsigned:
200    case 'u':
201      if (Set == SPIRV::InstructionSet::OpenCL_std)
202        Prefix = "u_";
203      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
204        Prefix = "u";
205      break;
206    // Signed:
207    case 'c':
208    case 's':
209    case 'i':
210    case 'l':
211      if (Set == SPIRV::InstructionSet::OpenCL_std)
212        Prefix = "s_";
213      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
214        Prefix = "s";
215      break;
216    // Floating-point:
217    case 'f':
218    case 'd':
219    case 'h':
220      if (Set == SPIRV::InstructionSet::OpenCL_std ||
221          Set == SPIRV::InstructionSet::GLSL_std_450)
222        Prefix = "f";
223      break;
224    }
225
226    // If argument-type name prefix was added, look up the builtin again.
227    if (!Prefix.empty() &&
228        (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
229      return std::make_unique<SPIRV::IncomingCall>(
230          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
231
232    // If lookup with a prefix failed, find a suffix to be added to the
233    // builtin's name for lookup. For example, OpenCL "group_reduce_max" taking
234    // an unsigned value has a suffix "u".
235    std::string Suffix;
236
237    switch (FirstArgumentType) {
238    // Unsigned:
239    case 'u':
240      Suffix = "u";
241      break;
242    // Signed:
243    case 'c':
244    case 's':
245    case 'i':
246    case 'l':
247      Suffix = "s";
248      break;
249    // Floating-point:
250    case 'f':
251    case 'd':
252    case 'h':
253      Suffix = "f";
254      break;
255    }
256
257    // If argument-type name suffix was added, look up the builtin again.
258    if (!Suffix.empty() &&
259        (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
260      return std::make_unique<SPIRV::IncomingCall>(
261          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
262  }
263
264  // No builtin with such name was found in the set.
265  return nullptr;
266}
267
268//===----------------------------------------------------------------------===//
269// Helper functions for building misc instructions
270//===----------------------------------------------------------------------===//
271
272/// Helper function building either a resulting scalar or vector bool register
273/// depending on the expected \p ResultType.
274///
275/// \returns Tuple of the resulting register and its type.
// NOTE(review): extraction dropped original lines 278 (trailing GR parameter),
// 286 (the declaration consumed by the cast on line 287) and 294 (the
// virtual-register creation assigned on line 293); reconstruct from upstream.
276static std::tuple<Register, SPIRVType *>
277buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
279  LLT Type;
280  SPIRVType *BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);
281
  // For a vector result, mirror the element count of ResultType onto a vector
  // of booleans; otherwise a scalar i1 suffices.
282  if (ResultType->getOpcode() == SPIRV::OpTypeVector) {
283    unsigned VectorElements = ResultType->getOperand(2).getImm();
284    BoolType =
285        GR->getOrCreateSPIRVVectorType(BoolType, VectorElements, MIRBuilder);
287        cast<FixedVectorType>(GR->getTypeForSPIRVType(BoolType));
288    Type = LLT::vector(LLVMVectorType->getElementCount(), 1);
289  } else {
290    Type = LLT::scalar(1);
291  }
292
293  Register ResultRegister =
295  MIRBuilder.getMRI()->setRegClass(ResultRegister, &SPIRV::IDRegClass);
296  GR->assignSPIRVTypeToVReg(BoolType, ResultRegister, MIRBuilder.getMF());
297  return std::make_tuple(ResultRegister, BoolType);
298}
299
300/// Helper function for building either a vector or scalar select instruction
301/// depending on the expected \p ResultType.
// NOTE(review): extraction dropped original lines 305 (trailing GR parameter)
// and 310 (the all-ones constant consumed on line 311); confirm upstream.
302static bool buildSelectInst(MachineIRBuilder &MIRBuilder,
303                            Register ReturnRegister, Register SourceRegister,
304                            const SPIRVType *ReturnType,
306  Register TrueConst, FalseConst;
307
  // Vector results select between an all-ones and an all-zeros constant
  // vector; scalar results select between integer 1 and 0.
308  if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
309    unsigned Bits = GR->getScalarOrVectorBitWidth(ReturnType);
311    TrueConst = GR->getOrCreateConsIntVector(AllOnes, MIRBuilder, ReturnType);
312    FalseConst = GR->getOrCreateConsIntVector(0, MIRBuilder, ReturnType);
313  } else {
314    TrueConst = GR->buildConstantInt(1, MIRBuilder, ReturnType);
315    FalseConst = GR->buildConstantInt(0, MIRBuilder, ReturnType);
316  }
317  return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
318                                FalseConst);
319}
320
321/// Helper function for building a load instruction loading into the
322/// \p DestinationReg.
// NOTE(review): extraction dropped original line 323 (the signature head with
// the BaseType/PtrRegister parameters) and line 335 (the MachinePointerInfo
// used by the buildLoad call on line 336); confirm upstream.
324                              MachineIRBuilder &MIRBuilder,
325                              SPIRVGlobalRegistry *GR, LLT LowLevelType,
326                              Register DestinationReg = Register(0)) {
327  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  // Lazily create the destination register (32-bit scalar, ID class) when the
  // caller did not supply one, and tag it with the SPIR-V base type.
328  if (!DestinationReg.isValid()) {
329    DestinationReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
330    MRI->setType(DestinationReg, LLT::scalar(32));
331    GR->assignSPIRVTypeToVReg(BaseType, DestinationReg, MIRBuilder.getMF());
332  }
333  // TODO: consider using correct address space and alignment (p0 is canonical
334  // type for selection though).
336  MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
337  return DestinationReg;
338}
339
340/// Helper function for building a load instruction for loading a builtin global
341/// variable of \p BuiltinValue value.
// NOTE(review): extraction dropped original lines 342 (function name line),
// 344 (MIRBuilder/GR parameters) and 352 (the pointer-type declaration
// consumed on line 353); confirm upstream.
343                                       SPIRVType *VariableType,
345                                       SPIRV::BuiltIn::BuiltIn BuiltinValue,
346                                       LLT LLType,
347                                       Register Reg = Register(0)) {
  // A pointer-typed register in the Input storage class refers to the global
  // builtin variable created below.
348  Register NewRegister =
349      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
350  MIRBuilder.getMRI()->setType(NewRegister,
351                               LLT::pointer(0, GR->getPointerSize()));
353      VariableType, MIRBuilder, SPIRV::StorageClass::Input);
354  GR->assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
355
356  // Set up the global OpVariable with the necessary builtin decorations.
357  Register Variable = GR->buildGlobalVariable(
358      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltinValue), nullptr,
359      SPIRV::StorageClass::Input, nullptr, true, true,
360      SPIRV::LinkageType::Import, MIRBuilder, false);
361
362  // Load the value from the global variable.
363  Register LoadedRegister =
364      buildLoadInst(VariableType, Variable, MIRBuilder, GR, LLType, Reg);
365  MIRBuilder.getMRI()->setType(LoadedRegister, LLType);
366  return LoadedRegister;
367}
368
369/// Helper external function for inserting ASSIGN_TYPE instruction between \p Reg
370/// and its definition, set the new register as a destination of the definition,
371/// assign SPIRVType to both registers. If SpirvTy is provided, use it as
372/// SPIRVType in ASSIGN_TYPE, otherwise create it from \p Ty. Defined in
373/// SPIRVPreLegalizer.cpp.
374extern Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
375                                  SPIRVGlobalRegistry *GR,
376                                  MachineIRBuilder &MIB,
377                                  MachineRegisterInfo &MRI);
378
379// TODO: Move to TableGen.
380static SPIRV::MemorySemantics::MemorySemantics
381getSPIRVMemSemantics(std::memory_order MemOrder) {
382 switch (MemOrder) {
383 case std::memory_order::memory_order_relaxed:
384 return SPIRV::MemorySemantics::None;
385 case std::memory_order::memory_order_acquire:
386 return SPIRV::MemorySemantics::Acquire;
387 case std::memory_order::memory_order_release:
388 return SPIRV::MemorySemantics::Release;
389 case std::memory_order::memory_order_acq_rel:
390 return SPIRV::MemorySemantics::AcquireRelease;
391 case std::memory_order::memory_order_seq_cst:
392 return SPIRV::MemorySemantics::SequentiallyConsistent;
393 default:
394 llvm_unreachable("Unknown CL memory scope");
395 }
396}
397
398static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
399 switch (ClScope) {
400 case SPIRV::CLMemoryScope::memory_scope_work_item:
401 return SPIRV::Scope::Invocation;
402 case SPIRV::CLMemoryScope::memory_scope_work_group:
403 return SPIRV::Scope::Workgroup;
404 case SPIRV::CLMemoryScope::memory_scope_device:
405 return SPIRV::Scope::Device;
406 case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
407 return SPIRV::Scope::CrossDevice;
408 case SPIRV::CLMemoryScope::memory_scope_sub_group:
409 return SPIRV::Scope::Subgroup;
410 }
411 llvm_unreachable("Unknown CL memory scope");
412}
413
// NOTE(review): extraction dropped this helper's leading signature lines (the
// name and first parameters). From the call sites it is buildConstantIntReg,
// taking a value, MIRBuilder and GR; the visible tail builds an integer
// constant register of the given bit width. Confirm against upstream.
416                                    unsigned BitWidth = 32) {
417  SPIRVType *IntType = GR->getOrCreateSPIRVIntegerType(BitWidth, MIRBuilder);
418  return GR->buildConstantInt(Val, MIRBuilder, IntType);
419}
420
// Returns a register carrying the SPIR-V scope operand: reuses the incoming
// CL-scope register when its constant value already equals the translated
// SPIR-V scope, otherwise materializes a fresh constant.
// NOTE(review): extraction dropped original lines 424-425 (the trailing
// GR/MRI parameters of the signature); confirm against upstream.
421static Register buildScopeReg(Register CLScopeRegister,
422                              SPIRV::Scope::Scope Scope,
423                              MachineIRBuilder &MIRBuilder,
426  if (CLScopeRegister.isValid()) {
427    auto CLScope =
428        static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
429    Scope = getSPIRVScope(CLScope);
430
    // Reuse the existing register only when the CL enumerator's numeric value
    // coincides with the SPIR-V scope value, so no retranslation is needed.
431    if (CLScope == static_cast<unsigned>(Scope)) {
432      MRI->setRegClass(CLScopeRegister, &SPIRV::IDRegClass);
433      return CLScopeRegister;
434    }
435  }
436  return buildConstantIntReg(Scope, MIRBuilder, GR);
437}
438
// Returns a register carrying the SPIR-V memory-semantics operand for an
// atomic on \p PtrRegister, updating \p Semantics (out-parameter) with the
// computed mask. Reuses the incoming register when its constant value already
// equals the computed semantics, else materializes a fresh constant.
// NOTE(review): extraction dropped original line 442 (trailing GR parameter)
// and line 449 (the storage-class semantics term OR-ed on line 448); confirm
// against upstream.
439static Register buildMemSemanticsReg(Register SemanticsRegister,
440                                     Register PtrRegister, unsigned &Semantics,
441                                     MachineIRBuilder &MIRBuilder,
443  if (SemanticsRegister.isValid()) {
444    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
445    std::memory_order Order =
446        static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));
447    Semantics =
448        getSPIRVMemSemantics(Order) |
450
    // NOTE(review): this compares a std::memory_order value against the
    // computed semantics mask; it only reuses the register when the two
    // numeric values happen to coincide. Verify this is intentional.
451    if (Order == Semantics) {
452      MRI->setRegClass(SemanticsRegister, &SPIRV::IDRegClass);
453      return SemanticsRegister;
454    }
455  }
456  return buildConstantIntReg(Semantics, MIRBuilder, GR);
457}
458
459/// Helper function for translating atomic init to OpStore.
// NOTE(review): extraction dropped original line 460 (the function name and
// leading Call parameter); confirm against upstream.
461                                MachineIRBuilder &MIRBuilder) {
  // atomic_init(object, value) lowers to a plain OpStore of value to object.
462  assert(Call->Arguments.size() == 2 &&
463         "Need 2 arguments for atomic init translation");
464  MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
465  MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
466  MIRBuilder.buildInstr(SPIRV::OpStore)
467      .addUse(Call->Arguments[0])
468      .addUse(Call->Arguments[1]);
469  return true;
470}
471
472/// Helper function for building an atomic load instruction.
// NOTE(review): extraction dropped original lines 473 (function name line),
// 475 (trailing GR parameter) and 496 (the storage-class semantics term OR-ed
// on line 495); confirm against upstream.
474                                MachineIRBuilder &MIRBuilder,
476  Register PtrRegister = Call->Arguments[0];
477  MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass);
478  // TODO: if true insert call to __translate_ocl_memory_scope before
479  // OpAtomicLoad and the function implementation. We can use Translator's
480  // output for transcoding/atomic_explicit_arguments.cl as an example.
  // Optional second argument carries an explicit scope; default is Device.
481  Register ScopeRegister;
482  if (Call->Arguments.size() > 1) {
483    ScopeRegister = Call->Arguments[1];
484    MIRBuilder.getMRI()->setRegClass(ScopeRegister, &SPIRV::IDRegClass);
485  } else
486    ScopeRegister = buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);
487
  // Optional third argument carries explicit semantics; default is seq_cst
  // plus the semantics implied by the pointer's storage class.
488  Register MemSemanticsReg;
489  if (Call->Arguments.size() > 2) {
490    // TODO: Insert call to __translate_ocl_memory_order before OpAtomicLoad.
491    MemSemanticsReg = Call->Arguments[2];
492    MIRBuilder.getMRI()->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
493  } else {
494    int Semantics =
495        SPIRV::MemorySemantics::SequentiallyConsistent |
497    MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GR);
498  }
499
500  MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
501      .addDef(Call->ReturnRegister)
502      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
503      .addUse(PtrRegister)
504      .addUse(ScopeRegister)
505      .addUse(MemSemanticsReg);
506  return true;
507}
508
509/// Helper function for building an atomic store instruction.
// NOTE(review): extraction dropped original lines 510 (function name line),
// 512 (trailing GR parameter) and 519 (the storage-class semantics term OR-ed
// on line 518); confirm against upstream.
511                                 MachineIRBuilder &MIRBuilder,
  // Scope defaults to Device; semantics default to seq_cst plus the mask
  // implied by the pointer's storage class.
513  Register ScopeRegister =
514      buildConstantIntReg(SPIRV::Scope::Device, MIRBuilder, GR);
515  Register PtrRegister = Call->Arguments[0];
516  MIRBuilder.getMRI()->setRegClass(PtrRegister, &SPIRV::IDRegClass);
517  int Semantics =
518      SPIRV::MemorySemantics::SequentiallyConsistent |
520  Register MemSemanticsReg = buildConstantIntReg(Semantics, MIRBuilder, GR);
521  MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
522  MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
523      .addUse(PtrRegister)
524      .addUse(ScopeRegister)
525      .addUse(MemSemanticsReg)
526      .addUse(Call->Arguments[1]);
527  return true;
528}
529
530/// Helper function for building an atomic compare-exchange instruction.
// NOTE(review): extraction dropped original lines 531/533 (the signature's
// name line and trailing GR parameter) and line 628 (the final operand of the
// instruction built on lines 620-627, presumably .addUse of the expected
// value); confirm against upstream.
532                                           MachineIRBuilder &MIRBuilder,
534  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
535  unsigned Opcode =
536      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
  // "cmpxchg" builtins take the expected value directly; the C11-style
  // compare_exchange builtins take a pointer to it and return a bool.
537  bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");
538  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
539
540  Register ObjectPtr = Call->Arguments[0];   // Pointer (volatile A *object.)
541  Register ExpectedArg = Call->Arguments[1]; // Comparator (C* expected).
542  Register Desired = Call->Arguments[2];     // Value (C Desired).
543  MRI->setRegClass(ObjectPtr, &SPIRV::IDRegClass);
544  MRI->setRegClass(ExpectedArg, &SPIRV::IDRegClass);
545  MRI->setRegClass(Desired, &SPIRV::IDRegClass);
546  SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
547  LLT DesiredLLT = MRI->getType(Desired);
548
549  assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
550         SPIRV::OpTypePointer);
551  unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
552  assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
553                   : ExpectedType == SPIRV::OpTypePointer);
554  assert(GR->isScalarOfType(Desired, SPIRV::OpTypeInt));
555
556  SPIRVType *SpvObjectPtrTy = GR->getSPIRVTypeForVReg(ObjectPtr);
557  assert(SpvObjectPtrTy->getOperand(2).isReg() && "SPIRV type is expected");
558  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
559      SpvObjectPtrTy->getOperand(1).getImm());
560  auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);
561
  // Defaults when no explicit memory orders are supplied.
562  Register MemSemEqualReg;
563  Register MemSemUnequalReg;
564  uint64_t MemSemEqual =
565      IsCmpxchg
566          ? SPIRV::MemorySemantics::None
567          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
568  uint64_t MemSemUnequal =
569      IsCmpxchg
570          ? SPIRV::MemorySemantics::None
571          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
572  if (Call->Arguments.size() >= 4) {
573    assert(Call->Arguments.size() >= 5 &&
574           "Need 5+ args for explicit atomic cmpxchg");
575    auto MemOrdEq =
576        static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
577    auto MemOrdNeq =
578        static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
579    MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
580    MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
    // NOTE(review): both conditions compare against MemSemEqual; the second
    // one looks like it should compare MemOrdNeq with MemSemUnequal —
    // suspected copy-paste defect, verify against upstream.
581    if (MemOrdEq == MemSemEqual)
582      MemSemEqualReg = Call->Arguments[3];
583    if (MemOrdNeq == MemSemEqual)
584      MemSemUnequalReg = Call->Arguments[4];
585    MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass);
586    MRI->setRegClass(Call->Arguments[4], &SPIRV::IDRegClass);
587  }
588  if (!MemSemEqualReg.isValid())
589    MemSemEqualReg = buildConstantIntReg(MemSemEqual, MIRBuilder, GR);
590  if (!MemSemUnequalReg.isValid())
591    MemSemUnequalReg = buildConstantIntReg(MemSemUnequal, MIRBuilder, GR);
592
593  Register ScopeReg;
594  auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
595  if (Call->Arguments.size() >= 6) {
596    assert(Call->Arguments.size() == 6 &&
597           "Extra args for explicit atomic cmpxchg");
598    auto ClScope = static_cast<SPIRV::CLMemoryScope>(
599        getIConstVal(Call->Arguments[5], MRI));
600    Scope = getSPIRVScope(ClScope);
601    if (ClScope == static_cast<unsigned>(Scope))
602      ScopeReg = Call->Arguments[5];
603    MRI->setRegClass(Call->Arguments[5], &SPIRV::IDRegClass);
604  }
605  if (!ScopeReg.isValid())
606    ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GR);
607
  // For compare_exchange the expected value must be loaded from the pointer
  // argument; cmpxchg already receives it by value.
608  Register Expected = IsCmpxchg
609                          ? ExpectedArg
610                          : buildLoadInst(SpvDesiredTy, ExpectedArg, MIRBuilder,
611                                          GR, LLT::scalar(32));
612  MRI->setType(Expected, DesiredLLT);
613  Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
614                            : Call->ReturnRegister;
615  if (!MRI->getRegClassOrNull(Tmp))
616    MRI->setRegClass(Tmp, &SPIRV::IDRegClass);
617  GR->assignSPIRVTypeToVReg(SpvDesiredTy, Tmp, MIRBuilder.getMF());
618
619  SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
620  MIRBuilder.buildInstr(Opcode)
621      .addDef(Tmp)
622      .addUse(GR->getSPIRVTypeID(IntTy))
623      .addUse(ObjectPtr)
624      .addUse(ScopeReg)
625      .addUse(MemSemEqualReg)
626      .addUse(MemSemUnequalReg)
627      .addUse(Desired)
  // For compare_exchange, store the observed value back through the expected
  // pointer and return whether the exchange succeeded.
629  if (!IsCmpxchg) {
630    MIRBuilder.buildInstr(SPIRV::OpStore).addUse(ExpectedArg).addUse(Tmp);
631    MIRBuilder.buildICmp(CmpInst::ICMP_EQ, Call->ReturnRegister, Tmp, Expected);
632  }
633  return true;
634}
635
636/// Helper function for building an atomic read-modify-write instruction
636/// (the original comment said "atomic load", which described the wrong
636/// helper; this one emits the RMW opcode passed in by the caller).
// NOTE(review): extraction dropped original line 639 (trailing GR parameter);
// confirm against upstream.
637static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
638                               MachineIRBuilder &MIRBuilder,
640  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  // Optional 4th argument is an explicit scope; default is Workgroup.
641  Register ScopeRegister =
642      Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();
643
644  assert(Call->Arguments.size() <= 4 &&
645         "Too many args for explicit atomic RMW");
646  ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
647                                MIRBuilder, GR, MRI);
648
  // Optional 3rd argument is an explicit memory order; buildMemSemanticsReg
  // fills in Semantics and falls back to a constant when absent.
649  Register PtrRegister = Call->Arguments[0];
650  unsigned Semantics = SPIRV::MemorySemantics::None;
651  MRI->setRegClass(PtrRegister, &SPIRV::IDRegClass);
652  Register MemSemanticsReg =
653      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
654  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
655                                         Semantics, MIRBuilder, GR);
656  MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
657  MIRBuilder.buildInstr(Opcode)
658      .addDef(Call->ReturnRegister)
659      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
660      .addUse(PtrRegister)
661      .addUse(ScopeRegister)
662      .addUse(MemSemanticsReg)
663      .addUse(Call->Arguments[1]);
664  return true;
665}
666
667/// Helper function for building atomic flag instructions (e.g.
668/// OpAtomicFlagTestAndSet).
// NOTE(review): extraction dropped original lines 669 (the function name and
// leading Call parameter) and 671 (trailing GR parameter); confirm upstream.
670                                unsigned Opcode, MachineIRBuilder &MIRBuilder,
672  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
673  Register PtrRegister = Call->Arguments[0];
  // Optional 2nd argument is an explicit memory order; default is seq_cst.
674  unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
675  Register MemSemanticsReg =
676      Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
677  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
678                                         Semantics, MIRBuilder, GR);
679
  // OpAtomicFlagClear must not use acquire-style semantics.
680  assert((Opcode != SPIRV::OpAtomicFlagClear ||
681          (Semantics != SPIRV::MemorySemantics::Acquire &&
682           Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
683         "Invalid memory order argument!");
684
  // Optional 3rd argument is an explicit scope; default is Device.
685  Register ScopeRegister =
686      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
687  ScopeRegister =
688      buildScopeReg(ScopeRegister, SPIRV::Scope::Device, MIRBuilder, GR, MRI);
689
690  auto MIB = MIRBuilder.buildInstr(Opcode);
  // Only test-and-set produces a result value; clear has no def.
691  if (Opcode == SPIRV::OpAtomicFlagTestAndSet)
692    MIB.addDef(Call->ReturnRegister)
693        .addUse(GR->getSPIRVTypeID(Call->ReturnType));
694
695  MIB.addUse(PtrRegister).addUse(ScopeRegister).addUse(MemSemanticsReg);
696  return true;
697}
698
699/// Helper function for building barriers, i.e., memory/control ordering
700/// operations.
// NOTE(review): extraction dropped original line 703 (trailing GR parameter);
// confirm against upstream.
701static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode,
702                             MachineIRBuilder &MIRBuilder,
704  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  // Translate the OpenCL CLK_*_MEM_FENCE flag bits into the SPIR-V
  // memory-semantics mask.
705  unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
706  unsigned MemSemantics = SPIRV::MemorySemantics::None;
707
708  if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
709    MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;
710
711  if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
712    MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;
713
714  if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
715    MemSemantics |= SPIRV::MemorySemantics::ImageMemory;
716
  // OpMemoryBarrier carries an explicit memory-order argument; the other
  // barrier forms imply sequential consistency.
717  if (Opcode == SPIRV::OpMemoryBarrier) {
718    std::memory_order MemOrder =
719        static_cast<std::memory_order>(getIConstVal(Call->Arguments[1], MRI));
720    MemSemantics = getSPIRVMemSemantics(MemOrder) | MemSemantics;
721  } else {
722    MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
723  }
724
  // Reuse the flags register when its value already equals the computed
  // semantics mask; otherwise materialize a constant.
725  Register MemSemanticsReg;
726  if (MemFlags == MemSemantics) {
727    MemSemanticsReg = Call->Arguments[0];
728    MRI->setRegClass(MemSemanticsReg, &SPIRV::IDRegClass);
729  } else
730    MemSemanticsReg = buildConstantIntReg(MemSemantics, MIRBuilder, GR);
731
  // Determine execution and memory scopes; an explicit trailing argument can
  // override the Workgroup default.
732  Register ScopeReg;
733  SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
734  SPIRV::Scope::Scope MemScope = Scope;
735  if (Call->Arguments.size() >= 2) {
736    assert(
737        ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
738         (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
739        "Extra args for explicitly scoped barrier");
740    Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
741                                                           : Call->Arguments[1];
742    SPIRV::CLMemoryScope CLScope =
743        static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
744    MemScope = getSPIRVScope(CLScope);
745    if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
746        (Opcode == SPIRV::OpMemoryBarrier))
747      Scope = MemScope;
748
749    if (CLScope == static_cast<unsigned>(Scope)) {
750      ScopeReg = Call->Arguments[1];
751      MRI->setRegClass(ScopeReg, &SPIRV::IDRegClass);
752    }
753  }
754
755  if (!ScopeReg.isValid())
756    ScopeReg = buildConstantIntReg(Scope, MIRBuilder, GR);
757
  // Control barriers take both an execution scope and a memory scope;
  // OpMemoryBarrier takes only the memory scope.
758  auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
759  if (Opcode != SPIRV::OpMemoryBarrier)
760    MIB.addUse(buildConstantIntReg(MemScope, MIRBuilder, GR));
761  MIB.addUse(MemSemanticsReg);
762  return true;
763}
764
765static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim) {
766 switch (dim) {
767 case SPIRV::Dim::DIM_1D:
768 case SPIRV::Dim::DIM_Buffer:
769 return 1;
770 case SPIRV::Dim::DIM_2D:
771 case SPIRV::Dim::DIM_Cube:
772 case SPIRV::Dim::DIM_Rect:
773 return 2;
774 case SPIRV::Dim::DIM_3D:
775 return 3;
776 default:
777 llvm_unreachable("Cannot get num components for given Dim");
778 }
779}
780
781/// Helper function for obtaining the number of size components.
782static unsigned getNumSizeComponents(SPIRVType *imgType) {
783 assert(imgType->getOpcode() == SPIRV::OpTypeImage);
784 auto dim = static_cast<SPIRV::Dim::Dim>(imgType->getOperand(2).getImm());
785 unsigned numComps = getNumComponentsForDim(dim);
786 bool arrayed = imgType->getOperand(4).getImm() == 1;
787 return arrayed ? numComps + 1 : numComps;
788}
789
790//===----------------------------------------------------------------------===//
791// Implementation functions for each builtin group
792//===----------------------------------------------------------------------===//
793
// Lowers a call that maps onto an extended-instruction-set opcode (OpExtInst).
// NOTE(review): extraction dropped original lines 796 (trailing GR parameter)
// and 799 (the declaration of Number assigned on line 800); confirm upstream.
794static bool generateExtInst(const SPIRV::IncomingCall *Call,
795                            MachineIRBuilder &MIRBuilder,
797  // Lookup the extended instruction number in the TableGen records.
798  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
800      SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;
801
802  // Build extended instruction.
803  auto MIB =
804      MIRBuilder.buildInstr(SPIRV::OpExtInst)
805          .addDef(Call->ReturnRegister)
806          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
807          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
808          .addImm(Number);
809
810  for (auto Argument : Call->Arguments)
811    MIB.addUse(Argument);
812  return true;
813}
814
// Lowers a relational builtin: emits the native comparison producing a bool
// (or bool vector), then selects 1/0 (or all-ones/zeros) into the declared
// return type.
// NOTE(review): extraction dropped the signature's name line (original 814)
// and the trailing GR parameter (original 816); confirm against upstream.
815                                   MachineIRBuilder &MIRBuilder,
817  // Lookup the instruction opcode in the TableGen records.
818  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
819  unsigned Opcode =
820      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
821
822  Register CompareRegister;
823  SPIRVType *RelationType;
824  std::tie(CompareRegister, RelationType) =
825      buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
826
827  // Build relational instruction.
828  auto MIB = MIRBuilder.buildInstr(Opcode)
829                 .addDef(CompareRegister)
830                 .addUse(GR->getSPIRVTypeID(RelationType));
831
832  for (auto Argument : Call->Arguments)
833    MIB.addUse(Argument);
834
835  // Build select instruction.
836  return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
837                         Call->ReturnType, GR);
838}
840
// Lowers work-group / sub-group builtins using the GroupBuiltins TableGen
// records.
// NOTE(review): extraction dropped the signature's name line (original 840)
// and the trailing GR parameter (original 842); confirm against upstream.
841                              MachineIRBuilder &MIRBuilder,
843  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
844  const SPIRV::GroupBuiltin *GroupBuiltin =
845      SPIRV::lookupGroupBuiltin(Builtin->Name);
846  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  // If the builtin takes a boolean argument that arrived as a non-bool
  // constant, rebuild it as a constant of SPIR-V bool type.
847  Register Arg0;
848  if (GroupBuiltin->HasBoolArg) {
849    Register ConstRegister = Call->Arguments[0];
850    auto ArgInstruction = getDefInstrMaybeConstant(ConstRegister, MRI);
851    // TODO: support non-constant bool values.
852    assert(ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT &&
853           "Only constant bool value args are supported");
854    if (GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode() !=
855        SPIRV::OpTypeBool)
856      Arg0 = GR->buildConstantInt(getIConstVal(ConstRegister, MRI), MIRBuilder,
857                                  GR->getOrCreateSPIRVBoolType(MIRBuilder));
858  }
859
860  Register GroupResultRegister = Call->ReturnRegister;
861  SPIRVType *GroupResultType = Call->ReturnType;
862
863  // TODO: maybe we need to check whether the result type is already boolean
864  // and in this case do not insert select instruction.
865  const bool HasBoolReturnTy =
866      GroupBuiltin->IsElect || GroupBuiltin->IsAllOrAny ||
867      GroupBuiltin->IsAllEqual || GroupBuiltin->IsLogical ||
868      GroupBuiltin->IsInverseBallot || GroupBuiltin->IsBallotBitExtract;
869
  // Bool-returning group ops produce into a temporary bool register which is
  // widened to the declared return type via select below.
870  if (HasBoolReturnTy)
871    std::tie(GroupResultRegister, GroupResultType) =
872        buildBoolRegister(MIRBuilder, Call->ReturnType, GR);
873
  // "sub_group_*" builtins operate at Subgroup scope, the rest at Workgroup.
874  auto Scope = Builtin->Name.startswith("sub_group") ? SPIRV::Scope::Subgroup
875                                                     : SPIRV::Scope::Workgroup;
876  Register ScopeRegister = buildConstantIntReg(Scope, MIRBuilder, GR);
877
878  // Build work/sub group instruction.
879  auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
880                 .addDef(GroupResultRegister)
881                 .addUse(GR->getSPIRVTypeID(GroupResultType))
882                 .addUse(ScopeRegister);
883
884  if (!GroupBuiltin->NoGroupOperation)
885    MIB.addImm(GroupBuiltin->GroupOperation);
886  if (Call->Arguments.size() > 0) {
887    MIB.addUse(Arg0.isValid() ? Arg0 : Call->Arguments[0]);
888    MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
889    for (unsigned i = 1; i < Call->Arguments.size(); i++) {
890      MIB.addUse(Call->Arguments[i]);
891      MRI->setRegClass(Call->Arguments[i], &SPIRV::IDRegClass);
892    }
893  }
894
895  // Build select instruction.
896  if (HasBoolReturnTy)
897    buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
898                    Call->ReturnType, GR);
899  return true;
900}
902
903// These queries ask for a single size_t result for a given dimension index, e.g
904// size_t get_global_id(uint dimindex). In SPIR-V, the builtins corresponding to
905// these values are all vec3 types, so we need to extract the correct index or
906// return defaultVal (0 or 1 depending on the query). We also handle extending
907// or truncating in case size_t does not match the expected result type's
908// bitwidth.
909//
910// For a constant index >= 3 we generate:
911// %res = OpConstant %SizeT 0
912//
913// For other indices we generate:
914// %g = OpVariable %ptr_V3_SizeT Input
915// OpDecorate %g BuiltIn XXX
916// OpDecorate %g LinkageAttributes "__spirv_BuiltInXXX"
917// OpDecorate %g Constant
918// %loadedVec = OpLoad %V3_SizeT %g
919//
920// Then, if the index is constant < 3, we generate:
921// %res = OpCompositeExtract %SizeT %loadedVec idx
922// If the index is dynamic, we generate:
923// %tmp = OpVectorExtractDynamic %SizeT %loadedVec %idx
924// %cmp = OpULessThan %bool %idx %const_3
925// %res = OpSelect %SizeT %cmp %tmp %const_0
926//
927// If the bitwidth of %res does not match the expected return type, we add an
928// extend or truncate.
930 MachineIRBuilder &MIRBuilder,
932 SPIRV::BuiltIn::BuiltIn BuiltinValue,
933 uint64_t DefaultValue) {
934 Register IndexRegister = Call->Arguments[0];
935 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
936 const unsigned PointerSize = GR->getPointerSize();
937 const SPIRVType *PointerSizeType =
938 GR->getOrCreateSPIRVIntegerType(PointerSize, MIRBuilder);
939 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
940 auto IndexInstruction = getDefInstrMaybeConstant(IndexRegister, MRI);
941
942 // Set up the final register to do truncation or extension on at the end.
943 Register ToTruncate = Call->ReturnRegister;
944
945 // If the index is constant, we can statically determine if it is in range.
946 bool IsConstantIndex =
947 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
948
949 // If it's out of range (max dimension is 3), we can just return the constant
950 // default value (0 or 1 depending on which query function).
951 if (IsConstantIndex && getIConstVal(IndexRegister, MRI) >= 3) {
952 Register DefaultReg = Call->ReturnRegister;
953 if (PointerSize != ResultWidth) {
954 DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
955 MRI->setRegClass(DefaultReg, &SPIRV::IDRegClass);
956 GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
957 MIRBuilder.getMF());
958 ToTruncate = DefaultReg;
959 }
960 auto NewRegister =
961 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);
962 MIRBuilder.buildCopy(DefaultReg, NewRegister);
963 } else { // If it could be in range, we need to load from the given builtin.
964 auto Vec3Ty =
965 GR->getOrCreateSPIRVVectorType(PointerSizeType, 3, MIRBuilder);
966 Register LoadedVector =
967 buildBuiltinVariableLoad(MIRBuilder, Vec3Ty, GR, BuiltinValue,
968 LLT::fixed_vector(3, PointerSize));
969 // Set up the vreg to extract the result to (possibly a new temporary one).
970 Register Extracted = Call->ReturnRegister;
971 if (!IsConstantIndex || PointerSize != ResultWidth) {
972 Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
973 MRI->setRegClass(Extracted, &SPIRV::IDRegClass);
974 GR->assignSPIRVTypeToVReg(PointerSizeType, Extracted, MIRBuilder.getMF());
975 }
976 // Use Intrinsic::spv_extractelt so dynamic vs static extraction is
977 // handled later: extr = spv_extractelt LoadedVector, IndexRegister.
978 MachineInstrBuilder ExtractInst = MIRBuilder.buildIntrinsic(
979 Intrinsic::spv_extractelt, ArrayRef<Register>{Extracted}, true, false);
980 ExtractInst.addUse(LoadedVector).addUse(IndexRegister);
981
982 // If the index is dynamic, need check if it's < 3, and then use a select.
983 if (!IsConstantIndex) {
984 insertAssignInstr(Extracted, nullptr, PointerSizeType, GR, MIRBuilder,
985 *MRI);
986
987 auto IndexType = GR->getSPIRVTypeForVReg(IndexRegister);
988 auto BoolType = GR->getOrCreateSPIRVBoolType(MIRBuilder);
989
990 Register CompareRegister =
991 MRI->createGenericVirtualRegister(LLT::scalar(1));
992 MRI->setRegClass(CompareRegister, &SPIRV::IDRegClass);
993 GR->assignSPIRVTypeToVReg(BoolType, CompareRegister, MIRBuilder.getMF());
994
995 // Use G_ICMP to check if idxVReg < 3.
996 MIRBuilder.buildICmp(CmpInst::ICMP_ULT, CompareRegister, IndexRegister,
997 GR->buildConstantInt(3, MIRBuilder, IndexType));
998
999 // Get constant for the default value (0 or 1 depending on which
1000 // function).
1001 Register DefaultRegister =
1002 GR->buildConstantInt(DefaultValue, MIRBuilder, PointerSizeType);
1003
1004 // Get a register for the selection result (possibly a new temporary one).
1005 Register SelectionResult = Call->ReturnRegister;
1006 if (PointerSize != ResultWidth) {
1007 SelectionResult =
1008 MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
1009 MRI->setRegClass(SelectionResult, &SPIRV::IDRegClass);
1010 GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
1011 MIRBuilder.getMF());
1012 }
1013 // Create the final G_SELECT to return the extracted value or the default.
1014 MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
1015 DefaultRegister);
1016 ToTruncate = SelectionResult;
1017 } else {
1018 ToTruncate = Extracted;
1019 }
1020 }
1021 // Alter the result's bitwidth if it does not match the SizeT value extracted.
1022 if (PointerSize != ResultWidth)
1023 MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
1024 return true;
1025}
1026
1028 MachineIRBuilder &MIRBuilder,
1029 SPIRVGlobalRegistry *GR) {
1030 // Lookup the builtin variable record.
1031 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1032 SPIRV::BuiltIn::BuiltIn Value =
1033 SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;
1034
1035 if (Value == SPIRV::BuiltIn::GlobalInvocationId)
1036 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);
1037
1038 // Build a load instruction for the builtin variable.
1039 unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
1040 LLT LLType;
1041 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1042 LLType =
1043 LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
1044 else
1045 LLType = LLT::scalar(BitWidth);
1046
1047 return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
1048 LLType, Call->ReturnRegister);
1049}
1050
1052 MachineIRBuilder &MIRBuilder,
1053 SPIRVGlobalRegistry *GR) {
1054 // Lookup the instruction opcode in the TableGen records.
1055 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1056 unsigned Opcode =
1057 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1058
1059 switch (Opcode) {
1060 case SPIRV::OpStore:
1061 return buildAtomicInitInst(Call, MIRBuilder);
1062 case SPIRV::OpAtomicLoad:
1063 return buildAtomicLoadInst(Call, MIRBuilder, GR);
1064 case SPIRV::OpAtomicStore:
1065 return buildAtomicStoreInst(Call, MIRBuilder, GR);
1066 case SPIRV::OpAtomicCompareExchange:
1067 case SPIRV::OpAtomicCompareExchangeWeak:
1068 return buildAtomicCompareExchangeInst(Call, MIRBuilder, GR);
1069 case SPIRV::OpAtomicIAdd:
1070 case SPIRV::OpAtomicISub:
1071 case SPIRV::OpAtomicOr:
1072 case SPIRV::OpAtomicXor:
1073 case SPIRV::OpAtomicAnd:
1074 case SPIRV::OpAtomicExchange:
1075 return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
1076 case SPIRV::OpMemoryBarrier:
1077 return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
1078 case SPIRV::OpAtomicFlagTestAndSet:
1079 case SPIRV::OpAtomicFlagClear:
1080 return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
1081 default:
1082 return false;
1083 }
1084}
1085
1087 MachineIRBuilder &MIRBuilder,
1088 SPIRVGlobalRegistry *GR) {
1089 // Lookup the instruction opcode in the TableGen records.
1090 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1091 unsigned Opcode =
1092 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1093
1094 return buildBarrierInst(Call, Opcode, MIRBuilder, GR);
1095}
1096
1098 MachineIRBuilder &MIRBuilder,
1099 SPIRVGlobalRegistry *GR) {
1100 unsigned Opcode = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode();
1101 bool IsVec = Opcode == SPIRV::OpTypeVector;
1102 // Use OpDot only in case of vector args and OpFMul in case of scalar args.
1103 MIRBuilder.buildInstr(IsVec ? SPIRV::OpDot : SPIRV::OpFMulS)
1104 .addDef(Call->ReturnRegister)
1105 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1106 .addUse(Call->Arguments[0])
1107 .addUse(Call->Arguments[1]);
1108 return true;
1109}
1110
1112 MachineIRBuilder &MIRBuilder,
1113 SPIRVGlobalRegistry *GR) {
1114 // Lookup the builtin record.
1115 SPIRV::BuiltIn::BuiltIn Value =
1116 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
1117 uint64_t IsDefault = (Value == SPIRV::BuiltIn::GlobalSize ||
1118 Value == SPIRV::BuiltIn::WorkgroupSize ||
1119 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
1120 return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefault ? 1 : 0);
1121}
1122
1124 MachineIRBuilder &MIRBuilder,
1125 SPIRVGlobalRegistry *GR) {
1126 // Lookup the image size query component number in the TableGen records.
1127 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1128 uint32_t Component =
1129 SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;
1130 // Query result may either be a vector or a scalar. If return type is not a
1131 // vector, expect only a single size component. Otherwise get the number of
1132 // expected components.
1133 SPIRVType *RetTy = Call->ReturnType;
1134 unsigned NumExpectedRetComponents = RetTy->getOpcode() == SPIRV::OpTypeVector
1135 ? RetTy->getOperand(2).getImm()
1136 : 1;
1137 // Get the actual number of query result/size components.
1138 SPIRVType *ImgType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1139 unsigned NumActualRetComponents = getNumSizeComponents(ImgType);
1140 Register QueryResult = Call->ReturnRegister;
1141 SPIRVType *QueryResultType = Call->ReturnType;
1142 if (NumExpectedRetComponents != NumActualRetComponents) {
1143 QueryResult = MIRBuilder.getMRI()->createGenericVirtualRegister(
1144 LLT::fixed_vector(NumActualRetComponents, 32));
1145 MIRBuilder.getMRI()->setRegClass(QueryResult, &SPIRV::IDRegClass);
1146 SPIRVType *IntTy = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
1147 QueryResultType = GR->getOrCreateSPIRVVectorType(
1148 IntTy, NumActualRetComponents, MIRBuilder);
1149 GR->assignSPIRVTypeToVReg(QueryResultType, QueryResult, MIRBuilder.getMF());
1150 }
1151 bool IsDimBuf = ImgType->getOperand(2).getImm() == SPIRV::Dim::DIM_Buffer;
1152 unsigned Opcode =
1153 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
1154 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1155 auto MIB = MIRBuilder.buildInstr(Opcode)
1156 .addDef(QueryResult)
1157 .addUse(GR->getSPIRVTypeID(QueryResultType))
1158 .addUse(Call->Arguments[0]);
1159 if (!IsDimBuf)
1160 MIB.addUse(buildConstantIntReg(0, MIRBuilder, GR)); // Lod id.
1161 if (NumExpectedRetComponents == NumActualRetComponents)
1162 return true;
1163 if (NumExpectedRetComponents == 1) {
1164 // Only 1 component is expected, build OpCompositeExtract instruction.
1165 unsigned ExtractedComposite =
1166 Component == 3 ? NumActualRetComponents - 1 : Component;
1167 assert(ExtractedComposite < NumActualRetComponents &&
1168 "Invalid composite index!");
1169 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1170 .addDef(Call->ReturnRegister)
1171 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1172 .addUse(QueryResult)
1173 .addImm(ExtractedComposite);
1174 } else {
1175 // More than 1 component is expected, fill a new vector.
1176 auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
1177 .addDef(Call->ReturnRegister)
1178 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1179 .addUse(QueryResult)
1180 .addUse(QueryResult);
1181 for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
1182 MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
1183 }
1184 return true;
1185}
1186
1188 MachineIRBuilder &MIRBuilder,
1189 SPIRVGlobalRegistry *GR) {
1190 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
1191 "Image samples query result must be of int type!");
1192
1193 // Lookup the instruction opcode in the TableGen records.
1194 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1195 unsigned Opcode =
1196 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1197
1198 Register Image = Call->Arguments[0];
1199 MIRBuilder.getMRI()->setRegClass(Image, &SPIRV::IDRegClass);
1200 SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
1201 GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
1202
1203 switch (Opcode) {
1204 case SPIRV::OpImageQuerySamples:
1205 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
1206 "Image must be of 2D dimensionality");
1207 break;
1208 case SPIRV::OpImageQueryLevels:
1209 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
1210 ImageDimensionality == SPIRV::Dim::DIM_2D ||
1211 ImageDimensionality == SPIRV::Dim::DIM_3D ||
1212 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
1213 "Image must be of 1D/2D/3D/Cube dimensionality");
1214 break;
1215 }
1216
1217 MIRBuilder.buildInstr(Opcode)
1218 .addDef(Call->ReturnRegister)
1219 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1220 .addUse(Image);
1221 return true;
1222}
1223
1224// TODO: Move to TableGen.
1225static SPIRV::SamplerAddressingMode::SamplerAddressingMode
1227 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
1228 case SPIRV::CLK_ADDRESS_CLAMP:
1229 return SPIRV::SamplerAddressingMode::Clamp;
1230 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
1231 return SPIRV::SamplerAddressingMode::ClampToEdge;
1232 case SPIRV::CLK_ADDRESS_REPEAT:
1233 return SPIRV::SamplerAddressingMode::Repeat;
1234 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
1235 return SPIRV::SamplerAddressingMode::RepeatMirrored;
1236 case SPIRV::CLK_ADDRESS_NONE:
1237 return SPIRV::SamplerAddressingMode::None;
1238 default:
1239 llvm_unreachable("Unknown CL address mode");
1240 }
1241}
1242
1243static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
1244 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
1245}
1246
1247static SPIRV::SamplerFilterMode::SamplerFilterMode
1249 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
1250 return SPIRV::SamplerFilterMode::Linear;
1251 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
1252 return SPIRV::SamplerFilterMode::Nearest;
1253 return SPIRV::SamplerFilterMode::Nearest;
1254}
1255
1256static bool generateReadImageInst(const StringRef DemangledCall,
1257 const SPIRV::IncomingCall *Call,
1258 MachineIRBuilder &MIRBuilder,
1259 SPIRVGlobalRegistry *GR) {
1260 Register Image = Call->Arguments[0];
1261 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1262 MRI->setRegClass(Image, &SPIRV::IDRegClass);
1263 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1264 bool HasOclSampler = DemangledCall.contains_insensitive("ocl_sampler");
1265 bool HasMsaa = DemangledCall.contains_insensitive("msaa");
1266 if (HasOclSampler || HasMsaa)
1267 MRI->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
1268 if (HasOclSampler) {
1269 Register Sampler = Call->Arguments[1];
1270
1271 if (!GR->isScalarOfType(Sampler, SPIRV::OpTypeSampler) &&
1272 getDefInstrMaybeConstant(Sampler, MRI)->getOperand(1).isCImm()) {
1273 uint64_t SamplerMask = getIConstVal(Sampler, MRI);
1274 Sampler = GR->buildConstantSampler(
1276 getSamplerParamFromBitmask(SamplerMask),
1277 getSamplerFilterModeFromBitmask(SamplerMask), MIRBuilder,
1278 GR->getSPIRVTypeForVReg(Sampler));
1279 }
1280 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
1281 SPIRVType *SampledImageType =
1282 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
1283 Register SampledImage = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1284
1285 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
1286 .addDef(SampledImage)
1287 .addUse(GR->getSPIRVTypeID(SampledImageType))
1288 .addUse(Image)
1289 .addUse(Sampler);
1290
1292 MIRBuilder);
1293 SPIRVType *TempType = Call->ReturnType;
1294 bool NeedsExtraction = false;
1295 if (TempType->getOpcode() != SPIRV::OpTypeVector) {
1296 TempType =
1297 GR->getOrCreateSPIRVVectorType(Call->ReturnType, 4, MIRBuilder);
1298 NeedsExtraction = true;
1299 }
1300 LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(TempType));
1301 Register TempRegister = MRI->createGenericVirtualRegister(LLType);
1302 MRI->setRegClass(TempRegister, &SPIRV::IDRegClass);
1303 GR->assignSPIRVTypeToVReg(TempType, TempRegister, MIRBuilder.getMF());
1304
1305 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1306 .addDef(NeedsExtraction ? TempRegister : Call->ReturnRegister)
1307 .addUse(GR->getSPIRVTypeID(TempType))
1308 .addUse(SampledImage)
1309 .addUse(Call->Arguments[2]) // Coordinate.
1310 .addImm(SPIRV::ImageOperand::Lod)
1311 .addUse(Lod);
1312
1313 if (NeedsExtraction)
1314 MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
1315 .addDef(Call->ReturnRegister)
1316 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1317 .addUse(TempRegister)
1318 .addImm(0);
1319 } else if (HasMsaa) {
1320 MIRBuilder.buildInstr(SPIRV::OpImageRead)
1321 .addDef(Call->ReturnRegister)
1322 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1323 .addUse(Image)
1324 .addUse(Call->Arguments[1]) // Coordinate.
1325 .addImm(SPIRV::ImageOperand::Sample)
1326 .addUse(Call->Arguments[2]);
1327 } else {
1328 MIRBuilder.buildInstr(SPIRV::OpImageRead)
1329 .addDef(Call->ReturnRegister)
1330 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1331 .addUse(Image)
1332 .addUse(Call->Arguments[1]); // Coordinate.
1333 }
1334 return true;
1335}
1336
1338 MachineIRBuilder &MIRBuilder,
1339 SPIRVGlobalRegistry *GR) {
1340 MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1341 MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1342 MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
1343 MIRBuilder.buildInstr(SPIRV::OpImageWrite)
1344 .addUse(Call->Arguments[0]) // Image.
1345 .addUse(Call->Arguments[1]) // Coordinate.
1346 .addUse(Call->Arguments[2]); // Texel.
1347 return true;
1348}
1349
1350static bool generateSampleImageInst(const StringRef DemangledCall,
1351 const SPIRV::IncomingCall *Call,
1352 MachineIRBuilder &MIRBuilder,
1353 SPIRVGlobalRegistry *GR) {
1354 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1355 if (Call->Builtin->Name.contains_insensitive(
1356 "__translate_sampler_initializer")) {
1357 // Build sampler literal.
1358 uint64_t Bitmask = getIConstVal(Call->Arguments[0], MRI);
1359 Register Sampler = GR->buildConstantSampler(
1360 Call->ReturnRegister, getSamplerAddressingModeFromBitmask(Bitmask),
1362 getSamplerFilterModeFromBitmask(Bitmask), MIRBuilder, Call->ReturnType);
1363 return Sampler.isValid();
1364 } else if (Call->Builtin->Name.contains_insensitive("__spirv_SampledImage")) {
1365 // Create OpSampledImage.
1366 Register Image = Call->Arguments[0];
1367 SPIRVType *ImageType = GR->getSPIRVTypeForVReg(Image);
1368 SPIRVType *SampledImageType =
1369 GR->getOrCreateOpTypeSampledImage(ImageType, MIRBuilder);
1370 Register SampledImage =
1371 Call->ReturnRegister.isValid()
1372 ? Call->ReturnRegister
1373 : MRI->createVirtualRegister(&SPIRV::IDRegClass);
1374 MIRBuilder.buildInstr(SPIRV::OpSampledImage)
1375 .addDef(SampledImage)
1376 .addUse(GR->getSPIRVTypeID(SampledImageType))
1377 .addUse(Image)
1378 .addUse(Call->Arguments[1]); // Sampler.
1379 return true;
1380 } else if (Call->Builtin->Name.contains_insensitive(
1381 "__spirv_ImageSampleExplicitLod")) {
1382 // Sample an image using an explicit level of detail.
1383 std::string ReturnType = DemangledCall.str();
1384 if (DemangledCall.contains("_R")) {
1385 ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
1386 ReturnType = ReturnType.substr(0, ReturnType.find('('));
1387 }
1388 SPIRVType *Type = GR->getOrCreateSPIRVTypeByName(ReturnType, MIRBuilder);
1389 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1390 MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
1391 MRI->setRegClass(Call->Arguments[3], &SPIRV::IDRegClass);
1392
1393 MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
1394 .addDef(Call->ReturnRegister)
1396 .addUse(Call->Arguments[0]) // Image.
1397 .addUse(Call->Arguments[1]) // Coordinate.
1398 .addImm(SPIRV::ImageOperand::Lod)
1399 .addUse(Call->Arguments[3]);
1400 return true;
1401 }
1402 return false;
1403}
1404
1406 MachineIRBuilder &MIRBuilder) {
1407 MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
1408 Call->Arguments[1], Call->Arguments[2]);
1409 return true;
1410}
1411
1413 MachineIRBuilder &MIRBuilder,
1414 SPIRVGlobalRegistry *GR) {
1415 // Lookup the instruction opcode in the TableGen records.
1416 const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
1417 unsigned Opcode =
1418 SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
1419 const MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1420
1421 switch (Opcode) {
1422 case SPIRV::OpSpecConstant: {
1423 // Build the SpecID decoration.
1424 unsigned SpecId =
1425 static_cast<unsigned>(getIConstVal(Call->Arguments[0], MRI));
1426 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
1427 {SpecId});
1428 // Determine the constant MI.
1429 Register ConstRegister = Call->Arguments[1];
1430 const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
1431 assert(Const &&
1432 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
1433 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
1434 "Argument should be either an int or floating-point constant");
1435 // Determine the opcode and built the OpSpec MI.
1436 const MachineOperand &ConstOperand = Const->getOperand(1);
1437 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
1438 assert(ConstOperand.isCImm() && "Int constant operand is expected");
1439 Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
1440 ? SPIRV::OpSpecConstantTrue
1441 : SPIRV::OpSpecConstantFalse;
1442 }
1443 auto MIB = MIRBuilder.buildInstr(Opcode)
1444 .addDef(Call->ReturnRegister)
1445 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1446
1447 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
1448 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
1449 addNumImm(ConstOperand.getCImm()->getValue(), MIB);
1450 else
1451 addNumImm(ConstOperand.getFPImm()->getValueAPF().bitcastToAPInt(), MIB);
1452 }
1453 return true;
1454 }
1455 case SPIRV::OpSpecConstantComposite: {
1456 auto MIB = MIRBuilder.buildInstr(Opcode)
1457 .addDef(Call->ReturnRegister)
1458 .addUse(GR->getSPIRVTypeID(Call->ReturnType));
1459 for (unsigned i = 0; i < Call->Arguments.size(); i++)
1460 MIB.addUse(Call->Arguments[i]);
1461 return true;
1462 }
1463 default:
1464 return false;
1465 }
1466}
1467
1468static bool buildNDRange(const SPIRV::IncomingCall *Call,
1469 MachineIRBuilder &MIRBuilder,
1470 SPIRVGlobalRegistry *GR) {
1471 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1472 MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
1473 SPIRVType *PtrType = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
1474 assert(PtrType->getOpcode() == SPIRV::OpTypePointer &&
1475 PtrType->getOperand(2).isReg());
1476 Register TypeReg = PtrType->getOperand(2).getReg();
1478 MachineFunction &MF = MIRBuilder.getMF();
1479 Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1480 GR->assignSPIRVTypeToVReg(StructType, TmpReg, MF);
1481 // Skip the first arg, it's the destination pointer. OpBuildNDRange takes
1482 // three other arguments, so pass zero constant on absence.
1483 unsigned NumArgs = Call->Arguments.size();
1484 assert(NumArgs >= 2);
1485 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
1486 MRI->setRegClass(GlobalWorkSize, &SPIRV::IDRegClass);
1487 Register LocalWorkSize =
1488 NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
1489 if (LocalWorkSize.isValid())
1490 MRI->setRegClass(LocalWorkSize, &SPIRV::IDRegClass);
1491 Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];
1492 if (GlobalWorkOffset.isValid())
1493 MRI->setRegClass(GlobalWorkOffset, &SPIRV::IDRegClass);
1494 if (NumArgs < 4) {
1495 Register Const;
1496 SPIRVType *SpvTy = GR->getSPIRVTypeForVReg(GlobalWorkSize);
1497 if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
1498 MachineInstr *DefInstr = MRI->getUniqueVRegDef(GlobalWorkSize);
1499 assert(DefInstr && isSpvIntrinsic(*DefInstr, Intrinsic::spv_gep) &&
1500 DefInstr->getOperand(3).isReg());
1501 Register GWSPtr = DefInstr->getOperand(3).getReg();
1502 if (!MRI->getRegClassOrNull(GWSPtr))
1503 MRI->setRegClass(GWSPtr, &SPIRV::IDRegClass);
1504 // TODO: Maybe simplify generation of the type of the fields.
1505 unsigned Size = Call->Builtin->Name.equals("ndrange_3D") ? 3 : 2;
1506 unsigned BitWidth = GR->getPointerSize() == 64 ? 64 : 32;
1508 Type *FieldTy = ArrayType::get(BaseTy, Size);
1509 SPIRVType *SpvFieldTy = GR->getOrCreateSPIRVType(FieldTy, MIRBuilder);
1510 GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1511 GR->assignSPIRVTypeToVReg(SpvFieldTy, GlobalWorkSize, MF);
1512 MIRBuilder.buildInstr(SPIRV::OpLoad)
1513 .addDef(GlobalWorkSize)
1514 .addUse(GR->getSPIRVTypeID(SpvFieldTy))
1515 .addUse(GWSPtr);
1516 Const = GR->getOrCreateConsIntArray(0, MIRBuilder, SpvFieldTy);
1517 } else {
1518 Const = GR->buildConstantInt(0, MIRBuilder, SpvTy);
1519 }
1520 if (!LocalWorkSize.isValid())
1521 LocalWorkSize = Const;
1522 if (!GlobalWorkOffset.isValid())
1523 GlobalWorkOffset = Const;
1524 }
1525 assert(LocalWorkSize.isValid() && GlobalWorkOffset.isValid());
1526 MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
1527 .addDef(TmpReg)
1528 .addUse(TypeReg)
1529 .addUse(GlobalWorkSize)
1530 .addUse(LocalWorkSize)
1531 .addUse(GlobalWorkOffset);
1532 return MIRBuilder.buildInstr(SPIRV::OpStore)
1533 .addUse(Call->Arguments[0])
1534 .addUse(TmpReg);
1535}
1536
1539 // We expect the following sequence of instructions:
1540 // %0:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.alloca)
1541 // or = G_GLOBAL_VALUE @block_literal_global
1542 // %1:_(pN) = G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.spv.bitcast), %0
1543 // %2:_(p4) = G_ADDRSPACE_CAST %1:_(pN)
1544 MachineInstr *MI = MRI->getUniqueVRegDef(ParamReg);
1545 assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
1546 MI->getOperand(1).isReg());
1547 Register BitcastReg = MI->getOperand(1).getReg();
1548 MachineInstr *BitcastMI = MRI->getUniqueVRegDef(BitcastReg);
1549 assert(isSpvIntrinsic(*BitcastMI, Intrinsic::spv_bitcast) &&
1550 BitcastMI->getOperand(2).isReg());
1551 Register ValueReg = BitcastMI->getOperand(2).getReg();
1552 MachineInstr *ValueMI = MRI->getUniqueVRegDef(ValueReg);
1553 return ValueMI;
1554}
1555
1556// Return an integer constant corresponding to the given register and
1557// defined in spv_track_constant.
1558// TODO: maybe unify with prelegalizer pass.
1560 MachineInstr *DefMI = MRI->getUniqueVRegDef(Reg);
1561 assert(isSpvIntrinsic(*DefMI, Intrinsic::spv_track_constant) &&
1562 DefMI->getOperand(2).isReg());
1563 MachineInstr *DefMI2 = MRI->getUniqueVRegDef(DefMI->getOperand(2).getReg());
1564 assert(DefMI2->getOpcode() == TargetOpcode::G_CONSTANT &&
1565 DefMI2->getOperand(1).isCImm());
1566 return DefMI2->getOperand(1).getCImm()->getValue().getZExtValue();
1567}
1568
1569// Return type of the instruction result from spv_assign_type intrinsic.
1570// TODO: maybe unify with prelegalizer pass.
1572 MachineInstr *NextMI = MI->getNextNode();
1573 if (isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_name))
1574 NextMI = NextMI->getNextNode();
1575 Register ValueReg = MI->getOperand(0).getReg();
1576 if (!isSpvIntrinsic(*NextMI, Intrinsic::spv_assign_type) ||
1577 NextMI->getOperand(1).getReg() != ValueReg)
1578 return nullptr;
1579 Type *Ty = getMDOperandAsType(NextMI->getOperand(2).getMetadata(), 0);
1580 assert(Ty && "Type is expected");
1581 return getTypedPtrEltType(Ty);
1582}
1583
1584static const Type *getBlockStructType(Register ParamReg,
1586 // In principle, this information should be passed to us from Clang via
1587 // an elementtype attribute. However, said attribute requires that
1588 // the function call be an intrinsic, which is not. Instead, we rely on being
1589 // able to trace this to the declaration of a variable: OpenCL C specification
1590 // section 6.12.5 should guarantee that we can do this.
1591 MachineInstr *MI = getBlockStructInstr(ParamReg, MRI);
1592 if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
1593 return getTypedPtrEltType(MI->getOperand(1).getGlobal()->getType());
1594 assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
1595 "Blocks in OpenCL C must be traceable to allocation site");
1596 return getMachineInstrType(MI);
1597}
1598
1599// TODO: maybe move to the global register.
1600static SPIRVType *
1602 SPIRVGlobalRegistry *GR) {
1603 LLVMContext &Context = MIRBuilder.getMF().getFunction().getContext();
1604 Type *OpaqueType = StructType::getTypeByName(Context, "spirv.DeviceEvent");
1605 if (!OpaqueType)
1606 OpaqueType = StructType::getTypeByName(Context, "opencl.clk_event_t");
1607 if (!OpaqueType)
1608 OpaqueType = StructType::create(Context, "spirv.DeviceEvent");
1609 unsigned SC0 = storageClassToAddressSpace(SPIRV::StorageClass::Function);
1610 unsigned SC1 = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
1611 Type *PtrType = PointerType::get(PointerType::get(OpaqueType, SC0), SC1);
1612 return GR->getOrCreateSPIRVType(PtrType, MIRBuilder);
1613}
1614
1616 MachineIRBuilder &MIRBuilder,
1617 SPIRVGlobalRegistry *GR) {
1618 MachineRegisterInfo *MRI = MIRBuilder.getMRI();
1619 const DataLayout &DL = MIRBuilder.getDataLayout();
1620 bool HasEvents = Call->Builtin->Name.find("events") != StringRef::npos;
1621 const SPIRVType *Int32Ty = GR->getOrCreateSPIRVIntegerType(32, MIRBuilder);
1622
1623 // Make vararg instructions before OpEnqueueKernel.
1624 // Local sizes arguments: Sizes of block invoke arguments. Clang generates
1625 // local size operands as an array, so we need to unpack them.
1626 SmallVector<Register, 16> LocalSizes;
1627 if (Call->Builtin->Name.find("_varargs") != StringRef::npos) {
1628 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
1629 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
1630 MachineInstr *GepMI = MRI->getUniqueVRegDef(GepReg);
1631 assert(isSpvIntrinsic(*GepMI, Intrinsic::spv_gep) &&
1632 GepMI->getOperand(3).isReg());
1633 Register ArrayReg = GepMI->getOperand(3).getReg();
1634 MachineInstr *ArrayMI = MRI->getUniqueVRegDef(ArrayReg);
1635 const Type *LocalSizeTy = getMachineInstrType(ArrayMI);
1636 assert(LocalSizeTy && "Local size type is expected");
1637 const uint64_t LocalSizeNum =
1638 cast<ArrayType>(LocalSizeTy)->getNumElements();
1639 unsigned SC = storageClassToAddressSpace(SPIRV::StorageClass::Generic);
1640 const LLT LLType = LLT::pointer(SC, GR->getPointerSize());
1641 const SPIRVType *PointerSizeTy = GR->getOrCreateSPIRVPointerType(
1642 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
1643 for (unsigned I = 0; I < LocalSizeNum; ++I) {
1644 Register Reg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
1645 MRI->setType(Reg, LLType);
1646 GR->assignSPIRVTypeToVReg(PointerSizeTy, Reg, MIRBuilder.getMF());
1647 auto GEPInst = MIRBuilder.buildIntrinsic(
1648 Intrinsic::spv_gep, ArrayRef<Register>{Reg}, true, false);
1649 GEPInst
1650 .addImm(GepMI->getOperand(2).getImm()) // In bound.
1651 .addUse(ArrayMI->getOperand(0).getReg()) // Alloca.
1652 .addUse(buildConstantIntReg(0, MIRBuilder, GR)) // Indices.
1653 .addUse(buildConstantIntReg(I, MIRBuilder, GR));
1654 LocalSizes.push_back(Reg);
1655 }
1656 }
1657
1658 // SPIRV OpEnqueueKernel instruction has 10+ arguments.
1659 auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
1660 .addDef(Call->ReturnRegister)
1662
1663 // Copy all arguments before block invoke function pointer.
1664 const unsigned BlockFIdx = HasEvents ? 6 : 3;
1665 for (unsigned i = 0; i < BlockFIdx; i++)
1666 MIB.addUse(Call->Arguments[i]);
1667
1668 // If there are no event arguments in the original call, add dummy ones.
1669 if (!HasEvents) {
1670 MIB.addUse(buildConstantIntReg(0, MIRBuilder, GR)); // Dummy num events.
1671 Register NullPtr = GR->getOrCreateConstNullPtr(
1672 MIRBuilder, getOrCreateSPIRVDeviceEventPointer(MIRBuilder, GR));
1673 MIB.addUse(NullPtr); // Dummy wait events.
1674 MIB.addUse(NullPtr); // Dummy ret event.
1675 }
1676
1677 MachineInstr *BlockMI = getBlockStructInstr(Call->Arguments[BlockFIdx], MRI);
1678 assert(BlockMI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
1679 // Invoke: Pointer to invoke function.
1680 MIB.addGlobalAddress(BlockMI->getOperand(1).getGlobal());
1681
1682 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
1683 // Param: Pointer to block literal.
1684 MIB.addUse(BlockLiteralReg);
1685
1686 Type *PType = const_cast<Type *>(getBlockStructType(BlockLiteralReg, MRI));
1687 // TODO: these numbers should be obtained from block literal structure.
1688 // Param Size: Size of block literal structure.
1689 MIB.addUse(buildConstantIntReg(DL.getTypeStoreSize(PType), MIRBuilder, GR));
  // Param Alignment: Alignment of block literal structure.
1691 MIB.addUse(
1692 buildConstantIntReg(DL.getPrefTypeAlign(PType).value(), MIRBuilder, GR));
1693
1694 for (unsigned i = 0; i < LocalSizes.size(); i++)
1695 MIB.addUse(LocalSizes[i]);
1696 return true;
1697}
1698
                                MachineIRBuilder &MIRBuilder,
                                SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  // Each enqueue-related OpenCL builtin maps to a single SPIR-V instruction
  // (or a dedicated build helper); dispatch on the opcode from the record.
  switch (Opcode) {
  case SPIRV::OpRetainEvent:
  case SPIRV::OpReleaseEvent:
    // One event operand, no result.
    MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
    return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
  case SPIRV::OpCreateUserEvent:
  case SPIRV::OpGetDefaultQueue:
    // No operands; only a typed result.
    return MIRBuilder.buildInstr(Opcode)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  case SPIRV::OpIsValidEvent:
    // Typed result plus the queried event operand.
    MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
    return MIRBuilder.buildInstr(Opcode)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Call->Arguments[0]);
  case SPIRV::OpSetUserEventStatus:
    // Two operands (event, status), no result.
    MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
    MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
    return MIRBuilder.buildInstr(Opcode)
        .addUse(Call->Arguments[0])
        .addUse(Call->Arguments[1]);
  case SPIRV::OpCaptureEventProfilingInfo:
    // Three operands, no result.
    MIRBuilder.getMRI()->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
    MIRBuilder.getMRI()->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
    MIRBuilder.getMRI()->setRegClass(Call->Arguments[2], &SPIRV::IDRegClass);
    return MIRBuilder.buildInstr(Opcode)
        .addUse(Call->Arguments[0])
        .addUse(Call->Arguments[1])
        .addUse(Call->Arguments[2]);
  case SPIRV::OpBuildNDRange:
    // Delegated: ndrange_t construction needs multi-instruction lowering.
    return buildNDRange(Call, MIRBuilder, GR);
  case SPIRV::OpEnqueueKernel:
    // Delegated: OpEnqueueKernel takes 10+ operands.
    return buildEnqueueKernel(Call, MIRBuilder, GR);
  default:
    return false;
  }
}
1745
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
  // Both async-copy instructions always execute at workgroup scope here.
  auto Scope = buildConstantIntReg(SPIRV::Scope::Workgroup, MIRBuilder, GR);

  switch (Opcode) {
  case SPIRV::OpGroupAsyncCopy:
    // Operands: result, scope, dest ptr, src ptr, num elements, a constant 1
    // (presumably the stride — the builtin exposes no strided form; confirm
    // against the SPIR-V OpGroupAsyncCopy operand list), and the event.
    return MIRBuilder.buildInstr(Opcode)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Scope)
        .addUse(Call->Arguments[0])
        .addUse(Call->Arguments[1])
        .addUse(Call->Arguments[2])
        .addUse(buildConstantIntReg(1, MIRBuilder, GR))
        .addUse(Call->Arguments[3]);
  case SPIRV::OpGroupWaitEvents:
    // Operands: scope, number of events, pointer to the event list.
    return MIRBuilder.buildInstr(Opcode)
        .addUse(Scope)
        .addUse(Call->Arguments[0])
        .addUse(Call->Arguments[1]);
  default:
    return false;
  }
}
1775
1776static bool generateConvertInst(const StringRef DemangledCall,
1777 const SPIRV::IncomingCall *Call,
1778 MachineIRBuilder &MIRBuilder,
1779 SPIRVGlobalRegistry *GR) {
1780 // Lookup the conversion builtin in the TableGen records.
1781 const SPIRV::ConvertBuiltin *Builtin =
1782 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
1783
1784 if (Builtin->IsSaturated)
1785 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
1786 SPIRV::Decoration::SaturatedConversion, {});
1787 if (Builtin->IsRounded)
1788 buildOpDecorate(Call->ReturnRegister, MIRBuilder,
1789 SPIRV::Decoration::FPRoundingMode,
1790 {(unsigned)Builtin->RoundingMode});
1791
1792 unsigned Opcode = SPIRV::OpNop;
1793 if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
1794 // Int -> ...
1795 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
1796 // Int -> Int
1797 if (Builtin->IsSaturated)
1798 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
1799 : SPIRV::OpSatConvertSToU;
1800 else
1801 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
1802 : SPIRV::OpSConvert;
1803 } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
1804 SPIRV::OpTypeFloat)) {
1805 // Int -> Float
1806 bool IsSourceSigned =
1807 DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
1808 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
1809 }
1810 } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
1811 SPIRV::OpTypeFloat)) {
1812 // Float -> ...
1813 if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt))
1814 // Float -> Int
1815 Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
1816 : SPIRV::OpConvertFToU;
1817 else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
1818 SPIRV::OpTypeFloat))
1819 // Float -> Float
1820 Opcode = SPIRV::OpFConvert;
1821 }
1822
1823 assert(Opcode != SPIRV::OpNop &&
1824 "Conversion between the types not implemented!");
1825
1826 MIRBuilder.buildInstr(Opcode)
1827 .addDef(Call->ReturnRegister)
1828 .addUse(GR->getSPIRVTypeID(Call->ReturnType))
1829 .addUse(Call->Arguments[0]);
1830 return true;
1831}
1832
                                        MachineIRBuilder &MIRBuilder,
                                        SPIRVGlobalRegistry *GR) {
  // Lookup the vector load/store builtin in the TableGen records.
  const SPIRV::VectorLoadStoreBuiltin *Builtin =
      SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
                                          Call->Builtin->Set);
  // Build extended instruction: vload*/vstore* lower to OpExtInst from the
  // OpenCL extended instruction set, selected by the record's Number.
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
          .addUse(GR->getSPIRVTypeID(Call->ReturnType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Builtin->Number);
  // Forward every original call argument unchanged.
  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);

  // Rounding mode should be passed as a last argument in the MI for builtins
  // like "vstorea_halfn_r".
  if (Builtin->IsRounded)
    MIB.addImm(static_cast<uint32_t>(Builtin->RoundingMode));
  return true;
}
1856
                                  MachineIRBuilder &MIRBuilder,
                                  SPIRVGlobalRegistry *GR) {
  // Lookup the instruction opcode in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
  bool IsLoad = Opcode == SPIRV::OpLoad;
  // Build the instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (IsLoad) {
    // Only loads produce a typed result; stores have none.
    MIB.addDef(Call->ReturnRegister);
    MIB.addUse(GR->getSPIRVTypeID(Call->ReturnType));
  }
  // Add a pointer to the value to load/store.
  MIB.addUse(Call->Arguments[0]);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  MRI->setRegClass(Call->Arguments[0], &SPIRV::IDRegClass);
  // Add a value to store.
  if (!IsLoad) {
    MIB.addUse(Call->Arguments[1]);
    MRI->setRegClass(Call->Arguments[1], &SPIRV::IDRegClass);
  }
  // Add optional memory attributes and an alignment. The optional operands
  // start at argument index 1 for loads and index 2 for stores (after the
  // stored value); each must be resolvable to a constant.
  unsigned NumArgs = Call->Arguments.size();
  if ((IsLoad && NumArgs >= 2) || NumArgs >= 3) {
    MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 1 : 2], MRI));
    MRI->setRegClass(Call->Arguments[IsLoad ? 1 : 2], &SPIRV::IDRegClass);
  }
  if ((IsLoad && NumArgs >= 3) || NumArgs >= 4) {
    MIB.addImm(getConstFromIntrinsic(Call->Arguments[IsLoad ? 2 : 3], MRI));
    MRI->setRegClass(Call->Arguments[IsLoad ? 2 : 3], &SPIRV::IDRegClass);
  }
  return true;
}
1892
/// Lowers a builtin function call using the provided \p DemangledCall skeleton
/// and external instruction \p Set.
1895namespace SPIRV {
1896std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
1897 SPIRV::InstructionSet::InstructionSet Set,
1898 MachineIRBuilder &MIRBuilder,
1899 const Register OrigRet, const Type *OrigRetTy,
1900 const SmallVectorImpl<Register> &Args,
1901 SPIRVGlobalRegistry *GR) {
1902 LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");
1903
1904 // SPIR-V type and return register.
1905 Register ReturnRegister = OrigRet;
1906 SPIRVType *ReturnType = nullptr;
1907 if (OrigRetTy && !OrigRetTy->isVoidTy()) {
1908 ReturnType = GR->assignTypeToVReg(OrigRetTy, OrigRet, MIRBuilder);
1909 if (!MIRBuilder.getMRI()->getRegClassOrNull(ReturnRegister))
1910 MIRBuilder.getMRI()->setRegClass(ReturnRegister, &SPIRV::IDRegClass);
1911 } else if (OrigRetTy && OrigRetTy->isVoidTy()) {
1912 ReturnRegister = MIRBuilder.getMRI()->createVirtualRegister(&IDRegClass);
1913 MIRBuilder.getMRI()->setType(ReturnRegister, LLT::scalar(32));
1914 ReturnType = GR->assignTypeToVReg(OrigRetTy, ReturnRegister, MIRBuilder);
1915 }
1916
1917 // Lookup the builtin in the TableGen records.
1918 std::unique_ptr<const IncomingCall> Call =
1919 lookupBuiltin(DemangledCall, Set, ReturnRegister, ReturnType, Args);
1920
1921 if (!Call) {
1922 LLVM_DEBUG(dbgs() << "Builtin record was not found!\n");
1923 return std::nullopt;
1924 }
1925
1926 // TODO: check if the provided args meet the builtin requirments.
1927 assert(Args.size() >= Call->Builtin->MinNumArgs &&
1928 "Too few arguments to generate the builtin");
1929 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
1930 LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");
1931
1932 // Match the builtin with implementation based on the grouping.
1933 switch (Call->Builtin->Group) {
1934 case SPIRV::Extended:
1935 return generateExtInst(Call.get(), MIRBuilder, GR);
1936 case SPIRV::Relational:
1937 return generateRelationalInst(Call.get(), MIRBuilder, GR);
1938 case SPIRV::Group:
1939 return generateGroupInst(Call.get(), MIRBuilder, GR);
1940 case SPIRV::Variable:
1941 return generateBuiltinVar(Call.get(), MIRBuilder, GR);
1942 case SPIRV::Atomic:
1943 return generateAtomicInst(Call.get(), MIRBuilder, GR);
1944 case SPIRV::Barrier:
1945 return generateBarrierInst(Call.get(), MIRBuilder, GR);
1946 case SPIRV::Dot:
1947 return generateDotOrFMulInst(Call.get(), MIRBuilder, GR);
1948 case SPIRV::GetQuery:
1949 return generateGetQueryInst(Call.get(), MIRBuilder, GR);
1950 case SPIRV::ImageSizeQuery:
1951 return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
1952 case SPIRV::ImageMiscQuery:
1953 return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
1954 case SPIRV::ReadImage:
1955 return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
1956 case SPIRV::WriteImage:
1957 return generateWriteImageInst(Call.get(), MIRBuilder, GR);
1958 case SPIRV::SampleImage:
1959 return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
1960 case SPIRV::Select:
1961 return generateSelectInst(Call.get(), MIRBuilder);
1962 case SPIRV::SpecConstant:
1963 return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
1964 case SPIRV::Enqueue:
1965 return generateEnqueueInst(Call.get(), MIRBuilder, GR);
1966 case SPIRV::AsyncCopy:
1967 return generateAsyncCopy(Call.get(), MIRBuilder, GR);
1968 case SPIRV::Convert:
1969 return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
1970 case SPIRV::VectorLoadStore:
1971 return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
1972 case SPIRV::LoadStore:
1973 return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
1974 }
1975 return false;
1976}
1977
1981};
1982
1983#define GET_BuiltinTypes_DECL
1984#define GET_BuiltinTypes_IMPL
1985
1989};
1990
1991#define GET_OpenCLTypes_DECL
1992#define GET_OpenCLTypes_IMPL
1993
1994#include "SPIRVGenTables.inc"
1995} // namespace SPIRV
1996
1997//===----------------------------------------------------------------------===//
1998// Misc functions for parsing builtin types.
1999//===----------------------------------------------------------------------===//
2000
  // Map a builtin type-parameter spelling to the corresponding LLVM type.
  // Only prefix matching is done, so e.g. "uint" is caught by the "int"
  // alternative's companion check below.
  if (Name.startswith("void"))
    return Type::getVoidTy(Context);
  else if (Name.startswith("int") || Name.startswith("uint"))
    // Signedness is not modelled in LLVM IR; both map to i32.
    return Type::getInt32Ty(Context);
  else if (Name.startswith("float"))
    return Type::getFloatTy(Context);
  else if (Name.startswith("half"))
    return Type::getHalfTy(Context);
  llvm_unreachable("Unable to recognize type!");
}
2012
2013static const TargetExtType *parseToTargetExtType(const Type *OpaqueType,
2014 MachineIRBuilder &MIRBuilder) {
2015 assert(isSpecialOpaqueType(OpaqueType) &&
2016 "Not a SPIR-V/OpenCL special opaque type!");
2017 assert(!OpaqueType->isTargetExtTy() &&
2018 "This already is SPIR-V/OpenCL TargetExtType!");
2019
2020 StringRef NameWithParameters = OpaqueType->getStructName();
2021
2022 // Pointers-to-opaque-structs representing OpenCL types are first translated
2023 // to equivalent SPIR-V types. OpenCL builtin type names should have the
2024 // following format: e.g. %opencl.event_t
2025 if (NameWithParameters.startswith("opencl.")) {
2026 const SPIRV::OpenCLType *OCLTypeRecord =
2027 SPIRV::lookupOpenCLType(NameWithParameters);
2028 if (!OCLTypeRecord)
2029 report_fatal_error("Missing TableGen record for OpenCL type: " +
2030 NameWithParameters);
2031 NameWithParameters = OCLTypeRecord->SpirvTypeLiteral;
2032 // Continue with the SPIR-V builtin type...
2033 }
2034
2035 // Names of the opaque structs representing a SPIR-V builtins without
2036 // parameters should have the following format: e.g. %spirv.Event
2037 assert(NameWithParameters.startswith("spirv.") &&
2038 "Unknown builtin opaque type!");
2039
2040 // Parameterized SPIR-V builtins names follow this format:
2041 // e.g. %spirv.Image._void_1_0_0_0_0_0_0, %spirv.Pipe._0
2042 if (NameWithParameters.find('_') == std::string::npos)
2043 return TargetExtType::get(OpaqueType->getContext(), NameWithParameters);
2044
2045 SmallVector<StringRef> Parameters;
2046 unsigned BaseNameLength = NameWithParameters.find('_') - 1;
2047 SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");
2048
2049 SmallVector<Type *, 1> TypeParameters;
2050 bool HasTypeParameter = !isDigit(Parameters[0][0]);
2051 if (HasTypeParameter)
2052 TypeParameters.push_back(parseTypeString(
2053 Parameters[0], MIRBuilder.getMF().getFunction().getContext()));
2054 SmallVector<unsigned> IntParameters;
2055 for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
2056 unsigned IntParameter = 0;
2057 bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
2058 assert(ValidLiteral &&
2059 "Invalid format of SPIR-V builtin parameter literal!");
2060 IntParameters.push_back(IntParameter);
2061 }
2062 return TargetExtType::get(OpaqueType->getContext(),
2063 NameWithParameters.substr(0, BaseNameLength),
2064 TypeParameters, IntParameters);
2065}
2066
2067//===----------------------------------------------------------------------===//
2068// Implementation functions for builtin types.
2069//===----------------------------------------------------------------------===//
2070
                                           const SPIRV::BuiltinType *TypeRecord,
                                           MachineIRBuilder &MIRBuilder,
                                           SPIRVGlobalRegistry *GR) {
  // A parameterless builtin type is fully described by the opcode stored in
  // its TableGen record.
  unsigned Opcode = TypeRecord->Opcode;
  // Create or get an existing type from GlobalRegistry.
  return GR->getOrCreateOpTypeByOpcode(ExtensionType, MIRBuilder, Opcode);
}
2079
                                 SPIRVGlobalRegistry *GR) {
  // OpTypeSampler takes no parameters; GlobalRegistry deduplicates it.
  // Create or get an existing type from GlobalRegistry.
  return GR->getOrCreateOpTypeSampler(MIRBuilder);
}
2085
2086static SPIRVType *getPipeType(const TargetExtType *ExtensionType,
2087 MachineIRBuilder &MIRBuilder,
2088 SPIRVGlobalRegistry *GR) {
2089 assert(ExtensionType->getNumIntParameters() == 1 &&
2090 "Invalid number of parameters for SPIR-V pipe builtin!");
2091 // Create or get an existing type from GlobalRegistry.
2092 return GR->getOrCreateOpTypePipe(MIRBuilder,
2093 SPIRV::AccessQualifier::AccessQualifier(
2094 ExtensionType->getIntParameter(0)));
2095}
2096
2097static SPIRVType *
2098getImageType(const TargetExtType *ExtensionType,
2099 const SPIRV::AccessQualifier::AccessQualifier Qualifier,
2100 MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR) {
2101 assert(ExtensionType->getNumTypeParameters() == 1 &&
2102 "SPIR-V image builtin type must have sampled type parameter!");
2103 const SPIRVType *SampledType =
2104 GR->getOrCreateSPIRVType(ExtensionType->getTypeParameter(0), MIRBuilder);
2105 assert(ExtensionType->getNumIntParameters() == 7 &&
2106 "Invalid number of parameters for SPIR-V image builtin!");
2107 // Create or get an existing type from GlobalRegistry.
2108 return GR->getOrCreateOpTypeImage(
2109 MIRBuilder, SampledType,
2110 SPIRV::Dim::Dim(ExtensionType->getIntParameter(0)),
2111 ExtensionType->getIntParameter(1), ExtensionType->getIntParameter(2),
2112 ExtensionType->getIntParameter(3), ExtensionType->getIntParameter(4),
2113 SPIRV::ImageFormat::ImageFormat(ExtensionType->getIntParameter(5)),
2114 Qualifier == SPIRV::AccessQualifier::WriteOnly
2115 ? SPIRV::AccessQualifier::WriteOnly
2116 : SPIRV::AccessQualifier::AccessQualifier(
2117 ExtensionType->getIntParameter(6)));
2118}
2119
                                      MachineIRBuilder &MIRBuilder,
                                      SPIRVGlobalRegistry *GR) {
  // Lower the underlying image first; sampled images are always built with
  // ReadOnly access here.
  SPIRVType *OpaqueImageType = getImageType(
      OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder, GR);
  // Create or get an existing type from GlobalRegistry.
  return GR->getOrCreateOpTypeSampledImage(OpaqueImageType, MIRBuilder);
}
2128
2129namespace SPIRV {
                            SPIRV::AccessQualifier::AccessQualifier AccessQual,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR) {
  // In LLVM IR, SPIR-V and OpenCL builtin types are represented as either
  // target(...) target extension types or pointers-to-opaque-structs. The
  // approach relying on structs is deprecated and works only in the non-opaque
  // pointer mode (-opaque-pointers=0).
  // In order to maintain compatibility with LLVM IR generated by older versions
  // of Clang and LLVM/SPIR-V Translator, the pointers-to-opaque-structs are
  // "translated" to target extension types. This translation is temporary and
  // will be removed in the future release of LLVM.
  const TargetExtType *BuiltinType = dyn_cast<TargetExtType>(OpaqueType);
  if (!BuiltinType)
    BuiltinType = parseToTargetExtType(OpaqueType, MIRBuilder);

  // Remember the vreg count so we can tell below whether lowering created a
  // brand-new OpType<...> instruction (which defines a new vreg).
  unsigned NumStartingVRegs = MIRBuilder.getMRI()->getNumVirtRegs();

  const StringRef Name = BuiltinType->getName();
  LLVM_DEBUG(dbgs() << "Lowering builtin type: " << Name << "\n");

  // Lookup the demangled builtin type in the TableGen records.
  const SPIRV::BuiltinType *TypeRecord = SPIRV::lookupBuiltinType(Name);
  if (!TypeRecord)
    report_fatal_error("Missing TableGen record for builtin type: " + Name);

  // "Lower" the BuiltinType into TargetType. The following get<...>Type methods
  // use the implementation details from TableGen records or TargetExtType
  // parameters to either create a new OpType<...> machine instruction or get an
  // existing equivalent SPIRVType from GlobalRegistry.
  SPIRVType *TargetType;
  switch (TypeRecord->Opcode) {
  case SPIRV::OpTypeImage:
    TargetType = getImageType(BuiltinType, AccessQual, MIRBuilder, GR);
    break;
  case SPIRV::OpTypePipe:
    TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
    break;
  case SPIRV::OpTypeDeviceEvent:
    TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
    break;
  case SPIRV::OpTypeSampler:
    TargetType = getSamplerType(MIRBuilder, GR);
    break;
  case SPIRV::OpTypeSampledImage:
    TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
    break;
  default:
    // Types without bespoke handling are fully described by their opcode.
    TargetType =
        getNonParameterizedType(BuiltinType, TypeRecord, MIRBuilder, GR);
    break;
  }

  // Emit OpName instruction if a new OpType<...> instruction was added
  // (equivalent type was not found in GlobalRegistry).
  if (NumStartingVRegs < MIRBuilder.getMRI()->getNumVirtRegs())
    buildOpName(GR->getSPIRVTypeID(TargetType), Name, MIRBuilder);

  return TargetType;
}
2190} // namespace SPIRV
2191} // namespace llvm
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
AMDGPU Lower Kernel Arguments
return RetTy
#define LLVM_DEBUG(X)
Definition: Debug.h:101
std::string Name
uint64_t Size
IRTranslator LLVM IR MI
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned Reg
IntegerType * Int32Ty
LLVMContext & Context
static bool isDigit(const char C)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file contains some functions that are useful when dealing with strings.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:470
APInt bitcastToAPInt() const
Definition: APFloat.h:1208
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Definition: APFloat.h:955
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
Definition: APInt.h:212
uint64_t getZExtValue() const
Get zero extended value.
Definition: APInt.h:1485
This class represents an incoming formal argument to a Function.
Definition: Argument.h:28
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
Definition: Type.cpp:648
@ ICMP_ULT
unsigned less than
Definition: InstrTypes.h:736
@ ICMP_EQ
equal
Definition: InstrTypes.h:732
const APFloat & getValueAPF() const
Definition: Constants.h:296
const APInt & getValue() const
Return the constant as an APInt value reference.
Definition: Constants.h:136
A parsed version of the target data layout string in and methods for querying it.
Definition: DataLayout.h:110
Tagged union holding either a T or a Error.
Definition: Error.h:468
Class to represent fixed width SIMD vectors.
Definition: DerivedTypes.h:536
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Definition: Function.cpp:320
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
Definition: Type.cpp:279
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
Definition: LowLevelType.h:56
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
Definition: LowLevelType.h:42
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
Definition: LowLevelType.h:49
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
Definition: LowLevelType.h:92
This is an important class for using LLVM in a threaded context.
Definition: LLVMContext.h:67
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
const DataLayout & getDataLayout() const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
Definition: MachineInstr.h:68
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:543
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:553
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
int64_t getImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
const MDNode * getMetadata() const
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
const TargetRegisterClass * getRegClassOrNull(Register Reg) const
Return the register class of Reg, or null if Reg has not been assigned a register class yet.
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isValid() const
Definition: Register.h:116
SPIRVType * getOrCreateOpTypePipe(MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual)
Register getOrCreateConsIntVector(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRVType * getOrCreateSPIRVTypeByName(StringRef TypeStr, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
Register buildConstantSampler(Register Res, unsigned AddrMode, unsigned Param, unsigned FilerMode, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
SPIRVType * getOrCreateOpTypeImage(MachineIRBuilder &MIRBuilder, SPIRVType *SampledType, SPIRV::Dim::Dim Dim, uint32_t Depth, uint32_t Arrayed, uint32_t Multisampled, uint32_t Sampled, SPIRV::ImageFormat::ImageFormat ImageFormat, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVType * getSPIRVTypeForVReg(Register VReg) const
SPIRVType * getOrCreateOpTypeByOpcode(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite, bool EmitIR=true)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, MachineFunction &MF)
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, bool HasLinkageTy, SPIRV::LinkageType::LinkageType LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
SPIRVType * getOrCreateOpTypeSampledImage(SPIRVType *ImageType, MachineIRBuilder &MIRBuilder)
SPIRVType * assignTypeToVReg(const Type *Type, Register VReg, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite, bool EmitIR=true)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
SPIRVType * getOrCreateOpTypeDeviceEvent(MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVPointerType(SPIRVType *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SClass=SPIRV::StorageClass::Function)
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
Register getOrCreateConsIntArray(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
SPIRVType * getOrCreateOpTypeSampler(MachineIRBuilder &MIRBuilder)
Register buildConstantInt(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr, bool EmitIR=true)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
size_t size() const
Definition: SmallVector.h:91
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:577
void push_back(const T &Elt)
Definition: SmallVector.h:416
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1200
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:50
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:704
std::string str() const
str - Get the contents as an std::string.
Definition: StringRef.h:222
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
Definition: StringRef.h:575
bool contains_insensitive(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:440
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
Definition: StringRef.h:688
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
Definition: StringRef.h:428
bool startswith(StringRef Prefix) const
Definition: StringRef.h:261
size_t find_first_of(char C, size_t From=0) const
Find the first character in the string that is C, or npos if not found.
Definition: StringRef.h:381
size_t find(char C, size_t From=0) const
Search for the first character C in the string.
Definition: StringRef.h:301
static constexpr size_t npos
Definition: StringRef.h:52
Class to represent struct types.
Definition: DerivedTypes.h:213
static StructType * getTypeByName(LLVMContext &C, StringRef Name)
Return the type with the specified name, or null if there is none by that name.
Definition: Type.cpp:633
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Definition: Type.cpp:514
Class to represent target extensions types, which are generally unintrospectable from target-independ...
Definition: DerivedTypes.h:749
unsigned getNumIntParameters() const
Definition: DerivedTypes.h:794
static TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types=std::nullopt, ArrayRef< unsigned > Ints=std::nullopt)
Return a target extension type having the specified name and optional type and integer parameters.
Definition: Type.cpp:797
Type * getTypeParameter(unsigned i) const
Definition: DerivedTypes.h:784
unsigned getNumTypeParameters() const
Definition: DerivedTypes.h:785
unsigned getIntParameter(unsigned i) const
Definition: DerivedTypes.h:793
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
static Type * getHalfTy(LLVMContext &C)
StringRef getStructName() const
static Type * getVoidTy(LLVMContext &C)
bool isTargetExtTy() const
Return true if this is a target extension type.
Definition: Type.h:207
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition: Type.h:129
static IntegerType * getInt32Ty(LLVMContext &C)
static Type * getFloatTy(LLVMContext &C)
bool isVoidTy() const
Return true if this is 'void'.
Definition: Type.h:140
LLVM Value Representation.
Definition: Value.h:74
Value(Type *Ty, unsigned scid)
Definition: Value.cpp:53
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
Definition: ilist_node.h:289
LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount)
Create a vector type that contains a defined type and has a specific number of elements.
Definition: Core.cpp:854
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
std::optional< bool > lowerBuiltin(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, MachineIRBuilder &MIRBuilder, const Register OrigRet, const Type *OrigRetTy, const SmallVectorImpl< Register > &Args, SPIRVGlobalRegistry *GR)
SPIRVType * lowerBuiltinType(const Type *OpaqueType, SPIRV::AccessQualifier::AccessQualifier AccessQual, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
StorageClass
Definition: XCOFF.h:169
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
Definition: SPIRVUtils.cpp:96
unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
Definition: SPIRVUtils.cpp:134
static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic compare-exchange instruction.
static bool generateGetQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildConstantIntReg(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, unsigned BitWidth=32)
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic flag instructions (e.g.
static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder, SPIRVType *VariableType, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType, Register Reg=Register(0))
Helper function for building a load instruction for loading a builtin global variable of BuiltinValue...
static SPIRV::SamplerFilterMode::SamplerFilterMode getSamplerFilterModeFromBitmask(unsigned Bitmask)
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic store instruction.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
Definition: SPIRVUtils.cpp:79
static const Type * getBlockStructType(Register ParamReg, MachineRegisterInfo *MRI)
static bool generateGroupInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim)
Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB, MachineRegisterInfo &MRI)
Helper external function for inserting an ASSIGN_TYPE instruction between Reg and its definition,...
static Register buildScopeReg(Register CLScopeRegister, SPIRV::Scope::Scope Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI)
static std::tuple< Register, SPIRVType * > buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType, SPIRVGlobalRegistry *GR)
Helper function building either a resulting scalar or vector bool register depending on the expected ...
static unsigned getNumSizeComponents(SPIRVType *imgType)
Helper function for obtaining the number of size components.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
Definition: SPIRVUtils.cpp:225
static SPIRVType * getSampledImageType(const TargetExtType *OpaqueType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
Definition: SPIRVUtils.cpp:174
static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSampleImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBarrierInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getImageType(const TargetExtType *ExtensionType, const SPIRV::AccessQualifier::AccessQualifier Qualifier, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
Definition: SPIRVUtils.cpp:113
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building barriers, i.e., memory/control ordering operations.
static bool generateAsyncCopy(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope)
static SPIRVType * getSamplerType(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
bool isSpecialOpaqueType(const Type *Ty)
Definition: SPIRVUtils.cpp:341
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
Definition: Debug.cpp:163
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, LLT LowLevelType, Register DestinationReg=Register(0))
Helper function for building a load instruction loading into the DestinationReg.
static bool generateEnqueueInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
Definition: Error.cpp:156
static bool buildSelectInst(MachineIRBuilder &MIRBuilder, Register ReturnRegister, Register SourceRegister, const SPIRVType *ReturnType, SPIRVGlobalRegistry *GR)
Helper function for building either a vector or scalar select instruction depending on the expected R...
static const Type * getMachineInstrType(MachineInstr *MI)
bool isSpvIntrinsic(MachineInstr &MI, Intrinsic::ID IntrinsicID)
Definition: SPIRVUtils.cpp:231
static SPIRV::SamplerAddressingMode::SamplerAddressingMode getSamplerAddressingModeFromBitmask(unsigned Bitmask)
static bool generateAtomicInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConvertInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildMemSemanticsReg(Register SemanticsRegister, Register PtrRegister, unsigned &Semantics, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI)
static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSelectInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic load instruction.
static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, uint64_t DefaultValue)
const Type * getTypedPtrEltType(const Type *Ty)
Definition: SPIRVUtils.cpp:328
static std::unique_ptr< const SPIRV::IncomingCall > lookupBuiltin(StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
Looks up the demangled builtin call in the SPIRVBuiltins.td records using the provided DemangledCall ...
MachineInstr * getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI)
Definition: SPIRVUtils.cpp:210
constexpr unsigned BitWidth
Definition: BitmaskEnum.h:184
const MachineInstr SPIRVType
static bool generateReadImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * getMDOperandAsType(const MDNode *N, unsigned I)
Definition: SPIRVUtils.cpp:237
static const TargetExtType * parseToTargetExtType(const Type *OpaqueType, MachineIRBuilder &MIRBuilder)
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic read-modify-write instruction.
static SPIRV::MemorySemantics::MemorySemantics getSPIRVMemSemantics(std::memory_order MemOrder)
static bool generateRelationalInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
Helper function for translating atomic init to OpStore.
static bool generateWriteImageInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getPipeType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Type * parseTypeString(const StringRef Name, LLVMContext &Context)
static bool generateExtInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildNDRange(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getNonParameterizedType(const TargetExtType *ExtensionType, const SPIRV::BuiltinType *TypeRecord, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static MachineInstr * getBlockStructInstr(Register ParamReg, MachineRegisterInfo *MRI)
static unsigned getSamplerParamFromBitmask(unsigned Bitmask)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
static bool generateBuiltinVar(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static const fltSemantics & IEEEsingle() LLVM_READNONE
Definition: APFloat.cpp:249
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
This class contains a discriminated union of information about pointers in memory operands,...
FPRoundingMode::FPRoundingMode RoundingMode
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
BuiltIn::BuiltIn Value
InstructionSet::InstructionSet Set
const SmallVectorImpl< Register > & Arguments
const std::string BuiltinName
const SPIRVType * ReturnType
const Register ReturnRegister
const DemangledBuiltin * Builtin
IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin, const Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
FPRoundingMode::FPRoundingMode RoundingMode