18#include "llvm/IR/IntrinsicsSPIRV.h"
22#define DEBUG_TYPE "spirv-builtins"
26#define GET_BuiltinGroup_DECL
27#include "SPIRVGenTables.inc"
31 InstructionSet::InstructionSet
Set;
37#define GET_DemangledBuiltins_DECL
38#define GET_DemangledBuiltins_IMPL
58 InstructionSet::InstructionSet
Set;
62#define GET_NativeBuiltins_DECL
63#define GET_NativeBuiltins_IMPL
81#define GET_GroupBuiltins_DECL
82#define GET_GroupBuiltins_IMPL
86 InstructionSet::InstructionSet
Set;
90using namespace BuiltIn;
91#define GET_GetBuiltins_DECL
92#define GET_GetBuiltins_IMPL
96 InstructionSet::InstructionSet
Set;
100#define GET_ImageQueryBuiltins_DECL
101#define GET_ImageQueryBuiltins_IMPL
105 InstructionSet::InstructionSet
Set;
114 InstructionSet::InstructionSet
Set;
120using namespace FPRoundingMode;
121#define GET_ConvertBuiltins_DECL
122#define GET_ConvertBuiltins_IMPL
124using namespace InstructionSet;
125#define GET_VectorLoadStoreBuiltins_DECL
126#define GET_VectorLoadStoreBuiltins_IMPL
128#define GET_CLMemoryScope_DECL
129#define GET_CLSamplerAddressingMode_DECL
130#define GET_CLMemoryFenceFlags_DECL
131#define GET_ExtendedBuiltins_DECL
132#include "SPIRVGenTables.inc"
150static std::unique_ptr<const SPIRV::IncomingCall>
152 SPIRV::InstructionSet::InstructionSet Set,
157 std::string BuiltinName =
163 if (BuiltinName.find(
'<') && BuiltinName.back() ==
'>') {
164 BuiltinName = BuiltinName.substr(0, BuiltinName.find(
'<'));
165 BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(
" ") + 1);
173 BuiltinName = BuiltinName.substr(0, BuiltinName.find(
"_R"));
178 DemangledCall.
slice(DemangledCall.
find(
'(') + 1, DemangledCall.
find(
')'));
179 BuiltinArgs.
split(BuiltinArgumentTypes,
',', -1,
false);
184 if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
185 return std::make_unique<SPIRV::IncomingCall>(
186 BuiltinName, Builtin, ReturnRegister, ReturnType,
Arguments);
191 if (BuiltinArgumentTypes.
size() >= 1) {
192 char FirstArgumentType = BuiltinArgumentTypes[0][0];
197 switch (FirstArgumentType) {
200 if (Set == SPIRV::InstructionSet::OpenCL_std)
202 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
210 if (Set == SPIRV::InstructionSet::OpenCL_std)
212 else if (Set == SPIRV::InstructionSet::GLSL_std_450)
219 if (Set == SPIRV::InstructionSet::OpenCL_std ||
220 Set == SPIRV::InstructionSet::GLSL_std_450)
226 if (!Prefix.empty() &&
227 (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
228 return std::make_unique<SPIRV::IncomingCall>(
229 BuiltinName, Builtin, ReturnRegister, ReturnType,
Arguments);
236 switch (FirstArgumentType) {
257 if (!Suffix.empty() &&
258 (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
259 return std::make_unique<SPIRV::IncomingCall>(
260 BuiltinName, Builtin, ReturnRegister, ReturnType,
Arguments);
275static std::tuple<Register, SPIRVType *>
281 if (ResultType->
getOpcode() == SPIRV::OpTypeVector) {
295 return std::make_tuple(ResultRegister, BoolType);
306 if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {
315 return MIRBuilder.
buildSelect(ReturnRegister, SourceRegister, TrueConst,
326 if (!DestinationReg.isValid()) {
327 DestinationReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
334 MIRBuilder.
buildLoad(DestinationReg, PtrRegister, PtrInfo,
Align());
335 return DestinationReg;
343 SPIRV::BuiltIn::BuiltIn BuiltinValue,
351 VariableType, MIRBuilder, SPIRV::StorageClass::Input);
357 SPIRV::StorageClass::Input,
nullptr,
true,
true,
358 SPIRV::LinkageType::Import, MIRBuilder,
false);
364 return LoadedRegister;
373 SPIRVGlobalRegistry *GR,
374 MachineIRBuilder &MIB,
375 MachineRegisterInfo &
MRI);
378static SPIRV::MemorySemantics::MemorySemantics
381 case std::memory_order::memory_order_relaxed:
382 return SPIRV::MemorySemantics::None;
383 case std::memory_order::memory_order_acquire:
384 return SPIRV::MemorySemantics::Acquire;
385 case std::memory_order::memory_order_release:
386 return SPIRV::MemorySemantics::Release;
387 case std::memory_order::memory_order_acq_rel:
388 return SPIRV::MemorySemantics::AcquireRelease;
389 case std::memory_order::memory_order_seq_cst:
390 return SPIRV::MemorySemantics::SequentiallyConsistent;
398 case SPIRV::CLMemoryScope::memory_scope_work_item:
399 return SPIRV::Scope::Invocation;
400 case SPIRV::CLMemoryScope::memory_scope_work_group:
401 return SPIRV::Scope::Workgroup;
402 case SPIRV::CLMemoryScope::memory_scope_device:
403 return SPIRV::Scope::Device;
404 case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
405 return SPIRV::Scope::CrossDevice;
406 case SPIRV::CLMemoryScope::memory_scope_sub_group:
407 return SPIRV::Scope::Subgroup;
427 if (CLScope ==
static_cast<unsigned>(Scope))
428 return CLScopeRegister;
437 std::memory_order Order =
443 if (Order == Semantics)
444 return SemanticsRegister;
452 assert(Call->Arguments.size() == 2 &&
453 "Need 2 arguments for atomic init translation");
456 .
addUse(Call->Arguments[0])
457 .
addUse(Call->Arguments[1]);
465 Register PtrRegister = Call->Arguments[0];
470 if (Call->Arguments.size() > 1)
471 ScopeRegister = Call->Arguments[1];
476 if (Call->Arguments.size() > 2) {
478 MemSemanticsReg = Call->Arguments[2];
481 SPIRV::MemorySemantics::SequentiallyConsistent |
487 .
addDef(Call->ReturnRegister)
501 Register PtrRegister = Call->Arguments[0];
503 SPIRV::MemorySemantics::SequentiallyConsistent |
511 .
addUse(Call->Arguments[1]);
521 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
522 bool IsCmpxchg = Call->Builtin->Name.contains(
"cmpxchg");
525 Register ObjectPtr = Call->Arguments[0];
526 Register ExpectedArg = Call->Arguments[1];
527 Register Desired = Call->Arguments[2];
529 LLT DesiredLLT =
MRI->getType(Desired);
532 SPIRV::OpTypePointer);
534 assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
535 : ExpectedType == SPIRV::OpTypePointer);
540 auto StorageClass =
static_cast<SPIRV::StorageClass::StorageClass
>(
548 ? SPIRV::MemorySemantics::None
549 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
552 ? SPIRV::MemorySemantics::None
553 : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
554 if (Call->Arguments.size() >= 4) {
555 assert(Call->Arguments.size() >= 5 &&
556 "Need 5+ args for explicit atomic cmpxchg");
563 if (MemOrdEq == MemSemEqual)
564 MemSemEqualReg = Call->Arguments[3];
565 if (MemOrdNeq == MemSemEqual)
566 MemSemUnequalReg = Call->Arguments[4];
570 if (!MemSemUnequalReg.
isValid())
574 auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
575 if (Call->Arguments.size() >= 6) {
576 assert(Call->Arguments.size() == 6 &&
577 "Extra args for explicit atomic cmpxchg");
578 auto ClScope =
static_cast<SPIRV::CLMemoryScope
>(
581 if (ClScope ==
static_cast<unsigned>(Scope))
582 ScopeReg = Call->Arguments[5];
592 Register Tmp = !IsCmpxchg ?
MRI->createGenericVirtualRegister(DesiredLLT)
593 : Call->ReturnRegister;
618 SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
621 if (Call->Arguments.size() >= 4) {
622 assert(Call->Arguments.size() == 4 &&
623 "Too many args for explicit atomic RMW");
630 Register PtrRegister = Call->Arguments[0];
631 unsigned Semantics = SPIRV::MemorySemantics::None;
634 if (Call->Arguments.size() >= 3)
638 if (!MemSemanticsReg.
isValid())
642 .
addDef(Call->ReturnRegister)
647 .
addUse(Call->Arguments[1]);
658 Register PtrRegister = Call->Arguments[0];
659 unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
662 if (Call->Arguments.size() >= 2)
666 if (!MemSemanticsReg.
isValid())
669 assert((Opcode != SPIRV::OpAtomicFlagClear ||
670 (Semantics != SPIRV::MemorySemantics::Acquire &&
671 Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
672 "Invalid memory order argument!");
674 SPIRV::Scope::Scope Scope = SPIRV::Scope::Device;
677 if (Call->Arguments.size() >= 3)
684 if (Opcode == SPIRV::OpAtomicFlagTestAndSet)
685 MIB.
addDef(Call->ReturnRegister)
699 unsigned MemSemantics = SPIRV::MemorySemantics::None;
701 if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
702 MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;
704 if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
705 MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;
707 if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
708 MemSemantics |= SPIRV::MemorySemantics::ImageMemory;
710 if (Opcode == SPIRV::OpMemoryBarrier) {
711 std::memory_order MemOrder =
715 MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;
719 if (MemFlags == MemSemantics)
720 MemSemanticsReg = Call->Arguments[0];
725 SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
726 SPIRV::Scope::Scope MemScope = Scope;
727 if (Call->Arguments.size() >= 2) {
729 ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
730 (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
731 "Extra args for explicitly scoped barrier");
732 Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
733 : Call->Arguments[1];
734 SPIRV::CLMemoryScope CLScope =
737 if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
738 (Opcode == SPIRV::OpMemoryBarrier))
741 if (CLScope ==
static_cast<unsigned>(Scope))
742 ScopeReg = Call->Arguments[1];
749 if (Opcode != SPIRV::OpMemoryBarrier)
751 MIB.
addUse(MemSemanticsReg);
757 case SPIRV::Dim::DIM_1D:
758 case SPIRV::Dim::DIM_Buffer:
760 case SPIRV::Dim::DIM_2D:
761 case SPIRV::Dim::DIM_Cube:
762 case SPIRV::Dim::DIM_Rect:
764 case SPIRV::Dim::DIM_3D:
777 return arrayed ? numComps + 1 : numComps;
790 SPIRV::lookupExtendedBuiltin(Builtin->
Name, Builtin->
Set)->Number;
795 .
addDef(Call->ReturnRegister)
800 for (
auto Argument : Call->Arguments)
811 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
815 std::tie(CompareRegister, RelationType) =
823 for (
auto Argument : Call->Arguments)
827 return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
828 Call->ReturnType, GR);
836 SPIRV::lookupGroupBuiltin(Builtin->
Name);
840 Register ConstRegister = Call->Arguments[0];
843 assert(ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT &&
844 "Only constant bool value args are supported");
851 Register GroupResultRegister = Call->ReturnRegister;
852 SPIRVType *GroupResultType = Call->ReturnType;
856 const bool HasBoolReturnTy =
862 std::tie(GroupResultRegister, GroupResultType) =
865 auto Scope = Builtin->
Name.
startswith(
"sub_group") ? SPIRV::Scope::Subgroup
866 : SPIRV::Scope::Workgroup;
871 .
addDef(GroupResultRegister)
877 if (Call->Arguments.size() > 0) {
879 for (
unsigned i = 1; i < Call->Arguments.size(); i++)
880 MIB.
addUse(Call->Arguments[i]);
886 Call->ReturnType, GR);
919 SPIRV::BuiltIn::BuiltIn BuiltinValue,
921 Register IndexRegister = Call->Arguments[0];
922 const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();
930 Register ToTruncate = Call->ReturnRegister;
933 bool IsConstantIndex =
934 IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;
939 Register defaultReg = Call->ReturnRegister;
940 if (PointerSize != ResultWidth) {
941 defaultReg =
MRI->createGenericVirtualRegister(
LLT::scalar(PointerSize));
944 ToTruncate = defaultReg;
948 MIRBuilder.
buildCopy(defaultReg, NewRegister);
956 Register Extracted = Call->ReturnRegister;
957 if (!IsConstantIndex || PointerSize != ResultWidth) {
958 Extracted =
MRI->createGenericVirtualRegister(
LLT::scalar(PointerSize));
968 if (!IsConstantIndex) {
989 Register SelectionResult = Call->ReturnRegister;
990 if (PointerSize != ResultWidth) {
997 MIRBuilder.
buildSelect(SelectionResult, CompareRegister, Extracted,
999 ToTruncate = SelectionResult;
1001 ToTruncate = Extracted;
1005 if (PointerSize != ResultWidth)
1015 SPIRV::BuiltIn::BuiltIn
Value =
1016 SPIRV::lookupGetBuiltin(Builtin->
Name, Builtin->
Set)->
Value;
1018 if (
Value == SPIRV::BuiltIn::GlobalInvocationId)
1024 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
1031 LLType, Call->ReturnRegister);
1040 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1043 case SPIRV::OpStore:
1045 case SPIRV::OpAtomicLoad:
1047 case SPIRV::OpAtomicStore:
1049 case SPIRV::OpAtomicCompareExchange:
1050 case SPIRV::OpAtomicCompareExchangeWeak:
1052 case SPIRV::OpAtomicIAdd:
1053 case SPIRV::OpAtomicISub:
1054 case SPIRV::OpAtomicOr:
1055 case SPIRV::OpAtomicXor:
1056 case SPIRV::OpAtomicAnd:
1057 case SPIRV::OpAtomicExchange:
1059 case SPIRV::OpMemoryBarrier:
1061 case SPIRV::OpAtomicFlagTestAndSet:
1062 case SPIRV::OpAtomicFlagClear:
1075 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1084 bool IsVec = Opcode == SPIRV::OpTypeVector;
1086 MIRBuilder.
buildInstr(IsVec ? SPIRV::OpDot : SPIRV::OpFMulS)
1087 .
addDef(Call->ReturnRegister)
1089 .
addUse(Call->Arguments[0])
1090 .
addUse(Call->Arguments[1]);
1098 SPIRV::BuiltIn::BuiltIn
Value =
1099 SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->
Value;
1100 uint64_t IsDefault = (
Value == SPIRV::BuiltIn::GlobalSize ||
1101 Value == SPIRV::BuiltIn::WorkgroupSize ||
1102 Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
1112 SPIRV::lookupImageQueryBuiltin(Builtin->
Name, Builtin->
Set)->Component;
1117 unsigned NumExpectedRetComponents =
RetTy->getOpcode() == SPIRV::OpTypeVector
1118 ?
RetTy->getOperand(2).getImm()
1123 Register QueryResult = Call->ReturnRegister;
1124 SPIRVType *QueryResultType = Call->ReturnType;
1125 if (NumExpectedRetComponents != NumActualRetComponents) {
1130 IntTy, NumActualRetComponents, MIRBuilder);
1135 IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
1139 .
addUse(Call->Arguments[0]);
1142 if (NumExpectedRetComponents == NumActualRetComponents)
1144 if (NumExpectedRetComponents == 1) {
1146 unsigned ExtractedComposite =
1147 Component == 3 ? NumActualRetComponents - 1 : Component;
1148 assert(ExtractedComposite < NumActualRetComponents &&
1149 "Invalid composite index!");
1150 MIRBuilder.
buildInstr(SPIRV::OpCompositeExtract)
1151 .
addDef(Call->ReturnRegister)
1154 .
addImm(ExtractedComposite);
1157 auto MIB = MIRBuilder.
buildInstr(SPIRV::OpVectorShuffle)
1158 .
addDef(Call->ReturnRegister)
1162 for (
unsigned i = 0; i < NumExpectedRetComponents; ++i)
1163 MIB.
addImm(i < NumActualRetComponents ? i : 0xffffffff);
1171 assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
1172 "Image samples query result must be of int type!");
1177 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1179 Register Image = Call->Arguments[0];
1180 SPIRV::Dim::Dim ImageDimensionality =
static_cast<SPIRV::Dim::Dim
>(
1184 case SPIRV::OpImageQuerySamples:
1185 assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
1186 "Image must be of 2D dimensionality");
1188 case SPIRV::OpImageQueryLevels:
1189 assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
1190 ImageDimensionality == SPIRV::Dim::DIM_2D ||
1191 ImageDimensionality == SPIRV::Dim::DIM_3D ||
1192 ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
1193 "Image must be of 1D/2D/3D/Cube dimensionality");
1198 .
addDef(Call->ReturnRegister)
1205static SPIRV::SamplerAddressingMode::SamplerAddressingMode
1207 switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
1208 case SPIRV::CLK_ADDRESS_CLAMP:
1209 return SPIRV::SamplerAddressingMode::Clamp;
1210 case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
1211 return SPIRV::SamplerAddressingMode::ClampToEdge;
1212 case SPIRV::CLK_ADDRESS_REPEAT:
1213 return SPIRV::SamplerAddressingMode::Repeat;
1214 case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
1215 return SPIRV::SamplerAddressingMode::RepeatMirrored;
1216 case SPIRV::CLK_ADDRESS_NONE:
1217 return SPIRV::SamplerAddressingMode::None;
1224 return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
1227static SPIRV::SamplerFilterMode::SamplerFilterMode
1229 if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
1230 return SPIRV::SamplerFilterMode::Linear;
1231 if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
1232 return SPIRV::SamplerFilterMode::Nearest;
1233 return SPIRV::SamplerFilterMode::Nearest;
1240 Register Image = Call->Arguments[0];
1244 Register Sampler = Call->Arguments[1];
1258 Register SampledImage =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1269 bool NeedsExtraction =
false;
1270 if (TempType->
getOpcode() != SPIRV::OpTypeVector) {
1273 NeedsExtraction =
true;
1276 Register TempRegister =
MRI->createGenericVirtualRegister(LLType);
1279 MIRBuilder.
buildInstr(SPIRV::OpImageSampleExplicitLod)
1280 .
addDef(NeedsExtraction ? TempRegister : Call->ReturnRegister)
1283 .
addUse(Call->Arguments[2])
1284 .
addImm(SPIRV::ImageOperand::Lod)
1287 if (NeedsExtraction)
1288 MIRBuilder.
buildInstr(SPIRV::OpCompositeExtract)
1289 .
addDef(Call->ReturnRegister)
1295 .
addDef(Call->ReturnRegister)
1298 .
addUse(Call->Arguments[1])
1299 .
addImm(SPIRV::ImageOperand::Sample)
1300 .
addUse(Call->Arguments[2]);
1303 .
addDef(Call->ReturnRegister)
1306 .
addUse(Call->Arguments[1]);
1315 .
addUse(Call->Arguments[0])
1316 .
addUse(Call->Arguments[1])
1317 .
addUse(Call->Arguments[2]);
1325 if (Call->Builtin->Name.contains_insensitive(
1326 "__translate_sampler_initializer")) {
1333 return Sampler.isValid();
1334 }
else if (Call->Builtin->Name.contains_insensitive(
"__spirv_SampledImage")) {
1336 Register Image = Call->Arguments[0];
1341 Call->ReturnRegister.isValid()
1342 ? Call->ReturnRegister
1348 .
addUse(Call->Arguments[1]);
1350 }
else if (Call->Builtin->Name.contains_insensitive(
1351 "__spirv_ImageSampleExplicitLod")) {
1353 std::string ReturnType = DemangledCall.
str();
1354 if (DemangledCall.
contains(
"_R")) {
1355 ReturnType = ReturnType.substr(ReturnType.find(
"_R") + 2);
1356 ReturnType = ReturnType.substr(0, ReturnType.find(
'('));
1359 MIRBuilder.
buildInstr(SPIRV::OpImageSampleExplicitLod)
1360 .
addDef(Call->ReturnRegister)
1362 .
addUse(Call->Arguments[0])
1363 .
addUse(Call->Arguments[1])
1364 .
addImm(SPIRV::ImageOperand::Lod)
1365 .
addUse(Call->Arguments[3]);
1373 MIRBuilder.
buildSelect(Call->ReturnRegister, Call->Arguments[0],
1374 Call->Arguments[1], Call->Arguments[2]);
1384 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1388 case SPIRV::OpSpecConstant: {
1392 buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,
1395 Register ConstRegister = Call->Arguments[1];
1398 (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
1399 Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
1400 "Argument should be either an int or floating-point constant");
1403 if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
1404 assert(ConstOperand.
isCImm() &&
"Int constant operand is expected");
1406 ? SPIRV::OpSpecConstantTrue
1407 : SPIRV::OpSpecConstantFalse;
1410 .
addDef(Call->ReturnRegister)
1413 if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
1414 if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
1421 case SPIRV::OpSpecConstantComposite: {
1423 .
addDef(Call->ReturnRegister)
1425 for (
unsigned i = 0; i < Call->Arguments.size(); i++)
1426 MIB.
addUse(Call->Arguments[i]);
1442 assert(
MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
1443 MI->getOperand(1).isReg());
1444 Register BitcastReg =
MI->getOperand(1).getReg();
1472 Register ValueReg =
MI->getOperand(0).getReg();
1477 assert(Ty &&
"Type is expected");
1489 if (
MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
1492 "Blocks in OpenCL C must be traceable to allocation site");
1517 bool HasEvents = Call->Builtin->Name.find(
"events") !=
StringRef::npos;
1525 const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
1526 Register GepReg = Call->Arguments[LocalSizeArrayIdx];
1533 assert(LocalSizeTy &&
"Local size type is expected");
1535 cast<ArrayType>(LocalSizeTy)->getNumElements();
1539 Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
1540 for (
unsigned I = 0;
I < LocalSizeNum; ++
I) {
1557 auto MIB = MIRBuilder.
buildInstr(SPIRV::OpEnqueueKernel)
1558 .
addDef(Call->ReturnRegister)
1562 const unsigned BlockFIdx = HasEvents ? 6 : 3;
1563 for (
unsigned i = 0; i < BlockFIdx; i++)
1564 MIB.addUse(Call->Arguments[i]);
1571 MIB.addUse(NullPtr);
1572 MIB.addUse(NullPtr);
1580 Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];
1582 MIB.addUse(BlockLiteralReg);
1592 for (
unsigned i = 0; i < LocalSizes.
size(); i++)
1593 MIB.addUse(LocalSizes[i]);
1603 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1606 case SPIRV::OpRetainEvent:
1607 case SPIRV::OpReleaseEvent:
1609 case SPIRV::OpCreateUserEvent:
1610 case SPIRV::OpGetDefaultQueue:
1612 .
addDef(Call->ReturnRegister)
1614 case SPIRV::OpIsValidEvent:
1616 .
addDef(Call->ReturnRegister)
1618 .
addUse(Call->Arguments[0]);
1619 case SPIRV::OpSetUserEventStatus:
1621 .
addUse(Call->Arguments[0])
1622 .
addUse(Call->Arguments[1]);
1623 case SPIRV::OpCaptureEventProfilingInfo:
1625 .
addUse(Call->Arguments[0])
1626 .
addUse(Call->Arguments[1])
1627 .
addUse(Call->Arguments[2]);
1628 case SPIRV::OpBuildNDRange: {
1635 Register TmpReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1639 unsigned NumArgs = Call->Arguments.size();
1641 Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
1643 NumArgs == 2 ?
Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
1644 Register GlobalWorkOffset = NumArgs <= 3 ?
Register(0) : Call->Arguments[1];
1648 if (SpvTy->
getOpcode() == SPIRV::OpTypePointer) {
1654 unsigned Size = Call->Builtin->Name.equals(
"ndrange_3D") ? 3 : 2;
1660 GlobalWorkSize =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1662 MIRBuilder.
getMF());
1672 LocalWorkSize = Const;
1673 if (!GlobalWorkOffset.
isValid())
1674 GlobalWorkOffset = Const;
1681 .
addUse(GlobalWorkOffset);
1683 .
addUse(Call->Arguments[0])
1686 case SPIRV::OpEnqueueKernel:
1699 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1703 case SPIRV::OpGroupAsyncCopy:
1705 .
addDef(Call->ReturnRegister)
1708 .
addUse(Call->Arguments[0])
1709 .
addUse(Call->Arguments[1])
1710 .
addUse(Call->Arguments[2])
1712 .
addUse(Call->Arguments[3]);
1713 case SPIRV::OpGroupWaitEvents:
1716 .
addUse(Call->Arguments[0])
1717 .
addUse(Call->Arguments[1]);
1729 SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);
1733 SPIRV::Decoration::SaturatedConversion, {});
1736 SPIRV::Decoration::FPRoundingMode,
1737 {(unsigned)Builtin->RoundingMode});
1739 unsigned Opcode = SPIRV::OpNop;
1746 : SPIRV::OpSatConvertSToU;
1749 : SPIRV::OpSConvert;
1751 SPIRV::OpTypeFloat)) {
1753 bool IsSourceSigned =
1755 Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
1758 SPIRV::OpTypeFloat)) {
1763 : SPIRV::OpConvertFToU;
1765 SPIRV::OpTypeFloat))
1767 Opcode = SPIRV::OpFConvert;
1770 assert(Opcode != SPIRV::OpNop &&
1771 "Conversion between the types not implemented!");
1774 .
addDef(Call->ReturnRegister)
1776 .
addUse(Call->Arguments[0]);
1785 SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
1786 Call->Builtin->Set);
1790 .
addDef(Call->ReturnRegister)
1792 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
1794 for (
auto Argument : Call->Arguments)
1810 SPIRV::lookupNativeBuiltin(Builtin->
Name, Builtin->
Set)->Opcode;
1811 bool IsLoad = Opcode == SPIRV::OpLoad;
1815 MIB.
addDef(Call->ReturnRegister);
1819 MIB.
addUse(Call->Arguments[0]);
1822 MIB.
addUse(Call->Arguments[1]);
1825 unsigned NumArgs = Call->Arguments.size();
1826 if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)
1828 if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)
1837 SPIRV::InstructionSet::InstructionSet Set,
1842 LLVM_DEBUG(
dbgs() <<
"Lowering builtin call: " << DemangledCall <<
"\n");
1847 if (OrigRetTy && !OrigRetTy->
isVoidTy()) {
1849 }
else if (OrigRetTy && OrigRetTy->
isVoidTy()) {
1856 std::unique_ptr<const IncomingCall> Call =
1857 lookupBuiltin(DemangledCall, Set, ReturnRegister, ReturnType, Args);
1861 return std::nullopt;
1865 assert(Args.size() >= Call->Builtin->MinNumArgs &&
1866 "Too few arguments to generate the builtin");
1867 if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
1868 LLVM_DEBUG(
dbgs() <<
"More arguments provided than required!\n");
1871 switch (Call->Builtin->Group) {
1872 case SPIRV::Extended:
1874 case SPIRV::Relational:
1878 case SPIRV::Variable:
1882 case SPIRV::Barrier:
1886 case SPIRV::GetQuery:
1888 case SPIRV::ImageSizeQuery:
1890 case SPIRV::ImageMiscQuery:
1892 case SPIRV::ReadImage:
1894 case SPIRV::WriteImage:
1896 case SPIRV::SampleImage:
1900 case SPIRV::SpecConstant:
1902 case SPIRV::Enqueue:
1904 case SPIRV::AsyncCopy:
1906 case SPIRV::Convert:
1908 case SPIRV::VectorLoadStore:
1910 case SPIRV::LoadStore:
1921#define GET_DemangledTypes_DECL
1922#define GET_DemangledTypes_IMPL
1941using namespace AccessQualifier;
1943using namespace ImageFormat;
1944#define GET_ImageTypes_DECL
1945#define GET_ImageTypes_IMPL
1946#define GET_PipeTypes_DECL
1947#define GET_PipeTypes_IMPL
1948#include "SPIRVGenTables.inc"
1957 if (
Name.startswith(
"opencl."))
1958 return SPIRV::lookupBuiltinType(
Name);
1959 if (!
Name.startswith(
"spirv."))
1965 unsigned BaseTypeNameLength =
1967 return SPIRV::lookupBuiltinType(
Name.substr(0, BaseTypeNameLength).str());
1970static std::unique_ptr<const SPIRV::ImageType>
1972 if (
Name.startswith(
"opencl.")) {
1977 if (!
Name.startswith(
"spirv."))
1983 StringRef TypeParametersString =
Name.substr(strlen(
"spirv.Image."));
1985 SplitString(TypeParametersString, TypeParameters,
"_");
1987 "Wrong number of literals in SPIR-V builtin image type");
1989 StringRef SampledType = TypeParameters[0];
1990 unsigned Dim,
Depth, Arrayed, Multisampled, Sampled,
Format, AccessQual;
1991 bool AreParameterLiteralsValid =
1992 !(TypeParameters[1].getAsInteger(10, Dim) ||
1993 TypeParameters[2].getAsInteger(10,
Depth) ||
1994 TypeParameters[3].getAsInteger(10, Arrayed) ||
1995 TypeParameters[4].getAsInteger(10, Multisampled) ||
1996 TypeParameters[5].getAsInteger(10, Sampled) ||
1997 TypeParameters[6].getAsInteger(10,
Format) ||
1998 TypeParameters[7].getAsInteger(10, AccessQual));
1999 assert(AreParameterLiteralsValid &&
2000 "Invalid format of SPIR-V image type parameter literals.");
2003 Name, SampledType, SPIRV::AccessQualifier::AccessQualifier(AccessQual),
2004 SPIRV::Dim::Dim(Dim),
static_cast<bool>(Arrayed),
2005 static_cast<bool>(
Depth),
static_cast<bool>(Multisampled),
2006 static_cast<bool>(Sampled), SPIRV::ImageFormat::ImageFormat(
Format)});
2009static std::unique_ptr<const SPIRV::PipeType>
2011 if (
Name.startswith(
"opencl.")) {
2016 if (!
Name.startswith(
"spirv."))
2022 if (
Name.endswith(
"_0"))
2023 return std::unique_ptr<SPIRV::PipeType>(
2025 if (
Name.endswith(
"_1"))
2026 return std::unique_ptr<SPIRV::PipeType>(
2028 if (
Name.endswith(
"_2"))
2029 return std::unique_ptr<SPIRV::PipeType>(
2042 unsigned Opcode = TypeRecord->
Opcode;
2058 std::unique_ptr<const SPIRV::PipeType>
Record =
2066 SPIRV::AccessQualifier::AccessQualifier AccessQual,
2070 std::unique_ptr<const SPIRV::ImageType>
Record =
2076 MIRBuilder, SampledType,
Record.get()->Dimensionality,
2079 AccessQual == SPIRV::AccessQualifier::WriteOnly
2080 ? SPIRV::AccessQualifier::WriteOnly
2081 :
Record.get()->Qualifier);
2088 OpaqueType->
getName().
substr(strlen(
"spirv.SampledImage."));
2091 Context,
"spirv.Image." + TypeParametersString.
str());
2099 SPIRV::AccessQualifier::AccessQualifier AccessQual,
2103 "Structs representing builtin types must have a parsable name");
2119 switch (TypeRecord->
Opcode) {
2120 case SPIRV::OpTypeImage:
2121 TargetType =
getImageType(OpaqueType, AccessQual, MIRBuilder, GR);
2123 case SPIRV::OpTypePipe:
2124 TargetType =
getPipeType(OpaqueType, MIRBuilder, GR);
2126 case SPIRV::OpTypeDeviceEvent:
2129 case SPIRV::OpTypeSampler:
2132 case SPIRV::OpTypeSampledImage:
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
AMDGPU Lower Kernel Arguments
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
APInt bitcastToAPInt() const
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
uint64_t getZExtValue() const
Get zero extended value.
static APInt getAllOnesValue(unsigned numBits)
NOTE: This is soft-deprecated. Please use getAllOnes() instead.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
@ ICMP_ULT
unsigned less than
const APFloat & getValueAPF() const
const APInt & getValue() const
Return the constant as an APInt value reference.
A parsed version of the target data layout string in and methods for querying it.
Tagged union holding either a T or a Error.
Class to represent fixed width SIMD vectors.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
This is an important class for using LLVM in a threaded context.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects)
Build and insert either a G_INTRINSIC (if HasSideEffects is false) or G_INTRINSIC_W_SIDE_EFFECTS inst...
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
const DataLayout & getDataLayout() const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
const MDNode * getMetadata() const
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
SPIRVType * getOrCreateOpTypePipe(MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual)
Register getOrCreateConsIntVector(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRVType * getOrCreateSPIRVTypeByName(StringRef TypeStr, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
Register buildConstantSampler(Register Res, unsigned AddrMode, unsigned Param, unsigned FilerMode, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
SPIRVType * getOrCreateOpTypeImage(MachineIRBuilder &MIRBuilder, SPIRVType *SampledType, SPIRV::Dim::Dim Dim, uint32_t Depth, uint32_t Arrayed, uint32_t Multisampled, uint32_t Sampled, SPIRV::ImageFormat::ImageFormat ImageFormat, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVType * getSPIRVTypeForVReg(Register VReg) const
unsigned getPointerSize() const
SPIRVType * getOrCreateOpTypeByOpcode(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite, bool EmitIR=true)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, MachineFunction &MF)
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, bool HasLinkageTy, SPIRV::LinkageType::LinkageType LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
SPIRVType * getOrCreateOpTypeSampledImage(SPIRVType *ImageType, MachineIRBuilder &MIRBuilder)
SPIRVType * assignTypeToVReg(const Type *Type, Register VReg, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite, bool EmitIR=true)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
SPIRVType * getOrCreateOpTypeDeviceEvent(MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVPointerType(SPIRVType *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SClass=SPIRV::StorageClass::Function)
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
Register getOrCreateConsIntArray(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
SPIRVType * getOrCreateOpTypeSampler(MachineIRBuilder &MIRBuilder)
Register buildConstantInt(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr, bool EmitIR=true)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
std::string str() const
str - Get the contents as an std::string.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool contains_insensitive(StringRef Other) const
Return true if the given string is a substring of *this, compared case-insensitively, and false otherwise.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
bool startswith(StringRef Prefix) const
size_t find_first_of(char C, size_t From=0) const
Find the first character in the string that is C, or npos if not found.
size_t find(char C, size_t From=0) const
Search for the first character C in the string.
static constexpr size_t npos
Class to represent struct types.
static StructType * getTypeByName(LLVMContext &C, StringRef Name)
Return the type with the specified name, or null if there is none by that name.
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
bool hasName() const
Return true if this is a named struct that has a non-empty name.
StringRef getName() const
Return the name for this struct type if it has an identity.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isVoidTy() const
Return true if this is 'void'.
LLVM Value Representation.
Value(Type *Ty, unsigned scid)
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount)
Create a vector type that contains a defined type and has a specific number of elements.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
SPIRVType * lowerBuiltinType(const StructType *OpaqueType, SPIRV::AccessQualifier::AccessQualifier AccessQual, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
std::optional< bool > lowerBuiltin(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, MachineIRBuilder &MIRBuilder, const Register OrigRet, const Type *OrigRetTy, const SmallVectorImpl< Register > &Args, SPIRVGlobalRegistry *GR)
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
static SPIRVType * getImageType(const StructType *OpaqueType, SPIRV::AccessQualifier::AccessQualifier AccessQual, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static const SPIRV::DemangledType * findBuiltinType(StringRef Name)
static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic compare-exchange instruction.
static bool generateGetQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildConstantIntReg(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, unsigned BitWidth=32)
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic flag instructions (e.g. OpAtomicFlagTestAndSet).
static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder, SPIRVType *VariableType, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType, Register Reg=Register(0))
Helper function for building a load instruction for loading a builtin global variable of BuiltinValue...
static SPIRV::SamplerFilterMode::SamplerFilterMode getSamplerFilterModeFromBitmask(unsigned Bitmask)
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic store instruction.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
static const Type * getBlockStructType(Register ParamReg, MachineRegisterInfo *MRI)
static bool generateGroupInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildMemSemanticsReg(Register SemanticsRegister, Register PtrRegister, const MachineRegisterInfo *MRI, SPIRVGlobalRegistry *GR)
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim)
Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB, MachineRegisterInfo &MRI)
Helper external function for inserting an ASSIGN_TYPE instruction between Reg and its definition,...
static std::tuple< Register, SPIRVType * > buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType, SPIRVGlobalRegistry *GR)
Helper function building either a resulting scalar or vector bool register depending on the expected ...
static unsigned getNumSizeComponents(SPIRVType *imgType)
Helper function for obtaining the number of size components.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSampleImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBarrierInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getSampledImageType(const StructType *OpaqueType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getPipeType(const StructType *OpaqueType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building barriers, i.e., memory/control ordering operations.
static bool generateAsyncCopy(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static std::unique_ptr< const SPIRV::PipeType > lookupOrParseBuiltinPipeType(StringRef Name)
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope)
static SPIRVType * getSamplerType(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, LLT LowLevelType, Register DestinationReg=Register(0))
Helper function for building a load instruction loading into the DestinationReg.
static bool generateEnqueueInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
static bool buildSelectInst(MachineIRBuilder &MIRBuilder, Register ReturnRegister, Register SourceRegister, const SPIRVType *ReturnType, SPIRVGlobalRegistry *GR)
Helper function for building either a vector or scalar select instruction depending on the expected R...
static const Type * getMachineInstrType(MachineInstr *MI)
bool isSpvIntrinsic(MachineInstr &MI, Intrinsic::ID IntrinsicID)
static SPIRV::SamplerAddressingMode::SamplerAddressingMode getSamplerAddressingModeFromBitmask(unsigned Bitmask)
static bool generateAtomicInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConvertInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI)
static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSelectInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic load instruction.
static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getNonParametrizedType(const StructType *OpaqueType, const SPIRV::DemangledType *TypeRecord, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, uint64_t DefaultValue)
const Type * getTypedPtrEltType(const Type *Ty)
static std::unique_ptr< const SPIRV::IncomingCall > lookupBuiltin(StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
Looks up the demangled builtin call in the SPIRVBuiltins.td records using the provided DemangledCall ...
MachineInstr * getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI)
constexpr unsigned BitWidth
const MachineInstr SPIRVType
static bool generateReadImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * getMDOperandAsType(const MDNode *N, unsigned I)
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic read-modify-write (RMW) instruction.
static SPIRV::MemorySemantics::MemorySemantics getSPIRVMemSemantics(std::memory_order MemOrder)
static bool generateRelationalInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
Helper function for translating atomic init to OpStore.
static bool generateWriteImageInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static MachineInstr * getBlockStructInstr(Register ParamReg, MachineRegisterInfo *MRI)
static std::unique_ptr< const SPIRV::ImageType > lookupOrParseBuiltinImageType(StringRef Name)
static Register buildScopeReg(Register CLScopeRegister, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, const MachineRegisterInfo *MRI)
static unsigned getSamplerParamFromBitmask(unsigned Bitmask)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
static bool generateBuiltinVar(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static const fltSemantics & IEEEsingle() LLVM_READNONE
This struct is a compact representation of a valid (non-zero power of two) alignment.
This class contains a discriminated union of information about pointers in memory operands,...
FPRoundingMode::FPRoundingMode RoundingMode
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
AccessQualifier::AccessQualifier Qualifier
ImageFormat::ImageFormat Format
const SmallVectorImpl< Register > & Arguments
const std::string BuiltinName
const SPIRVType * ReturnType
const Register ReturnRegister
const DemangledBuiltin * Builtin
IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin, const Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
InstructionSet::InstructionSet Set
AccessQualifier::AccessQualifier Qualifier
InstructionSet::InstructionSet Set
FPRoundingMode::FPRoundingMode RoundingMode