#include "llvm/IR/IntrinsicsSPIRV.h"

#define DEBUG_TYPE "spirv-builtins"

#define GET_BuiltinGroup_DECL
#include "SPIRVGenTables.inc"
InstructionSet::InstructionSet Set;

#define GET_DemangledBuiltins_DECL
#define GET_DemangledBuiltins_IMPL

InstructionSet::InstructionSet Set;

#define GET_NativeBuiltins_DECL
#define GET_NativeBuiltins_IMPL

#define GET_GroupBuiltins_DECL
#define GET_GroupBuiltins_IMPL

#define GET_IntelSubgroupsBuiltins_DECL
#define GET_IntelSubgroupsBuiltins_IMPL

#define GET_AtomicFloatingBuiltins_DECL
#define GET_AtomicFloatingBuiltins_IMPL

#define GET_GroupUniformBuiltins_DECL
#define GET_GroupUniformBuiltins_IMPL

InstructionSet::InstructionSet Set;

using namespace BuiltIn;
#define GET_GetBuiltins_DECL
#define GET_GetBuiltins_IMPL

InstructionSet::InstructionSet Set;

#define GET_ImageQueryBuiltins_DECL
#define GET_ImageQueryBuiltins_IMPL

InstructionSet::InstructionSet Set;

InstructionSet::InstructionSet Set;

using namespace FPRoundingMode;
#define GET_ConvertBuiltins_DECL
#define GET_ConvertBuiltins_IMPL

using namespace InstructionSet;
#define GET_VectorLoadStoreBuiltins_DECL
#define GET_VectorLoadStoreBuiltins_IMPL

#define GET_CLMemoryScope_DECL
#define GET_CLSamplerAddressingMode_DECL
#define GET_CLMemoryFenceFlags_DECL
#define GET_ExtendedBuiltins_DECL
#include "SPIRVGenTables.inc"
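// Note: each GET_*_DECL/GET_*_IMPL pair pulls a TableGen-generated record
// struct and its SPIRV::lookup*() helper out of SPIRVGenTables.inc. Every
// table maps a demangled builtin name (and, where applicable, an instruction
// set) to a record describing how that builtin is lowered.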
std::string lookupBuiltinNameHelper(StringRef DemangledCall,
                                    FPDecorationId *DecorationId) {
  const static std::string PassPrefix = "(anonymous namespace)::";
  std::string BuiltinName;
  // An Itanium demangler result may carry an "(anonymous namespace)::" prefix.
  if (DemangledCall.starts_with(PassPrefix))
    BuiltinName = DemangledCall.substr(PassPrefix.length());
  else
    BuiltinName = DemangledCall;
  // Extract the builtin function name from the call skeleton.
  BuiltinName = BuiltinName.substr(0, BuiltinName.find('('));
  // Account for a possible "__spirv_ocl_" prefix in SPIR-V friendly LLVM IR.
  if (BuiltinName.rfind("__spirv_ocl_", 0) == 0)
    BuiltinName = BuiltinName.substr(12);
  // If the name is an instantiated template, strip the "<...>" part and any
  // return type preceding the name.
  std::size_t Pos1 = BuiltinName.rfind('<');
  if (Pos1 != std::string::npos && BuiltinName.back() == '>') {
    std::size_t Pos2 = BuiltinName.rfind(' ', Pos1);
    if (Pos2 == std::string::npos)
      Pos2 = 0;
    else
      ++Pos2;
    BuiltinName = BuiltinName.substr(Pos2, Pos1 - Pos2);
    BuiltinName = BuiltinName.substr(BuiltinName.find_last_of(' ') + 1);
  }
  // Some builtins are mangled with a "_R<type>" return-type postfix; match
  // them and keep only the base "__spirv_..." name, remembering any trailing
  // rounding-mode postfix.
  static const std::regex SpvWithR(
      "(__spirv_(ImageSampleExplicitLod|ImageRead|ImageQuerySizeLod|UDotKHR|"
      "SDotKHR|SUDotKHR|SDotAccSatKHR|UDotAccSatKHR|SUDotAccSatKHR|"
      "ReadClockKHR|SubgroupBlockReadINTEL|SubgroupImageBlockReadINTEL|"
      "SubgroupImageMediaBlockReadINTEL|SubgroupImageMediaBlockWriteINTEL|"
      "UConvert|SConvert|FConvert|SatConvert).*)_R[^_]*_?(\\w+)?.*");
  std::smatch Match;
  if (std::regex_match(BuiltinName, Match, SpvWithR) && Match.size() > 1) {
    std::ssub_match SubMatch;
    if (DecorationId && Match.size() > 3) {
      SubMatch = Match[3];
      *DecorationId = demangledPostfixToDecorationId(SubMatch.str());
    }
    SubMatch = Match[1];
    BuiltinName = SubMatch.str();
  }

  return BuiltinName;
}
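// For example, "__spirv_FConvert_Rfloat_rtz" is reduced to "__spirv_FConvert"
// above, and the trailing "rtz" postfix is reported through DecorationId as
// the FP rounding-mode decoration to attach to the converted result.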
static std::unique_ptr<const SPIRV::IncomingCall>
lookupBuiltin(StringRef DemangledCall,
              SPIRV::InstructionSet::InstructionSet Set,
              Register ReturnRegister, const SPIRVType *ReturnType,
              const SmallVectorImpl<Register> &Arguments) {
  std::string BuiltinName = lookupBuiltinNameHelper(DemangledCall);

  SmallVector<StringRef, 10> BuiltinArgumentTypes;
  StringRef BuiltinArgs =
      DemangledCall.slice(DemangledCall.find('(') + 1, DemangledCall.find(')'));
  BuiltinArgs.split(BuiltinArgumentTypes, ',', -1, false);

  // Look up the builtin in the defined set. Start with the plain demangled
  // name, expecting a 1:1 match in the defined builtin set.
  const SPIRV::DemangledBuiltin *Builtin;
  if ((Builtin = SPIRV::lookupBuiltin(BuiltinName, Set)))
    return std::make_unique<SPIRV::IncomingCall>(
        BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);

  // If the initial lookup failed and the demangled call takes at least one
  // argument, add a prefix or suffix derived from the first argument's type
  // name and repeat the search.
  if (BuiltinArgumentTypes.size() >= 1) {
    char FirstArgumentType = BuiltinArgumentTypes[0][0];
    // Prefix to be added to the builtin's name for lookup.
    // For example, OpenCL "abs" taking an unsigned value has a prefix "u_".
    std::string Prefix;

    switch (FirstArgumentType) {
    // Unsigned.
    case 'u':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "u_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "u";
      break;
    // Signed.
    case 'c':
    case 's':
    case 'i':
    case 'l':
      if (Set == SPIRV::InstructionSet::OpenCL_std)
        Prefix = "s_";
      else if (Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "s";
      break;
    // Floating-point.
    case 'f':
    case 'd':
    case 'h':
      if (Set == SPIRV::InstructionSet::OpenCL_std ||
          Set == SPIRV::InstructionSet::GLSL_std_450)
        Prefix = "f";
      break;
    }

    // If an argument-type name prefix was added, look up the builtin again.
    if (!Prefix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(Prefix + BuiltinName, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
    // If lookup with a prefix failed, find a suffix to be added instead. For
    // example, OpenCL "group_reduce_max" taking signed values has a suffix
    // "s".
    std::string Suffix;

    switch (FirstArgumentType) {
    // The cases mirror the prefix switch above, appending "u"/"s"/"f" for
    // unsigned, signed and floating-point first arguments respectively.
    }

    // If an argument-type name suffix was added, look up the builtin again.
    if (!Suffix.empty() &&
        (Builtin = SPIRV::lookupBuiltin(BuiltinName + Suffix, Set)))
      return std::make_unique<SPIRV::IncomingCall>(
          BuiltinName, Builtin, ReturnRegister, ReturnType, Arguments);
  }

  // No matching builtin was found.
  return nullptr;
}
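// Putting the pieces together: for an OpenCL_std call demangled as
// "abs(unsigned int)", lookupBuiltin() first tries the plain name "abs" and,
// failing that, retries with the unsigned prefix as "u_abs"; group reductions
// such as "group_reduce_max(int)" retry with a type suffix instead.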
  assert(MI->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST &&
         MI->getOperand(1).isReg());
  Register BitcastReg = MI->getOperand(1).getReg();

  Register ValueReg = MI->getOperand(0).getReg();

  assert(Ty && "Type is expected");

  if (MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE)
    return MI->getOperand(1).getGlobal()->getType();
  assert(isSpvIntrinsic(*MI, Intrinsic::spv_alloca) &&
         "Blocks in OpenCL C must be traceable to allocation site");
// Helper function building either a resulting scalar or vector bool register,
// depending on the expected result type.
static std::tuple<Register, SPIRVType *>
buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType,
                  SPIRVGlobalRegistry *GR) {
  if (ResultType->getOpcode() == SPIRV::OpTypeVector) {

  return std::make_tuple(ResultRegister, BoolType);
}

// From buildSelectInst: wrap a boolean result into the expected return type
// via G_SELECT between all-ones and zero constants.
  if (ReturnType->getOpcode() == SPIRV::OpTypeVector) {

  return MIRBuilder.buildSelect(ReturnRegister, SourceRegister, TrueConst,
                                FalseConst);

// From buildLoadInst: load a value of BaseType from PtrRegister.
  if (!DestinationReg.isValid())

  MIRBuilder.buildLoad(DestinationReg, PtrRegister, PtrInfo, Align());
  return DestinationReg;

// From buildBuiltinVariableLoad: create the Input-storage-class global
// variable backing a SPIR-V builtin value and load from it.
      VariableType, MIRBuilder, SPIRV::StorageClass::Input);

      SPIRV::StorageClass::Input, nullptr, isConst, hasLinkageTy,
      SPIRV::LinkageType::Import, MIRBuilder,

  return LoadedRegister;
extern Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy,
                                  SPIRVGlobalRegistry *GR,
                                  MachineIRBuilder &MIB,
                                  MachineRegisterInfo &MRI);
static SPIRV::MemorySemantics::MemorySemantics
getSPIRVMemSemantics(std::memory_order MemOrder) {
  switch (MemOrder) {
  case std::memory_order::memory_order_relaxed:
    return SPIRV::MemorySemantics::None;
  case std::memory_order::memory_order_acquire:
    return SPIRV::MemorySemantics::Acquire;
  case std::memory_order::memory_order_release:
    return SPIRV::MemorySemantics::Release;
  case std::memory_order::memory_order_acq_rel:
    return SPIRV::MemorySemantics::AcquireRelease;
  case std::memory_order::memory_order_seq_cst:
    return SPIRV::MemorySemantics::SequentiallyConsistent;
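// This is a one-to-one mapping of C/C++/OpenCL memory orders onto the SPIR-V
// memory-semantics bitmask, e.g. memory_order_seq_cst becomes the
// SequentiallyConsistent (0x10) semantics bit.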
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope) {
  switch (ClScope) {
  case SPIRV::CLMemoryScope::memory_scope_work_item:
    return SPIRV::Scope::Invocation;
  case SPIRV::CLMemoryScope::memory_scope_work_group:
    return SPIRV::Scope::Workgroup;
  case SPIRV::CLMemoryScope::memory_scope_device:
    return SPIRV::Scope::Device;
  case SPIRV::CLMemoryScope::memory_scope_all_svm_devices:
    return SPIRV::Scope::CrossDevice;
  case SPIRV::CLMemoryScope::memory_scope_sub_group:
    return SPIRV::Scope::Subgroup;
static Register buildScopeReg(Register CLScopeRegister,
                              SPIRV::Scope::Scope Scope,
                              MachineIRBuilder &MIRBuilder,
                              SPIRVGlobalRegistry *GR,
                              MachineRegisterInfo *MRI) {
  if (CLScopeRegister.isValid()) {
    auto CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(CLScopeRegister, MRI));
    Scope = getSPIRVScope(CLScope);

    // If the CL scope constant already equals the SPIR-V scope encoding, the
    // incoming register can be reused directly.
    if (CLScope == static_cast<unsigned>(Scope)) {
      MRI->setRegClass(CLScopeRegister, &SPIRV::iIDRegClass);
      return CLScopeRegister;
    }
  }

static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI,
                              SPIRVGlobalRegistry *GR) {
  if (MRI->getRegClassOrNull(Reg))
    return;
  SPIRVType *SpvType = GR->getSPIRVTypeForVReg(Reg);
  MRI->setRegClass(Reg,
                   SpvType ? GR->getRegClass(SpvType) : &SPIRV::iIDRegClass);
}
static Register buildMemSemanticsReg(Register SemanticsRegister,
                                     Register PtrRegister, unsigned &Semantics,
                                     MachineIRBuilder &MIRBuilder,
                                     SPIRVGlobalRegistry *GR) {
  if (SemanticsRegister.isValid()) {
    MachineRegisterInfo *MRI = MIRBuilder.getMRI();
    std::memory_order Order =
        static_cast<std::memory_order>(getIConstVal(SemanticsRegister, MRI));

    if (Order == Semantics) {
      MRI->setRegClass(SemanticsRegister, &SPIRV::iIDRegClass);
      return SemanticsRegister;
    }
  unsigned Sz = Call->Arguments.size() - ImmArgs.size();
  for (unsigned i = 0; i < Sz; ++i)
    MIB.addUse(Call->Arguments[i]);
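// In the __spirv_* wrapper path, the trailing ImmArgs.size() call arguments
// are appended as immediate operands rather than register uses, so only the
// first Sz arguments are added with addUse() here.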
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpStore, Call, Register(0));

  assert(Call->Arguments.size() == 2 &&
         "Need 2 arguments for atomic init translation");
  MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
// From buildAtomicLoadInst:
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicLoad, Call, TypeReg);

  Register PtrRegister = Call->Arguments[0];

  Register ScopeRegister =
      Call->Arguments.size() > 1 ? Call->Arguments[1] : Register();

  if (Call->Arguments.size() > 2) {
    // The memory-semantics operand was passed explicitly.
    MemSemanticsReg = Call->Arguments[2];
  } else {
    int Semantics =
        SPIRV::MemorySemantics::SequentiallyConsistent |
        getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));
  }

  MIRBuilder.buildInstr(SPIRV::OpAtomicLoad)
      .addDef(Call->ReturnRegister)

// From buildAtomicStoreInst:
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpAtomicStore, Call,
                              Register(0));

  Register PtrRegister = Call->Arguments[0];
  int Semantics =
      SPIRV::MemorySemantics::SequentiallyConsistent |
      getMemSemanticsForStorageClass(GR->getPointerStorageClass(PtrRegister));

  MIRBuilder.buildInstr(SPIRV::OpAtomicStore)
      .addUse(Call->Arguments[1]);
// From buildAtomicCompareExchangeInst:
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  bool IsCmpxchg = Call->Builtin->Name.contains("cmpxchg");

  Register ObjectPtr = Call->Arguments[0];   // Pointer (volatile A *object).
  Register ExpectedArg = Call->Arguments[1]; // Comparator (C *expected).
  Register Desired = Call->Arguments[2];     // Value (C desired).
  SPIRVType *SpvDesiredTy = GR->getSPIRVTypeForVReg(Desired);
  LLT DesiredLLT = MRI->getType(Desired);

  assert(GR->getSPIRVTypeForVReg(ObjectPtr)->getOpcode() ==
         SPIRV::OpTypePointer);
  unsigned ExpectedType = GR->getSPIRVTypeForVReg(ExpectedArg)->getOpcode();
  assert(IsCmpxchg ? ExpectedType == SPIRV::OpTypeInt
                   : ExpectedType == SPIRV::OpTypePointer);

  auto StorageClass = static_cast<SPIRV::StorageClass::StorageClass>(
      GR->getPointerStorageClass(ObjectPtr));
  auto MemSemStorage = getMemSemanticsForStorageClass(StorageClass);

  Register MemSemEqualReg;
  Register MemSemUnequalReg;
  uint64_t MemSemEqual =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  uint64_t MemSemUnequal =
      IsCmpxchg
          ? SPIRV::MemorySemantics::None
          : SPIRV::MemorySemantics::SequentiallyConsistent | MemSemStorage;
  if (Call->Arguments.size() >= 4) {
    assert(Call->Arguments.size() >= 5 &&
           "Need 5+ args for explicit atomic cmpxchg");
    auto MemOrdEq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[3], MRI));
    auto MemOrdNeq =
        static_cast<std::memory_order>(getIConstVal(Call->Arguments[4], MRI));
    MemSemEqual = getSPIRVMemSemantics(MemOrdEq) | MemSemStorage;
    MemSemUnequal = getSPIRVMemSemantics(MemOrdNeq) | MemSemStorage;
    if (MemOrdEq == MemSemEqual)
      MemSemEqualReg = Call->Arguments[3];
    if (MemOrdNeq == MemSemEqual)
      MemSemUnequalReg = Call->Arguments[4];
  }
  if (!MemSemUnequalReg.isValid())
    MemSemUnequalReg = buildConstantIntReg32(MemSemUnequal, MIRBuilder, GR);

  Register ScopeReg;
  auto Scope = IsCmpxchg ? SPIRV::Scope::Workgroup : SPIRV::Scope::Device;
  if (Call->Arguments.size() >= 6) {
    assert(Call->Arguments.size() == 6 &&
           "Extra args for explicit atomic cmpxchg");
    auto ClScope = static_cast<SPIRV::CLMemoryScope>(
        getIConstVal(Call->Arguments[5], MRI));
    Scope = getSPIRVScope(ClScope);
    if (ClScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[5];
  }

  Register Tmp = !IsCmpxchg ? MRI->createGenericVirtualRegister(DesiredLLT)
                            : Call->ReturnRegister;
  if (!MRI->getRegClassOrNull(Tmp))
    MRI->setRegClass(Tmp, GR->getRegClass(SpvDesiredTy));
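// Note the asymmetry baked in above: the legacy OpenCL 1.x "atom_cmpxchg"
// form defaults to Workgroup scope with relaxed (None) semantics, while the
// C11-style atomic_compare_exchange_* builtins default to Device scope with
// sequentially-consistent semantics derived from the pointer's storage class.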
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  Register ScopeRegister =
      Call->Arguments.size() >= 4 ? Call->Arguments[3] : Register();

  assert(Call->Arguments.size() <= 4 &&
         "Too many args for explicit atomic RMW");
  ScopeRegister = buildScopeReg(ScopeRegister, SPIRV::Scope::Workgroup,
                                MIRBuilder, GR, MRI);

  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::None;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);
  Register ValueReg = Call->Arguments[1];

  // Float-typed RMW builtins lower to OpAtomicFAddEXT; a subtraction becomes
  // an addition of the negated value.
  if (Call->ReturnType->getOpcode() == SPIRV::OpTypeFloat) {
    if (Opcode == SPIRV::OpAtomicIAdd) {
      Opcode = SPIRV::OpAtomicFAddEXT;
    } else if (Opcode == SPIRV::OpAtomicISub) {
      Opcode = SPIRV::OpAtomicFAddEXT;
      Register NegValueReg =
          MRI->createGenericVirtualRegister(MRI->getType(ValueReg));

      ValueReg = NegValueReg;
    }
  }
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
  assert(Call->Arguments.size() == 4 &&
         "Wrong number of atomic floating-type builtin");
  Register PtrReg = Call->Arguments[0];
  Register ScopeReg = Call->Arguments[1];
  Register MemSemanticsReg = Call->Arguments[2];
  Register ValueReg = Call->Arguments[3];
  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
  bool IsSet = Opcode == SPIRV::OpAtomicFlagTestAndSet;

  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              IsSet ? TypeReg : Register(0));

  Register PtrRegister = Call->Arguments[0];
  unsigned Semantics = SPIRV::MemorySemantics::SequentiallyConsistent;
  Register MemSemanticsReg =
      Call->Arguments.size() >= 2 ? Call->Arguments[1] : Register();
  MemSemanticsReg = buildMemSemanticsReg(MemSemanticsReg, PtrRegister,
                                         Semantics, MIRBuilder, GR);

  assert((Opcode != SPIRV::OpAtomicFlagClear ||
          (Semantics != SPIRV::MemorySemantics::Acquire &&
           Semantics != SPIRV::MemorySemantics::AcquireRelease)) &&
         "Invalid memory order argument!");

  Register ScopeRegister =
      Call->Arguments.size() >= 3 ? Call->Arguments[2] : Register();
  if ((Opcode == SPIRV::OpControlBarrierArriveINTEL ||
       Opcode == SPIRV::OpControlBarrierWaitINTEL) &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_split_barrier)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_split_barrier";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call, Register(0));

  unsigned MemFlags = getIConstVal(Call->Arguments[0], MRI);
  unsigned MemSemantics = SPIRV::MemorySemantics::None;

  if (MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::WorkgroupMemory;

  if (MemFlags & SPIRV::CLK_GLOBAL_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::CrossWorkgroupMemory;

  if (MemFlags & SPIRV::CLK_IMAGE_MEM_FENCE)
    MemSemantics |= SPIRV::MemorySemantics::ImageMemory;

  if (Opcode == SPIRV::OpMemoryBarrier)
    MemSemantics |= getSPIRVMemSemantics(static_cast<std::memory_order>(
        getIConstVal(Call->Arguments[1], MRI)));
  else if (Opcode == SPIRV::OpControlBarrierArriveINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Release;
  else if (Opcode == SPIRV::OpControlBarrierWaitINTEL)
    MemSemantics |= SPIRV::MemorySemantics::Acquire;
  else
    MemSemantics |= SPIRV::MemorySemantics::SequentiallyConsistent;

  Register MemSemanticsReg =
      MemFlags == MemSemantics
          ? Call->Arguments[0]
          : buildConstantIntReg32(MemSemantics, MIRBuilder, GR);

  Register ScopeReg;
  SPIRV::Scope::Scope Scope = SPIRV::Scope::Workgroup;
  SPIRV::Scope::Scope MemScope = Scope;
  if (Call->Arguments.size() >= 2) {
    assert(
        ((Opcode != SPIRV::OpMemoryBarrier && Call->Arguments.size() == 2) ||
         (Opcode == SPIRV::OpMemoryBarrier && Call->Arguments.size() == 3)) &&
        "Extra args for explicitly scoped barrier");
    Register ScopeArg = (Opcode == SPIRV::OpMemoryBarrier) ? Call->Arguments[2]
                                                           : Call->Arguments[1];
    SPIRV::CLMemoryScope CLScope =
        static_cast<SPIRV::CLMemoryScope>(getIConstVal(ScopeArg, MRI));
    MemScope = getSPIRVScope(CLScope);
    if (!(MemFlags & SPIRV::CLK_LOCAL_MEM_FENCE) ||
        (Opcode == SPIRV::OpMemoryBarrier))
      Scope = MemScope;
    if (CLScope == static_cast<unsigned>(Scope))
      ScopeReg = Call->Arguments[1];
  }

  auto MIB = MIRBuilder.buildInstr(Opcode).addUse(ScopeReg);
  if (Opcode != SPIRV::OpMemoryBarrier)
    MIB.addUse(MemScopeReg);
  MIB.addUse(MemSemanticsReg);
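// In OpenCL terms: barrier(CLK_LOCAL_MEM_FENCE) therefore becomes an
// OpControlBarrier whose semantics mask combines WorkgroupMemory with
// SequentiallyConsistent, while atomic_work_item_fence takes the
// OpMemoryBarrier path with semantics derived from its memory_order argument.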
  switch (dim) {
  case SPIRV::Dim::DIM_1D:
  case SPIRV::Dim::DIM_Buffer:
    return 1;
  case SPIRV::Dim::DIM_2D:
  case SPIRV::Dim::DIM_Cube:
  case SPIRV::Dim::DIM_Rect:
    return 2;
  case SPIRV::Dim::DIM_3D:
    return 3;

// From getNumSizeComponents: an arrayed image needs one extra size component
// for the array-layer count.
  return arrayed ? numComps + 1 : numComps;
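// For example, a 2D arrayed image reports three size components: width,
// height, and the number of array layers.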
static bool generateExtInst(const SPIRV::IncomingCall *Call,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR) {
  // Lookup the extended instruction number in the TableGen records.
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  uint32_t Number =
      SPIRV::lookupExtendedBuiltin(Builtin->Name, Builtin->Set)->Number;

  // Build extended instruction.
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Number);

  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);
  return true;
}

static bool generateRelationalInst(const SPIRV::IncomingCall *Call,
                                   MachineIRBuilder &MIRBuilder,
                                   SPIRVGlobalRegistry *GR) {
  const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register CompareRegister;
  SPIRVType *RelationType;
  std::tie(CompareRegister, RelationType) =
      buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  // Build the relational instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(CompareRegister)
                 .addUse(GR->getSPIRVTypeID(RelationType));
  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);

  // Build the select instruction turning the bool result into the expected
  // return type.
  return buildSelectInst(MIRBuilder, Call->ReturnRegister, CompareRegister,
                         Call->ReturnType, GR);
}
  const SPIRV::GroupBuiltin *GroupBuiltin =
      SPIRV::lookupGroupBuiltin(Builtin->Name);

  MachineRegisterInfo *MRI = MIRBuilder.getMRI();
  if (Call->isSpirvOp()) {
    Register GroupOpReg = Call->Arguments[1];
    const MachineInstr *MI = getDefInstrMaybeConstant(GroupOpReg, MRI);
    if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT)
      report_fatal_error(
          "Group Operation parameter must be an integer constant");
    uint64_t GrpOp = MI->getOperand(1).getCImm()->getValue().getZExtValue();
    Register ScopeReg = Call->Arguments[0];
    auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                   .addDef(Call->ReturnRegister)
                   .addImm(GrpOp);
    for (unsigned i = 2; i < Call->Arguments.size(); ++i)
      MIB.addUse(Call->Arguments[i]);
    return true;
  }

  Register BoolReg = Call->Arguments[0];
  SPIRVType *BoolRegType = GR->getSPIRVTypeForVReg(BoolReg);
  MachineInstr *ArgInstruction = getDefInstrMaybeConstant(BoolReg, MRI);
  if (ArgInstruction->getOpcode() == TargetOpcode::G_CONSTANT) {
    if (BoolRegType->getOpcode() != SPIRV::OpTypeBool)

  if (BoolRegType->getOpcode() == SPIRV::OpTypeInt) {
    MRI->setRegClass(Arg0, &SPIRV::iIDRegClass);

  } else if (BoolRegType->getOpcode() != SPIRV::OpTypeBool) {
  Register GroupResultRegister = Call->ReturnRegister;
  SPIRVType *GroupResultType = Call->ReturnType;

  const bool HasBoolReturnTy =

  if (HasBoolReturnTy)
    std::tie(GroupResultRegister, GroupResultType) =
        buildBoolRegister(MIRBuilder, Call->ReturnType, GR);

  auto Scope = Builtin->Name.starts_with("sub_group") ? SPIRV::Scope::Subgroup
                                                      : SPIRV::Scope::Workgroup;

  Register VecReg;
  if (GroupBuiltin->Opcode == SPIRV::OpGroupBroadcast &&
      Call->Arguments.size() > 2) {
    // For OpGroupBroadcast the LocalId operand must be a single integer
    // scalar or vector, so pack the remaining scalar arguments into a vector.
    Register ElemReg = Call->Arguments[1];
    SPIRVType *ElemType = GR->getSPIRVTypeForVReg(ElemReg);
    if (!ElemType || ElemType->getOpcode() != SPIRV::OpTypeInt)

    unsigned VecLen = Call->Arguments.size() - 1;
    VecReg = MRI->createGenericVirtualRegister(
        LLT::fixed_vector(VecLen, MRI->getType(ElemReg)));
    MRI->setRegClass(VecReg, &SPIRV::vIDRegClass);

    auto MIB =
        MIRBuilder.buildInstr(TargetOpcode::G_BUILD_VECTOR).addDef(VecReg);
    for (unsigned i = 1; i < Call->Arguments.size(); i++) {
      MIB.addUse(Call->Arguments[i]);
    }
  }

  auto MIB = MIRBuilder.buildInstr(GroupBuiltin->Opcode)
                 .addDef(GroupResultRegister)

  if (Call->Arguments.size() > 0) {

    for (unsigned i = 1; i < Call->Arguments.size(); i++)
      MIB.addUse(Call->Arguments[i]);
  }

  if (HasBoolReturnTy)
    buildSelectInst(MIRBuilder, Call->ReturnRegister, GroupResultRegister,
                    Call->ReturnType, GR);
  return true;
  const SPIRV::IntelSubgroupsBuiltin *IntelSubgroups =
      SPIRV::lookupIntelSubgroupsBuiltin(Builtin->Name);

  if (IntelSubgroups->IsMedia &&
      !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_media_block_io)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_media_block_io";
    report_fatal_error(DiagMsg.c_str(), false);
  } else if (!IntelSubgroups->IsMedia &&
             !ST->canUseExtension(SPIRV::Extension::SPV_INTEL_subgroups)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_INTEL_subgroups";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  uint32_t OpCode = IntelSubgroups->Opcode;
  if (Call->isSpirvOp()) {
    bool IsSet = OpCode != SPIRV::OpSubgroupBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageBlockWriteINTEL &&
                 OpCode != SPIRV::OpSubgroupImageMediaBlockWriteINTEL;
    return buildOpFromWrapper(MIRBuilder, OpCode, Call,
                              IsSet ? TypeReg : Register(0));
  }

  if (IntelSubgroups->IsBlock) {
    // If the first argument is an image, use the image block read/write form.
    SPIRVType *Arg0Type = GR->getSPIRVTypeForVReg(Call->Arguments[0]);
    if (Arg0Type->getOpcode() == SPIRV::OpTypeImage) {
      switch (OpCode) {
      case SPIRV::OpSubgroupBlockReadINTEL:
        OpCode = SPIRV::OpSubgroupImageBlockReadINTEL;
        break;
      case SPIRV::OpSubgroupBlockWriteINTEL:
        OpCode = SPIRV::OpSubgroupImageBlockWriteINTEL;
        break;
      }
    }
  }

  auto MIB = MIRBuilder.buildInstr(OpCode)
                 .addDef(Call->ReturnRegister)

  for (size_t i = 0; i < Call->Arguments.size(); ++i)
    MIB.addUse(Call->Arguments[i]);
  return true;
  if (!ST->canUseExtension(
          SPIRV::Extension::SPV_KHR_uniform_group_instructions)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_uniform_group_instructions";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  const SPIRV::GroupUniformBuiltin *GroupUniform =
      SPIRV::lookupGroupUniformBuiltin(Builtin->Name);
  MachineRegisterInfo *MRI = MIRBuilder.getMRI();

  Register GroupResultReg = Call->ReturnRegister;
  Register ScopeReg = Call->Arguments[0];
  Register ValueReg = Call->Arguments[2];

  // The group operation is a literal, so it must be an integer constant.
  Register ConstGroupOpReg = Call->Arguments[1];
  const MachineInstr *Const = getDefInstrMaybeConstant(ConstGroupOpReg, MRI);
  if (!Const || Const->getOpcode() != TargetOpcode::G_CONSTANT)
    report_fatal_error(
        "expect a constant group operation for a uniform group instruction",
        false);
  const MachineOperand &ConstOperand = Const->getOperand(1);
  if (!ConstOperand.isCImm())

  auto MIB = MIRBuilder.buildInstr(GroupUniform->Opcode)
                 .addDef(GroupResultReg)

  MIB.addUse(ValueReg);
  if (!ST->canUseExtension(SPIRV::Extension::SPV_KHR_shader_clock)) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: SPV_KHR_shader_clock";
    report_fatal_error(DiagMsg.c_str(), false);
  }

  Register ResultReg = Call->ReturnRegister;

  // Deduce the `Scope` operand from the builtin function name.
  SPIRV::Scope::Scope ScopeArg =
      StringSwitch<SPIRV::Scope::Scope>(Builtin->Name)
          .EndsWith("device", SPIRV::Scope::Scope::Device)
          .EndsWith("work_group", SPIRV::Scope::Scope::Workgroup)
          .EndsWith("sub_group", SPIRV::Scope::Scope::Subgroup);
// From genWorkgroupQuery: lower get_global_id()-style queries, where the
// result is one component of a builtin variable vector selected by index.
                              SPIRV::BuiltIn::BuiltIn BuiltinValue,

  Register IndexRegister = Call->Arguments[0];
  const unsigned ResultWidth = Call->ReturnType->getOperand(1).getImm();

  Register ToTruncate = Call->ReturnRegister;

  bool IsConstantIndex =
      IndexInstruction->getOpcode() == TargetOpcode::G_CONSTANT;

    // Set up the final register to do truncation or extension on at the end.
    Register DefaultReg = Call->ReturnRegister;
    if (PointerSize != ResultWidth) {
      DefaultReg = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
      MRI->setRegClass(DefaultReg, &SPIRV::iIDRegClass);
      GR->assignSPIRVTypeToVReg(PointerSizeType, DefaultReg,
                                MIRBuilder.getMF());
      ToTruncate = DefaultReg;
    }

    MIRBuilder.buildCopy(DefaultReg, NewRegister);

    Register Extracted = Call->ReturnRegister;
    if (!IsConstantIndex || PointerSize != ResultWidth) {
      Extracted = MRI->createGenericVirtualRegister(LLT::scalar(PointerSize));
      MRI->setRegClass(Extracted, &SPIRV::iIDRegClass);
    }

    ExtractInst.addUse(LoadedVector).addUse(IndexRegister);

    // If the index is dynamic, need check if it's < 3, and then use a select.
    if (!IsConstantIndex) {
      MRI->setRegClass(CompareRegister, &SPIRV::iIDRegClass);

      Register SelectionResult = Call->ReturnRegister;
      if (PointerSize != ResultWidth) {
        MRI->setRegClass(SelectionResult, &SPIRV::iIDRegClass);
        GR->assignSPIRVTypeToVReg(PointerSizeType, SelectionResult,
                                  MIRBuilder.getMF());
      }
      MIRBuilder.buildSelect(SelectionResult, CompareRegister, Extracted,
                             DefaultReg);
      ToTruncate = SelectionResult;
    } else {
      ToTruncate = Extracted;
    }

  if (PointerSize != ResultWidth)
    MIRBuilder.buildZExtOrTrunc(Call->ReturnRegister, ToTruncate);
  SPIRV::BuiltIn::BuiltIn Value =
      SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;

  if (Value == SPIRV::BuiltIn::GlobalInvocationId)
    return genWorkgroupQuery(Call, MIRBuilder, GR, Value, 0);

  // Build a load instruction for the builtin variable.
  unsigned BitWidth = GR->getScalarOrVectorBitWidth(Call->ReturnType);
  LLT LLType;
  if (Call->ReturnType->getOpcode() == SPIRV::OpTypeVector)
    LLType =
        LLT::fixed_vector(Call->ReturnType->getOperand(2).getImm(), BitWidth);
  else
    LLType = LLT::scalar(BitWidth);

  return buildBuiltinVariableLoad(MIRBuilder, Call->ReturnType, GR, Value,
                                  LLType, Call->ReturnRegister);
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  switch (Opcode) {
  case SPIRV::OpStore:
    return buildAtomicInitInst(Call, MIRBuilder);
  case SPIRV::OpAtomicLoad:
    return buildAtomicLoadInst(Call, MIRBuilder, GR);
  case SPIRV::OpAtomicStore:
    return buildAtomicStoreInst(Call, MIRBuilder, GR);
  case SPIRV::OpAtomicCompareExchange:
  case SPIRV::OpAtomicCompareExchangeWeak:
    return buildAtomicCompareExchangeInst(Call, Builtin, Opcode, MIRBuilder,
                                          GR);
  case SPIRV::OpAtomicIAdd:
  case SPIRV::OpAtomicISub:
  case SPIRV::OpAtomicOr:
  case SPIRV::OpAtomicXor:
  case SPIRV::OpAtomicAnd:
  case SPIRV::OpAtomicExchange:
    return buildAtomicRMWInst(Call, Opcode, MIRBuilder, GR);
  case SPIRV::OpMemoryBarrier:
    return buildBarrierInst(Call, SPIRV::OpMemoryBarrier, MIRBuilder, GR);
  case SPIRV::OpAtomicFlagTestAndSet:
  case SPIRV::OpAtomicFlagClear:
    return buildAtomicFlagInst(Call, Opcode, MIRBuilder, GR);
  default:
    if (Call->isSpirvOp())
      return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                                GR->getSPIRVTypeID(Call->ReturnType));
    return false;
  }

// Atomic floating-point builtins (SPV_EXT_shader_atomic_float_*):
  unsigned Opcode = SPIRV::lookupAtomicFloatingBuiltin(Builtin->Name)->Opcode;
  switch (Opcode) {
  case SPIRV::OpAtomicFAddEXT:
  case SPIRV::OpAtomicFMinEXT:
  case SPIRV::OpAtomicFMaxEXT:
    return buildAtomicFloatingRMWInst(Call, Opcode, MIRBuilder, GR);
  }

// From generateCastToPtrInst:
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  MIRBuilder.buildInstr(TargetOpcode::G_ADDRSPACE_CAST)
      .addDef(Call->ReturnRegister)
      .addUse(Call->Arguments[0]);
  return true;
  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, SPIRV::OpDot, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));

  unsigned Opcode = GR->getSPIRVTypeForVReg(Call->Arguments[0])->getOpcode();
  bool IsVec = Opcode == SPIRV::OpTypeVector;
  // Use OpDot only in case of vector args, and OpFMulS for scalar args.
  MIRBuilder.buildInstr(IsVec ? SPIRV::OpDot : SPIRV::OpFMulS)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Call->Arguments[0])
      .addUse(Call->Arguments[1]);
  return true;
  SPIRV::BuiltIn::BuiltIn Value =
      SPIRV::lookupGetBuiltin(Builtin->Name, Builtin->Set)->Value;

  // For now, only a single wave intrinsic with a single integer return type
  // is supported.
  assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt);
  LLT LLType = LLT::scalar(GR->getScalarOrVectorBitWidth(Call->ReturnType));

  return buildBuiltinVariableLoad(
      MIRBuilder, Call->ReturnType, GR, Value, LLType, Call->ReturnRegister,
      /* isConst= */ false, /* hasLinkageTy= */ false);
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register SRetReg = Call->Arguments[0];

  if (RetType->getOpcode() != SPIRV::OpTypeStruct)

                       "overflow builtins");

  if (!OpType1 || !OpType2 || OpType1 != OpType2)

  if (OpType1->getOpcode() == SPIRV::OpTypeVector)
    switch (Opcode) {
    case SPIRV::OpIAddCarryS:
      Opcode = SPIRV::OpIAddCarryV;
      break;
    case SPIRV::OpISubBorrowS:
      Opcode = SPIRV::OpISubBorrowV;
      break;
    }

  Register ResReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  if (const TargetRegisterClass *DstRC =
          MRI->getRegClassOrNull(Call->Arguments[1])) {
    MRI->setRegClass(ResReg, DstRC);
    MRI->setType(ResReg, MRI->getType(Call->Arguments[1]));
  }

  MIRBuilder.buildInstr(Opcode)
      .addDef(ResReg)
      .addUse(Call->Arguments[1])
      .addUse(Call->Arguments[2]);
  SPIRV::BuiltIn::BuiltIn Value =
      SPIRV::lookupGetBuiltin(Call->Builtin->Name, Call->Builtin->Set)->Value;
  uint64_t IsDefault = (Value == SPIRV::BuiltIn::GlobalSize ||
                        Value == SPIRV::BuiltIn::WorkgroupSize ||
                        Value == SPIRV::BuiltIn::EnqueuedWorkgroupSize);
  return genWorkgroupQuery(Call, MIRBuilder, GR, Value, IsDefault ? 1 : 0);
  unsigned Component =
      SPIRV::lookupImageQueryBuiltin(Builtin->Name, Builtin->Set)->Component;

  unsigned NumExpectedRetComponents =
      RetTy->getOpcode() == SPIRV::OpTypeVector
          ? RetTy->getOperand(2).getImm()
          : 1;

  Register QueryResult = Call->ReturnRegister;
  SPIRVType *QueryResultType = Call->ReturnType;
  if (NumExpectedRetComponents != NumActualRetComponents) {
    QueryResultType = GR->getOrCreateSPIRVVectorType(
        IntTy, NumActualRetComponents, MIRBuilder);
  }

  unsigned Opcode =
      IsDimBuf ? SPIRV::OpImageQuerySize : SPIRV::OpImageQuerySizeLod;
  auto MIB = MIRBuilder.buildInstr(Opcode)
                 .addDef(QueryResult)
                 .addUse(Call->Arguments[0]);

  if (NumExpectedRetComponents == NumActualRetComponents)
    return true;
  if (NumExpectedRetComponents == 1) {
    // Only one component is expected: extract it from the query result.
    unsigned ExtractedComposite =
        Component == 3 ? NumActualRetComponents - 1 : Component;
    assert(ExtractedComposite < NumActualRetComponents &&
           "Invalid composite index!");
    if (QueryResultType->getOpcode() == SPIRV::OpTypeVector) {
      if (TypeReg != NewTypeReg &&

      TypeReg = NewTypeReg;
    }
    MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
        .addDef(Call->ReturnRegister)
        .addImm(ExtractedComposite);
    if (NewType != nullptr)
      insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
                        MIRBuilder.getMF().getRegInfo());
  } else {
    // More components are expected: shuffle the query result into the
    // expected vector width, padding missing lanes with undef (0xffffffff).
    auto MIB = MIRBuilder.buildInstr(SPIRV::OpVectorShuffle)
                   .addDef(Call->ReturnRegister)

    for (unsigned i = 0; i < NumExpectedRetComponents; ++i)
      MIB.addImm(i < NumActualRetComponents ? i : 0xffffffff);
  }
  return true;
  assert(Call->ReturnType->getOpcode() == SPIRV::OpTypeInt &&
         "Image samples query result must be of int type!");

  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  Register Image = Call->Arguments[0];
  SPIRV::Dim::Dim ImageDimensionality = static_cast<SPIRV::Dim::Dim>(
      GR->getSPIRVTypeForVReg(Image)->getOperand(2).getImm());
  (void)ImageDimensionality;

  switch (Opcode) {
  case SPIRV::OpImageQuerySamples:
    assert(ImageDimensionality == SPIRV::Dim::DIM_2D &&
           "Image must be of 2D dimensionality");
    break;
  case SPIRV::OpImageQueryLevels:
    assert((ImageDimensionality == SPIRV::Dim::DIM_1D ||
            ImageDimensionality == SPIRV::Dim::DIM_2D ||
            ImageDimensionality == SPIRV::Dim::DIM_3D ||
            ImageDimensionality == SPIRV::Dim::DIM_Cube) &&
           "Image must be of 1D/2D/3D/Cube dimensionality");
    break;
  }

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
static SPIRV::SamplerAddressingMode::SamplerAddressingMode
getSamplerAddressingModeFromBitmask(unsigned Bitmask) {
  switch (Bitmask & SPIRV::CLK_ADDRESS_MODE_MASK) {
  case SPIRV::CLK_ADDRESS_CLAMP:
    return SPIRV::SamplerAddressingMode::Clamp;
  case SPIRV::CLK_ADDRESS_CLAMP_TO_EDGE:
    return SPIRV::SamplerAddressingMode::ClampToEdge;
  case SPIRV::CLK_ADDRESS_REPEAT:
    return SPIRV::SamplerAddressingMode::Repeat;
  case SPIRV::CLK_ADDRESS_MIRRORED_REPEAT:
    return SPIRV::SamplerAddressingMode::RepeatMirrored;
  case SPIRV::CLK_ADDRESS_NONE:
    return SPIRV::SamplerAddressingMode::None;
  }
}

static unsigned getSamplerParamFromBitmask(unsigned Bitmask) {
  return (Bitmask & SPIRV::CLK_NORMALIZED_COORDS_TRUE) ? 1 : 0;
}

static SPIRV::SamplerFilterMode::SamplerFilterMode
getSamplerFilterModeFromBitmask(unsigned Bitmask) {
  if (Bitmask & SPIRV::CLK_FILTER_LINEAR)
    return SPIRV::SamplerFilterMode::Linear;
  if (Bitmask & SPIRV::CLK_FILTER_NEAREST)
    return SPIRV::SamplerFilterMode::Nearest;
  return SPIRV::SamplerFilterMode::Nearest;
}
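// Note the precedence above: CLK_FILTER_LINEAR wins if both filter bits are
// set, and anything else (including an unspecified filter) defaults to
// Nearest.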
  Register Image = Call->Arguments[0];

  if (HasOclSampler) {
    Register Sampler = Call->Arguments[1];

    Register SampledImage = MRI->createVirtualRegister(&SPIRV::iIDRegClass);

    if (Call->ReturnType->getOpcode() != SPIRV::OpTypeVector) {
      // A single element must be extracted from the vector sample result.
      Register TempRegister =
          MRI->createGenericVirtualRegister(GR->getRegType(TempType));

      MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
          .addUse(SampledImage)
          .addUse(Call->Arguments[2]) // Coordinate.
          .addImm(SPIRV::ImageOperand::Lod)

      MIRBuilder.buildInstr(SPIRV::OpCompositeExtract)
          .addDef(Call->ReturnRegister)
    } else {
      MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
          .addDef(Call->ReturnRegister)
          .addUse(SampledImage)
          .addUse(Call->Arguments[2]) // Coordinate.
          .addImm(SPIRV::ImageOperand::Lod)
    }
  } else if (HasMsaa) {
    MIRBuilder.buildInstr(SPIRV::OpImageRead)
        .addDef(Call->ReturnRegister)
        .addUse(Image)
        .addUse(Call->Arguments[1]) // Coordinate.
        .addImm(SPIRV::ImageOperand::Sample)
        .addUse(Call->Arguments[2]);
  } else {
    MIRBuilder.buildInstr(SPIRV::OpImageRead)
        .addDef(Call->ReturnRegister)
        .addUse(Image)
        .addUse(Call->Arguments[1]); // Coordinate.
  }
  return true;

// From generateWriteImageInst:
  MIRBuilder.buildInstr(SPIRV::OpImageWrite)
      .addUse(Call->Arguments[0])  // Image.
      .addUse(Call->Arguments[1])  // Coordinate.
      .addUse(Call->Arguments[2]); // Texel.
  if (Call->Builtin->Name.contains_insensitive(
          "__translate_sampler_initializer")) {

    return Sampler.isValid();
  } else if (Call->Builtin->Name.contains_insensitive(
                 "__spirv_SampledImage")) {
    Register Image = Call->Arguments[0];

    Register SampledImage =
        Call->ReturnRegister.isValid()
            ? Call->ReturnRegister
            : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    MIRBuilder.buildInstr(SPIRV::OpSampledImage)
        .addDef(SampledImage)
        .addUse(Image)
        .addUse(Call->Arguments[1]);
    return true;
  } else if (Call->Builtin->Name.contains_insensitive(
                 "__spirv_ImageSampleExplicitLod")) {
    // Determine the result type from the "_R<type>" postfix of the demangled
    // call name.
    std::string ReturnType = DemangledCall.str();
    if (DemangledCall.contains("_R")) {
      ReturnType = ReturnType.substr(ReturnType.find("_R") + 2);
      ReturnType = ReturnType.substr(0, ReturnType.find('('));
    }

      std::string DiagMsg =
          "Unable to recognize SPIRV type name: " + ReturnType;
      report_fatal_error(DiagMsg.c_str());

    MIRBuilder.buildInstr(SPIRV::OpImageSampleExplicitLod)
        .addDef(Call->ReturnRegister)
        .addUse(Call->Arguments[0]) // Image.
        .addUse(Call->Arguments[1]) // Coordinate.
        .addImm(SPIRV::ImageOperand::Lod)
        .addUse(Call->Arguments[3]);
    return true;
  }
  return false;
  MIRBuilder.buildSelect(Call->ReturnRegister, Call->Arguments[0],
                         Call->Arguments[1], Call->Arguments[2]);
  return true;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
  bool IsSet = Opcode != SPIRV::OpCooperativeMatrixStoreKHR &&
               Opcode != SPIRV::OpCooperativeMatrixStoreCheckedINTEL &&
               Opcode != SPIRV::OpCooperativeMatrixPrefetchINTEL;
  unsigned ArgSz = Call->Arguments.size();
  unsigned LiteralIdx = 0;
  switch (Opcode) {
  // The memory-operand argument is optional and is a literal.
  case SPIRV::OpCooperativeMatrixLoadKHR:
    LiteralIdx = ArgSz > 3 ? 3 : 0;
    break;
  case SPIRV::OpCooperativeMatrixStoreKHR:
    LiteralIdx = ArgSz > 4 ? 4 : 0;
    break;
  case SPIRV::OpCooperativeMatrixLoadCheckedINTEL:
    LiteralIdx = ArgSz > 7 ? 7 : 0;
    break;
  case SPIRV::OpCooperativeMatrixStoreCheckedINTEL:
    LiteralIdx = ArgSz > 8 ? 8 : 0;
    break;
  // The Cooperative Matrix Operands argument is optional and is a literal.
  case SPIRV::OpCooperativeMatrixMulAddKHR:
    LiteralIdx = ArgSz > 3 ? 3 : 0;
    break;
  }

  if (Opcode == SPIRV::OpCooperativeMatrixPrefetchINTEL) {

    auto MIB = MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixPrefetchINTEL)
                   .addUse(Call->Arguments[0])
                   .addUse(Call->Arguments[1])
                   .addUse(Call->Arguments[2])

                   .addUse(Call->Arguments[4]);
    if (ArgSz > 5)
      MIB.addUse(Call->Arguments[5]);
    return true;
  }

  if (Opcode == SPIRV::OpCooperativeMatrixLengthKHR) {

    MIRBuilder.buildInstr(SPIRV::OpCooperativeMatrixLengthKHR)
        .addDef(Call->ReturnRegister)

  return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                            IsSet ? TypeReg : Register(0), ImmArgs);
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  switch (Opcode) {
  case SPIRV::OpSpecConstant: {
    // Build the SpecId decoration.
    buildOpDecorate(Call->ReturnRegister, MIRBuilder, SPIRV::Decoration::SpecId,

    // Determine the constant MI.
    Register ConstRegister = Call->Arguments[1];
    const MachineInstr *Const = getDefInstrMaybeConstant(ConstRegister, MRI);
    assert(Const &&
           (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
            Const->getOpcode() == TargetOpcode::G_FCONSTANT) &&
           "Argument should be either an int or floating-point constant");

    // Determine the opcode and build the OpSpecConstant* MI.
    const MachineOperand &ConstOperand = Const->getOperand(1);
    if (Call->ReturnType->getOpcode() == SPIRV::OpTypeBool) {
      assert(ConstOperand.isCImm() && "Int constant operand is expected");
      Opcode = ConstOperand.getCImm()->getValue().getZExtValue()
                   ? SPIRV::OpSpecConstantTrue
                   : SPIRV::OpSpecConstantFalse;
    }
    auto MIB = MIRBuilder.buildInstr(Opcode)
                   .addDef(Call->ReturnRegister)

    if (Call->ReturnType->getOpcode() != SPIRV::OpTypeBool) {
      if (Const->getOpcode() == TargetOpcode::G_CONSTANT)
        addNumImm(ConstOperand.getCImm()->getValue(), MIB);
    }
    return true;
  }
  case SPIRV::OpSpecConstantComposite: {
    auto MIB = MIRBuilder.buildInstr(Opcode)
                   .addDef(Call->ReturnRegister)

    for (unsigned i = 0; i < Call->Arguments.size(); i++)
      MIB.addUse(Call->Arguments[i]);
    return true;
  }
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);

  unsigned NumArgs = Call->Arguments.size();

  Register GlobalWorkSize = Call->Arguments[NumArgs < 4 ? 1 : 2];
  Register LocalWorkSize =
      NumArgs == 2 ? Register(0) : Call->Arguments[NumArgs < 4 ? 2 : 3];
  Register GlobalWorkOffset = NumArgs <= 3 ? Register(0) : Call->Arguments[1];

  if (SpvTy->getOpcode() == SPIRV::OpTypePointer) {
    // A struct NDRange was passed by pointer: load its fields instead.
    unsigned Size = Call->Builtin->Name == "ndrange_3D" ? 3 : 2;

    GlobalWorkSize = MRI->createVirtualRegister(&SPIRV::iIDRegClass);

        SpvFieldTy, *ST.getInstrInfo());
  }

  if (!LocalWorkSize.isValid())
    LocalWorkSize = Const;
  if (!GlobalWorkOffset.isValid())
    GlobalWorkOffset = Const;

  MIRBuilder.buildInstr(SPIRV::OpBuildNDRange)
      .addDef(TmpReg)
      .addUse(GlobalWorkSize)
      .addUse(LocalWorkSize)
      .addUse(GlobalWorkOffset);

  return MIRBuilder.buildInstr(SPIRV::OpStore)
      .addUse(Call->Arguments[0])
      .addUse(TmpReg);
  bool IsSpirvOp = Call->isSpirvOp();
  bool HasEvents = Call->Builtin->Name.contains("events") || IsSpirvOp;

  // If a local-size array is passed, extract the individual sizes from it.
  if (Call->Builtin->Name.contains("_varargs") || IsSpirvOp) {
    const unsigned LocalSizeArrayIdx = HasEvents ? 9 : 6;
    Register GepReg = Call->Arguments[LocalSizeArrayIdx];

    assert(LocalSizeTy && "Local size type is expected");
    const uint64_t LocalSizeNum =
        cast<ArrayType>(LocalSizeTy)->getNumElements();

    SPIRVType *PtrType = GR->getOrCreateSPIRVPointerType(
        Int32Ty, MIRBuilder, SPIRV::StorageClass::Function);
    for (unsigned I = 0; I < LocalSizeNum; ++I) {

      MRI->setType(Reg, LLType);
    }
  }

  auto MIB = MIRBuilder.buildInstr(SPIRV::OpEnqueueKernel)
                 .addDef(Call->ReturnRegister)

  // Copy all arguments before the block function pointer.
  const unsigned BlockFIdx = HasEvents ? 6 : 3;
  for (unsigned i = 0; i < BlockFIdx; i++)
    MIB.addUse(Call->Arguments[i]);

  // Without event arguments, add null-pointer placeholders for the wait
  // events and the return event.
  MIB.addUse(NullPtr);
  MIB.addUse(NullPtr);

  Register BlockLiteralReg = Call->Arguments[BlockFIdx + 1];

  MIB.addUse(BlockLiteralReg);

  for (unsigned i = 0; i < LocalSizes.size(); i++)
    MIB.addUse(LocalSizes[i]);
  return true;
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  switch (Opcode) {
  case SPIRV::OpRetainEvent:
  case SPIRV::OpReleaseEvent:
    return MIRBuilder.buildInstr(Opcode).addUse(Call->Arguments[0]);
  case SPIRV::OpCreateUserEvent:
  case SPIRV::OpGetDefaultQueue:
    return MIRBuilder.buildInstr(Opcode)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType));
  case SPIRV::OpIsValidEvent:
    return MIRBuilder.buildInstr(Opcode)
        .addDef(Call->ReturnRegister)
        .addUse(GR->getSPIRVTypeID(Call->ReturnType))
        .addUse(Call->Arguments[0]);
  case SPIRV::OpSetUserEventStatus:
    return MIRBuilder.buildInstr(Opcode)
        .addUse(Call->Arguments[0])
        .addUse(Call->Arguments[1]);
  case SPIRV::OpCaptureEventProfilingInfo:
    return MIRBuilder.buildInstr(Opcode)
        .addUse(Call->Arguments[0])
        .addUse(Call->Arguments[1])
        .addUse(Call->Arguments[2]);
  case SPIRV::OpBuildNDRange:
    return buildNDRange(Call, MIRBuilder, GR);
  case SPIRV::OpEnqueueKernel:
    return buildEnqueueKernel(Call, MIRBuilder, GR);
  default:
    return false;
  }
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;

  bool IsSet = Opcode == SPIRV::OpGroupAsyncCopy;

  if (Call->isSpirvOp())
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              IsSet ? TypeReg : Register(0));

  switch (Opcode) {
  case SPIRV::OpGroupAsyncCopy: {
    SPIRVType *NewType =
        Call->ReturnType->getOpcode() == SPIRV::OpTypeEvent
            ? nullptr
            : GR->getOrCreateSPIRVTypeByName("spirv.Event", MIRBuilder);
    unsigned NumArgs = Call->Arguments.size();
    Register EventReg = Call->Arguments[NumArgs - 1];
    bool Res = MIRBuilder.buildInstr(Opcode)
                   .addDef(Call->ReturnRegister)
                   .addUse(Call->Arguments[0])
                   .addUse(Call->Arguments[1])
                   .addUse(Call->Arguments[2])
                   .addUse(Call->Arguments.size() > 4
                               ? Call->Arguments[3]
                               : buildConstantIntReg32(1, MIRBuilder, GR))
                   .addUse(EventReg);
    if (NewType != nullptr)
      insertAssignInstr(Call->ReturnRegister, nullptr, NewType, GR, MIRBuilder,
                        MIRBuilder.getMF().getRegInfo());
    return Res;
  }
  case SPIRV::OpGroupWaitEvents:
    return MIRBuilder.buildInstr(Opcode)
        .addUse(Call->Arguments[0])
        .addUse(Call->Arguments[1]);
  default:
    return false;
  }
  // Lookup the conversion builtin in the TableGen records.
  const SPIRV::ConvertBuiltin *Builtin =
      SPIRV::lookupConvertBuiltin(Call->Builtin->Name, Call->Builtin->Set);

  if (!Builtin && Call->isSpirvOp()) {
    const SPIRV::DemangledBuiltin *Builtin = Call->Builtin;
    unsigned Opcode =
        SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
    return buildOpFromWrapper(MIRBuilder, Opcode, Call,
                              GR->getSPIRVTypeID(Call->ReturnType));
  }

  if (Builtin->IsSaturated)
    buildOpDecorate(Call->ReturnRegister, MIRBuilder,
                    SPIRV::Decoration::SaturatedConversion, {});
  if (Builtin->IsRounded)
    buildOpDecorate(Call->ReturnRegister, MIRBuilder,
                    SPIRV::Decoration::FPRoundingMode,
                    {(unsigned)Builtin->RoundingMode});

  std::string NeedExtMsg; // no errors if empty
  bool IsRightComponentsNumber = true;
  unsigned Opcode = SPIRV::OpNop;
  if (GR->isScalarOrVectorOfType(Call->Arguments[0], SPIRV::OpTypeInt)) {
    // Int -> ...
    if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
      // Int -> Int
      if (Builtin->IsSaturated)
        Opcode = Builtin->IsDestinationSigned ? SPIRV::OpSatConvertUToS
                                              : SPIRV::OpSatConvertSToU;
      else
        Opcode = Builtin->IsDestinationSigned ? SPIRV::OpUConvert
                                              : SPIRV::OpSConvert;
    } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
                                          SPIRV::OpTypeFloat)) {
      // Int -> Float
      if (Builtin->IsBfloat16) {
        if (!ST->canUseExtension(
                SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
          NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
        IsRightComponentsNumber =
            GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
            GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
        Opcode = SPIRV::OpConvertBF16ToFINTEL;
      } else {
        bool IsSourceSigned =
            DemangledCall[DemangledCall.find_first_of('(') + 1] != 'u';
        Opcode = IsSourceSigned ? SPIRV::OpConvertSToF : SPIRV::OpConvertUToF;
      }
    }
  } else if (GR->isScalarOrVectorOfType(Call->Arguments[0],
                                        SPIRV::OpTypeFloat)) {
    // Float -> ...
    if (GR->isScalarOrVectorOfType(Call->ReturnRegister, SPIRV::OpTypeInt)) {
      // Float -> Int
      if (Builtin->IsBfloat16) {
        if (!ST->canUseExtension(
                SPIRV::Extension::SPV_INTEL_bfloat16_conversion))
          NeedExtMsg = "SPV_INTEL_bfloat16_conversion";
        IsRightComponentsNumber =
            GR->getScalarOrVectorComponentCount(Call->Arguments[0]) ==
            GR->getScalarOrVectorComponentCount(Call->ReturnRegister);
        Opcode = SPIRV::OpConvertFToBF16INTEL;
      } else {
        Opcode = Builtin->IsDestinationSigned ? SPIRV::OpConvertFToS
                                              : SPIRV::OpConvertFToU;
      }
    } else if (GR->isScalarOrVectorOfType(Call->ReturnRegister,
                                          SPIRV::OpTypeFloat)) {
      // Float -> Float
      Opcode = SPIRV::OpFConvert;
    }
  }

  if (!NeedExtMsg.empty()) {
    std::string DiagMsg = std::string(Builtin->Name) +
                          ": the builtin requires the following SPIR-V "
                          "extension: " + NeedExtMsg;
    report_fatal_error(DiagMsg.c_str(), false);
  }
  if (!IsRightComponentsNumber) {
    std::string DiagMsg =
        std::string(Builtin->Name) +
        ": result and argument must have the same number of components";
    report_fatal_error(DiagMsg.c_str(), false);
  }
  assert(Opcode != SPIRV::OpNop &&
         "Conversion between the types not implemented!");

  MIRBuilder.buildInstr(Opcode)
      .addDef(Call->ReturnRegister)
      .addUse(GR->getSPIRVTypeID(Call->ReturnType))
      .addUse(Call->Arguments[0]);
  return true;
  const SPIRV::VectorLoadStoreBuiltin *Builtin =
      SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
                                          Call->Builtin->Set);
  // Build the extended instruction.
  auto MIB =
      MIRBuilder.buildInstr(SPIRV::OpExtInst)
          .addDef(Call->ReturnRegister)
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::OpenCL_std))
          .addImm(Builtin->Number);
  for (auto Argument : Call->Arguments)
    MIB.addUse(Argument);
  unsigned Opcode =
      SPIRV::lookupNativeBuiltin(Builtin->Name, Builtin->Set)->Opcode;
  bool IsLoad = Opcode == SPIRV::OpLoad;
  // Build the instruction.
  auto MIB = MIRBuilder.buildInstr(Opcode);
  if (IsLoad)
    MIB.addDef(Call->ReturnRegister);
  // Add a pointer to the value to load/store.
  MIB.addUse(Call->Arguments[0]);
  // Add a value to store.
  if (!IsLoad)
    MIB.addUse(Call->Arguments[1]);
  // Add optional memory attributes.
  unsigned NumArgs = Call->Arguments.size();
  if ((IsLoad && NumArgs >= 2) || NumArgs >= 3)

  if ((IsLoad && NumArgs >= 3) || NumArgs >= 4)

  return true;
std::tuple<int, unsigned, unsigned>
mapBuiltinToOpcode(const StringRef DemangledCall,
                   SPIRV::InstructionSet::InstructionSet Set) {
  Register Reg;
  SmallVector<Register> Args;
  std::unique_ptr<const IncomingCall> Call =
      lookupBuiltin(DemangledCall, Set, Reg, nullptr, Args);
  if (!Call)
    return std::make_tuple(-1, 0, 0);

  switch (Call->Builtin->Group) {
  case SPIRV::Relational:
  case SPIRV::Atomic:
  case SPIRV::Barrier:
  case SPIRV::CastToPtr:
  case SPIRV::ImageMiscQuery:
  case SPIRV::SpecConstant:
  case SPIRV::Enqueue:
  case SPIRV::AsyncCopy:
  case SPIRV::LoadStore:
  case SPIRV::CoopMatr:
    if (const auto *R =
            SPIRV::lookupNativeBuiltin(Call->Builtin->Name, Call->Builtin->Set))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::Extended:
    if (const auto *R = SPIRV::lookupExtendedBuiltin(Call->Builtin->Name,
                                                     Call->Builtin->Set))
      return std::make_tuple(Call->Builtin->Group, 0, R->Number);
    break;
  case SPIRV::VectorLoadStore:
    if (const auto *R = SPIRV::lookupVectorLoadStoreBuiltin(Call->Builtin->Name,
                                                            Call->Builtin->Set))
      return std::make_tuple(SPIRV::Extended, 0, R->Number);
    break;
  case SPIRV::Group:
    if (const auto *R = SPIRV::lookupGroupBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::AtomicFloating:
    if (const auto *R = SPIRV::lookupAtomicFloatingBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::IntelSubgroups:
    if (const auto *R = SPIRV::lookupIntelSubgroupsBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::GroupUniform:
    if (const auto *R = SPIRV::lookupGroupUniformBuiltin(Call->Builtin->Name))
      return std::make_tuple(Call->Builtin->Group, R->Opcode, 0);
    break;
  case SPIRV::WriteImage:
    return std::make_tuple(Call->Builtin->Group, SPIRV::OpImageWrite, 0);
  case SPIRV::Select:
    return std::make_tuple(Call->Builtin->Group, TargetOpcode::G_SELECT, 0);
  case SPIRV::Construct:
    return std::make_tuple(Call->Builtin->Group, SPIRV::OpCompositeConstruct,
                           0);
  case SPIRV::KernelClock:
    return std::make_tuple(Call->Builtin->Group, SPIRV::OpReadClockKHR, 0);
  default:
    return std::make_tuple(-1, 0, 0);
  }
  return std::make_tuple(-1, 0, 0);
}
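// Callers of mapBuiltinToOpcode() receive (-1, 0, 0) when the demangled name
// does not resolve to a known builtin; otherwise the tuple carries the
// builtin group plus either a native opcode or an extended-instruction-set
// number.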
std::optional<bool> lowerBuiltin(const StringRef DemangledCall,
                                 SPIRV::InstructionSet::InstructionSet Set,
                                 MachineIRBuilder &MIRBuilder,
                                 const Register OrigRet, const Type *OrigRetTy,
                                 const SmallVectorImpl<Register> &Args,
                                 SPIRVGlobalRegistry *GR) {
  LLVM_DEBUG(dbgs() << "Lowering builtin call: " << DemangledCall << "\n");

  SPIRVType *SpvType = GR->getSPIRVTypeForVReg(OrigRet);
  assert(SpvType && "Inconsistent return register: expected valid type info");
  std::unique_ptr<const IncomingCall> Call =
      lookupBuiltin(DemangledCall, Set, OrigRet, SpvType, Args);

  if (!Call)
    return std::nullopt;

  // TODO: check if the provided args meet the builtin requirements.
  assert(Args.size() >= Call->Builtin->MinNumArgs &&
         "Too few arguments to generate the builtin");
  if (Call->Builtin->MaxNumArgs && Args.size() > Call->Builtin->MaxNumArgs)
    LLVM_DEBUG(dbgs() << "More arguments provided than required!\n");

  // Match the builtin with its implementation based on the grouping.
  switch (Call->Builtin->Group) {
  case SPIRV::Extended:
    return generateExtInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Relational:
    return generateRelationalInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Variable:
    return generateBuiltinVar(Call.get(), MIRBuilder, GR);
  case SPIRV::AtomicFloating:
    return generateAtomicFloatingInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Barrier:
    return generateBarrierInst(Call.get(), MIRBuilder, GR);
  case SPIRV::CastToPtr:
    return generateCastToPtrInst(Call.get(), MIRBuilder);
  case SPIRV::ICarryBorrow:
    return generateICarryBorrowInst(Call.get(), MIRBuilder, GR);
  case SPIRV::GetQuery:
    return generateGetQueryInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ImageSizeQuery:
    return generateImageSizeQueryInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ImageMiscQuery:
    return generateImageMiscQueryInst(Call.get(), MIRBuilder, GR);
  case SPIRV::ReadImage:
    return generateReadImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::WriteImage:
    return generateWriteImageInst(Call.get(), MIRBuilder, GR);
  case SPIRV::SampleImage:
    return generateSampleImageInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::Construct:
    return generateConstructInst(Call.get(), MIRBuilder, GR);
  case SPIRV::SpecConstant:
    return generateSpecConstantInst(Call.get(), MIRBuilder, GR);
  case SPIRV::Enqueue:
    return generateEnqueueInst(Call.get(), MIRBuilder, GR);
  case SPIRV::AsyncCopy:
    return generateAsyncCopy(Call.get(), MIRBuilder, GR);
  case SPIRV::Convert:
    return generateConvertInst(DemangledCall, Call.get(), MIRBuilder, GR);
  case SPIRV::VectorLoadStore:
    return generateVectorLoadStoreInst(Call.get(), MIRBuilder, GR);
  case SPIRV::LoadStore:
    return generateLoadStoreInst(Call.get(), MIRBuilder, GR);
  case SPIRV::IntelSubgroups:
    return generateIntelSubgroupsInst(Call.get(), MIRBuilder, GR);
  case SPIRV::GroupUniform:
    return generateGroupUniformInst(Call.get(), MIRBuilder, GR);
  case SPIRV::KernelClock:
    return generateKernelClockInst(Call.get(), MIRBuilder, GR);
  case SPIRV::CoopMatr:
    return generateCoopMatrInst(Call.get(), MIRBuilder, GR);
  }
  return false;
}
  [[maybe_unused]] bool IsOCLBuiltinType = TypeStr.consume_front("ocl_");
  assert(IsOCLBuiltinType && "Invalid OpenCL builtin prefix");

  unsigned VecElts = 0;

  TypeStr = TypeStr.substr(0, TypeStr.find(']'));

// From parseBuiltinTypeStr: split the call skeleton "name(arg1, arg2, ...)"
// into its argument type strings.
  auto Pos1 = DemangledCall.find('(');
  if (Pos1 == StringRef::npos)
    return false;
  auto Pos2 = DemangledCall.find(')');
  if (Pos2 == StringRef::npos || Pos1 > Pos2)
    return false;
  DemangledCall.slice(Pos1 + 1, Pos2)
      .split(BuiltinArgsTypeStrs, ',', -1, false);
  return true;

// From parseBuiltinCallArgumentBaseType:
  if (ArgIdx >= BuiltinArgsTypeStrs.size())
    return nullptr;
  StringRef TypeStr = BuiltinArgsTypeStrs[ArgIdx].trim();
#define GET_BuiltinTypes_DECL
#define GET_BuiltinTypes_IMPL

#define GET_OpenCLTypes_DECL
#define GET_OpenCLTypes_IMPL

#include "SPIRVGenTables.inc"
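// As with the builtin-function tables earlier in this file, these pull the
// TableGen-described builtin type records (SPIR-V target extension types and
// their OpenCL aliases) out of SPIRVGenTables.inc.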
  if (Name.starts_with("void"))
    return Type::getVoidTy(Ctx);
  else if (Name.starts_with("int") || Name.starts_with("uint"))
    return Type::getInt32Ty(Ctx);
  else if (Name.starts_with("float"))
    return Type::getFloatTy(Ctx);
  else if (Name.starts_with("half"))
    return Type::getHalfTy(Ctx);

  unsigned Opcode = TypeRecord->Opcode;
2815 "Invalid number of parameters for SPIR-V pipe builtin!");
2818 SPIRV::AccessQualifier::AccessQualifier(
2826 "Invalid number of parameters for SPIR-V coop matrices builtin!");
2828 "SPIR-V coop matrices builtin type must have a type parameter!");
2833 MIRBuilder, ExtensionType, ElemType, ExtensionType->
getIntParameter(0),
2840 const SPIRV::AccessQualifier::AccessQualifier Qualifier,
2843 "SPIR-V image builtin type must have sampled type parameter!");
2848 "Invalid number of parameters for SPIR-V image builtin!");
2850 SPIRV::AccessQualifier::AccessQualifier accessQualifier =
2851 SPIRV::AccessQualifier::None;
2853 accessQualifier = Qualifier == SPIRV::AccessQualifier::WriteOnly
2854 ? SPIRV::AccessQualifier::WriteOnly
2855 : SPIRV::AccessQualifier::AccessQualifier(
2861 MIRBuilder, SampledType,
2873 OpaqueType, SPIRV::AccessQualifier::ReadOnly, MIRBuilder, GR);
TargetExtType *parseBuiltinTypeNameToTargetExtType(std::string TypeName,
                                                   LLVMContext &Context) {
  StringRef NameWithParameters = TypeName;

  // Try to resolve an OpenCL alias for the builtin type name first.
  const SPIRV::OpenCLType *OCLTypeRecord =
      SPIRV::lookupOpenCLType(NameWithParameters);

      NameWithParameters);

         "Unknown builtin opaque type!");

  // A name without parameters maps directly to a target extension type.
  if (!NameWithParameters.contains('_'))

  // Parse the base type name and the parameters following the first '_'.
  SmallVector<StringRef> Parameters;
  unsigned BaseNameLength = NameWithParameters.find('_') - 1;
  SplitString(NameWithParameters.substr(BaseNameLength + 1), Parameters, "_");

  // A non-digit first parameter denotes a type parameter.
  SmallVector<Type *, 1> TypeParameters;
  bool HasTypeParameter = !isDigit(Parameters[0][0]);
  if (HasTypeParameter)

  SmallVector<unsigned> IntParameters;
  for (unsigned i = HasTypeParameter ? 1 : 0; i < Parameters.size(); i++) {
    unsigned IntParameter = 0;
    bool ValidLiteral = !Parameters[i].getAsInteger(10, IntParameter);
    assert(ValidLiteral &&
           "Invalid format of SPIR-V builtin parameter literal!");
    IntParameters.push_back(IntParameter);
  }
  return TargetExtType::get(Context,
                            NameWithParameters.substr(0, BaseNameLength),
                            TypeParameters, IntParameters);
SPIRVType *lowerBuiltinType(const Type *OpaqueType,
                            SPIRV::AccessQualifier::AccessQualifier AccessQual,
                            MachineIRBuilder &MIRBuilder,
                            SPIRVGlobalRegistry *GR) {

  switch (TypeRecord->Opcode) {
  case SPIRV::OpTypeImage:
    TargetType = getImageType(BuiltinType, AccessQual, MIRBuilder, GR);
    break;
  case SPIRV::OpTypePipe:
    TargetType = getPipeType(BuiltinType, MIRBuilder, GR);
    break;
  case SPIRV::OpTypeDeviceEvent:
    TargetType = GR->getOrCreateOpTypeDeviceEvent(MIRBuilder);
    break;
  case SPIRV::OpTypeSampler:
    TargetType = getSamplerType(MIRBuilder, GR);
    break;
  case SPIRV::OpTypeSampledImage:
    TargetType = getSampledImageType(BuiltinType, MIRBuilder, GR);
    break;
  case SPIRV::OpTypeCooperativeMatrixKHR:
    TargetType = getCoopMatrType(BuiltinType, MIRBuilder, GR);
    break;
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
AMDGPU Lower Kernel Arguments
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static bool isDigit(const char C)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
APInt bitcastToAPInt() const
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
This class represents an incoming formal argument to a Function.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
static ArrayType * get(Type *ElementType, uint64_t NumElements)
This static method is the primary way to construct an ArrayType.
@ ICMP_ULT
unsigned less than
const APFloat & getValueAPF() const
const APInt & getValue() const
Return the constant as an APInt value reference.
A parsed version of the target data layout string in and methods for querying it.
Tagged union holding either a T or a Error.
Class to represent fixed width SIMD vectors.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
static constexpr LLT vector(ElementCount EC, unsigned ScalarSizeInBits)
Get a low-level vector of some number of elements and element width.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
This is an important class for using LLVM in a threaded context.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
LLVMContext & getContext() const
MachineInstrBuilder buildSelect(const DstOp &Res, const SrcOp &Tst, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_SELECT Tst, Op0, Op1.
MachineInstrBuilder buildICmp(CmpInst::Predicate Pred, const DstOp &Res, const SrcOp &Op0, const SrcOp &Op1, std::optional< unsigned > Flags=std::nullopt)
Build and insert a Res = G_ICMP Pred, Op0, Op1.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildIntrinsic(Intrinsic::ID ID, ArrayRef< Register > Res, bool HasSideEffects, bool isConvergent)
Build and insert a G_INTRINSIC instruction.
MachineInstrBuilder buildLoad(const DstOp &Res, const SrcOp &Addr, MachineMemOperand &MMO)
Build and insert Res = G_LOAD Addr, MMO.
MachineInstrBuilder buildZExtOrTrunc(const DstOp &Res, const SrcOp &Op)
Build and insert Res = G_ZEXT Op, Res = G_TRUNC Op, or Res = COPY Op depending on the differing sizes...
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
MachineInstrBuilder buildCopy(const DstOp &Res, const SrcOp &Op)
Build and insert Res = COPY Op.
const DataLayout & getDataLayout() const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
const ConstantInt * getCImm() const
bool isCImm() const
isCImm - Test if this is a MO_CImmediate operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
const MDNode * getMetadata() const
Register getReg() const
getReg - Returns the register number.
const ConstantFP * getFPImm() const
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
void setRegClass(Register Reg, const TargetRegisterClass *RC)
setRegClass - Set the register class of the specified virtual register.
Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
unsigned getNumVirtRegs() const
getNumVirtRegs - Return the number of virtual registers created.
static PointerType * get(Type *ElementType, unsigned AddressSpace)
This constructs a pointer to an object of the specified type in a numbered address space.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SPIRVType * getOrCreateOpTypePipe(MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AccQual)
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, const MachineFunction &MF)
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder)
Register getOrCreateConsIntVector(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR=true)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
Register buildConstantSampler(Register Res, unsigned AddrMode, unsigned Param, unsigned FilerMode, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorComponentCount(Register VReg) const
SPIRVType * getOrCreateOpTypeImage(MachineIRBuilder &MIRBuilder, SPIRVType *SampledType, SPIRV::Dim::Dim Dim, uint32_t Depth, uint32_t Arrayed, uint32_t Multisampled, uint32_t Sampled, SPIRV::ImageFormat::ImageFormat ImageFormat, SPIRV::AccessQualifier::AccessQualifier AccQual)
unsigned getPointerSize() const
SPIRVType * getOrCreateOpTypeByOpcode(const Type *Ty, MachineIRBuilder &MIRBuilder, unsigned Opcode)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
SPIRVType * getPointeeType(SPIRVType *PtrType)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineIRBuilder &MIRBuilder, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite, bool EmitIR=true)
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, bool HasLinkageTy, SPIRV::LinkageType::LinkageType LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
SPIRVType * getOrCreateOpTypeSampledImage(SPIRVType *ImageType, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVTypeByName(StringRef TypeStr, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC=SPIRV::StorageClass::Function, SPIRV::AccessQualifier::AccessQualifier AQ=SPIRV::AccessQualifier::ReadWrite)
const TargetRegisterClass * getRegClass(SPIRVType *SpvType) const
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRVType * getOrCreateOpTypeDeviceEvent(MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVPointerType(SPIRVType *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SClass=SPIRV::StorageClass::Function)
SPIRVType * getOrCreateOpTypeCoopMatr(MachineIRBuilder &MIRBuilder, const TargetExtType *ExtensionType, const SPIRVType *ElemType, uint32_t Scope, uint32_t Rows, uint32_t Columns, uint32_t Use)
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
LLT getRegType(SPIRVType *SpvType) const
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
SPIRVType * getOrCreateOpTypeSampler(MachineIRBuilder &MIRBuilder)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
Register buildConstantInt(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType, bool EmitIR=true, bool ZeroAsNull=true)
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
bool consume_back(StringRef Suffix)
Returns true if this StringRef has the given suffix and removes that suffix.
bool getAsInteger(unsigned Radix, T &Result) const
Parse the current string as an integer of the specified radix.
std::string str() const
str - Get the contents as an std::string.
constexpr StringRef substr(size_t Start, size_t N=npos) const
Return a reference to the substring from [Start, Start + N).
bool starts_with(StringRef Prefix) const
Check if this string starts with the given Prefix.
bool contains_insensitive(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
StringRef slice(size_t Start, size_t End) const
Return a reference to the substring from [Start, End).
bool contains(StringRef Other) const
Return true if the given string is a substring of *this, and false otherwise.
bool consume_front(StringRef Prefix)
Returns true if this StringRef has the given prefix and removes that prefix.
size_t find_first_of(char C, size_t From=0) const
Find the first character in the string that is C, or npos if not found.
size_t rfind(char C, size_t From=npos) const
Search for the last character C in the string.
size_t find(char C, size_t From=0) const
Search for the first character C in the string.
bool ends_with(StringRef Suffix) const
Check if this string ends with the given Suffix.
static constexpr size_t npos
A switch()-like statement whose cases are string literals.
StringSwitch & EndsWith(StringLiteral S, T Value)
Class to represent struct types.
static StructType * getTypeByName(LLVMContext &C, StringRef Name)
Return the type with the specified name, or null if there is none by that name.
static StructType * create(LLVMContext &Context, StringRef Name)
This creates an identified struct.
Class to represent target extensions types, which are generally unintrospectable from target-independ...
unsigned getNumIntParameters() const
static TargetExtType * get(LLVMContext &Context, StringRef Name, ArrayRef< Type * > Types={}, ArrayRef< unsigned > Ints={})
Return a target extension type having the specified name and optional type and integer parameters.
Type * getTypeParameter(unsigned i) const
unsigned getNumTypeParameters() const
unsigned getIntParameter(unsigned i) const
The instances of the Type class are immutable: once they are created, they are never changed.
static Type * getHalfTy(LLVMContext &C)
StringRef getStructName() const
static Type * getVoidTy(LLVMContext &C)
static IntegerType * getInt8Ty(LLVMContext &C)
static IntegerType * getInt32Ty(LLVMContext &C)
static Type * getFloatTy(LLVMContext &C)
LLVM Value Representation.
Value(Type *Ty, unsigned scid)
static VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
LLVMTypeRef LLVMVectorType(LLVMTypeRef ElementType, unsigned ElementCount)
Create a vector type that contains a defined type and has a specific number of elements.
std::string lookupBuiltinNameHelper(StringRef DemangledCall, FPDecorationId *DecorationId)
Parses the name part of the demangled builtin call.
Type * parseBuiltinCallArgumentType(StringRef TypeStr, LLVMContext &Ctx)
bool parseBuiltinTypeStr(SmallVector< StringRef, 10 > &BuiltinArgsTypeStrs, const StringRef DemangledCall, LLVMContext &Ctx)
std::tuple< int, unsigned, unsigned > mapBuiltinToOpcode(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set)
Helper function for finding a builtin function attributes by a demangled function name.
Type * parseBuiltinCallArgumentBaseType(const StringRef DemangledCall, unsigned ArgIdx, LLVMContext &Ctx)
Parses the provided ArgIdx argument base type in the DemangledCall skeleton.
TargetExtType * parseBuiltinTypeNameToTargetExtType(std::string TypeName, LLVMContext &Context)
Translates a string representing a SPIR-V or OpenCL builtin type to a TargetExtType that can be furth...
std::optional< bool > lowerBuiltin(const StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, MachineIRBuilder &MIRBuilder, const Register OrigRet, const Type *OrigRetTy, const SmallVectorImpl< Register > &Args, SPIRVGlobalRegistry *GR)
SPIRVType * lowerBuiltinType(const Type *OpaqueType, SPIRV::AccessQualifier::AccessQualifier AccessQual, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
static bool generateGetQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConstructInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicFlagInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic flag instructions (e.g.
static Register buildBuiltinVariableLoad(MachineIRBuilder &MIRBuilder, SPIRVType *VariableType, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, LLT LLType, Register Reg=Register(0), bool isConst=true, bool hasLinkageTy=true)
Helper function for building a load instruction for loading a builtin global variable of BuiltinValue...
static bool generateImageSizeQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::SamplerFilterMode::SamplerFilterMode getSamplerFilterModeFromBitmask(unsigned Bitmask)
static bool buildAtomicStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic store instruction.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
static const Type * getBlockStructType(Register ParamReg, MachineRegisterInfo *MRI)
static bool generateGroupInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
FPDecorationId demangledPostfixToDecorationId(const std::string &S)
static unsigned getNumComponentsForDim(SPIRV::Dim::Dim dim)
Register insertAssignInstr(Register Reg, Type *Ty, SPIRVType *SpirvTy, SPIRVGlobalRegistry *GR, MachineIRBuilder &MIB, MachineRegisterInfo &MRI)
Helper external function for inserting an ASSIGN_TYPE instruction between Reg and its definition…
static bool generateICarryBorrowInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildScopeReg(Register CLScopeRegister, SPIRV::Scope::Scope Scope, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI)
static std::tuple< Register, SPIRVType * > buildBoolRegister(MachineIRBuilder &MIRBuilder, const SPIRVType *ResultType, SPIRVGlobalRegistry *GR)
Helper function building either a resulting scalar or vector bool register depending on the expected ResultType.
static unsigned getNumSizeComponents(SPIRVType *imgType)
Helper function for obtaining the number of size components.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
static Register buildConstantIntReg32(uint64_t Val, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getSampledImageType(const TargetExtType *OpaqueType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
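Because the function is constexpr, the storage-class/address-space correspondence can be checked at compile time. The values below assume the OpenCL-style numbering (private = 0, global = 1, constant = 2, local = 3, generic = 4):

// Compile-time checks of the assumed mapping.
static_assert(storageClassToAddressSpace(SPIRV::StorageClass::Function) == 0);
static_assert(
    storageClassToAddressSpace(SPIRV::StorageClass::CrossWorkgroup) == 1);
static_assert(
    storageClassToAddressSpace(SPIRV::StorageClass::UniformConstant) == 2);
static_assert(storageClassToAddressSpace(SPIRV::StorageClass::Workgroup) == 3);
static_assert(storageClassToAddressSpace(SPIRV::StorageClass::Generic) == 4);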
static bool generateDotOrFMulInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSampleImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateBarrierInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getCoopMatrType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateKernelClockInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static void setRegClassIfNull(Register Reg, MachineRegisterInfo *MRI, SPIRVGlobalRegistry *GR)
static bool generateGroupUniformInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateWaveInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getImageType(const TargetExtType *ExtensionType, const SPIRV::AccessQualifier::AccessQualifier Qualifier, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
static bool buildBarrierInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building barriers, i.e., memory/control ordering operations.
static bool generateAsyncCopy(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRV::Scope::Scope getSPIRVScope(SPIRV::CLMemoryScope ClScope)
static SPIRVType * getSamplerType(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static Register buildLoadInst(SPIRVType *BaseType, Register PtrRegister, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, LLT LowLevelType, Register DestinationReg=Register(0))
Helper function for building a load instruction loading into the DestinationReg.
static bool generateEnqueueInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
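A sketch of the Error-taking overload; the wrapper and message are illustrative:

#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

void rejectBuiltin() {
  // Consumes the Error, invokes any installed fatal-error handler, and aborts.
  report_fatal_error(
      createStringError(inconvertibleErrorCode(), "unsupported builtin call"));
}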
static bool buildSelectInst(MachineIRBuilder &MIRBuilder, Register ReturnRegister, Register SourceRegister, const SPIRVType *ReturnType, SPIRVGlobalRegistry *GR)
Helper function for building either a vector or scalar select instruction depending on the expected ReturnType.
static const Type * getMachineInstrType(MachineInstr *MI)
static SPIRV::SamplerAddressingMode::SamplerAddressingMode getSamplerAddressingModeFromBitmask(unsigned Bitmask)
static bool generateAtomicInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateConvertInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Register buildMemSemanticsReg(Register SemanticsRegister, Register PtrRegister, unsigned &Semantics, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static unsigned getConstFromIntrinsic(Register Reg, MachineRegisterInfo *MRI)
static bool generateImageMiscQueryInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSelectInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool buildAtomicLoadInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic load instruction.
static bool generateIntelSubgroupsInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateSpecConstantInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getOrCreateSPIRVDeviceEventPointer(MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * parseBasicTypeName(StringRef &TypeName, LLVMContext &Ctx)
static bool generateVectorLoadStoreInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool genWorkgroupQuery(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR, SPIRV::BuiltIn::BuiltIn BuiltinValue, uint64_t DefaultValue)
static bool generateCoopMatrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static std::unique_ptr< const SPIRV::IncomingCall > lookupBuiltin(StringRef DemangledCall, SPIRV::InstructionSet::InstructionSet Set, Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
Looks up the demangled builtin call in the SPIRVBuiltins.td records using the provided DemangledCall and specified Set.
static bool buildAtomicFloatingRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic floating-type instruction.
MachineInstr * getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI)
constexpr unsigned BitWidth
const MachineInstr SPIRVType
static bool generateReadImageInst(const StringRef DemangledCall, const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
bool hasBuiltinTypePrefix(StringRef Name)
static bool buildEnqueueKernel(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Type * getMDOperandAsType(const MDNode *N, unsigned I)
static bool buildAtomicRMWInst(const SPIRV::IncomingCall *Call, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building atomic instructions.
static SPIRV::MemorySemantics::MemorySemantics getSPIRVMemSemantics(std::memory_order MemOrder)
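This helper is file-local, so the snippet below merely restates the memory-order mapping it is expected to perform; it is an illustration, not the actual implementation:

#include <atomic>

SPIRV::MemorySemantics::MemorySemantics
toSPIRVSemantics(std::memory_order MO) {
  switch (MO) {
  case std::memory_order_relaxed:
    return SPIRV::MemorySemantics::None; // no ordering constraint
  case std::memory_order_acquire:
    return SPIRV::MemorySemantics::Acquire;
  case std::memory_order_release:
    return SPIRV::MemorySemantics::Release;
  case std::memory_order_acq_rel:
    return SPIRV::MemorySemantics::AcquireRelease;
  default: // seq_cst (and consume, conservatively)
    return SPIRV::MemorySemantics::SequentiallyConsistent;
  }
}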
static bool generateRelationalInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildAtomicInitInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
Helper function for translating atomic init to OpStore.
static bool generateWriteImageInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getPipeType(const TargetExtType *ExtensionType, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static Type * parseTypeString(const StringRef Name, LLVMContext &Context)
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
static bool generateCastToPtrInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder)
static bool generateAtomicFloatingInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool generateExtInst(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static bool buildNDRange(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static SPIRVType * getNonParameterizedType(const TargetExtType *ExtensionType, const SPIRV::BuiltinType *TypeRecord, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static MachineInstr * getBlockStructInstr(Register ParamReg, MachineRegisterInfo *MRI)
static bool buildOpFromWrapper(MachineIRBuilder &MIRBuilder, unsigned Opcode, const SPIRV::IncomingCall *Call, Register TypeReg, ArrayRef< uint32_t > ImmArgs={})
static unsigned getSamplerParamFromBitmask(unsigned Bitmask)
static bool buildAtomicCompareExchangeInst(const SPIRV::IncomingCall *Call, const SPIRV::DemangledBuiltin *Builtin, unsigned Opcode, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
Helper function for building an atomic compare-exchange instruction.
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
static bool generateBuiltinVar(const SPIRV::IncomingCall *Call, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry *GR)
static const fltSemantics & IEEEsingle() LLVM_READNONE
This struct is a compact representation of a valid (non-zero power of two) alignment.
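A minimal sketch of the invariant Align encodes and a typical use:

#include "llvm/Support/Alignment.h"
#include <cstdint>
using namespace llvm;

void alignDemo() {
  Align A(16);                       // must be a non-zero power of two
  uint64_t Bytes = A.value();        // 16
  uint64_t Rounded = alignTo(23, A); // rounds 23 up to the next multiple: 32
  (void)Bytes;
  (void)Rounded;
}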
This class contains a discriminated union of information about pointers in memory operands, relating them back to the original IR or one of the special values being referenced by the instruction.
FPRoundingMode::FPRoundingMode RoundingMode
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
InstructionSet::InstructionSet Set
const SmallVectorImpl< Register > & Arguments
const std::string BuiltinName
const SPIRVType * ReturnType
const Register ReturnRegister
const DemangledBuiltin * Builtin
IncomingCall(const std::string BuiltinName, const DemangledBuiltin *Builtin, const Register ReturnRegister, const SPIRVType *ReturnType, const SmallVectorImpl< Register > &Arguments)
InstructionSet::InstructionSet Set
StringRef SpirvTypeLiteral
InstructionSet::InstructionSet Set
FPRoundingMode::FPRoundingMode RoundingMode