32#include "llvm/IR/IntrinsicsSPIRV.h"
36#define DEBUG_TYPE "spirv-isel"
43 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
48 std::optional<Register> Bias;
49 std::optional<Register>
Offset;
50 std::optional<Register> MinLod;
51 std::optional<Register> GradX;
52 std::optional<Register> GradY;
53 std::optional<Register> Lod;
54 std::optional<Register> Compare;
57llvm::SPIRV::SelectionControl::SelectionControl
58getSelectionOperandForImm(
int Imm) {
60 return SPIRV::SelectionControl::Flatten;
62 return SPIRV::SelectionControl::DontFlatten;
64 return SPIRV::SelectionControl::None;
68#define GET_GLOBALISEL_PREDICATE_BITSET
69#include "SPIRVGenGlobalISel.inc"
70#undef GET_GLOBALISEL_PREDICATE_BITSET
97#define GET_GLOBALISEL_PREDICATES_DECL
98#include "SPIRVGenGlobalISel.inc"
99#undef GET_GLOBALISEL_PREDICATES_DECL
101#define GET_GLOBALISEL_TEMPORARIES_DECL
102#include "SPIRVGenGlobalISel.inc"
103#undef GET_GLOBALISEL_TEMPORARIES_DECL
127 unsigned BitSetOpcode)
const;
131 unsigned BitSetOpcode)
const;
135 unsigned BitSetOpcode,
bool SwapPrimarySide)
const;
139 unsigned BitSetOpcode,
140 bool SwapPrimarySide)
const;
147 unsigned Opcode)
const;
150 unsigned Opcode)
const;
169 unsigned NewOpcode,
unsigned NegateOpcode = 0)
const;
180 unsigned OpType)
const;
229 template <
bool Signed>
232 template <
bool Signed>
239 template <
typename PickOpcodeFn>
242 PickOpcodeFn &&PickOpcode)
const;
253 template <
typename PickOpcodeFn>
256 PickOpcodeFn &&PickOpcode)
const;
272 bool IsSigned,
unsigned Opcode)
const;
274 bool IsSigned)
const;
280 bool IsSigned)
const;
313 GL::GLSLExtInst GLInst,
bool setMIFlags =
true,
314 bool useMISrc =
true,
316 bool selectExtInst(
Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I,
317 CL::OpenCLExtInst CLInst,
bool setMIFlags =
true,
318 bool useMISrc =
true,
320 bool selectExtInst(
Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I,
321 CL::OpenCLExtInst CLInst, GL::GLSLExtInst GLInst,
322 bool setMIFlags =
true,
bool useMISrc =
true,
324 bool selectExtInst(
Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I,
325 const ExtInstList &ExtInsts,
bool setMIFlags =
true,
326 bool useMISrc =
true,
329 bool selectLog10(
Register ResVReg, SPIRVTypeInst ResType,
330 MachineInstr &
I)
const;
332 bool selectSaturate(
Register ResVReg, SPIRVTypeInst ResType,
333 MachineInstr &
I)
const;
335 bool selectWaveOpInst(
Register ResVReg, SPIRVTypeInst ResType,
336 MachineInstr &
I,
unsigned Opcode)
const;
338 bool selectWaveActiveCountBits(
Register ResVReg, SPIRVTypeInst ResType,
339 MachineInstr &
I)
const;
343 bool selectHandleFromBinding(
Register &ResVReg, SPIRVTypeInst ResType,
344 MachineInstr &
I)
const;
346 bool selectCounterHandleFromBinding(
Register &ResVReg, SPIRVTypeInst ResType,
347 MachineInstr &
I)
const;
349 bool selectReadImageIntrinsic(
Register &ResVReg, SPIRVTypeInst ResType,
350 MachineInstr &
I)
const;
351 bool selectSampleBasicIntrinsic(
Register &ResVReg, SPIRVTypeInst ResType,
352 MachineInstr &
I)
const;
353 bool selectSampleBiasIntrinsic(
Register &ResVReg, SPIRVTypeInst ResType,
354 MachineInstr &
I)
const;
355 bool selectSampleGradIntrinsic(
Register &ResVReg, SPIRVTypeInst ResType,
356 MachineInstr &
I)
const;
357 bool selectSampleLevelIntrinsic(
Register &ResVReg, SPIRVTypeInst ResType,
358 MachineInstr &
I)
const;
359 bool selectSampleCmpIntrinsic(
Register &ResVReg, SPIRVTypeInst ResType,
360 MachineInstr &
I)
const;
361 bool selectSampleCmpLevelZeroIntrinsic(
Register &ResVReg,
362 SPIRVTypeInst ResType,
363 MachineInstr &
I)
const;
364 bool selectGatherIntrinsic(
Register &ResVReg, SPIRVTypeInst ResType,
365 MachineInstr &
I)
const;
366 bool selectImageWriteIntrinsic(MachineInstr &
I)
const;
367 bool selectResourceGetPointer(
Register &ResVReg, SPIRVTypeInst ResType,
368 MachineInstr &
I)
const;
369 bool selectPushConstantGetPointer(
Register &ResVReg, SPIRVTypeInst ResType,
370 MachineInstr &
I)
const;
371 bool selectResourceNonUniformIndex(
Register &ResVReg, SPIRVTypeInst ResType,
372 MachineInstr &
I)
const;
373 bool selectModf(
Register ResVReg, SPIRVTypeInst ResType,
374 MachineInstr &
I)
const;
375 bool selectUpdateCounter(
Register &ResVReg, SPIRVTypeInst ResType,
376 MachineInstr &
I)
const;
377 bool selectFrexp(
Register ResVReg, SPIRVTypeInst ResType,
378 MachineInstr &
I)
const;
379 bool selectSincos(
Register ResVReg, SPIRVTypeInst ResType,
380 MachineInstr &
I)
const;
381 bool selectExp10(
Register ResVReg, SPIRVTypeInst ResType,
382 MachineInstr &
I)
const;
383 bool selectDerivativeInst(
Register ResVReg, SPIRVTypeInst ResType,
384 MachineInstr &
I,
const unsigned DPdOpCode)
const;
386 Register buildI32Constant(uint32_t Val, MachineInstr &
I,
387 SPIRVTypeInst ResType =
nullptr)
const;
389 Register buildZerosVal(SPIRVTypeInst ResType, MachineInstr &
I)
const;
390 bool isScalarOrVectorIntConstantZero(
Register Reg)
const;
391 Register buildZerosValF(SPIRVTypeInst ResType, MachineInstr &
I)
const;
393 MachineInstr &
I)
const;
394 Register buildOnesValF(SPIRVTypeInst ResType, MachineInstr &
I)
const;
396 bool wrapIntoSpecConstantOp(MachineInstr &
I,
399 Register getUcharPtrTypeReg(MachineInstr &
I,
400 SPIRV::StorageClass::StorageClass SC)
const;
401 MachineInstrBuilder buildSpecConstantOp(MachineInstr &
I,
Register Dest,
403 uint32_t Opcode)
const;
404 MachineInstrBuilder buildConstGenericPtr(MachineInstr &
I,
Register SrcPtr,
405 SPIRVTypeInst SrcPtrTy)
const;
406 Register buildPointerToResource(SPIRVTypeInst ResType,
407 SPIRV::StorageClass::StorageClass SC,
408 uint32_t Set, uint32_t
Binding,
409 uint32_t ArraySize,
Register IndexReg,
411 MachineIRBuilder MIRBuilder)
const;
412 SPIRVTypeInst widenTypeToVec4(SPIRVTypeInst
Type, MachineInstr &
I)
const;
413 bool extractSubvector(
Register &ResVReg, SPIRVTypeInst ResType,
414 Register &ReadReg, MachineInstr &InsertionPoint)
const;
415 bool generateImageReadOrFetch(
Register &ResVReg, SPIRVTypeInst ResType,
417 DebugLoc Loc, MachineInstr &Pos)
const;
418 bool generateSampleImage(
Register ResVReg, SPIRVTypeInst ResType,
420 Register CoordinateReg,
const ImageOperands &ImOps,
423 bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
424 Register ResVReg, SPIRVTypeInst ResType,
425 MachineInstr &
I)
const;
426 bool loadBuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
427 Register ResVReg, SPIRVTypeInst ResType,
428 MachineInstr &
I)
const;
429 bool loadHandleBeforePosition(
Register &HandleReg, SPIRVTypeInst ResType,
430 GIntrinsic &HandleDef, MachineInstr &Pos)
const;
431 void decorateUsesAsNonUniform(
Register &NonUniformReg)
const;
432 void errorIfInstrOutsideShader(MachineInstr &
I)
const;
435bool sampledTypeIsSignedInteger(
const llvm::Type *HandleType) {
437 if (
TET->getTargetExtName() ==
"spirv.Image") {
440 assert(
TET->getTargetExtName() ==
"spirv.SignedImage");
441 return TET->getTypeParameter(0)->isIntegerTy();
445#define GET_GLOBALISEL_IMPL
446#include "SPIRVGenGlobalISel.inc"
447#undef GET_GLOBALISEL_IMPL
453 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
456#include
"SPIRVGenGlobalISel.inc"
459#include
"SPIRVGenGlobalISel.inc"
471 InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
475void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
476 if (HasVRegsReset == &MF)
481 for (
unsigned I = 0,
E =
MRI.getNumVirtRegs();
I !=
E; ++
I) {
483 LLT RegType =
MRI.getType(
Reg);
491 for (
const auto &
MBB : MF) {
492 for (
const auto &
MI :
MBB) {
495 if (
MI.getOpcode() != SPIRV::ASSIGN_TYPE)
499 LLT DstType =
MRI.getType(DstReg);
501 LLT SrcType =
MRI.getType(SrcReg);
502 if (DstType != SrcType)
503 MRI.setType(DstReg,
MRI.getType(SrcReg));
505 const TargetRegisterClass *DstRC =
MRI.getRegClassOrNull(DstReg);
506 const TargetRegisterClass *SrcRC =
MRI.getRegClassOrNull(SrcReg);
507 if (DstRC != SrcRC && SrcRC)
508 MRI.setRegClass(DstReg, SrcRC);
519 while (!Stack.empty()) {
524 switch (
MI->getOpcode()) {
525 case TargetOpcode::G_INTRINSIC:
526 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
527 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
529 Intrinsic::spv_const_composite)
532 case TargetOpcode::G_BUILD_VECTOR:
533 case TargetOpcode::G_SPLAT_VECTOR:
535 i < OpDef->getNumOperands(); i++) {
540 Stack.push_back(OpNestedDef);
543 case TargetOpcode::G_CONSTANT:
544 case TargetOpcode::G_FCONSTANT:
545 case TargetOpcode::G_IMPLICIT_DEF:
546 case SPIRV::OpConstantTrue:
547 case SPIRV::OpConstantFalse:
548 case SPIRV::OpConstantI:
549 case SPIRV::OpConstantF:
550 case SPIRV::OpConstantComposite:
551 case SPIRV::OpConstantCompositeContinuedINTEL:
552 case SPIRV::OpConstantSampler:
553 case SPIRV::OpConstantNull:
555 case SPIRV::OpConstantFunctionPointerINTEL:
582 case Intrinsic::spv_all:
583 case Intrinsic::spv_alloca:
584 case Intrinsic::spv_any:
585 case Intrinsic::spv_bitcast:
586 case Intrinsic::spv_const_composite:
587 case Intrinsic::spv_cross:
588 case Intrinsic::spv_degrees:
589 case Intrinsic::spv_distance:
590 case Intrinsic::spv_extractelt:
591 case Intrinsic::spv_extractv:
592 case Intrinsic::spv_faceforward:
593 case Intrinsic::spv_fdot:
594 case Intrinsic::spv_firstbitlow:
595 case Intrinsic::spv_firstbitshigh:
596 case Intrinsic::spv_firstbituhigh:
597 case Intrinsic::spv_frac:
598 case Intrinsic::spv_gep:
599 case Intrinsic::spv_global_offset:
600 case Intrinsic::spv_global_size:
601 case Intrinsic::spv_group_id:
602 case Intrinsic::spv_insertelt:
603 case Intrinsic::spv_insertv:
604 case Intrinsic::spv_isinf:
605 case Intrinsic::spv_isnan:
606 case Intrinsic::spv_lerp:
607 case Intrinsic::spv_length:
608 case Intrinsic::spv_normalize:
609 case Intrinsic::spv_num_subgroups:
610 case Intrinsic::spv_num_workgroups:
611 case Intrinsic::spv_ptrcast:
612 case Intrinsic::spv_radians:
613 case Intrinsic::spv_reflect:
614 case Intrinsic::spv_refract:
615 case Intrinsic::spv_resource_getpointer:
616 case Intrinsic::spv_resource_handlefrombinding:
617 case Intrinsic::spv_resource_handlefromimplicitbinding:
618 case Intrinsic::spv_resource_nonuniformindex:
619 case Intrinsic::spv_resource_sample:
620 case Intrinsic::spv_rsqrt:
621 case Intrinsic::spv_saturate:
622 case Intrinsic::spv_sdot:
623 case Intrinsic::spv_sign:
624 case Intrinsic::spv_smoothstep:
625 case Intrinsic::spv_step:
626 case Intrinsic::spv_subgroup_id:
627 case Intrinsic::spv_subgroup_local_invocation_id:
628 case Intrinsic::spv_subgroup_max_size:
629 case Intrinsic::spv_subgroup_size:
630 case Intrinsic::spv_thread_id:
631 case Intrinsic::spv_thread_id_in_group:
632 case Intrinsic::spv_udot:
633 case Intrinsic::spv_undef:
634 case Intrinsic::spv_value_md:
635 case Intrinsic::spv_workgroup_size:
647 case SPIRV::OpTypeVoid:
648 case SPIRV::OpTypeBool:
649 case SPIRV::OpTypeInt:
650 case SPIRV::OpTypeFloat:
651 case SPIRV::OpTypeVector:
652 case SPIRV::OpTypeMatrix:
653 case SPIRV::OpTypeImage:
654 case SPIRV::OpTypeSampler:
655 case SPIRV::OpTypeSampledImage:
656 case SPIRV::OpTypeArray:
657 case SPIRV::OpTypeRuntimeArray:
658 case SPIRV::OpTypeStruct:
659 case SPIRV::OpTypeOpaque:
660 case SPIRV::OpTypePointer:
661 case SPIRV::OpTypeFunction:
662 case SPIRV::OpTypeEvent:
663 case SPIRV::OpTypeDeviceEvent:
664 case SPIRV::OpTypeReserveId:
665 case SPIRV::OpTypeQueue:
666 case SPIRV::OpTypePipe:
667 case SPIRV::OpTypeForwardPointer:
668 case SPIRV::OpTypePipeStorage:
669 case SPIRV::OpTypeNamedBarrier:
670 case SPIRV::OpTypeAccelerationStructureNV:
671 case SPIRV::OpTypeCooperativeMatrixNV:
672 case SPIRV::OpTypeCooperativeMatrixKHR:
682 if (
MI.getNumDefs() == 0)
685 for (
const auto &MO :
MI.all_defs()) {
687 if (
Reg.isPhysical()) {
691 for (
const auto &
UseMI :
MRI.use_nodbg_instructions(
Reg)) {
692 if (
UseMI.getOpcode() != SPIRV::OpName) {
699 if (
MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE ||
MI.isFakeUse() ||
700 MI.isLifetimeMarker()) {
703 <<
"Not dead: Opcode is LOCAL_ESCAPE, fake use, or lifetime marker.\n");
714 if (
MI.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
715 MI.getOpcode() == TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS) {
718 LLVM_DEBUG(
dbgs() <<
"Dead: Intrinsic with no real side effects.\n");
723 if (
MI.mayStore() ||
MI.isCall() ||
724 (
MI.mayLoad() &&
MI.hasOrderedMemoryRef()) ||
MI.isPosition() ||
725 MI.isDebugInstr() ||
MI.isTerminator() ||
MI.isJumpTableDebugInfo()) {
726 LLVM_DEBUG(
dbgs() <<
"Not dead: instruction has side effects.\n");
737 LLVM_DEBUG(
dbgs() <<
"Dead: known opcode with no side effects\n");
744void SPIRVInstructionSelector::removeOpNamesForDeadMI(MachineInstr &
MI)
const {
746 for (
const auto &MO :
MI.all_defs()) {
750 SmallVector<MachineInstr *, 4> UselessOpNames;
751 for (MachineInstr &
UseMI :
MRI->use_nodbg_instructions(
Reg)) {
753 "There is still a use of the dead function.");
756 for (MachineInstr *OpNameMI : UselessOpNames) {
758 OpNameMI->eraseFromParent();
763void SPIRVInstructionSelector::removeDeadInstruction(MachineInstr &
MI)
const {
766 removeOpNamesForDeadMI(
MI);
767 MI.eraseFromParent();
770bool SPIRVInstructionSelector::select(MachineInstr &
I) {
771 resetVRegsType(*
I.getParent()->getParent());
773 assert(
I.getParent() &&
"Instruction should be in a basic block!");
774 assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
779 removeDeadInstruction(
I);
786 if (Opcode == SPIRV::ASSIGN_TYPE) {
787 Register DstReg =
I.getOperand(0).getReg();
788 Register SrcReg =
I.getOperand(1).getReg();
789 auto *
Def =
MRI->getVRegDef(SrcReg);
791 Def->getOpcode() != TargetOpcode::G_CONSTANT &&
792 Def->getOpcode() != TargetOpcode::G_FCONSTANT) {
794 if (
Def->getOpcode() == TargetOpcode::G_SELECT) {
795 Register SelectDstReg =
Def->getOperand(0).getReg();
799 Def->removeFromParent();
800 MRI->replaceRegWith(DstReg, SelectDstReg);
802 I.removeFromParent();
804 Res = selectImpl(
I, *CoverageInfo);
806 if (!Res &&
Def->getOpcode() != TargetOpcode::G_CONSTANT) {
807 dbgs() <<
"Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
811 assert(Res ||
Def->getOpcode() == TargetOpcode::G_CONSTANT);
818 MRI->setRegClass(SrcReg,
MRI->getRegClass(DstReg));
819 MRI->replaceRegWith(SrcReg, DstReg);
821 I.removeFromParent();
823 }
else if (
I.getNumDefs() == 1) {
835 removeDeadInstruction(
I);
839 if (
I.getNumOperands() !=
I.getNumExplicitOperands()) {
840 LLVM_DEBUG(
errs() <<
"Generic instr has unexpected implicit operands\n");
846 bool HasDefs =
I.getNumDefs() > 0;
849 assert(!HasDefs || ResType ||
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
850 I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
851 if (spvSelect(ResVReg, ResType,
I)) {
853 for (
unsigned i = 0; i <
I.getNumDefs(); ++i)
864 case TargetOpcode::G_CONSTANT:
865 case TargetOpcode::G_FCONSTANT:
867 case TargetOpcode::G_SADDO:
868 case TargetOpcode::G_SSUBO:
875 MachineInstr &
I)
const {
876 const TargetRegisterClass *DstRC =
MRI->getRegClassOrNull(DestReg);
877 const TargetRegisterClass *SrcRC =
MRI->getRegClassOrNull(SrcReg);
878 if (DstRC != SrcRC && SrcRC)
879 MRI->setRegClass(DestReg, SrcRC);
880 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(TargetOpcode::COPY))
887bool SPIRVInstructionSelector::spvSelect(
Register ResVReg,
888 SPIRVTypeInst ResType,
889 MachineInstr &
I)
const {
890 const unsigned Opcode =
I.getOpcode();
892 return selectImpl(
I, *CoverageInfo);
894 case TargetOpcode::G_CONSTANT:
895 case TargetOpcode::G_FCONSTANT:
896 return selectConst(ResVReg, ResType,
I);
897 case TargetOpcode::G_GLOBAL_VALUE:
898 return selectGlobalValue(ResVReg,
I);
899 case TargetOpcode::G_IMPLICIT_DEF:
900 return selectOpUndef(ResVReg, ResType,
I);
901 case TargetOpcode::G_FREEZE:
902 return selectFreeze(ResVReg, ResType,
I);
904 case TargetOpcode::G_INTRINSIC:
905 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
906 case TargetOpcode::G_INTRINSIC_CONVERGENT:
907 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
908 return selectIntrinsic(ResVReg, ResType,
I);
909 case TargetOpcode::G_BITREVERSE:
910 return selectBitreverse(ResVReg, ResType,
I);
912 case TargetOpcode::G_BUILD_VECTOR:
913 return selectBuildVector(ResVReg, ResType,
I);
914 case TargetOpcode::G_SPLAT_VECTOR:
915 return selectSplatVector(ResVReg, ResType,
I);
917 case TargetOpcode::G_SHUFFLE_VECTOR: {
918 MachineBasicBlock &BB = *
I.getParent();
919 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
922 .
addUse(
I.getOperand(1).getReg())
923 .
addUse(
I.getOperand(2).getReg());
924 for (
auto V :
I.getOperand(3).getShuffleMask())
929 case TargetOpcode::G_MEMMOVE:
930 case TargetOpcode::G_MEMCPY:
931 case TargetOpcode::G_MEMSET:
932 return selectMemOperation(ResVReg,
I);
934 case TargetOpcode::G_ICMP:
935 return selectICmp(ResVReg, ResType,
I);
936 case TargetOpcode::G_FCMP:
937 return selectFCmp(ResVReg, ResType,
I);
939 case TargetOpcode::G_FRAME_INDEX:
940 return selectFrameIndex(ResVReg, ResType,
I);
942 case TargetOpcode::G_LOAD:
943 return selectLoad(ResVReg, ResType,
I);
944 case TargetOpcode::G_STORE:
945 return selectStore(
I);
947 case TargetOpcode::G_BR:
948 return selectBranch(
I);
949 case TargetOpcode::G_BRCOND:
950 return selectBranchCond(
I);
952 case TargetOpcode::G_PHI:
953 return selectPhi(ResVReg,
I);
955 case TargetOpcode::G_FPTOSI:
956 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
957 case TargetOpcode::G_FPTOUI:
958 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
960 case TargetOpcode::G_FPTOSI_SAT:
961 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
962 case TargetOpcode::G_FPTOUI_SAT:
963 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
965 case TargetOpcode::G_SITOFP:
966 return selectIToF(ResVReg, ResType,
I,
true, SPIRV::OpConvertSToF);
967 case TargetOpcode::G_UITOFP:
968 return selectIToF(ResVReg, ResType,
I,
false, SPIRV::OpConvertUToF);
970 case TargetOpcode::G_CTPOP:
971 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitCount);
972 case TargetOpcode::G_SMIN:
973 return selectExtInst(ResVReg, ResType,
I, CL::s_min, GL::SMin);
974 case TargetOpcode::G_UMIN:
975 return selectExtInst(ResVReg, ResType,
I, CL::u_min, GL::UMin);
977 case TargetOpcode::G_SMAX:
978 return selectExtInst(ResVReg, ResType,
I, CL::s_max, GL::SMax);
979 case TargetOpcode::G_UMAX:
980 return selectExtInst(ResVReg, ResType,
I, CL::u_max, GL::UMax);
982 case TargetOpcode::G_SCMP:
983 return selectSUCmp(ResVReg, ResType,
I,
true);
984 case TargetOpcode::G_UCMP:
985 return selectSUCmp(ResVReg, ResType,
I,
false);
986 case TargetOpcode::G_LROUND:
987 case TargetOpcode::G_LLROUND: {
989 MRI->createVirtualRegister(
MRI->getRegClass(ResVReg),
"lround");
990 MRI->setRegClass(regForLround, &SPIRV::iIDRegClass);
992 regForLround, *(
I.getParent()->getParent()));
994 CL::round, GL::Round,
false);
996 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConvertFToS))
1003 case TargetOpcode::G_STRICT_FMA:
1004 case TargetOpcode::G_FMA: {
1007 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpFmaKHR))
1010 .
addUse(
I.getOperand(1).getReg())
1011 .
addUse(
I.getOperand(2).getReg())
1012 .
addUse(
I.getOperand(3).getReg())
1017 return selectExtInst(ResVReg, ResType,
I, CL::fma, GL::Fma);
1020 case TargetOpcode::G_STRICT_FLDEXP:
1021 return selectExtInst(ResVReg, ResType,
I, CL::ldexp);
1023 case TargetOpcode::G_FPOW:
1024 return selectExtInst(ResVReg, ResType,
I, CL::pow, GL::Pow);
1025 case TargetOpcode::G_FPOWI:
1026 return selectExtInst(ResVReg, ResType,
I, CL::pown);
1028 case TargetOpcode::G_FEXP:
1029 return selectExtInst(ResVReg, ResType,
I, CL::exp, GL::Exp);
1030 case TargetOpcode::G_FEXP2:
1031 return selectExtInst(ResVReg, ResType,
I, CL::exp2, GL::Exp2);
1032 case TargetOpcode::G_FEXP10:
1033 return selectExp10(ResVReg, ResType,
I);
1035 case TargetOpcode::G_FMODF:
1036 return selectModf(ResVReg, ResType,
I);
1037 case TargetOpcode::G_FSINCOS:
1038 return selectSincos(ResVReg, ResType,
I);
1040 case TargetOpcode::G_FLOG:
1041 return selectExtInst(ResVReg, ResType,
I, CL::log, GL::Log);
1042 case TargetOpcode::G_FLOG2:
1043 return selectExtInst(ResVReg, ResType,
I, CL::log2, GL::Log2);
1044 case TargetOpcode::G_FLOG10:
1045 return selectLog10(ResVReg, ResType,
I);
1047 case TargetOpcode::G_FABS:
1048 return selectExtInst(ResVReg, ResType,
I, CL::fabs, GL::FAbs);
1049 case TargetOpcode::G_ABS:
1050 return selectExtInst(ResVReg, ResType,
I, CL::s_abs, GL::SAbs);
1052 case TargetOpcode::G_FMINNUM:
1053 case TargetOpcode::G_FMINIMUM:
1054 return selectExtInst(ResVReg, ResType,
I, CL::fmin, GL::NMin);
1055 case TargetOpcode::G_FMAXNUM:
1056 case TargetOpcode::G_FMAXIMUM:
1057 return selectExtInst(ResVReg, ResType,
I, CL::fmax, GL::NMax);
1059 case TargetOpcode::G_FCOPYSIGN:
1060 return selectExtInst(ResVReg, ResType,
I, CL::copysign);
1062 case TargetOpcode::G_FCEIL:
1063 return selectExtInst(ResVReg, ResType,
I, CL::ceil, GL::Ceil);
1064 case TargetOpcode::G_FFLOOR:
1065 return selectExtInst(ResVReg, ResType,
I, CL::floor, GL::Floor);
1067 case TargetOpcode::G_FCOS:
1068 return selectExtInst(ResVReg, ResType,
I, CL::cos, GL::Cos);
1069 case TargetOpcode::G_FSIN:
1070 return selectExtInst(ResVReg, ResType,
I, CL::sin, GL::Sin);
1071 case TargetOpcode::G_FTAN:
1072 return selectExtInst(ResVReg, ResType,
I, CL::tan, GL::Tan);
1073 case TargetOpcode::G_FACOS:
1074 return selectExtInst(ResVReg, ResType,
I, CL::acos, GL::Acos);
1075 case TargetOpcode::G_FASIN:
1076 return selectExtInst(ResVReg, ResType,
I, CL::asin, GL::Asin);
1077 case TargetOpcode::G_FATAN:
1078 return selectExtInst(ResVReg, ResType,
I, CL::atan, GL::Atan);
1079 case TargetOpcode::G_FATAN2:
1080 return selectExtInst(ResVReg, ResType,
I, CL::atan2, GL::Atan2);
1081 case TargetOpcode::G_FCOSH:
1082 return selectExtInst(ResVReg, ResType,
I, CL::cosh, GL::Cosh);
1083 case TargetOpcode::G_FSINH:
1084 return selectExtInst(ResVReg, ResType,
I, CL::sinh, GL::Sinh);
1085 case TargetOpcode::G_FTANH:
1086 return selectExtInst(ResVReg, ResType,
I, CL::tanh, GL::Tanh);
1088 case TargetOpcode::G_STRICT_FSQRT:
1089 case TargetOpcode::G_FSQRT:
1090 return selectExtInst(ResVReg, ResType,
I, CL::sqrt, GL::Sqrt);
1092 case TargetOpcode::G_CTTZ:
1093 case TargetOpcode::G_CTTZ_ZERO_UNDEF:
1094 return selectExtInst(ResVReg, ResType,
I, CL::ctz);
1095 case TargetOpcode::G_CTLZ:
1096 case TargetOpcode::G_CTLZ_ZERO_UNDEF:
1097 return selectExtInst(ResVReg, ResType,
I, CL::clz);
1099 case TargetOpcode::G_INTRINSIC_ROUND:
1100 return selectExtInst(ResVReg, ResType,
I, CL::round, GL::Round);
1101 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
1102 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
1103 case TargetOpcode::G_INTRINSIC_TRUNC:
1104 return selectExtInst(ResVReg, ResType,
I, CL::trunc, GL::Trunc);
1105 case TargetOpcode::G_FRINT:
1106 case TargetOpcode::G_FNEARBYINT:
1107 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
1109 case TargetOpcode::G_SMULH:
1110 return selectExtInst(ResVReg, ResType,
I, CL::s_mul_hi);
1111 case TargetOpcode::G_UMULH:
1112 return selectExtInst(ResVReg, ResType,
I, CL::u_mul_hi);
1114 case TargetOpcode::G_SADDSAT:
1115 return selectExtInst(ResVReg, ResType,
I, CL::s_add_sat);
1116 case TargetOpcode::G_UADDSAT:
1117 return selectExtInst(ResVReg, ResType,
I, CL::u_add_sat);
1118 case TargetOpcode::G_SSUBSAT:
1119 return selectExtInst(ResVReg, ResType,
I, CL::s_sub_sat);
1120 case TargetOpcode::G_USUBSAT:
1121 return selectExtInst(ResVReg, ResType,
I, CL::u_sub_sat);
1123 case TargetOpcode::G_FFREXP:
1124 return selectFrexp(ResVReg, ResType,
I);
1126 case TargetOpcode::G_UADDO:
1127 return selectOverflowArith(ResVReg, ResType,
I,
1128 ResType->
getOpcode() == SPIRV::OpTypeVector
1129 ? SPIRV::OpIAddCarryV
1130 : SPIRV::OpIAddCarryS);
1131 case TargetOpcode::G_USUBO:
1132 return selectOverflowArith(ResVReg, ResType,
I,
1133 ResType->
getOpcode() == SPIRV::OpTypeVector
1134 ? SPIRV::OpISubBorrowV
1135 : SPIRV::OpISubBorrowS);
1136 case TargetOpcode::G_UMULO:
1137 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpUMulExtended);
1138 case TargetOpcode::G_SMULO:
1139 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpSMulExtended);
1141 case TargetOpcode::G_SEXT:
1142 return selectExt(ResVReg, ResType,
I,
true);
1143 case TargetOpcode::G_ANYEXT:
1144 case TargetOpcode::G_ZEXT:
1145 return selectExt(ResVReg, ResType,
I,
false);
1146 case TargetOpcode::G_TRUNC:
1147 return selectTrunc(ResVReg, ResType,
I);
1148 case TargetOpcode::G_FPTRUNC:
1149 case TargetOpcode::G_FPEXT:
1150 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpFConvert);
1152 case TargetOpcode::G_PTRTOINT:
1153 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertPtrToU);
1154 case TargetOpcode::G_INTTOPTR:
1155 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertUToPtr);
1156 case TargetOpcode::G_BITCAST:
1157 return selectBitcast(ResVReg, ResType,
I);
1158 case TargetOpcode::G_ADDRSPACE_CAST:
1159 return selectAddrSpaceCast(ResVReg, ResType,
I);
1160 case TargetOpcode::G_PTR_ADD: {
1162 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
1166 assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1167 (*II).getOpcode() == TargetOpcode::COPY ||
1168 (*II).getOpcode() == SPIRV::OpVariable) &&
1171 bool IsGVInit =
false;
1173 UseIt =
MRI->use_instr_begin(
I.getOperand(0).getReg()),
1174 UseEnd =
MRI->use_instr_end();
1175 UseIt != UseEnd; UseIt = std::next(UseIt)) {
1176 if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1177 (*UseIt).getOpcode() == SPIRV::OpSpecConstantOp ||
1178 (*UseIt).getOpcode() == SPIRV::OpVariable) {
1188 if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
1191 Register NewVReg =
MRI->createGenericVirtualRegister(
MRI->getType(GV));
1192 MRI->setRegClass(NewVReg,
MRI->getRegClass(GV));
1201 "incompatible result and operand types in a bitcast");
1203 MachineInstrBuilder MIB =
1204 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitcast))
1211 : SPIRV::OpInBoundsPtrAccessChain))
1215 .
addUse(
I.getOperand(2).getReg())
1218 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
1222 static_cast<uint32_t
>(SPIRV::Opcode::InBoundsPtrAccessChain))
1224 .
addUse(
I.getOperand(2).getReg())
1233 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
1236 .
addImm(
static_cast<uint32_t
>(
1237 SPIRV::Opcode::InBoundsPtrAccessChain))
1240 .
addUse(
I.getOperand(2).getReg());
1245 case TargetOpcode::G_ATOMICRMW_OR:
1246 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicOr);
1247 case TargetOpcode::G_ATOMICRMW_ADD:
1248 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicIAdd);
1249 case TargetOpcode::G_ATOMICRMW_AND:
1250 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicAnd);
1251 case TargetOpcode::G_ATOMICRMW_MAX:
1252 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMax);
1253 case TargetOpcode::G_ATOMICRMW_MIN:
1254 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMin);
1255 case TargetOpcode::G_ATOMICRMW_SUB:
1256 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicISub);
1257 case TargetOpcode::G_ATOMICRMW_XOR:
1258 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicXor);
1259 case TargetOpcode::G_ATOMICRMW_UMAX:
1260 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMax);
1261 case TargetOpcode::G_ATOMICRMW_UMIN:
1262 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMin);
1263 case TargetOpcode::G_ATOMICRMW_XCHG:
1264 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicExchange);
1265 case TargetOpcode::G_ATOMIC_CMPXCHG:
1266 return selectAtomicCmpXchg(ResVReg, ResType,
I);
1268 case TargetOpcode::G_ATOMICRMW_FADD:
1269 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT);
1270 case TargetOpcode::G_ATOMICRMW_FSUB:
1272 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT,
1273 ResType->
getOpcode() == SPIRV::OpTypeVector
1275 : SPIRV::OpFNegate);
1276 case TargetOpcode::G_ATOMICRMW_FMIN:
1277 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMinEXT);
1278 case TargetOpcode::G_ATOMICRMW_FMAX:
1279 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMaxEXT);
1281 case TargetOpcode::G_FENCE:
1282 return selectFence(
I);
1284 case TargetOpcode::G_STACKSAVE:
1285 return selectStackSave(ResVReg, ResType,
I);
1286 case TargetOpcode::G_STACKRESTORE:
1287 return selectStackRestore(
I);
1289 case TargetOpcode::G_UNMERGE_VALUES:
1295 case TargetOpcode::G_TRAP:
1296 case TargetOpcode::G_UBSANTRAP:
1297 case TargetOpcode::DBG_LABEL:
1299 case TargetOpcode::G_DEBUGTRAP:
1300 return selectDebugTrap(ResVReg, ResType,
I);
1307bool SPIRVInstructionSelector::selectDebugTrap(
Register ResVReg,
1308 SPIRVTypeInst ResType,
1309 MachineInstr &
I)
const {
1310 unsigned Opcode = SPIRV::OpNop;
1317bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1318 SPIRVTypeInst ResType,
1320 GL::GLSLExtInst GLInst,
1321 bool setMIFlags,
bool useMISrc,
1324 SPIRV::InstructionSet::InstructionSet::GLSL_std_450)) {
1325 std::string DiagMsg;
1326 raw_string_ostream OS(DiagMsg);
1327 I.print(OS,
true,
false,
false,
false);
1328 DiagMsg +=
" is only supported with the GLSL extended instruction set.\n";
1331 return selectExtInst(ResVReg, ResType,
I,
1332 {{SPIRV::InstructionSet::GLSL_std_450, GLInst}},
1333 setMIFlags, useMISrc, SrcRegs);
1336bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1337 SPIRVTypeInst ResType,
1339 CL::OpenCLExtInst CLInst,
1340 bool setMIFlags,
bool useMISrc,
1342 return selectExtInst(ResVReg, ResType,
I,
1343 {{SPIRV::InstructionSet::OpenCL_std, CLInst}},
1344 setMIFlags, useMISrc, SrcRegs);
1347bool SPIRVInstructionSelector::selectExtInst(
1348 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I,
1349 CL::OpenCLExtInst CLInst, GL::GLSLExtInst GLInst,
bool setMIFlags,
1351 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
1352 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
1353 return selectExtInst(ResVReg, ResType,
I, ExtInsts, setMIFlags, useMISrc,
1357bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1358 SPIRVTypeInst ResType,
1361 bool setMIFlags,
bool useMISrc,
1364 for (
const auto &[InstructionSet, Opcode] : Insts) {
1368 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1371 .
addImm(
static_cast<uint32_t
>(InstructionSet))
1376 const unsigned NumOps =
I.getNumOperands();
1379 I.getOperand(Index).getType() ==
1380 MachineOperand::MachineOperandType::MO_IntrinsicID)
1383 MIB.
add(
I.getOperand(Index));
1395bool SPIRVInstructionSelector::selectFrexp(
Register ResVReg,
1396 SPIRVTypeInst ResType,
1397 MachineInstr &
I)
const {
1398 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CL::frexp},
1399 {SPIRV::InstructionSet::GLSL_std_450, GL::Frexp}};
1400 for (
const auto &Ex : ExtInsts) {
1401 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1402 uint32_t Opcode = Ex.second;
1406 MachineIRBuilder MIRBuilder(
I);
1409 PointeeTy, MIRBuilder, SPIRV::StorageClass::Function);
1414 BuildMI(*It->getParent(), It, It->getDebugLoc(),
TII.get(SPIRV::OpVariable))
1417 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
1420 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1423 .
addImm(
static_cast<uint32_t
>(Ex.first))
1425 .
add(
I.getOperand(2))
1429 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1430 .
addDef(
I.getOperand(1).getReg())
1439bool SPIRVInstructionSelector::selectSincos(
Register ResVReg,
1440 SPIRVTypeInst ResType,
1441 MachineInstr &
I)
const {
1442 Register CosResVReg =
I.getOperand(1).getReg();
1443 unsigned SrcIdx =
I.getNumExplicitDefs();
1448 MachineIRBuilder MIRBuilder(
I);
1450 ResType, MIRBuilder, SPIRV::StorageClass::Function);
1455 BuildMI(*It->getParent(), It, It->getDebugLoc(),
TII.get(SPIRV::OpVariable))
1458 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
1460 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1463 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::OpenCL_std))
1465 .
add(
I.getOperand(SrcIdx))
1468 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1476 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1479 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
1481 .
add(
I.getOperand(SrcIdx))
1483 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1486 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
1488 .
add(
I.getOperand(SrcIdx))
1495bool SPIRVInstructionSelector::selectOpWithSrcs(
Register ResVReg,
1496 SPIRVTypeInst ResType,
1498 std::vector<Register> Srcs,
1499 unsigned Opcode)
const {
1500 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
1510bool SPIRVInstructionSelector::selectUnOp(
Register ResVReg,
1511 SPIRVTypeInst ResType,
1513 unsigned Opcode)
const {
1515 Register SrcReg =
I.getOperand(1).getReg();
1518 MRI->def_instr_begin(SrcReg);
1519 DefIt !=
MRI->def_instr_end(); DefIt = std::next(DefIt)) {
1520 unsigned DefOpCode = DefIt->getOpcode();
1521 if (DefOpCode == SPIRV::ASSIGN_TYPE || DefOpCode == TargetOpcode::COPY) {
1524 if (
auto *VRD =
getVRegDef(*
MRI, DefIt->getOperand(1).getReg()))
1525 DefOpCode = VRD->getOpcode();
1527 if (DefOpCode == TargetOpcode::G_GLOBAL_VALUE ||
1528 DefOpCode == TargetOpcode::G_CONSTANT ||
1529 DefOpCode == SPIRV::OpVariable || DefOpCode == SPIRV::OpConstantI) {
1535 uint32_t SpecOpcode = 0;
1537 case SPIRV::OpConvertPtrToU:
1538 SpecOpcode =
static_cast<uint32_t
>(SPIRV::Opcode::ConvertPtrToU);
1540 case SPIRV::OpConvertUToPtr:
1541 SpecOpcode =
static_cast<uint32_t
>(SPIRV::Opcode::ConvertUToPtr);
1546 TII.get(SPIRV::OpSpecConstantOp))
1556 return selectOpWithSrcs(ResVReg, ResType,
I, {
I.getOperand(1).getReg()},
1560bool SPIRVInstructionSelector::selectBitcast(
Register ResVReg,
1561 SPIRVTypeInst ResType,
1562 MachineInstr &
I)
const {
1563 Register OpReg =
I.getOperand(1).getReg();
1564 SPIRVTypeInst OpType =
1568 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
1578 if (
MemOp->isVolatile())
1579 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1580 if (
MemOp->isNonTemporal())
1581 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1583 if (!ST->isShader() &&
MemOp->getAlign().value())
1584 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
1588 if (ST->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing)) {
1589 if (
auto *MD =
MemOp->getAAInfo().Scope) {
1593 static_cast<uint32_t>(SPIRV::MemoryOperand::AliasScopeINTELMask);
1595 if (
auto *MD =
MemOp->getAAInfo().NoAlias) {
1599 static_cast<uint32_t>(SPIRV::MemoryOperand::NoAliasINTELMask);
1603 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
1605 if (SpvMemOp &
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
1617 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1619 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1621 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None))
1625bool SPIRVInstructionSelector::selectLoad(
Register ResVReg,
1626 SPIRVTypeInst ResType,
1627 MachineInstr &
I)
const {
1629 Register Ptr =
I.getOperand(1 + OpOffset).getReg();
1634 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1635 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1637 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1639 MRI->createVirtualRegister(
MRI->getRegClass(HandleReg));
1641 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1645 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1646 return generateImageReadOrFetch(ResVReg, ResType, NewHandleReg, IdxReg,
1647 I.getDebugLoc(),
I);
1651 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1655 if (!
I.getNumMemOperands()) {
1656 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1658 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1661 MachineIRBuilder MIRBuilder(
I);
1668bool SPIRVInstructionSelector::selectStore(MachineInstr &
I)
const {
1670 Register StoreVal =
I.getOperand(0 + OpOffset).getReg();
1671 Register Ptr =
I.getOperand(1 + OpOffset).getReg();
1676 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1677 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1679 MRI->createVirtualRegister(
MRI->getRegClass(HandleReg));
1682 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1686 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1687 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1688 auto BMI =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1689 TII.get(SPIRV::OpImageWrite))
1695 if (sampledTypeIsSignedInteger(LLVMHandleType))
1698 BMI.constrainAllUses(
TII,
TRI, RBI);
1704 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpStore))
1707 if (!
I.getNumMemOperands()) {
1708 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1710 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1713 MachineIRBuilder MIRBuilder(
I);
1720bool SPIRVInstructionSelector::selectStackSave(
Register ResVReg,
1721 SPIRVTypeInst ResType,
1722 MachineInstr &
I)
const {
1723 if (!STI.
canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1725 "llvm.stacksave intrinsic: this instruction requires the following "
1726 "SPIR-V extension: SPV_INTEL_variable_length_array",
1729 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSaveMemoryINTEL))
1736bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &
I)
const {
1737 if (!STI.
canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1739 "llvm.stackrestore intrinsic: this instruction requires the following "
1740 "SPIR-V extension: SPV_INTEL_variable_length_array",
1742 if (!
I.getOperand(0).isReg())
1745 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpRestoreMemoryINTEL))
1746 .
addUse(
I.getOperand(0).getReg())
1752SPIRVInstructionSelector::getOrCreateMemSetGlobal(MachineInstr &
I)
const {
1753 MachineIRBuilder MIRBuilder(
I);
1754 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
1761 GlobalVariable *GV =
new GlobalVariable(*CurFunction.
getParent(), LLVMArrTy,
1765 Type *ValTy = Type::getInt8Ty(
I.getMF()->getFunction().getContext());
1766 Type *ArrTy = ArrayType::get(ValTy, Num);
1768 ArrTy, MIRBuilder, SPIRV::StorageClass::UniformConstant);
1771 ArrTy, MIRBuilder, SPIRV::AccessQualifier::None,
false);
1778 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
1781 .
addImm(SPIRV::StorageClass::UniformConstant)
1792bool SPIRVInstructionSelector::selectCopyMemory(MachineInstr &
I,
1795 Register DstReg =
I.getOperand(0).getReg();
1805 "Unable to determine pointee type size for OpCopyMemory");
1806 const DataLayout &
DL =
I.getMF()->getFunction().getDataLayout();
1807 if (CopySize !=
DL.getTypeStoreSize(
const_cast<Type *
>(LLVMPointeeTy)))
1809 "OpCopyMemory requires the size to match the pointee type size");
1810 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemory))
1813 if (
I.getNumMemOperands()) {
1814 MachineIRBuilder MIRBuilder(
I);
1821bool SPIRVInstructionSelector::selectCopyMemorySized(MachineInstr &
I,
1824 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemorySized))
1825 .
addUse(
I.getOperand(0).getReg())
1827 .
addUse(
I.getOperand(2).getReg());
1828 if (
I.getNumMemOperands()) {
1829 MachineIRBuilder MIRBuilder(
I);
1836bool SPIRVInstructionSelector::selectMemOperation(
Register ResVReg,
1837 MachineInstr &
I)
const {
1838 Register SrcReg =
I.getOperand(1).getReg();
1839 if (
I.getOpcode() == TargetOpcode::G_MEMSET) {
1840 Register VarReg = getOrCreateMemSetGlobal(
I);
1843 Type *ValTy = Type::getInt8Ty(
I.getMF()->getFunction().getContext());
1845 ValTy,
I, SPIRV::StorageClass::UniformConstant);
1847 if (!selectOpWithSrcs(SrcReg, SourceTy,
I, {VarReg}, SPIRV::OpBitcast))
1851 if (!selectCopyMemory(
I, SrcReg))
1854 if (!selectCopyMemorySized(
I, SrcReg))
1857 if (ResVReg.
isValid() && ResVReg !=
I.getOperand(0).getReg())
1858 if (!BuildCOPY(ResVReg,
I.getOperand(0).getReg(),
I))
1863bool SPIRVInstructionSelector::selectAtomicRMW(
Register ResVReg,
1864 SPIRVTypeInst ResType,
1867 unsigned NegateOpcode)
const {
1869 const MachineMemOperand *MemOp = *
I.memoperands_begin();
1872 Register ScopeReg = buildI32Constant(Scope,
I);
1874 Register Ptr =
I.getOperand(1).getReg();
1880 Register MemSemReg = buildI32Constant(MemSem ,
I);
1882 Register ValueReg =
I.getOperand(2).getReg();
1883 if (NegateOpcode != 0) {
1886 if (!selectOpWithSrcs(TmpReg, ResType,
I, {ValueReg}, NegateOpcode))
1891 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(NewOpcode))
1902bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &
I)
const {
1903 unsigned ArgI =
I.getNumOperands() - 1;
1905 I.getOperand(ArgI).isReg() ?
I.getOperand(ArgI).getReg() :
Register(0);
1906 SPIRVTypeInst SrcType =
1908 if (!SrcType || SrcType->
getOpcode() != SPIRV::OpTypeVector)
1910 "cannot select G_UNMERGE_VALUES with a non-vector argument");
1912 SPIRVTypeInst ScalarType =
1915 unsigned CurrentIndex = 0;
1916 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1917 Register ResVReg =
I.getOperand(i).getReg();
1920 LLT ResLLT =
MRI->getType(ResVReg);
1926 ResType = ScalarType;
1932 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
1935 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
1941 for (
unsigned j = 0;
j < NumElements; ++
j) {
1942 MIB.
addImm(CurrentIndex + j);
1944 CurrentIndex += NumElements;
1948 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1960bool SPIRVInstructionSelector::selectFence(MachineInstr &
I)
const {
1963 Register MemSemReg = buildI32Constant(MemSem,
I);
1965 uint32_t
Scope =
static_cast<uint32_t
>(
1967 Register ScopeReg = buildI32Constant(Scope,
I);
1969 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpMemoryBarrier))
1976bool SPIRVInstructionSelector::selectOverflowArith(
Register ResVReg,
1977 SPIRVTypeInst ResType,
1979 unsigned Opcode)
const {
1980 Type *ResTy =
nullptr;
1984 "Not enough info to select the arithmetic with overflow instruction");
1987 "with overflow instruction");
1993 MachineIRBuilder MIRBuilder(
I);
1995 ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
1996 assert(
I.getNumDefs() > 1 &&
"Not enought operands");
2002 Register ZeroReg = buildZerosVal(ResType,
I);
2005 MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
2007 if (ResName.
size() > 0)
2012 BuildMI(BB, MIRBuilder.getInsertPt(),
I.getDebugLoc(),
TII.get(Opcode))
2015 for (
unsigned i =
I.getNumDefs(); i <
I.getNumOperands(); ++i)
2016 MIB.
addUse(
I.getOperand(i).getReg());
2021 MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
2022 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
2024 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2025 .
addDef(i == 1 ? HigherVReg :
I.getOperand(i).getReg())
2032 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpINotEqual))
2033 .
addDef(
I.getOperand(1).getReg())
2041bool SPIRVInstructionSelector::selectAtomicCmpXchg(
Register ResVReg,
2042 SPIRVTypeInst ResType,
2043 MachineInstr &
I)
const {
2047 Register Ptr =
I.getOperand(2).getReg();
2050 const MachineMemOperand *MemOp = *
I.memoperands_begin();
2053 ScopeReg = buildI32Constant(Scope,
I);
2055 unsigned ScSem =
static_cast<uint32_t
>(
2058 unsigned MemSemEq =
static_cast<uint32_t
>(
getMemSemantics(AO)) | ScSem;
2059 Register MemSemEqReg = buildI32Constant(MemSemEq,
I);
2061 unsigned MemSemNeq =
static_cast<uint32_t
>(
getMemSemantics(FO)) | ScSem;
2062 if (MemSemEq == MemSemNeq)
2063 MemSemNeqReg = MemSemEqReg;
2065 MemSemNeqReg = buildI32Constant(MemSemEq,
I);
2068 ScopeReg =
I.getOperand(5).getReg();
2069 MemSemEqReg =
I.getOperand(6).getReg();
2070 MemSemNeqReg =
I.getOperand(7).getReg();
2074 Register Val =
I.getOperand(4).getReg();
2078 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpAtomicCompareExchange))
2097 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpCompositeInsert))
2104 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpCompositeInsert))
2116 case SPIRV::StorageClass::DeviceOnlyINTEL:
2117 case SPIRV::StorageClass::HostOnlyINTEL:
2126 bool IsGRef =
false;
2127 bool IsAllowedRefs =
2128 llvm::all_of(
MRI->use_instructions(ResVReg), [&IsGRef](
auto const &It) {
2129 unsigned Opcode = It.getOpcode();
2130 if (Opcode == SPIRV::OpConstantComposite ||
2131 Opcode == SPIRV::OpVariable ||
2132 isSpvIntrinsic(It, Intrinsic::spv_init_global))
2133 return IsGRef = true;
2134 return Opcode == SPIRV::OpName;
2136 return IsAllowedRefs && IsGRef;
// Returns the virtual register holding the SPIR-V type for a pointer to
// i8 (uchar) in storage class SC — used when a spec-constant pointer cast
// needs a concrete uchar* operand type.
// NOTE(review): extraction-damaged listing; code left byte-identical.
2139Register SPIRVInstructionSelector::getUcharPtrTypeReg(
2140 MachineInstr &
I, SPIRV::StorageClass::StorageClass SC)
const {
2142 Type::getInt8Ty(
I.getMF()->getFunction().getContext()),
I, SC));
2146SPIRVInstructionSelector::buildSpecConstantOp(MachineInstr &
I,
Register Dest,
2148 uint32_t Opcode)
const {
2149 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2150 TII.get(SPIRV::OpSpecConstantOp))
2158SPIRVInstructionSelector::buildConstGenericPtr(MachineInstr &
I,
Register SrcPtr,
2159 SPIRVTypeInst SrcPtrTy)
const {
2160 SPIRVTypeInst GenericPtrTy =
2162 Register Tmp =
MRI->createVirtualRegister(&SPIRV::pIDRegClass);
2164 SPIRV::StorageClass::Generic),
2166 MachineFunction *MF =
I.getParent()->getParent();
2168 MachineInstrBuilder MIB = buildSpecConstantOp(
2170 static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric));
2180bool SPIRVInstructionSelector::selectAddrSpaceCast(
Register ResVReg,
2181 SPIRVTypeInst ResType,
2182 MachineInstr &
I)
const {
2186 Register SrcPtr =
I.getOperand(1).getReg();
2190 if (SrcPtrTy->
getOpcode() != SPIRV::OpTypePointer ||
2191 ResType->
getOpcode() != SPIRV::OpTypePointer)
2192 return BuildCOPY(ResVReg, SrcPtr,
I);
2202 unsigned SpecOpcode =
2204 ?
static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric)
2207 ? static_cast<uint32_t>(
SPIRV::Opcode::GenericCastToPtr)
2214 buildSpecConstantOp(
I, ResVReg, SrcPtr, getUcharPtrTypeReg(
I, DstSC),
2216 .constrainAllUses(
TII,
TRI, RBI);
2218 MachineInstrBuilder MIB = buildConstGenericPtr(
I, SrcPtr, SrcPtrTy);
2220 buildSpecConstantOp(
2222 static_cast<uint32_t
>(SPIRV::Opcode::GenericCastToPtr))
2223 .constrainAllUses(
TII,
TRI, RBI);
2230 return BuildCOPY(ResVReg, SrcPtr,
I);
2232 if ((SrcSC == SPIRV::StorageClass::Function &&
2233 DstSC == SPIRV::StorageClass::Private) ||
2234 (DstSC == SPIRV::StorageClass::Function &&
2235 SrcSC == SPIRV::StorageClass::Private))
2236 return BuildCOPY(ResVReg, SrcPtr,
I);
2240 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
2243 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
2246 SPIRVTypeInst GenericPtrTy =
2265 return selectUnOp(ResVReg, ResType,
I,
2266 SPIRV::OpPtrCastToCrossWorkgroupINTEL);
2268 return selectUnOp(ResVReg, ResType,
I,
2269 SPIRV::OpCrossWorkgroupCastToPtrINTEL);
2271 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
2273 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
2283 return SPIRV::OpFOrdEqual;
2285 return SPIRV::OpFOrdGreaterThanEqual;
2287 return SPIRV::OpFOrdGreaterThan;
2289 return SPIRV::OpFOrdLessThanEqual;
2291 return SPIRV::OpFOrdLessThan;
2293 return SPIRV::OpFOrdNotEqual;
2295 return SPIRV::OpOrdered;
2297 return SPIRV::OpFUnordEqual;
2299 return SPIRV::OpFUnordGreaterThanEqual;
2301 return SPIRV::OpFUnordGreaterThan;
2303 return SPIRV::OpFUnordLessThanEqual;
2305 return SPIRV::OpFUnordLessThan;
2307 return SPIRV::OpFUnordNotEqual;
2309 return SPIRV::OpUnordered;
2319 return SPIRV::OpIEqual;
2321 return SPIRV::OpINotEqual;
2323 return SPIRV::OpSGreaterThanEqual;
2325 return SPIRV::OpSGreaterThan;
2327 return SPIRV::OpSLessThanEqual;
2329 return SPIRV::OpSLessThan;
2331 return SPIRV::OpUGreaterThanEqual;
2333 return SPIRV::OpUGreaterThan;
2335 return SPIRV::OpULessThanEqual;
2337 return SPIRV::OpULessThan;
2346 return SPIRV::OpPtrEqual;
2348 return SPIRV::OpPtrNotEqual;
2359 return SPIRV::OpLogicalEqual;
2361 return SPIRV::OpLogicalNotEqual;
2395bool SPIRVInstructionSelector::selectAnyOrAll(
Register ResVReg,
2396 SPIRVTypeInst ResType,
2398 unsigned OpAnyOrAll)
const {
2399 assert(
I.getNumOperands() == 3);
2400 assert(
I.getOperand(2).isReg());
2402 Register InputRegister =
I.getOperand(2).getReg();
2409 bool IsVectorTy = InputType->
getOpcode() == SPIRV::OpTypeVector;
2410 if (IsBoolTy && !IsVectorTy) {
2411 assert(ResVReg ==
I.getOperand(0).getReg());
2412 return BuildCOPY(ResVReg, InputRegister,
I);
2416 unsigned SpirvNotEqualId =
2417 IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
2419 SPIRVTypeInst SpvBoolTy = SpvBoolScalarTy;
2424 IsBoolTy ? InputRegister
2432 IsFloatTy ? buildZerosValF(InputType,
I) : buildZerosVal(InputType,
I);
2434 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SpirvNotEqualId))
// Selects an "all lanes/components true" reduction by delegating to the
// shared selectAnyOrAll helper with SPIRV::OpAll.
2451bool SPIRVInstructionSelector::selectAll(
Register ResVReg,
2452 SPIRVTypeInst ResType,
2453 MachineInstr &
I)
const {
2454 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAll);
// Selects an "any component true" reduction by delegating to the shared
// selectAnyOrAll helper with SPIRV::OpAny.
2457bool SPIRVInstructionSelector::selectAny(
Register ResVReg,
2458 SPIRVTypeInst ResType,
2459 MachineInstr &
I)
const {
2460 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAny);
2464bool SPIRVInstructionSelector::selectFloatDot(
Register ResVReg,
2465 SPIRVTypeInst ResType,
2466 MachineInstr &
I)
const {
2467 assert(
I.getNumOperands() == 4);
2468 assert(
I.getOperand(2).isReg());
2469 assert(
I.getOperand(3).isReg());
2471 [[maybe_unused]] SPIRVTypeInst VecType =
2476 "dot product requires a vector of at least 2 components");
2478 [[maybe_unused]] SPIRVTypeInst EltType =
2487 .
addUse(
I.getOperand(2).getReg())
2488 .
addUse(
I.getOperand(3).getReg())
2493bool SPIRVInstructionSelector::selectIntegerDot(
Register ResVReg,
2494 SPIRVTypeInst ResType,
2497 assert(
I.getNumOperands() == 4);
2498 assert(
I.getOperand(2).isReg());
2499 assert(
I.getOperand(3).isReg());
2502 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2506 .
addUse(
I.getOperand(2).getReg())
2507 .
addUse(
I.getOperand(3).getReg())
2514bool SPIRVInstructionSelector::selectIntegerDotExpansion(
2515 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
2516 assert(
I.getNumOperands() == 4);
2517 assert(
I.getOperand(2).isReg());
2518 assert(
I.getOperand(3).isReg());
2522 Register Vec0 =
I.getOperand(2).getReg();
2523 Register Vec1 =
I.getOperand(3).getReg();
2527 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIMulV))
2536 "dot product requires a vector of at least 2 components");
2539 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2549 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2560 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIAddS))
// Emits SPIRV::OpIsInf testing operand 2 of the intrinsic; the boolean
// result is written to ResVReg (result def/type operands are on lines
// missing from this damaged listing).
2572bool SPIRVInstructionSelector::selectOpIsInf(
Register ResVReg,
2573 SPIRVTypeInst ResType,
2574 MachineInstr &
I)
const {
2576 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsInf))
// Source value to test for infinity.
2579 .
addUse(
I.getOperand(2).getReg())
// Emits SPIRV::OpIsNan testing operand 2 of the intrinsic — mirror of
// selectOpIsInf above; listing is extraction-damaged, code untouched.
2584bool SPIRVInstructionSelector::selectOpIsNan(
Register ResVReg,
2585 SPIRVTypeInst ResType,
2586 MachineInstr &
I)
const {
2588 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsNan))
// Source value to test for NaN.
2591 .
addUse(
I.getOperand(2).getReg())
2596template <
bool Signed>
2597bool SPIRVInstructionSelector::selectDot4AddPacked(
Register ResVReg,
2598 SPIRVTypeInst ResType,
2599 MachineInstr &
I)
const {
2600 assert(
I.getNumOperands() == 5);
2601 assert(
I.getOperand(2).isReg());
2602 assert(
I.getOperand(3).isReg());
2603 assert(
I.getOperand(4).isReg());
2606 Register Acc =
I.getOperand(2).getReg();
2610 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2612 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(DotOp))
2617 MIB.
addImm(SPIRV::BuiltIn::PackedVectorFormat4x8Bit);
2620 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIAddS))
2632template <
bool Signed>
2633bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
2634 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
2635 assert(
I.getNumOperands() == 5);
2636 assert(
I.getOperand(2).isReg());
2637 assert(
I.getOperand(3).isReg());
2638 assert(
I.getOperand(4).isReg());
2641 Register Acc =
I.getOperand(2).getReg();
2647 Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;
2651 for (
unsigned i = 0; i < 4; i++) {
2653 Register AElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2663 Register BElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2674 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIMulS))
2682 Register MaskMul =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2693 i < 3 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
2694 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIAddS))
// Selects a saturate (clamp to [0.0, 1.0]) via the GLSL.std.450 extended
// instruction set: builds constant 0.0 and 1.0 vectors matching ResType,
// then emits OpExtInst (presumably FClamp — the opcode operand is on a
// line missing from this damaged listing) on the single source operand.
2709bool SPIRVInstructionSelector::selectSaturate(
Register ResVReg,
2710 SPIRVTypeInst ResType,
2711 MachineInstr &
I)
const {
// Intrinsic shape: result + intrinsic id + one register source.
2712 assert(
I.getNumOperands() == 3);
2713 assert(
I.getOperand(2).isReg());
// Clamp bounds: all-zeros and all-ones float constants of ResType.
2715 Register VZero = buildZerosValF(ResType,
I);
2716 Register VOne = buildOnesValF(ResType,
I);
2718 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
// Select the GLSL.std.450 extended instruction set.
2721 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2723 .
addUse(
I.getOperand(2).getReg())
2730bool SPIRVInstructionSelector::selectSign(
Register ResVReg,
2731 SPIRVTypeInst ResType,
2732 MachineInstr &
I)
const {
2733 assert(
I.getNumOperands() == 3);
2734 assert(
I.getOperand(2).isReg());
2736 Register InputRegister =
I.getOperand(2).getReg();
2738 auto &
DL =
I.getDebugLoc();
2748 bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;
2750 auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
2752 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass)
2758 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2763 if (NeedsConversion) {
2764 auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
// Generic selector for wave/subgroup operations: emits the given Opcode
// and forwards every intrinsic source operand (from index 2 onward) as a
// use. Result def/type and scope operands are on lines missing from this
// damaged listing.
2775bool SPIRVInstructionSelector::selectWaveOpInst(
Register ResVReg,
2776 SPIRVTypeInst ResType,
2778 unsigned Opcode)
const {
2782 auto BMI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
// Forward all remaining intrinsic operands as instruction uses.
2788 for (
unsigned J = 2; J <
I.getNumOperands(); J++) {
2789 BMI.addUse(
I.getOperand(J).getReg());
2796bool SPIRVInstructionSelector::selectWaveActiveCountBits(
2797 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
2802 if (!selectWaveOpInst(BallotReg, BallotType,
I,
2803 SPIRV::OpGroupNonUniformBallot))
2808 TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
2813 .
addImm(SPIRV::GroupOperation::Reduce)
2820bool SPIRVInstructionSelector::selectWavePrefixBitCount(
Register ResVReg,
2821 SPIRVTypeInst ResType,
2822 MachineInstr &
I)
const {
2824 assert(
I.getNumOperands() == 3);
2826 auto Op =
I.getOperand(2);
2838 if (InputType->
getOpcode() != SPIRV::OpTypeBool)
2849 Register BallotVReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2860 BuildMI(BB,
I,
DL,
TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
2864 .
addImm(SPIRV::GroupOperation::ExclusiveScan)
// Wave reduce-max: delegates to the generic selectWaveReduce with a
// picker lambda that chooses OpGroupNonUniformFMax for float inputs, and
// UMax/SMax for integers depending on IsUnsigned.
2871bool SPIRVInstructionSelector::selectWaveReduceMax(
Register ResVReg,
2872 SPIRVTypeInst ResType,
2874 bool IsUnsigned)
const {
2875 return selectWaveReduce(
2876 ResVReg, ResType,
I, IsUnsigned,
2877 [&](
Register InputRegister,
bool IsUnsigned) {
// Float check (its right-hand side is on a line missing from
// this damaged listing — presumably an OpTypeFloat query).
2878 const bool IsFloatTy =
2880 const unsigned IntOp = IsUnsigned ? SPIRV::OpGroupNonUniformUMax
2881 : SPIRV::OpGroupNonUniformSMax;
2882 return IsFloatTy ? SPIRV::OpGroupNonUniformFMax : IntOp;
// Wave reduce-min: mirror of selectWaveReduceMax — FMin for floats,
// UMin/SMin for integers based on IsUnsigned.
2886bool SPIRVInstructionSelector::selectWaveReduceMin(
Register ResVReg,
2887 SPIRVTypeInst ResType,
2889 bool IsUnsigned)
const {
2890 return selectWaveReduce(
2891 ResVReg, ResType,
I, IsUnsigned,
2892 [&](
Register InputRegister,
bool IsUnsigned) {
// Float check (right-hand side on a line missing from this
// damaged listing).
2893 const bool IsFloatTy =
2895 const unsigned IntOp = IsUnsigned ? SPIRV::OpGroupNonUniformUMin
2896 : SPIRV::OpGroupNonUniformSMin;
2897 return IsFloatTy ? SPIRV::OpGroupNonUniformFMin : IntOp;
// Wave reduce-sum: delegates to selectWaveReduce (signedness irrelevant
// for addition, hence the hard-coded false) picking FAdd for float input
// types and IAdd otherwise.
2901bool SPIRVInstructionSelector::selectWaveReduceSum(
Register ResVReg,
2902 SPIRVTypeInst ResType,
2903 MachineInstr &
I)
const {
2904 return selectWaveReduce(ResVReg, ResType,
I,
false,
2905 [&](
Register InputRegister,
bool IsUnsigned) {
2907 InputRegister, SPIRV::OpTypeFloat);
2908 return IsFloatTy ? SPIRV::OpGroupNonUniformFAdd
2909 : SPIRV::OpGroupNonUniformIAdd;
2913template <
typename PickOpcodeFn>
2914bool SPIRVInstructionSelector::selectWaveReduce(
2915 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I,
bool IsUnsigned,
2916 PickOpcodeFn &&PickOpcode)
const {
2917 assert(
I.getNumOperands() == 3);
2918 assert(
I.getOperand(2).isReg());
2920 Register InputRegister =
I.getOperand(2).getReg();
2927 const unsigned Opcode = PickOpcode(InputRegister, IsUnsigned);
2933 .
addImm(SPIRV::GroupOperation::Reduce)
2934 .
addUse(
I.getOperand(2).getReg())
2939bool SPIRVInstructionSelector::selectWaveExclusiveScanSum(
2940 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
2941 return selectWaveExclusiveScan(ResVReg, ResType,
I,
false,
2942 [&](
Register InputRegister,
bool IsUnsigned) {
2944 InputRegister, SPIRV::OpTypeFloat);
2946 ? SPIRV::OpGroupNonUniformFAdd
2947 : SPIRV::OpGroupNonUniformIAdd;
2951bool SPIRVInstructionSelector::selectWaveExclusiveScanProduct(
2952 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
2953 return selectWaveExclusiveScan(ResVReg, ResType,
I,
false,
2954 [&](
Register InputRegister,
bool IsUnsigned) {
2956 InputRegister, SPIRV::OpTypeFloat);
2958 ? SPIRV::OpGroupNonUniformFMul
2959 : SPIRV::OpGroupNonUniformIMul;
2963template <
typename PickOpcodeFn>
2964bool SPIRVInstructionSelector::selectWaveExclusiveScan(
2965 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I,
bool IsUnsigned,
2966 PickOpcodeFn &&PickOpcode)
const {
2967 assert(
I.getNumOperands() == 3);
2968 assert(
I.getOperand(2).isReg());
2970 Register InputRegister =
I.getOperand(2).getReg();
2977 const unsigned Opcode = PickOpcode(InputRegister, IsUnsigned);
2983 .
addImm(SPIRV::GroupOperation::ExclusiveScan)
2984 .
addUse(
I.getOperand(2).getReg())
// Selects G_BITREVERSE as SPIRV::OpBitReverse on operand 1 (result
// def/type operands are on lines missing from this damaged listing).
2989bool SPIRVInstructionSelector::selectBitreverse(
Register ResVReg,
2990 SPIRVTypeInst ResType,
2991 MachineInstr &
I)
const {
2993 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitReverse))
// Source value whose bits are reversed.
2996 .
addUse(
I.getOperand(1).getReg())
3001bool SPIRVInstructionSelector::selectFreeze(
Register ResVReg,
3002 SPIRVTypeInst ResType,
3003 MachineInstr &
I)
const {
3009 if (!
I.getOperand(0).isReg() || !
I.getOperand(1).isReg())
3011 Register OpReg =
I.getOperand(1).getReg();
3012 if (MachineInstr *Def =
MRI->getVRegDef(OpReg)) {
3013 if (
Def->getOpcode() == TargetOpcode::COPY)
3014 Def =
MRI->getVRegDef(
Def->getOperand(1).getReg());
3016 switch (
Def->getOpcode()) {
3017 case SPIRV::ASSIGN_TYPE:
3018 if (MachineInstr *AssignToDef =
3019 MRI->getVRegDef(
Def->getOperand(1).getReg())) {
3020 if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
3021 Reg =
Def->getOperand(2).getReg();
3024 case SPIRV::OpUndef:
3025 Reg =
Def->getOperand(1).getReg();
3028 unsigned DestOpCode;
3030 DestOpCode = SPIRV::OpConstantNull;
3032 DestOpCode = TargetOpcode::COPY;
3035 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DestOpCode))
3036 .
addDef(
I.getOperand(0).getReg())
3044bool SPIRVInstructionSelector::selectBuildVector(
Register ResVReg,
3045 SPIRVTypeInst ResType,
3046 MachineInstr &
I)
const {
3048 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
3050 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
3054 if (
I.getNumExplicitOperands() -
I.getNumExplicitDefs() !=
N)
3059 for (
unsigned i =
I.getNumExplicitDefs();
3060 i <
I.getNumExplicitOperands() && IsConst; ++i)
3064 if (!IsConst &&
N < 2)
3066 "There must be at least two constituent operands in a vector");
3069 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3070 TII.get(IsConst ? SPIRV::OpConstantComposite
3071 : SPIRV::OpCompositeConstruct))
3074 for (
unsigned i =
I.getNumExplicitDefs(); i <
I.getNumExplicitOperands(); ++i)
3075 MIB.
addUse(
I.getOperand(i).getReg());
3080bool SPIRVInstructionSelector::selectSplatVector(
Register ResVReg,
3081 SPIRVTypeInst ResType,
3082 MachineInstr &
I)
const {
3084 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
3086 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
3092 if (!
I.getOperand(
OpIdx).isReg())
3099 if (!IsConst &&
N < 2)
3101 "There must be at least two constituent operands in a vector");
3104 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3105 TII.get(IsConst ? SPIRV::OpConstantComposite
3106 : SPIRV::OpCompositeConstruct))
3109 for (
unsigned i = 0; i <
N; ++i)
3115bool SPIRVInstructionSelector::selectDiscard(
Register ResVReg,
3116 SPIRVTypeInst ResType,
3117 MachineInstr &
I)
const {
3122 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
3124 Opcode = SPIRV::OpDemoteToHelperInvocation;
3126 Opcode = SPIRV::OpKill;
3128 if (MachineInstr *NextI =
I.getNextNode()) {
3130 NextI->eraseFromParent();
// Common comparison selector: emits the pre-resolved SPIR-V compare
// opcode CmpOpc on the two compare operands (operands 2 and 3 of the
// generic compare instruction).
3140bool SPIRVInstructionSelector::selectCmp(
Register ResVReg,
3141 SPIRVTypeInst ResType,
unsigned CmpOpc,
3142 MachineInstr &
I)
const {
3143 Register Cmp0 =
I.getOperand(2).getReg();
3144 Register Cmp1 =
I.getOperand(3).getReg();
// Tail of an assert whose condition lines are missing from this
// damaged listing.
3147 "CMP operands should have the same type");
3148 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(CmpOpc))
// Selects an integer compare: reads the predicate from operand 1, maps
// it to a SPIR-V opcode (mapping lines are missing from this damaged
// listing), then delegates to the shared selectCmp.
3158bool SPIRVInstructionSelector::selectICmp(
Register ResVReg,
3159 SPIRVTypeInst ResType,
3160 MachineInstr &
I)
const {
3161 auto Pred =
I.getOperand(1).getPredicate();
3164 Register CmpOperand =
I.getOperand(2).getReg();
3171 return selectCmp(ResVReg, ResType, CmpOpc,
I);
3175SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &
I,
3176 SPIRVTypeInst ResType)
const {
3178 SPIRVTypeInst SpvI32Ty =
3181 auto ConstInt = ConstantInt::get(LLVMTy, Val);
3188 ?
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
3191 :
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
3194 .
addImm(APInt(32, Val).getZExtValue());
3196 GR.
add(ConstInt,
MI);
// Selects a floating-point compare by delegating to selectCmp with a
// previously-resolved float compare opcode (its computation is on a line
// missing from this damaged listing).
3201bool SPIRVInstructionSelector::selectFCmp(
Register ResVReg,
3202 SPIRVTypeInst ResType,
3203 MachineInstr &
I)
const {
3205 return selectCmp(ResVReg, ResType, CmpOp,
I);
3208bool SPIRVInstructionSelector::selectExp10(
Register ResVReg,
3209 SPIRVTypeInst ResType,
3210 MachineInstr &
I)
const {
3212 return selectExtInst(ResVReg, ResType,
I, CL::exp10);
3219 if (ResType->
getOpcode() != SPIRV::OpTypeVector &&
3220 ResType->
getOpcode() != SPIRV::OpTypeFloat)
3223 MachineIRBuilder MIRBuilder(
I);
3225 SPIRVTypeInst SpirvScalarType = ResType->
getOpcode() == SPIRV::OpTypeVector
3231 "only float operands supported by GLSL extended math");
3234 MIRBuilder, SpirvScalarType);
3236 auto Opcode = ResType->
getOpcode() == SPIRV::OpTypeVector
3237 ? SPIRV::OpVectorTimesScalar
3240 if (!selectOpWithSrcs(ArgReg, ResType,
I,
3241 {
I.getOperand(1).getReg(), ConstReg}, Opcode))
3243 if (!selectExtInst(ResVReg, ResType,
I,
3244 {{SPIRV::InstructionSet::GLSL_std_450, GL::Exp2}},
false,
3254Register SPIRVInstructionSelector::buildZerosVal(SPIRVTypeInst ResType,
3255 MachineInstr &
I)
const {
3258 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
3263bool SPIRVInstructionSelector::isScalarOrVectorIntConstantZero(
3269 if (!CompType || CompType->
getOpcode() != SPIRV::OpTypeInt)
3277 if (
Def->getOpcode() == SPIRV::OpConstantNull)
3280 if (
Def->getOpcode() == TargetOpcode::G_CONSTANT ||
3281 Def->getOpcode() == SPIRV::OpConstantI)
3290 MachineInstr *
Def =
MRI->getVRegDef(
Reg);
3294 if (
Def->getOpcode() == TargetOpcode::G_BUILD_VECTOR ||
3295 (
Def->getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS &&
3297 Intrinsic::spv_const_composite)) {
3298 unsigned StartOp =
Def->getOpcode() == TargetOpcode::G_BUILD_VECTOR ? 1 : 2;
3299 for (
unsigned i = StartOp; i <
Def->getNumOperands(); ++i) {
3300 if (!IsZero(
Def->getOperand(i).getReg()))
3309Register SPIRVInstructionSelector::buildZerosValF(SPIRVTypeInst ResType,
3310 MachineInstr &
I)
const {
3314 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
3319Register SPIRVInstructionSelector::buildOnesValF(SPIRVTypeInst ResType,
3320 MachineInstr &
I)
const {
3324 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
3330 SPIRVTypeInst ResType,
3331 MachineInstr &
I)
const {
3335 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
3340bool SPIRVInstructionSelector::selectSelect(
Register ResVReg,
3341 SPIRVTypeInst ResType,
3342 MachineInstr &
I)
const {
3343 Register SelectFirstArg =
I.getOperand(2).getReg();
3344 Register SelectSecondArg =
I.getOperand(3).getReg();
3353 SPIRV::OpTypeVector;
3360 Opcode = IsScalarBool ? SPIRV::OpSelectVFSCond : SPIRV::OpSelectVFVCond;
3361 }
else if (IsPtrTy) {
3362 Opcode = IsScalarBool ? SPIRV::OpSelectVPSCond : SPIRV::OpSelectVPVCond;
3364 Opcode = IsScalarBool ? SPIRV::OpSelectVISCond : SPIRV::OpSelectVIVCond;
3368 Opcode = IsScalarBool ? SPIRV::OpSelectSFSCond : SPIRV::OpSelectVFVCond;
3369 }
else if (IsPtrTy) {
3370 Opcode = IsScalarBool ? SPIRV::OpSelectSPSCond : SPIRV::OpSelectVPVCond;
3372 Opcode = IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
3375 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
3378 .
addUse(
I.getOperand(1).getReg())
3385bool SPIRVInstructionSelector::selectSelectDefaultArgs(
Register ResVReg,
3386 SPIRVTypeInst ResType,
3388 bool IsSigned)
const {
3390 Register ZeroReg = buildZerosVal(ResType,
I);
3391 Register OneReg = buildOnesVal(IsSigned, ResType,
I);
3395 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
3396 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
3399 .
addUse(
I.getOperand(1).getReg())
3406bool SPIRVInstructionSelector::selectIToF(
Register ResVReg,
3407 SPIRVTypeInst ResType,
3408 MachineInstr &
I,
bool IsSigned,
3409 unsigned Opcode)
const {
3410 Register SrcReg =
I.getOperand(1).getReg();
3416 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
3421 selectSelectDefaultArgs(SrcReg, TmpType,
I,
false);
3423 return selectOpWithSrcs(ResVReg, ResType,
I, {SrcReg}, Opcode);
3426bool SPIRVInstructionSelector::selectExt(
Register ResVReg,
3427 SPIRVTypeInst ResType, MachineInstr &
I,
3428 bool IsSigned)
const {
3429 Register SrcReg =
I.getOperand(1).getReg();
3431 return selectSelectDefaultArgs(ResVReg, ResType,
I, IsSigned);
3434 if (ResType == SrcType)
3435 return BuildCOPY(ResVReg, SrcReg,
I);
3437 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
3438 return selectUnOp(ResVReg, ResType,
I, Opcode);
3441bool SPIRVInstructionSelector::selectSUCmp(
Register ResVReg,
3442 SPIRVTypeInst ResType,
3444 bool IsSigned)
const {
3445 MachineIRBuilder MIRBuilder(
I);
3446 MachineRegisterInfo *
MRI = MIRBuilder.getMRI();
3461 TII.get(IsSigned ? SPIRV::OpSLessThanEqual : SPIRV::OpULessThanEqual))
3464 .
addUse(
I.getOperand(1).getReg())
3465 .
addUse(
I.getOperand(2).getReg())
3471 TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
3474 .
addUse(
I.getOperand(1).getReg())
3475 .
addUse(
I.getOperand(2).getReg())
3483 unsigned SelectOpcode =
3484 N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
3489 .
addUse(buildOnesVal(
true, ResType,
I))
3490 .
addUse(buildZerosVal(ResType,
I))
3497 .
addUse(buildOnesVal(
false, ResType,
I))
3502bool SPIRVInstructionSelector::selectIntToBool(
Register IntReg,
3505 SPIRVTypeInst IntTy,
3506 SPIRVTypeInst BoolTy)
const {
3509 bool IsVectorTy = IntTy->
getOpcode() == SPIRV::OpTypeVector;
3510 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
3512 Register One = buildOnesVal(
false, IntTy,
I);
3520 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpINotEqual))
3529bool SPIRVInstructionSelector::selectTrunc(
Register ResVReg,
3530 SPIRVTypeInst ResType,
3531 MachineInstr &
I)
const {
3532 Register IntReg =
I.getOperand(1).getReg();
3535 return selectIntToBool(IntReg, ResVReg,
I, ArgType, ResType);
3536 if (ArgType == ResType)
3537 return BuildCOPY(ResVReg, IntReg,
I);
3539 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
3540 return selectUnOp(ResVReg, ResType,
I, Opcode);
3543bool SPIRVInstructionSelector::selectConst(
Register ResVReg,
3544 SPIRVTypeInst ResType,
3545 MachineInstr &
I)
const {
3546 unsigned Opcode =
I.getOpcode();
3547 unsigned TpOpcode = ResType->
getOpcode();
3549 if (TpOpcode == SPIRV::OpTypePointer || TpOpcode == SPIRV::OpTypeEvent) {
3550 assert(Opcode == TargetOpcode::G_CONSTANT &&
3551 I.getOperand(1).getCImm()->isZero());
3552 MachineBasicBlock &DepMBB =
I.getMF()->front();
3555 }
else if (Opcode == TargetOpcode::G_FCONSTANT) {
3562 return Reg == ResVReg ?
true : BuildCOPY(ResVReg,
Reg,
I);
3565bool SPIRVInstructionSelector::selectOpUndef(
Register ResVReg,
3566 SPIRVTypeInst ResType,
3567 MachineInstr &
I)
const {
3568 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
3575bool SPIRVInstructionSelector::selectInsertVal(
Register ResVReg,
3576 SPIRVTypeInst ResType,
3577 MachineInstr &
I)
const {
3579 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeInsert))
3583 .
addUse(
I.getOperand(3).getReg())
3585 .
addUse(
I.getOperand(2).getReg());
3586 for (
unsigned i = 4; i <
I.getNumOperands(); i++)
3592bool SPIRVInstructionSelector::selectExtractVal(
Register ResVReg,
3593 SPIRVTypeInst ResType,
3594 MachineInstr &
I)
const {
3595 Type *MaybeResTy =
nullptr;
3600 "Expected aggregate type for extractv instruction");
3602 SPIRV::AccessQualifier::ReadWrite,
false);
3606 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
3609 .
addUse(
I.getOperand(2).getReg());
3610 for (
unsigned i = 3; i <
I.getNumOperands(); i++)
3616bool SPIRVInstructionSelector::selectInsertElt(
Register ResVReg,
3617 SPIRVTypeInst ResType,
3618 MachineInstr &
I)
const {
3620 return selectInsertVal(ResVReg, ResType,
I);
3622 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorInsertDynamic))
3625 .
addUse(
I.getOperand(2).getReg())
3626 .
addUse(
I.getOperand(3).getReg())
3627 .
addUse(
I.getOperand(4).getReg())
3632bool SPIRVInstructionSelector::selectExtractElt(
Register ResVReg,
3633 SPIRVTypeInst ResType,
3634 MachineInstr &
I)
const {
3636 return selectExtractVal(ResVReg, ResType,
I);
3638 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorExtractDynamic))
3641 .
addUse(
I.getOperand(2).getReg())
3642 .
addUse(
I.getOperand(3).getReg())
3647bool SPIRVInstructionSelector::selectGEP(
Register ResVReg,
3648 SPIRVTypeInst ResType,
3649 MachineInstr &
I)
const {
3650 const bool IsGEPInBounds =
I.getOperand(2).getImm();
3656 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
3657 : SPIRV::OpAccessChain)
3658 : (IsGEPInBounds ?
SPIRV::OpInBoundsPtrAccessChain
3659 :
SPIRV::OpPtrAccessChain);
3661 auto Res =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
3665 .
addUse(
I.getOperand(3).getReg());
3667 (Opcode == SPIRV::OpPtrAccessChain ||
3668 Opcode == SPIRV::OpInBoundsPtrAccessChain ||
3670 "Cannot translate GEP to OpAccessChain. First index must be 0.");
3673 const unsigned StartingIndex =
3674 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
3677 for (
unsigned i = StartingIndex; i <
I.getNumExplicitOperands(); ++i)
3678 Res.addUse(
I.getOperand(i).getReg());
3679 Res.constrainAllUses(
TII,
TRI, RBI);
3684bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
3686 unsigned Lim =
I.getNumExplicitOperands();
3687 for (
unsigned i =
I.getNumExplicitDefs() + 1; i < Lim; ++i) {
3688 Register OpReg =
I.getOperand(i).getReg();
3689 MachineInstr *OpDefine =
MRI->getVRegDef(OpReg);
3692 OpDefine->
getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
3693 OpDefine->
getOpcode() == TargetOpcode::G_INTTOPTR ||
3700 MachineFunction *MF =
I.getMF();
3712 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3713 TII.get(SPIRV::OpSpecConstantOp))
3716 .
addImm(
static_cast<uint32_t
>(SPIRV::Opcode::Bitcast))
3718 GR.
add(OpDefine, MIB);
3724bool SPIRVInstructionSelector::selectDerivativeInst(
3725 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I,
3726 const unsigned DPdOpCode)
const {
3729 errorIfInstrOutsideShader(
I);
3734 Register SrcReg =
I.getOperand(2).getReg();
3739 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DPdOpCode))
3742 .
addUse(
I.getOperand(2).getReg());
3744 MachineIRBuilder MIRBuilder(
I);
3747 if (componentCount != 1)
3751 const TargetRegisterClass *RegClass = GR.
getRegClass(SrcType);
3752 Register ConvertToVReg =
MRI->createVirtualRegister(RegClass);
3753 Register DpdOpVReg =
MRI->createVirtualRegister(RegClass);
3755 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpFConvert))
3760 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DPdOpCode))
3765 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpFConvert))
3773bool SPIRVInstructionSelector::selectIntrinsic(
Register ResVReg,
3774 SPIRVTypeInst ResType,
3775 MachineInstr &
I)
const {
3779 case Intrinsic::spv_load:
3780 return selectLoad(ResVReg, ResType,
I);
3781 case Intrinsic::spv_store:
3782 return selectStore(
I);
3783 case Intrinsic::spv_extractv:
3784 return selectExtractVal(ResVReg, ResType,
I);
3785 case Intrinsic::spv_insertv:
3786 return selectInsertVal(ResVReg, ResType,
I);
3787 case Intrinsic::spv_extractelt:
3788 return selectExtractElt(ResVReg, ResType,
I);
3789 case Intrinsic::spv_insertelt:
3790 return selectInsertElt(ResVReg, ResType,
I);
3791 case Intrinsic::spv_gep:
3792 return selectGEP(ResVReg, ResType,
I);
3793 case Intrinsic::spv_bitcast: {
3794 Register OpReg =
I.getOperand(2).getReg();
3795 SPIRVTypeInst OpType =
3799 return selectOpWithSrcs(ResVReg, ResType,
I, {OpReg}, SPIRV::OpBitcast);
3801 case Intrinsic::spv_unref_global:
3802 case Intrinsic::spv_init_global: {
3803 MachineInstr *
MI =
MRI->getVRegDef(
I.getOperand(1).getReg());
3804 MachineInstr *Init =
I.getNumExplicitOperands() > 2
3805 ?
MRI->getVRegDef(
I.getOperand(2).getReg())
3808 Register GVarVReg =
MI->getOperand(0).getReg();
3809 if (!selectGlobalValue(GVarVReg, *
MI, Init))
3814 if (
MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
3816 MI->eraseFromParent();
3820 case Intrinsic::spv_undef: {
3821 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
3827 case Intrinsic::spv_const_composite: {
3829 bool IsNull =
I.getNumExplicitDefs() + 1 ==
I.getNumExplicitOperands();
3835 if (!wrapIntoSpecConstantOp(
I, CompositeArgs))
3837 MachineIRBuilder MIR(
I);
3839 MIR, SPIRV::OpConstantComposite, 3,
3840 SPIRV::OpConstantCompositeContinuedINTEL, CompositeArgs, ResVReg,
3842 for (
auto *Instr : Instructions) {
3843 Instr->setDebugLoc(
I.getDebugLoc());
3848 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
3855 case Intrinsic::spv_assign_name: {
3856 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpName));
3857 MIB.
addUse(
I.getOperand(
I.getNumExplicitDefs() + 1).getReg());
3858 for (
unsigned i =
I.getNumExplicitDefs() + 2;
3859 i <
I.getNumExplicitOperands(); ++i) {
3860 MIB.
addImm(
I.getOperand(i).getImm());
3865 case Intrinsic::spv_switch: {
3866 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSwitch));
3867 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3868 if (
I.getOperand(i).isReg())
3869 MIB.
addReg(
I.getOperand(i).getReg());
3870 else if (
I.getOperand(i).isCImm())
3871 addNumImm(
I.getOperand(i).getCImm()->getValue(), MIB);
3872 else if (
I.getOperand(i).isMBB())
3873 MIB.
addMBB(
I.getOperand(i).getMBB());
3880 case Intrinsic::spv_loop_merge: {
3881 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoopMerge));
3882 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3883 if (
I.getOperand(i).isMBB())
3884 MIB.
addMBB(
I.getOperand(i).getMBB());
3891 case Intrinsic::spv_loop_control_intel: {
3893 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoopControlINTEL));
3894 for (
unsigned J = 1; J <
I.getNumExplicitOperands(); ++J)
3899 case Intrinsic::spv_selection_merge: {
3901 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSelectionMerge));
3902 assert(
I.getOperand(1).isMBB() &&
3903 "operand 1 to spv_selection_merge must be a basic block");
3904 MIB.
addMBB(
I.getOperand(1).getMBB());
3905 MIB.
addImm(getSelectionOperandForImm(
I.getOperand(2).getImm()));
3909 case Intrinsic::spv_cmpxchg:
3910 return selectAtomicCmpXchg(ResVReg, ResType,
I);
3911 case Intrinsic::spv_unreachable:
3912 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUnreachable))
3915 case Intrinsic::spv_alloca:
3916 return selectFrameIndex(ResVReg, ResType,
I);
3917 case Intrinsic::spv_alloca_array:
3918 return selectAllocaArray(ResVReg, ResType,
I);
3919 case Intrinsic::spv_assume:
3921 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAssumeTrueKHR))
3922 .
addUse(
I.getOperand(1).getReg())
3927 case Intrinsic::spv_expect:
3929 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExpectKHR))
3932 .
addUse(
I.getOperand(2).getReg())
3933 .
addUse(
I.getOperand(3).getReg())
3938 case Intrinsic::arithmetic_fence:
3939 if (STI.
canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence)) {
3940 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpArithmeticFenceEXT))
3943 .
addUse(
I.getOperand(2).getReg())
3947 return BuildCOPY(ResVReg,
I.getOperand(2).getReg(),
I);
3949 case Intrinsic::spv_thread_id:
3955 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
3957 case Intrinsic::spv_thread_id_in_group:
3963 return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
3965 case Intrinsic::spv_group_id:
3971 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg, ResType,
3973 case Intrinsic::spv_flattened_thread_id_in_group:
3980 return loadBuiltinInputID(SPIRV::BuiltIn::LocalInvocationIndex, ResVReg,
3982 case Intrinsic::spv_workgroup_size:
3983 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupSize, ResVReg,
3985 case Intrinsic::spv_global_size:
3986 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalSize, ResVReg, ResType,
3988 case Intrinsic::spv_global_offset:
3989 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalOffset, ResVReg,
3991 case Intrinsic::spv_num_workgroups:
3992 return loadVec3BuiltinInputID(SPIRV::BuiltIn::NumWorkgroups, ResVReg,
3994 case Intrinsic::spv_subgroup_size:
3995 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupSize, ResVReg, ResType,
3997 case Intrinsic::spv_num_subgroups:
3998 return loadBuiltinInputID(SPIRV::BuiltIn::NumSubgroups, ResVReg, ResType,
4000 case Intrinsic::spv_subgroup_id:
4001 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupId, ResVReg, ResType,
I);
4002 case Intrinsic::spv_subgroup_local_invocation_id:
4003 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupLocalInvocationId,
4004 ResVReg, ResType,
I);
4005 case Intrinsic::spv_subgroup_max_size:
4006 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupMaxSize, ResVReg, ResType,
4008 case Intrinsic::spv_fdot:
4009 return selectFloatDot(ResVReg, ResType,
I);
4010 case Intrinsic::spv_udot:
4011 case Intrinsic::spv_sdot:
4012 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
4014 return selectIntegerDot(ResVReg, ResType,
I,
4015 IID == Intrinsic::spv_sdot);
4016 return selectIntegerDotExpansion(ResVReg, ResType,
I);
4017 case Intrinsic::spv_dot4add_i8packed:
4018 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
4020 return selectDot4AddPacked<true>(ResVReg, ResType,
I);
4021 return selectDot4AddPackedExpansion<true>(ResVReg, ResType,
I);
4022 case Intrinsic::spv_dot4add_u8packed:
4023 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
4025 return selectDot4AddPacked<false>(ResVReg, ResType,
I);
4026 return selectDot4AddPackedExpansion<false>(ResVReg, ResType,
I);
4027 case Intrinsic::spv_all:
4028 return selectAll(ResVReg, ResType,
I);
4029 case Intrinsic::spv_any:
4030 return selectAny(ResVReg, ResType,
I);
4031 case Intrinsic::spv_cross:
4032 return selectExtInst(ResVReg, ResType,
I, CL::cross, GL::Cross);
4033 case Intrinsic::spv_distance:
4034 return selectExtInst(ResVReg, ResType,
I, CL::distance, GL::Distance);
4035 case Intrinsic::spv_lerp:
4036 return selectExtInst(ResVReg, ResType,
I, CL::mix, GL::FMix);
4037 case Intrinsic::spv_length:
4038 return selectExtInst(ResVReg, ResType,
I, CL::length, GL::Length);
4039 case Intrinsic::spv_degrees:
4040 return selectExtInst(ResVReg, ResType,
I, CL::degrees, GL::Degrees);
4041 case Intrinsic::spv_faceforward:
4042 return selectExtInst(ResVReg, ResType,
I, GL::FaceForward);
4043 case Intrinsic::spv_frac:
4044 return selectExtInst(ResVReg, ResType,
I, CL::fract, GL::Fract);
4045 case Intrinsic::spv_isinf:
4046 return selectOpIsInf(ResVReg, ResType,
I);
4047 case Intrinsic::spv_isnan:
4048 return selectOpIsNan(ResVReg, ResType,
I);
4049 case Intrinsic::spv_normalize:
4050 return selectExtInst(ResVReg, ResType,
I, CL::normalize, GL::Normalize);
4051 case Intrinsic::spv_refract:
4052 return selectExtInst(ResVReg, ResType,
I, GL::Refract);
4053 case Intrinsic::spv_reflect:
4054 return selectExtInst(ResVReg, ResType,
I, GL::Reflect);
4055 case Intrinsic::spv_rsqrt:
4056 return selectExtInst(ResVReg, ResType,
I, CL::rsqrt, GL::InverseSqrt);
4057 case Intrinsic::spv_sign:
4058 return selectSign(ResVReg, ResType,
I);
4059 case Intrinsic::spv_smoothstep:
4060 return selectExtInst(ResVReg, ResType,
I, CL::smoothstep, GL::SmoothStep);
4061 case Intrinsic::spv_firstbituhigh:
4062 return selectFirstBitHigh(ResVReg, ResType,
I,
false);
4063 case Intrinsic::spv_firstbitshigh:
4064 return selectFirstBitHigh(ResVReg, ResType,
I,
true);
4065 case Intrinsic::spv_firstbitlow:
4066 return selectFirstBitLow(ResVReg, ResType,
I);
4067 case Intrinsic::spv_group_memory_barrier_with_group_sync: {
4069 buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent,
I);
4070 Register ScopeReg = buildI32Constant(SPIRV::Scope::Workgroup,
I);
4072 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpControlBarrier))
4079 case Intrinsic::spv_generic_cast_to_ptr_explicit: {
4080 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 1).getReg();
4081 SPIRV::StorageClass::StorageClass ResSC =
4085 "Generic storage class");
4086 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpGenericCastToPtrExplicit))
4094 case Intrinsic::spv_lifetime_start:
4095 case Intrinsic::spv_lifetime_end: {
4096 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
4097 : SPIRV::OpLifetimeStop;
4098 int64_t
Size =
I.getOperand(
I.getNumExplicitDefs() + 1).getImm();
4099 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 2).getReg();
4108 case Intrinsic::spv_saturate:
4109 return selectSaturate(ResVReg, ResType,
I);
4110 case Intrinsic::spv_nclamp:
4111 return selectExtInst(ResVReg, ResType,
I, CL::fclamp, GL::NClamp);
4112 case Intrinsic::spv_uclamp:
4113 return selectExtInst(ResVReg, ResType,
I, CL::u_clamp, GL::UClamp);
4114 case Intrinsic::spv_sclamp:
4115 return selectExtInst(ResVReg, ResType,
I, CL::s_clamp, GL::SClamp);
4116 case Intrinsic::spv_subgroup_prefix_bit_count:
4117 return selectWavePrefixBitCount(ResVReg, ResType,
I);
4118 case Intrinsic::spv_wave_active_countbits:
4119 return selectWaveActiveCountBits(ResVReg, ResType,
I);
4120 case Intrinsic::spv_wave_all:
4121 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAll);
4122 case Intrinsic::spv_wave_any:
4123 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAny);
4124 case Intrinsic::spv_subgroup_ballot:
4125 return selectWaveOpInst(ResVReg, ResType,
I,
4126 SPIRV::OpGroupNonUniformBallot);
4127 case Intrinsic::spv_wave_is_first_lane:
4128 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformElect);
4129 case Intrinsic::spv_wave_reduce_umax:
4130 return selectWaveReduceMax(ResVReg, ResType,
I,
true);
4131 case Intrinsic::spv_wave_reduce_max:
4132 return selectWaveReduceMax(ResVReg, ResType,
I,
false);
4133 case Intrinsic::spv_wave_reduce_umin:
4134 return selectWaveReduceMin(ResVReg, ResType,
I,
true);
4135 case Intrinsic::spv_wave_reduce_min:
4136 return selectWaveReduceMin(ResVReg, ResType,
I,
false);
4137 case Intrinsic::spv_wave_reduce_sum:
4138 return selectWaveReduceSum(ResVReg, ResType,
I);
4139 case Intrinsic::spv_wave_readlane:
4140 return selectWaveOpInst(ResVReg, ResType,
I,
4141 SPIRV::OpGroupNonUniformShuffle);
4142 case Intrinsic::spv_wave_prefix_sum:
4143 return selectWaveExclusiveScanSum(ResVReg, ResType,
I);
4144 case Intrinsic::spv_wave_prefix_product:
4145 return selectWaveExclusiveScanProduct(ResVReg, ResType,
I);
4146 case Intrinsic::spv_step:
4147 return selectExtInst(ResVReg, ResType,
I, CL::step, GL::Step);
4148 case Intrinsic::spv_radians:
4149 return selectExtInst(ResVReg, ResType,
I, CL::radians, GL::Radians);
4153 case Intrinsic::instrprof_increment:
4154 case Intrinsic::instrprof_increment_step:
4155 case Intrinsic::instrprof_value_profile:
4158 case Intrinsic::spv_value_md:
4160 case Intrinsic::spv_resource_handlefrombinding: {
4161 return selectHandleFromBinding(ResVReg, ResType,
I);
4163 case Intrinsic::spv_resource_counterhandlefrombinding:
4164 return selectCounterHandleFromBinding(ResVReg, ResType,
I);
4165 case Intrinsic::spv_resource_updatecounter:
4166 return selectUpdateCounter(ResVReg, ResType,
I);
4167 case Intrinsic::spv_resource_store_typedbuffer: {
4168 return selectImageWriteIntrinsic(
I);
4170 case Intrinsic::spv_resource_load_typedbuffer: {
4171 return selectReadImageIntrinsic(ResVReg, ResType,
I);
4173 case Intrinsic::spv_resource_sample:
4174 case Intrinsic::spv_resource_sample_clamp:
4175 return selectSampleBasicIntrinsic(ResVReg, ResType,
I);
4176 case Intrinsic::spv_resource_samplebias:
4177 case Intrinsic::spv_resource_samplebias_clamp:
4178 return selectSampleBiasIntrinsic(ResVReg, ResType,
I);
4179 case Intrinsic::spv_resource_samplegrad:
4180 case Intrinsic::spv_resource_samplegrad_clamp:
4181 return selectSampleGradIntrinsic(ResVReg, ResType,
I);
4182 case Intrinsic::spv_resource_samplelevel:
4183 return selectSampleLevelIntrinsic(ResVReg, ResType,
I);
4184 case Intrinsic::spv_resource_samplecmp:
4185 case Intrinsic::spv_resource_samplecmp_clamp:
4186 return selectSampleCmpIntrinsic(ResVReg, ResType,
I);
4187 case Intrinsic::spv_resource_samplecmplevelzero:
4188 return selectSampleCmpLevelZeroIntrinsic(ResVReg, ResType,
I);
4189 case Intrinsic::spv_resource_gather:
4190 case Intrinsic::spv_resource_gather_cmp:
4191 return selectGatherIntrinsic(ResVReg, ResType,
I);
4192 case Intrinsic::spv_resource_getpointer: {
4193 return selectResourceGetPointer(ResVReg, ResType,
I);
4195 case Intrinsic::spv_pushconstant_getpointer: {
4196 return selectPushConstantGetPointer(ResVReg, ResType,
I);
4198 case Intrinsic::spv_discard: {
4199 return selectDiscard(ResVReg, ResType,
I);
4201 case Intrinsic::spv_resource_nonuniformindex: {
4202 return selectResourceNonUniformIndex(ResVReg, ResType,
I);
4204 case Intrinsic::spv_unpackhalf2x16: {
4205 return selectExtInst(ResVReg, ResType,
I, GL::UnpackHalf2x16);
4207 case Intrinsic::spv_packhalf2x16: {
4208 return selectExtInst(ResVReg, ResType,
I, GL::PackHalf2x16);
4210 case Intrinsic::spv_ddx:
4211 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdx);
4212 case Intrinsic::spv_ddy:
4213 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdy);
4214 case Intrinsic::spv_ddx_coarse:
4215 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdxCoarse);
4216 case Intrinsic::spv_ddy_coarse:
4217 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdyCoarse);
4218 case Intrinsic::spv_ddx_fine:
4219 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdxFine);
4220 case Intrinsic::spv_ddy_fine:
4221 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpDPdyFine);
4222 case Intrinsic::spv_fwidth:
4223 return selectDerivativeInst(ResVReg, ResType,
I, SPIRV::OpFwidth);
4225 std::string DiagMsg;
4226 raw_string_ostream OS(DiagMsg);
4228 DiagMsg =
"Intrinsic selection not implemented: " + DiagMsg;
4235bool SPIRVInstructionSelector::selectHandleFromBinding(
Register &ResVReg,
4236 SPIRVTypeInst ResType,
4237 MachineInstr &
I)
const {
4240 if (ResType->
getOpcode() == SPIRV::OpTypeImage)
4247bool SPIRVInstructionSelector::selectCounterHandleFromBinding(
4248 Register &ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
4250 assert(Intr.getIntrinsicID() ==
4251 Intrinsic::spv_resource_counterhandlefrombinding);
4254 Register MainHandleReg = Intr.getOperand(2).getReg();
4256 assert(MainHandleDef->getIntrinsicID() ==
4257 Intrinsic::spv_resource_handlefrombinding);
4261 uint32_t ArraySize =
getIConstVal(MainHandleDef->getOperand(4).getReg(),
MRI);
4262 Register IndexReg = MainHandleDef->getOperand(5).getReg();
4263 std::string CounterName =
4268 MachineIRBuilder MIRBuilder(
I);
4270 buildPointerToResource(SPIRVTypeInst(GR.
getPointeeType(ResType)),
4272 ArraySize, IndexReg, CounterName, MIRBuilder);
4274 return BuildCOPY(ResVReg, CounterVarReg,
I);
4277bool SPIRVInstructionSelector::selectUpdateCounter(
Register &ResVReg,
4278 SPIRVTypeInst ResType,
4279 MachineInstr &
I)
const {
4281 assert(Intr.getIntrinsicID() == Intrinsic::spv_resource_updatecounter);
4283 Register CounterHandleReg = Intr.getOperand(2).getReg();
4284 Register IncrReg = Intr.getOperand(3).getReg();
4291 SPIRVTypeInst CounterVarPointeeType = GR.
getPointeeType(CounterVarType);
4292 assert(CounterVarPointeeType &&
4293 CounterVarPointeeType->
getOpcode() == SPIRV::OpTypeStruct &&
4294 "Counter variable must be a struct");
4296 SPIRV::StorageClass::StorageBuffer &&
4297 "Counter variable must be in the storage buffer storage class");
4299 "Counter variable must have exactly 1 member in the struct");
4300 const SPIRVTypeInst MemberType =
4303 "Counter variable struct must have a single i32 member");
4307 MachineIRBuilder MIRBuilder(
I);
4309 Type::getInt32Ty(
I.getMF()->getFunction().getContext());
4312 LLVMIntType, MIRBuilder, SPIRV::StorageClass::StorageBuffer);
4318 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAccessChain))
4321 .
addUse(CounterHandleReg)
4328 Register Semantics = buildI32Constant(SPIRV::MemorySemantics::None,
I);
4331 Register Incr = buildI32Constant(
static_cast<uint32_t
>(IncrVal),
I);
4334 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAtomicIAdd))
4343 return BuildCOPY(ResVReg, AtomicRes,
I);
4351 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIAddS))
4359bool SPIRVInstructionSelector::selectReadImageIntrinsic(
Register &ResVReg,
4360 SPIRVTypeInst ResType,
4361 MachineInstr &
I)
const {
4369 Register ImageReg =
I.getOperand(2).getReg();
4371 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
4377 Register IdxReg =
I.getOperand(3).getReg();
4379 MachineInstr &Pos =
I;
4381 return generateImageReadOrFetch(ResVReg, ResType, NewImageReg, IdxReg, Loc,
4385bool SPIRVInstructionSelector::generateSampleImage(
4388 DebugLoc Loc, MachineInstr &Pos)
const {
4390 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
4398 MRI->createVirtualRegister(
MRI->getRegClass(SamplerReg));
4399 if (!loadHandleBeforePosition(NewSamplerReg,
4405 MachineIRBuilder MIRBuilder(Pos);
4418 bool IsExplicitLod = ImOps.GradX.has_value() || ImOps.GradY.has_value() ||
4419 ImOps.Lod.has_value();
4420 unsigned Opcode = IsExplicitLod ? SPIRV::OpImageSampleExplicitLod
4421 : SPIRV::OpImageSampleImplicitLod;
4423 Opcode = IsExplicitLod ? SPIRV::OpImageSampleDrefExplicitLod
4424 : SPIRV::OpImageSampleDrefImplicitLod;
4433 MIB.
addUse(*ImOps.Compare);
4435 uint32_t ImageOperands = 0;
4437 ImageOperands |= SPIRV::ImageOperand::Bias;
4439 ImageOperands |= SPIRV::ImageOperand::Lod;
4440 if (ImOps.GradX && ImOps.GradY)
4441 ImageOperands |= SPIRV::ImageOperand::Grad;
4442 if (ImOps.Offset && !isScalarOrVectorIntConstantZero(*ImOps.Offset)) {
4444 ImageOperands |= SPIRV::ImageOperand::ConstOffset;
4447 "Non-constant offsets are not supported in sample instructions.");
4451 ImageOperands |= SPIRV::ImageOperand::MinLod;
4453 if (ImageOperands != 0) {
4454 MIB.
addImm(ImageOperands);
4455 if (ImageOperands & SPIRV::ImageOperand::Bias)
4457 if (ImageOperands & SPIRV::ImageOperand::Lod)
4459 if (ImageOperands & SPIRV::ImageOperand::Grad) {
4460 MIB.
addUse(*ImOps.GradX);
4461 MIB.
addUse(*ImOps.GradY);
4464 (SPIRV::ImageOperand::ConstOffset | SPIRV::ImageOperand::Offset))
4465 MIB.
addUse(*ImOps.Offset);
4466 if (ImageOperands & SPIRV::ImageOperand::MinLod)
4467 MIB.
addUse(*ImOps.MinLod);
4474bool SPIRVInstructionSelector::selectSampleBasicIntrinsic(
4475 Register &ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
4476 Register ImageReg =
I.getOperand(2).getReg();
4477 Register SamplerReg =
I.getOperand(3).getReg();
4478 Register CoordinateReg =
I.getOperand(4).getReg();
4479 ImageOperands ImOps;
4480 if (
I.getNumOperands() > 5)
4481 ImOps.Offset =
I.getOperand(5).getReg();
4482 if (
I.getNumOperands() > 6)
4483 ImOps.MinLod =
I.getOperand(6).getReg();
4484 return generateSampleImage(ResVReg, ResType, ImageReg, SamplerReg,
4485 CoordinateReg, ImOps,
I.getDebugLoc(),
I);
4488bool SPIRVInstructionSelector::selectSampleBiasIntrinsic(
4489 Register &ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
4490 Register ImageReg =
I.getOperand(2).getReg();
4491 Register SamplerReg =
I.getOperand(3).getReg();
4492 Register CoordinateReg =
I.getOperand(4).getReg();
4493 ImageOperands ImOps;
4494 ImOps.Bias =
I.getOperand(5).getReg();
4495 if (
I.getNumOperands() > 6)
4496 ImOps.Offset =
I.getOperand(6).getReg();
4497 if (
I.getNumOperands() > 7)
4498 ImOps.MinLod =
I.getOperand(7).getReg();
4499 return generateSampleImage(ResVReg, ResType, ImageReg, SamplerReg,
4500 CoordinateReg, ImOps,
I.getDebugLoc(),
I);
4503bool SPIRVInstructionSelector::selectSampleGradIntrinsic(
4504 Register &ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
4505 Register ImageReg =
I.getOperand(2).getReg();
4506 Register SamplerReg =
I.getOperand(3).getReg();
4507 Register CoordinateReg =
I.getOperand(4).getReg();
4508 ImageOperands ImOps;
4509 ImOps.GradX =
I.getOperand(5).getReg();
4510 ImOps.GradY =
I.getOperand(6).getReg();
4511 if (
I.getNumOperands() > 7)
4512 ImOps.Offset =
I.getOperand(7).getReg();
4513 if (
I.getNumOperands() > 8)
4514 ImOps.MinLod =
I.getOperand(8).getReg();
4515 return generateSampleImage(ResVReg, ResType, ImageReg, SamplerReg,
4516 CoordinateReg, ImOps,
I.getDebugLoc(),
I);
4519bool SPIRVInstructionSelector::selectSampleLevelIntrinsic(
4520 Register &ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
4521 Register ImageReg =
I.getOperand(2).getReg();
4522 Register SamplerReg =
I.getOperand(3).getReg();
4523 Register CoordinateReg =
I.getOperand(4).getReg();
4524 ImageOperands ImOps;
4525 ImOps.Lod =
I.getOperand(5).getReg();
4526 if (
I.getNumOperands() > 6)
4527 ImOps.Offset =
I.getOperand(6).getReg();
4528 return generateSampleImage(ResVReg, ResType, ImageReg, SamplerReg,
4529 CoordinateReg, ImOps,
I.getDebugLoc(),
I);
4532bool SPIRVInstructionSelector::selectSampleCmpIntrinsic(
Register &ResVReg,
4533 SPIRVTypeInst ResType,
4534 MachineInstr &
I)
const {
4535 Register ImageReg =
I.getOperand(2).getReg();
4536 Register SamplerReg =
I.getOperand(3).getReg();
4537 Register CoordinateReg =
I.getOperand(4).getReg();
4538 ImageOperands ImOps;
4539 ImOps.Compare =
I.getOperand(5).getReg();
4540 if (
I.getNumOperands() > 6)
4541 ImOps.Offset =
I.getOperand(6).getReg();
4542 if (
I.getNumOperands() > 7)
4543 ImOps.MinLod =
I.getOperand(7).getReg();
4544 return generateSampleImage(ResVReg, ResType, ImageReg, SamplerReg,
4545 CoordinateReg, ImOps,
I.getDebugLoc(),
I);
4548bool SPIRVInstructionSelector::selectSampleCmpLevelZeroIntrinsic(
4549 Register &ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
4550 Register ImageReg =
I.getOperand(2).getReg();
4551 Register SamplerReg =
I.getOperand(3).getReg();
4552 Register CoordinateReg =
I.getOperand(4).getReg();
4553 ImageOperands ImOps;
4554 ImOps.Compare =
I.getOperand(5).getReg();
4555 if (
I.getNumOperands() > 6)
4556 ImOps.Offset =
I.getOperand(6).getReg();
4559 return generateSampleImage(ResVReg, ResType, ImageReg, SamplerReg,
4560 CoordinateReg, ImOps,
I.getDebugLoc(),
I);
4563bool SPIRVInstructionSelector::selectGatherIntrinsic(
Register &ResVReg,
4564 SPIRVTypeInst ResType,
4565 MachineInstr &
I)
const {
4566 Register ImageReg =
I.getOperand(2).getReg();
4567 Register SamplerReg =
I.getOperand(3).getReg();
4568 Register CoordinateReg =
I.getOperand(4).getReg();
4571 "ImageReg is not an image type.");
4576 ComponentOrCompareReg =
I.getOperand(5).getReg();
4577 OffsetReg =
I.getOperand(6).getReg();
4579 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
4580 if (!loadHandleBeforePosition(NewImageReg, ImageType, *ImageDef,
I)) {
4584 auto Dim =
static_cast<SPIRV::Dim::Dim
>(ImageType->
getOperand(2).
getImm());
4585 if (Dim != SPIRV::Dim::DIM_2D && Dim != SPIRV::Dim::DIM_Cube &&
4586 Dim != SPIRV::Dim::DIM_Rect) {
4588 "Gather operations are only supported for 2D, Cube, and Rect images.");
4594 MRI->createVirtualRegister(
MRI->getRegClass(SamplerReg));
4595 if (!loadHandleBeforePosition(
4600 MachineIRBuilder MIRBuilder(
I);
4601 SPIRVTypeInst SampledImageType =
4606 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSampledImage))
4614 bool IsGatherCmp =
IntrId == Intrinsic::spv_resource_gather_cmp;
4616 IsGatherCmp ? SPIRV::OpImageDrefGather : SPIRV::OpImageGather;
4618 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
4623 .
addUse(ComponentOrCompareReg);
4625 uint32_t ImageOperands = 0;
4626 if (OffsetReg && !isScalarOrVectorIntConstantZero(OffsetReg)) {
4627 if (Dim == SPIRV::Dim::DIM_Cube) {
4629 "Gather operations with offset are not supported for Cube images.");
4633 ImageOperands |= SPIRV::ImageOperand::ConstOffset;
4635 ImageOperands |= SPIRV::ImageOperand::Offset;
4639 if (ImageOperands != 0) {
4640 MIB.
addImm(ImageOperands);
4642 (SPIRV::ImageOperand::ConstOffset | SPIRV::ImageOperand::Offset))
4650bool SPIRVInstructionSelector::generateImageReadOrFetch(
4655 "ImageReg is not an image type.");
4657 bool IsSignedInteger =
4662 bool IsFetch = (SampledOp.getImm() == 1);
4665 if (ResultSize == 4) {
4668 TII.get(IsFetch ? SPIRV::OpImageFetch : SPIRV::OpImageRead))
4674 if (IsSignedInteger)
4680 SPIRVTypeInst ReadType = widenTypeToVec4(ResType, Pos);
4684 TII.get(IsFetch ? SPIRV::OpImageFetch : SPIRV::OpImageRead))
4689 if (IsSignedInteger)
4693 if (ResultSize == 1) {
4702 return extractSubvector(ResVReg, ResType, ReadReg, Pos);
4705bool SPIRVInstructionSelector::selectResourceGetPointer(
Register &ResVReg,
4706 SPIRVTypeInst ResType,
4707 MachineInstr &
I)
const {
4708 Register ResourcePtr =
I.getOperand(2).getReg();
4710 if (RegType->
getOpcode() == SPIRV::OpTypeImage) {
4719 MachineIRBuilder MIRBuilder(
I);
4721 Register IndexReg =
I.getOperand(3).getReg();
4724 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAccessChain))
4734bool SPIRVInstructionSelector::selectPushConstantGetPointer(
4735 Register &ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
4736 MRI->replaceRegWith(ResVReg,
I.getOperand(2).getReg());
4740bool SPIRVInstructionSelector::selectResourceNonUniformIndex(
4741 Register &ResVReg, SPIRVTypeInst ResType, MachineInstr &
I)
const {
4742 Register ObjReg =
I.getOperand(2).getReg();
4743 if (!BuildCOPY(ResVReg, ObjReg,
I))
4753 decorateUsesAsNonUniform(ResVReg);
4757void SPIRVInstructionSelector::decorateUsesAsNonUniform(
4760 while (WorkList.
size() > 0) {
4764 bool IsDecorated =
false;
4765 for (MachineInstr &Use :
MRI->use_instructions(CurrentReg)) {
4766 if (
Use.getOpcode() == SPIRV::OpDecorate &&
4767 Use.getOperand(1).getImm() == SPIRV::Decoration::NonUniformEXT) {
4773 if (
Use.getOperand(0).isReg() &&
Use.getOperand(0).isDef()) {
4775 if (ResultReg == CurrentReg)
4783 SPIRV::Decoration::NonUniformEXT, {});
4788bool SPIRVInstructionSelector::extractSubvector(
4790 MachineInstr &InsertionPoint)
const {
4792 [[maybe_unused]] uint64_t InputSize =
4795 assert(InputSize > 1 &&
"The input must be a vector.");
4796 assert(ResultSize > 1 &&
"The result must be a vector.");
4797 assert(ResultSize < InputSize &&
4798 "Cannot extract more element than there are in the input.");
4801 const TargetRegisterClass *ScalarRegClass = GR.
getRegClass(ScalarType);
4802 for (uint64_t
I = 0;
I < ResultSize;
I++) {
4803 Register ComponentReg =
MRI->createVirtualRegister(ScalarRegClass);
4805 InsertionPoint.
getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
4814 MachineInstrBuilder MIB =
BuildMI(*InsertionPoint.
getParent(), InsertionPoint,
4816 TII.get(SPIRV::OpCompositeConstruct))
4820 for (
Register ComponentReg : ComponentRegisters)
4821 MIB.
addUse(ComponentReg);
4826bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
4827 MachineInstr &
I)
const {
4834 Register ImageReg =
I.getOperand(1).getReg();
4836 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
4842 Register CoordinateReg =
I.getOperand(2).getReg();
4843 Register DataReg =
I.getOperand(3).getReg();
4846 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpImageWrite))
4854Register SPIRVInstructionSelector::buildPointerToResource(
4855 SPIRVTypeInst SpirvResType, SPIRV::StorageClass::StorageClass SC,
4856 uint32_t Set, uint32_t
Binding, uint32_t ArraySize,
Register IndexReg,
4857 StringRef Name, MachineIRBuilder MIRBuilder)
const {
4859 if (ArraySize == 1) {
4860 SPIRVTypeInst PtrType =
4863 "SpirvResType did not have an explicit layout.");
4868 const Type *VarType = ArrayType::get(
const_cast<Type *
>(ResType), ArraySize);
4869 SPIRVTypeInst VarPointerType =
4872 VarPointerType, Set,
Binding, Name, MIRBuilder);
4874 SPIRVTypeInst ResPointerType =
4887bool SPIRVInstructionSelector::selectFirstBitSet16(
4888 Register ResVReg, SPIRVTypeInst ResType, MachineInstr &
I,
4889 unsigned ExtendOpcode,
unsigned BitSetOpcode)
const {
4891 if (!selectOpWithSrcs(ExtReg, ResType,
I, {
I.getOperand(2).getReg()},
4895 return selectFirstBitSet32(ResVReg, ResType,
I, ExtReg, BitSetOpcode);
4898bool SPIRVInstructionSelector::selectFirstBitSet32(
4900 unsigned BitSetOpcode)
const {
4901 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
4904 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
4911bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
4913 unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
4920 assert(ComponentCount < 5 &&
"Vec 5+ will generate invalid SPIR-V ops");
4922 MachineIRBuilder MIRBuilder(
I);
4925 SPIRVTypeInst I64x2Type =
4927 SPIRVTypeInst Vec2ResType =
4930 std::vector<Register> PartialRegs;
4933 unsigned CurrentComponent = 0;
4934 for (; CurrentComponent + 1 < ComponentCount; CurrentComponent += 2) {
4940 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
4941 TII.get(SPIRV::OpVectorShuffle))
4946 .
addImm(CurrentComponent)
4947 .
addImm(CurrentComponent + 1);
4954 if (!selectFirstBitSet64(SubVecBitSetReg, Vec2ResType,
I, BitSetResult,
4955 BitSetOpcode, SwapPrimarySide))
4958 PartialRegs.push_back(SubVecBitSetReg);
4962 if (CurrentComponent != ComponentCount) {
4968 if (!selectOpWithSrcs(FinalElemReg, I64Type,
I, {SrcReg, ConstIntLastIdx},
4969 SPIRV::OpVectorExtractDynamic))
4975 if (!selectFirstBitSet64(FinalElemBitSetReg,
BaseType,
I, FinalElemReg,
4976 BitSetOpcode, SwapPrimarySide))
4979 PartialRegs.push_back(FinalElemBitSetReg);
4984 return selectOpWithSrcs(ResVReg, ResType,
I, std::move(PartialRegs),
4985 SPIRV::OpCompositeConstruct);
4988bool SPIRVInstructionSelector::selectFirstBitSet64(
4990 unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
5003 if (ComponentCount > 2) {
5004 return selectFirstBitSet64Overflow(ResVReg, ResType,
I, SrcReg,
5005 BitSetOpcode, SwapPrimarySide);
5009 MachineIRBuilder MIRBuilder(
I);
5011 BaseType, 2 * ComponentCount, MIRBuilder,
false);
5015 if (!selectOpWithSrcs(BitcastReg, PostCastType,
I, {SrcReg},
5021 if (!selectFirstBitSet32(FBSReg, PostCastType,
I, BitcastReg, BitSetOpcode))
5028 bool IsScalarRes = ResType->
getOpcode() != SPIRV::OpTypeVector;
5031 if (!selectOpWithSrcs(HighReg, ResType,
I, {FBSReg, ConstIntZero},
5032 SPIRV::OpVectorExtractDynamic))
5034 if (!selectOpWithSrcs(LowReg, ResType,
I, {FBSReg, ConstIntOne},
5035 SPIRV::OpVectorExtractDynamic))
5039 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
5040 TII.get(SPIRV::OpVectorShuffle))
5048 for (
unsigned J = 0; J < ComponentCount * 2; J += 2) {
5054 MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
5055 TII.get(SPIRV::OpVectorShuffle))
5063 for (
unsigned J = 1; J < ComponentCount * 2; J += 2) {
5083 SelectOp = SPIRV::OpSelectSISCond;
5084 AddOp = SPIRV::OpIAddS;
5092 SelectOp = SPIRV::OpSelectVIVCond;
5093 AddOp = SPIRV::OpIAddV;
5103 if (SwapPrimarySide) {
5104 PrimaryReg = LowReg;
5105 SecondaryReg = HighReg;
5106 PrimaryShiftReg = Reg0;
5107 SecondaryShiftReg = Reg32;
5112 if (!selectOpWithSrcs(BReg, BoolType,
I, {PrimaryReg, NegOneReg},
5118 if (!selectOpWithSrcs(TmpReg, ResType,
I, {
BReg, SecondaryReg, PrimaryReg},
5124 if (!selectOpWithSrcs(ValReg, ResType,
I,
5125 {
BReg, SecondaryShiftReg, PrimaryShiftReg}, SelectOp))
5128 return selectOpWithSrcs(ResVReg, ResType,
I, {ValReg, TmpReg}, AddOp);
5131bool SPIRVInstructionSelector::selectFirstBitHigh(
Register ResVReg,
5132 SPIRVTypeInst ResType,
5134 bool IsSigned)
const {
5136 Register OpReg =
I.getOperand(2).getReg();
5139 unsigned ExtendOpcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
5140 unsigned BitSetOpcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;
5144 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
5146 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
5148 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
5152 "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
5156bool SPIRVInstructionSelector::selectFirstBitLow(
Register ResVReg,
5157 SPIRVTypeInst ResType,
5158 MachineInstr &
I)
const {
5160 Register OpReg =
I.getOperand(2).getReg();
5165 unsigned ExtendOpcode = SPIRV::OpUConvert;
5166 unsigned BitSetOpcode = GL::FindILsb;
5170 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
5172 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
5174 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
5181bool SPIRVInstructionSelector::selectAllocaArray(
Register ResVReg,
5182 SPIRVTypeInst ResType,
5183 MachineInstr &
I)
const {
5187 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariableLengthArrayINTEL))
5190 .
addUse(
I.getOperand(2).getReg())
5193 unsigned Alignment =
I.getOperand(3).getImm();
5199bool SPIRVInstructionSelector::selectFrameIndex(
Register ResVReg,
5200 SPIRVTypeInst ResType,
5201 MachineInstr &
I)
const {
5205 BuildMI(*It->getParent(), It, It->getDebugLoc(),
TII.get(SPIRV::OpVariable))
5208 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
5211 unsigned Alignment =
I.getOperand(2).getImm();
5218bool SPIRVInstructionSelector::selectBranch(MachineInstr &
I)
const {
5223 const MachineInstr *PrevI =
I.getPrevNode();
5225 if (PrevI !=
nullptr && PrevI->
getOpcode() == TargetOpcode::G_BRCOND) {
5229 .
addMBB(
I.getOperand(0).getMBB())
5234 .
addMBB(
I.getOperand(0).getMBB())
5239bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &
I)
const {
5250 const MachineInstr *NextI =
I.getNextNode();
5252 if (NextI !=
nullptr && NextI->
getOpcode() == SPIRV::OpBranchConditional)
5258 MachineBasicBlock *NextMBB =
I.getMF()->getBlockNumbered(NextMBBNum);
5260 .
addUse(
I.getOperand(0).getReg())
5261 .
addMBB(
I.getOperand(1).getMBB())
5267bool SPIRVInstructionSelector::selectPhi(
Register ResVReg,
5268 MachineInstr &
I)
const {
5270 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(TargetOpcode::PHI))
5272 const unsigned NumOps =
I.getNumOperands();
5273 for (
unsigned i = 1; i <
NumOps; i += 2) {
5274 MIB.
addUse(
I.getOperand(i + 0).getReg());
5275 MIB.
addMBB(
I.getOperand(i + 1).getMBB());
5281bool SPIRVInstructionSelector::selectGlobalValue(
5282 Register ResVReg, MachineInstr &
I,
const MachineInstr *Init)
const {
5284 MachineIRBuilder MIRBuilder(
I);
5285 const GlobalValue *GV =
I.
getOperand(1).getGlobal();
5288 std::string GlobalIdent;
5290 unsigned &
ID = UnnamedGlobalIDs[GV];
5292 ID = UnnamedGlobalIDs.
size();
5293 GlobalIdent =
"__unnamed_" + Twine(
ID).str();
5320 GVFun ? SPIRV::StorageClass::CodeSectionINTEL
5327 MachineRegisterInfo *
MRI = MIRBuilder.
getMRI();
5330 MRI->setRegClass(FuncVReg, &SPIRV::pIDRegClass);
5331 MachineInstrBuilder MIB1 =
5332 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
5335 MachineInstrBuilder MIB2 =
5337 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
5341 GR.
add(ConstVal, MIB2);
5348 MachineInstrBuilder MIB3 =
5349 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
5352 GR.
add(ConstVal, MIB3);
5356 assert(NewReg != ResVReg);
5357 return BuildCOPY(ResVReg, NewReg,
I);
5367 const std::optional<SPIRV::LinkageType::LinkageType> LnkType =
5373 SPIRVTypeInst ResType =
5377 GlobalVar->isConstant(), LnkType, MIRBuilder,
true);
5382 if (
GlobalVar->isExternallyInitialized() &&
5383 STI.getTargetTriple().getVendor() ==
Triple::AMD) {
5384 constexpr unsigned ReadWriteINTEL = 3u;
5387 MachineInstrBuilder MIB(*MF, --MIRBuilder.
getInsertPt());
5393bool SPIRVInstructionSelector::selectLog10(
Register ResVReg,
5394 SPIRVTypeInst ResType,
5395 MachineInstr &
I)
const {
5397 return selectExtInst(ResVReg, ResType,
I, CL::log10);
5405 MachineIRBuilder MIRBuilder(
I);
5410 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
5413 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
5415 .
add(
I.getOperand(1))
5420 ResType->
getOpcode() == SPIRV::OpTypeFloat);
5422 SPIRVTypeInst SpirvScalarType = ResType->
getOpcode() == SPIRV::OpTypeVector
5430 auto Opcode = ResType->
getOpcode() == SPIRV::OpTypeVector
5431 ? SPIRV::OpVectorTimesScalar
5442bool SPIRVInstructionSelector::selectModf(
Register ResVReg,
5443 SPIRVTypeInst ResType,
5444 MachineInstr &
I)
const {
5460 MachineIRBuilder MIRBuilder(
I);
5463 ResType, MIRBuilder, SPIRV::StorageClass::Function);
5475 MachineBasicBlock &EntryBB =
I.getMF()->front();
5479 BuildMI(EntryBB, VarPos,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
5482 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function));
5488 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
5491 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::OpenCL_std))
5494 .
add(
I.getOperand(
I.getNumExplicitDefs()))
5498 Register IntegralPartReg =
I.getOperand(1).getReg();
5499 if (IntegralPartReg.
isValid()) {
5501 auto LoadMIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
5512 assert(
false &&
"GLSL::Modf is deprecated.");
5523bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
5524 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
5525 SPIRVTypeInst ResType, MachineInstr &
I)
const {
5526 MachineIRBuilder MIRBuilder(
I);
5527 const SPIRVTypeInst Vec3Ty =
5530 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
5542 SPIRV::StorageClass::Input,
nullptr,
true, std::nullopt, MIRBuilder,
5546 MachineRegisterInfo *
MRI = MIRBuilder.
getMRI();
5547 Register LoadedRegister =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
5552 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
5559 assert(
I.getOperand(2).isReg());
5560 const uint32_t ThreadId =
foldImm(
I.getOperand(2),
MRI);
5564 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
5575bool SPIRVInstructionSelector::loadBuiltinInputID(
5576 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
5577 SPIRVTypeInst ResType, MachineInstr &
I)
const {
5578 MachineIRBuilder MIRBuilder(
I);
5580 ResType, MIRBuilder, SPIRV::StorageClass::Input);
5595 SPIRV::StorageClass::Input,
nullptr,
true, std::nullopt, MIRBuilder,
5599 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
5608SPIRVTypeInst SPIRVInstructionSelector::widenTypeToVec4(SPIRVTypeInst
Type,
5609 MachineInstr &
I)
const {
5610 MachineIRBuilder MIRBuilder(
I);
5611 if (
Type->getOpcode() != SPIRV::OpTypeVector)
5615 if (VectorSize == 4)
5623bool SPIRVInstructionSelector::loadHandleBeforePosition(
5624 Register &HandleReg, SPIRVTypeInst ResType, GIntrinsic &HandleDef,
5625 MachineInstr &Pos)
const {
5628 Intrinsic::spv_resource_handlefrombinding);
5636 bool IsStructuredBuffer = ResType->
getOpcode() == SPIRV::OpTypePointer;
5637 MachineIRBuilder MIRBuilder(HandleDef);
5638 SPIRVTypeInst VarType = ResType;
5639 SPIRV::StorageClass::StorageClass SC = SPIRV::StorageClass::UniformConstant;
5641 if (IsStructuredBuffer) {
5647 buildPointerToResource(SPIRVTypeInst(VarType), SC, Set,
Binding,
5648 ArraySize, IndexReg, Name, MIRBuilder);
5652 uint32_t LoadOpcode =
5653 IsStructuredBuffer ? SPIRV::OpCopyObject : SPIRV::OpLoad;
5663void SPIRVInstructionSelector::errorIfInstrOutsideShader(
5664 MachineInstr &
I)
const {
5666 std::string DiagMsg;
5667 raw_string_ostream OS(DiagMsg);
5668 I.print(OS,
true,
false,
false,
false);
5669 DiagMsg +=
" is only supported in shaders.\n";
5675InstructionSelector *
5679 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
DXIL Resource Implicit Binding
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
LLVMTypeRef LLVMIntType(unsigned NumBits)
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static StringRef getName(Value *V)
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static APFloat getOneFP(const Type *LLVMFloatTy)
static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC)
static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg)
static bool mayApplyGenericSelection(unsigned Opcode)
static APFloat getZeroFP(const Type *LLVMFloatTy)
std::vector< std::pair< SPIRV::InstructionSet::InstructionSet, uint32_t > > ExtInstList
static bool intrinsicHasSideEffects(Intrinsic::ID ID)
static unsigned getBoolCmpOpcode(unsigned PredNum)
static unsigned getICmpOpcode(unsigned PredNum)
static bool isOpcodeWithNoSideEffects(unsigned Opcode)
static void addMemoryOperands(MachineMemOperand *MemOp, MachineInstrBuilder &MIB, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry &GR)
static bool isConstReg(MachineRegisterInfo *MRI, MachineInstr *OpDef)
static unsigned getPtrCmpOpcode(unsigned Pred)
bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static const fltSemantics & IEEEsingle()
static const fltSemantics & IEEEdouble()
static const fltSemantics & IEEEhalf()
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Intrinsic::ID getIntrinsicID() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
@ InternalLinkage
Rename collisions when linking (static functions).
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr uint16_t getNumElements() const
Returns the number of elements in a vector LLT.
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
void constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI void emitGenericError(const Twine &ErrMsg) const
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
@ MOVolatile
The memory access is volatile.
@ MONonTemporal
The memory access is non-temporal.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
defusechain_instr_iterator< true, false, false, true > use_instr_iterator
use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the specified register,...
defusechain_instr_iterator< false, true, false, true > def_instr_iterator
def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the specified register,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Analysis providing profile information.
Holds all the information related to register banks.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
bool isScalarOrVectorSigned(SPIRVTypeInst Type) const
SPIRVTypeInst getOrCreateOpTypeSampledImage(SPIRVTypeInst ImageType, MachineIRBuilder &MIRBuilder)
void assignSPIRVTypeToVReg(SPIRVTypeInst Type, Register VReg, const MachineFunction &MF)
const TargetRegisterClass * getRegClass(SPIRVTypeInst SpvType) const
MachineInstr * getOrAddMemAliasingINTELInst(MachineIRBuilder &MIRBuilder, const MDNode *AliasingListMD)
bool isAggregateType(SPIRVTypeInst Type) const
unsigned getScalarOrVectorBitWidth(SPIRVTypeInst Type) const
SPIRVTypeInst getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
SPIRVTypeInst getOrCreateSPIRVVectorType(SPIRVTypeInst BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder, bool EmitIR)
Register buildGlobalVariable(Register Reg, SPIRVTypeInst BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, const std::optional< SPIRV::LinkageType::LinkageType > &LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
SPIRVTypeInst getResultType(Register VReg, MachineFunction *MF=nullptr)
unsigned getScalarOrVectorComponentCount(Register VReg) const
const Type * getTypeForSPIRVType(SPIRVTypeInst Ty) const
bool isBitcastCompatible(SPIRVTypeInst Type1, SPIRVTypeInst Type2) const
unsigned getPointerSize() const
Register getOrCreateConstFP(APFloat Val, MachineInstr &I, SPIRVTypeInst SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
LLT getRegType(SPIRVTypeInst SpvType) const
void invalidateMachineInstr(MachineInstr *MI)
SPIRVTypeInst getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder, bool EmitIR)
SPIRVTypeInst getOrCreateSPIRVPointerType(const Type *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC)
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
Register getSPIRVTypeID(SPIRVTypeInst SpirvType) const
Register getOrCreateConstInt(uint64_t Val, MachineInstr &I, SPIRVTypeInst SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVTypeInst SpvType, const SPIRVInstrInfo &TII)
bool findValueAttrs(const MachineInstr *Key, Type *&Ty, StringRef &Name)
SPIRVTypeInst retrieveScalarOrVectorIntType(SPIRVTypeInst Type) const
Register getOrCreateGlobalVariableWithBinding(SPIRVTypeInst VarType, uint32_t Set, uint32_t Binding, StringRef Name, MachineIRBuilder &MIRBuilder)
SPIRVTypeInst changePointerStorageClass(SPIRVTypeInst PtrType, SPIRV::StorageClass::StorageClass SC, MachineInstr &I)
Register getOrCreateConstVector(uint64_t Val, MachineInstr &I, SPIRVTypeInst SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVTypeInst SpvType=nullptr)
void addGlobalObject(const Value *V, const MachineFunction *MF, Register R)
SPIRVTypeInst getScalarOrVectorComponentType(SPIRVTypeInst Type) const
void recordFunctionPointer(const MachineOperand *MO, const Function *F)
SPIRVTypeInst getOrCreateSPIRVFloatType(unsigned BitWidth, MachineInstr &I, const SPIRVInstrInfo &TII)
SPIRVTypeInst getPointeeType(SPIRVTypeInst PtrType)
SPIRVTypeInst getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
MachineFunction * setCurrentFunc(MachineFunction &MF)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVTypeInst SpvType)
SPIRVTypeInst getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
Type * getDeducedGlobalValueType(const GlobalValue *Global)
Register getOrCreateUndef(MachineInstr &I, SPIRVTypeInst SpvType, const SPIRVInstrInfo &TII)
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
bool erase(const MachineInstr *MI)
bool add(SPIRV::IRHandle Handle, const MachineInstr *MI)
Register find(SPIRV::IRHandle Handle, const MachineFunction *MF)
bool isPhysicalSPIRV() const
bool isAtLeastSPIRVVer(VersionTuple VerToCompareTo) const
bool canUseExtInstSet(SPIRV::InstructionSet::InstructionSet E) const
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
constexpr size_t size() const
size - Get the string size.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
The instances of the Type class are immutable: once they are created, they are never changed.
@ HalfTyID
16-bit floating point type
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isStructTy() const
True if this is an instance of StructType.
bool isAggregateType() const
Return true if the type is an aggregate type.
TypeID getTypeID() const
Return the type id for the type.
Value * getOperand(unsigned i) const
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface o...
NodeAddr< DefNode * > Def
NodeAddr< InstrNode * > Instr
NodeAddr< UseNode * > Use
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isTypeFoldingSupported(unsigned Opcode)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
FunctionAddr VTableAddr uintptr_t uintptr_t Int32Ty
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
LLVM_ABI void constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
Register createVirtualRegister(SPIRVTypeInst SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
unsigned getArrayComponentCount(const MachineRegisterInfo *MRI, const MachineInstr *ResType)
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
MachineBasicBlock::iterator getFirstValidInstructionInsertPoint(MachineBasicBlock &BB)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
Type * toTypedPointer(Type *Ty)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
MachineInstr * passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
std::optional< SPIRV::LinkageType::LinkageType > getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
SPIRV::StorageClass::StorageClass addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI)
AtomicOrdering
Atomic ordering for LLVM's memory model.
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
InstructionSelector * createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget, const RegisterBankInfo &RBI)
std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI)
int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
MachineInstr * getDefInstrMaybeConstant(Register &ConstReg, const MachineRegisterInfo *MRI)
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool hasInitializer(const GlobalVariable *GV)
void addStringImm(const StringRef &Str, MCInst &Inst)
MachineInstr * getVRegDef(MachineRegisterInfo &MRI, Register Reg)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
LLVM_ABI bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...