32#include "llvm/IR/IntrinsicsSPIRV.h"
36#define DEBUG_TYPE "spirv-isel"
43 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
47llvm::SPIRV::SelectionControl::SelectionControl
48getSelectionOperandForImm(
int Imm) {
50 return SPIRV::SelectionControl::Flatten;
52 return SPIRV::SelectionControl::DontFlatten;
54 return SPIRV::SelectionControl::None;
58#define GET_GLOBALISEL_PREDICATE_BITSET
59#include "SPIRVGenGlobalISel.inc"
60#undef GET_GLOBALISEL_PREDICATE_BITSET
87#define GET_GLOBALISEL_PREDICATES_DECL
88#include "SPIRVGenGlobalISel.inc"
89#undef GET_GLOBALISEL_PREDICATES_DECL
91#define GET_GLOBALISEL_TEMPORARIES_DECL
92#include "SPIRVGenGlobalISel.inc"
93#undef GET_GLOBALISEL_TEMPORARIES_DECL
115 unsigned BitSetOpcode)
const;
119 unsigned BitSetOpcode)
const;
123 unsigned BitSetOpcode,
bool SwapPrimarySide)
const;
127 unsigned BitSetOpcode,
128 bool SwapPrimarySide)
const;
135 unsigned Opcode)
const;
138 unsigned Opcode)
const;
155 unsigned NegateOpcode = 0)
const;
215 template <
bool Signed>
218 template <
bool Signed>
239 bool IsSigned,
unsigned Opcode)
const;
241 bool IsSigned)
const;
247 bool IsSigned)
const;
286 GL::GLSLExtInst GLInst)
const;
291 GL::GLSLExtInst GLInst)
const;
313 bool selectCounterHandleFromBinding(
Register &ResVReg,
322 bool selectResourceNonUniformIndex(
Register &ResVReg,
332 std::pair<Register, bool>
334 const SPIRVType *ResType =
nullptr)
const;
346 SPIRV::StorageClass::StorageClass SC)
const;
353 SPIRV::StorageClass::StorageClass SC,
365 bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
368 bool loadBuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
373 void decorateUsesAsNonUniform(
Register &NonUniformReg)
const;
376bool sampledTypeIsSignedInteger(
const llvm::Type *HandleType) {
378 if (
TET->getTargetExtName() ==
"spirv.Image") {
381 assert(
TET->getTargetExtName() ==
"spirv.SignedImage");
382 return TET->getTypeParameter(0)->isIntegerTy();
386#define GET_GLOBALISEL_IMPL
387#include "SPIRVGenGlobalISel.inc"
388#undef GET_GLOBALISEL_IMPL
394 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
397#include
"SPIRVGenGlobalISel.inc"
400#include
"SPIRVGenGlobalISel.inc"
412 InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
416void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
417 if (HasVRegsReset == &MF)
422 for (
unsigned I = 0,
E =
MRI.getNumVirtRegs();
I !=
E; ++
I) {
424 LLT RegType =
MRI.getType(
Reg);
432 for (
const auto &
MBB : MF) {
433 for (
const auto &
MI :
MBB) {
436 if (
MI.getOpcode() != SPIRV::ASSIGN_TYPE)
440 LLT DstType =
MRI.getType(DstReg);
442 LLT SrcType =
MRI.getType(SrcReg);
443 if (DstType != SrcType)
444 MRI.setType(DstReg,
MRI.getType(SrcReg));
446 const TargetRegisterClass *DstRC =
MRI.getRegClassOrNull(DstReg);
447 const TargetRegisterClass *SrcRC =
MRI.getRegClassOrNull(SrcReg);
448 if (DstRC != SrcRC && SrcRC)
449 MRI.setRegClass(DstReg, SrcRC);
465 case TargetOpcode::G_CONSTANT:
466 case TargetOpcode::G_FCONSTANT:
468 case TargetOpcode::G_INTRINSIC:
469 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
470 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
472 Intrinsic::spv_const_composite;
473 case TargetOpcode::G_BUILD_VECTOR:
474 case TargetOpcode::G_SPLAT_VECTOR: {
485 case SPIRV::OpConstantTrue:
486 case SPIRV::OpConstantFalse:
487 case SPIRV::OpConstantI:
488 case SPIRV::OpConstantF:
489 case SPIRV::OpConstantComposite:
490 case SPIRV::OpConstantCompositeContinuedINTEL:
491 case SPIRV::OpConstantSampler:
492 case SPIRV::OpConstantNull:
494 case SPIRV::OpConstantFunctionPointerINTEL:
510 for (
const auto &MO :
MI.all_defs()) {
512 if (
Reg.isPhysical() || !
MRI.use_nodbg_empty(
Reg))
515 if (
MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE ||
MI.isFakeUse() ||
516 MI.isLifetimeMarker())
520 if (
MI.mayStore() ||
MI.isCall() ||
521 (
MI.mayLoad() &&
MI.hasOrderedMemoryRef()) ||
MI.isPosition() ||
522 MI.isDebugInstr() ||
MI.isTerminator() ||
MI.isJumpTableDebugInfo())
527bool SPIRVInstructionSelector::select(MachineInstr &
I) {
528 resetVRegsType(*
I.getParent()->getParent());
530 assert(
I.getParent() &&
"Instruction should be in a basic block!");
531 assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
536 if (Opcode == SPIRV::ASSIGN_TYPE) {
537 Register DstReg =
I.getOperand(0).getReg();
538 Register SrcReg =
I.getOperand(1).getReg();
539 auto *
Def =
MRI->getVRegDef(SrcReg);
541 Def->getOpcode() != TargetOpcode::G_CONSTANT &&
542 Def->getOpcode() != TargetOpcode::G_FCONSTANT) {
544 if (
Def->getOpcode() == TargetOpcode::G_SELECT) {
545 Register SelectDstReg =
Def->getOperand(0).getReg();
549 Def->removeFromParent();
550 MRI->replaceRegWith(DstReg, SelectDstReg);
552 I.removeFromParent();
554 Res = selectImpl(
I, *CoverageInfo);
556 if (!Res &&
Def->getOpcode() != TargetOpcode::G_CONSTANT) {
557 dbgs() <<
"Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
561 assert(Res ||
Def->getOpcode() == TargetOpcode::G_CONSTANT);
568 MRI->setRegClass(SrcReg,
MRI->getRegClass(DstReg));
569 MRI->replaceRegWith(SrcReg, DstReg);
571 I.removeFromParent();
573 }
else if (
I.getNumDefs() == 1) {
580 if (DeadMIs.contains(&
I)) {
590 if (
I.getNumOperands() !=
I.getNumExplicitOperands()) {
591 LLVM_DEBUG(
errs() <<
"Generic instr has unexpected implicit operands\n");
597 bool HasDefs =
I.getNumDefs() > 0;
600 assert(!HasDefs || ResType ||
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
601 I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
602 if (spvSelect(ResVReg, ResType,
I)) {
604 for (
unsigned i = 0; i <
I.getNumDefs(); ++i)
607 I.removeFromParent();
615 case TargetOpcode::G_CONSTANT:
616 case TargetOpcode::G_FCONSTANT:
618 case TargetOpcode::G_SADDO:
619 case TargetOpcode::G_SSUBO:
626 MachineInstr &
I)
const {
627 const TargetRegisterClass *DstRC =
MRI->getRegClassOrNull(DestReg);
628 const TargetRegisterClass *SrcRC =
MRI->getRegClassOrNull(SrcReg);
629 if (DstRC != SrcRC && SrcRC)
630 MRI->setRegClass(DestReg, SrcRC);
631 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
632 TII.get(TargetOpcode::COPY))
638bool SPIRVInstructionSelector::spvSelect(
Register ResVReg,
640 MachineInstr &
I)
const {
641 const unsigned Opcode =
I.getOpcode();
643 return selectImpl(
I, *CoverageInfo);
645 case TargetOpcode::G_CONSTANT:
646 case TargetOpcode::G_FCONSTANT:
647 return selectConst(ResVReg, ResType,
I);
648 case TargetOpcode::G_GLOBAL_VALUE:
649 return selectGlobalValue(ResVReg,
I);
650 case TargetOpcode::G_IMPLICIT_DEF:
651 return selectOpUndef(ResVReg, ResType,
I);
652 case TargetOpcode::G_FREEZE:
653 return selectFreeze(ResVReg, ResType,
I);
655 case TargetOpcode::G_INTRINSIC:
656 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
657 case TargetOpcode::G_INTRINSIC_CONVERGENT:
658 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
659 return selectIntrinsic(ResVReg, ResType,
I);
660 case TargetOpcode::G_BITREVERSE:
661 return selectBitreverse(ResVReg, ResType,
I);
663 case TargetOpcode::G_BUILD_VECTOR:
664 return selectBuildVector(ResVReg, ResType,
I);
665 case TargetOpcode::G_SPLAT_VECTOR:
666 return selectSplatVector(ResVReg, ResType,
I);
668 case TargetOpcode::G_SHUFFLE_VECTOR: {
669 MachineBasicBlock &BB = *
I.getParent();
670 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
673 .
addUse(
I.getOperand(1).getReg())
674 .
addUse(
I.getOperand(2).getReg());
675 for (
auto V :
I.getOperand(3).getShuffleMask())
679 case TargetOpcode::G_MEMMOVE:
680 case TargetOpcode::G_MEMCPY:
681 case TargetOpcode::G_MEMSET:
682 return selectMemOperation(ResVReg,
I);
684 case TargetOpcode::G_ICMP:
685 return selectICmp(ResVReg, ResType,
I);
686 case TargetOpcode::G_FCMP:
687 return selectFCmp(ResVReg, ResType,
I);
689 case TargetOpcode::G_FRAME_INDEX:
690 return selectFrameIndex(ResVReg, ResType,
I);
692 case TargetOpcode::G_LOAD:
693 return selectLoad(ResVReg, ResType,
I);
694 case TargetOpcode::G_STORE:
695 return selectStore(
I);
697 case TargetOpcode::G_BR:
698 return selectBranch(
I);
699 case TargetOpcode::G_BRCOND:
700 return selectBranchCond(
I);
702 case TargetOpcode::G_PHI:
703 return selectPhi(ResVReg, ResType,
I);
705 case TargetOpcode::G_FPTOSI:
706 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
707 case TargetOpcode::G_FPTOUI:
708 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
710 case TargetOpcode::G_FPTOSI_SAT:
711 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
712 case TargetOpcode::G_FPTOUI_SAT:
713 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
715 case TargetOpcode::G_SITOFP:
716 return selectIToF(ResVReg, ResType,
I,
true, SPIRV::OpConvertSToF);
717 case TargetOpcode::G_UITOFP:
718 return selectIToF(ResVReg, ResType,
I,
false, SPIRV::OpConvertUToF);
720 case TargetOpcode::G_CTPOP:
721 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitCount);
722 case TargetOpcode::G_SMIN:
723 return selectExtInst(ResVReg, ResType,
I, CL::s_min, GL::SMin);
724 case TargetOpcode::G_UMIN:
725 return selectExtInst(ResVReg, ResType,
I, CL::u_min, GL::UMin);
727 case TargetOpcode::G_SMAX:
728 return selectExtInst(ResVReg, ResType,
I, CL::s_max, GL::SMax);
729 case TargetOpcode::G_UMAX:
730 return selectExtInst(ResVReg, ResType,
I, CL::u_max, GL::UMax);
732 case TargetOpcode::G_SCMP:
733 return selectSUCmp(ResVReg, ResType,
I,
true);
734 case TargetOpcode::G_UCMP:
735 return selectSUCmp(ResVReg, ResType,
I,
false);
736 case TargetOpcode::G_LROUND:
737 case TargetOpcode::G_LLROUND: {
739 MRI->createVirtualRegister(
MRI->getRegClass(ResVReg),
"lround");
740 MRI->setRegClass(regForLround, &SPIRV::iIDRegClass);
742 regForLround, *(
I.getParent()->getParent()));
744 I, CL::round, GL::Round);
746 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConvertFToS))
752 case TargetOpcode::G_STRICT_FMA:
753 case TargetOpcode::G_FMA:
754 return selectExtInst(ResVReg, ResType,
I, CL::fma, GL::Fma);
756 case TargetOpcode::G_STRICT_FLDEXP:
757 return selectExtInst(ResVReg, ResType,
I, CL::ldexp);
759 case TargetOpcode::G_FPOW:
760 return selectExtInst(ResVReg, ResType,
I, CL::pow, GL::Pow);
761 case TargetOpcode::G_FPOWI:
762 return selectExtInst(ResVReg, ResType,
I, CL::pown);
764 case TargetOpcode::G_FEXP:
765 return selectExtInst(ResVReg, ResType,
I, CL::exp, GL::Exp);
766 case TargetOpcode::G_FEXP2:
767 return selectExtInst(ResVReg, ResType,
I, CL::exp2, GL::Exp2);
768 case TargetOpcode::G_FMODF:
769 return selectModf(ResVReg, ResType,
I);
771 case TargetOpcode::G_FLOG:
772 return selectExtInst(ResVReg, ResType,
I, CL::log, GL::Log);
773 case TargetOpcode::G_FLOG2:
774 return selectExtInst(ResVReg, ResType,
I, CL::log2, GL::Log2);
775 case TargetOpcode::G_FLOG10:
776 return selectLog10(ResVReg, ResType,
I);
778 case TargetOpcode::G_FABS:
779 return selectExtInst(ResVReg, ResType,
I, CL::fabs, GL::FAbs);
780 case TargetOpcode::G_ABS:
781 return selectExtInst(ResVReg, ResType,
I, CL::s_abs, GL::SAbs);
783 case TargetOpcode::G_FMINNUM:
784 case TargetOpcode::G_FMINIMUM:
785 return selectExtInst(ResVReg, ResType,
I, CL::fmin, GL::NMin);
786 case TargetOpcode::G_FMAXNUM:
787 case TargetOpcode::G_FMAXIMUM:
788 return selectExtInst(ResVReg, ResType,
I, CL::fmax, GL::NMax);
790 case TargetOpcode::G_FCOPYSIGN:
791 return selectExtInst(ResVReg, ResType,
I, CL::copysign);
793 case TargetOpcode::G_FCEIL:
794 return selectExtInst(ResVReg, ResType,
I, CL::ceil, GL::Ceil);
795 case TargetOpcode::G_FFLOOR:
796 return selectExtInst(ResVReg, ResType,
I, CL::floor, GL::Floor);
798 case TargetOpcode::G_FCOS:
799 return selectExtInst(ResVReg, ResType,
I, CL::cos, GL::Cos);
800 case TargetOpcode::G_FSIN:
801 return selectExtInst(ResVReg, ResType,
I, CL::sin, GL::Sin);
802 case TargetOpcode::G_FTAN:
803 return selectExtInst(ResVReg, ResType,
I, CL::tan, GL::Tan);
804 case TargetOpcode::G_FACOS:
805 return selectExtInst(ResVReg, ResType,
I, CL::acos, GL::Acos);
806 case TargetOpcode::G_FASIN:
807 return selectExtInst(ResVReg, ResType,
I, CL::asin, GL::Asin);
808 case TargetOpcode::G_FATAN:
809 return selectExtInst(ResVReg, ResType,
I, CL::atan, GL::Atan);
810 case TargetOpcode::G_FATAN2:
811 return selectExtInst(ResVReg, ResType,
I, CL::atan2, GL::Atan2);
812 case TargetOpcode::G_FCOSH:
813 return selectExtInst(ResVReg, ResType,
I, CL::cosh, GL::Cosh);
814 case TargetOpcode::G_FSINH:
815 return selectExtInst(ResVReg, ResType,
I, CL::sinh, GL::Sinh);
816 case TargetOpcode::G_FTANH:
817 return selectExtInst(ResVReg, ResType,
I, CL::tanh, GL::Tanh);
819 case TargetOpcode::G_STRICT_FSQRT:
820 case TargetOpcode::G_FSQRT:
821 return selectExtInst(ResVReg, ResType,
I, CL::sqrt, GL::Sqrt);
823 case TargetOpcode::G_CTTZ:
824 case TargetOpcode::G_CTTZ_ZERO_UNDEF:
825 return selectExtInst(ResVReg, ResType,
I, CL::ctz);
826 case TargetOpcode::G_CTLZ:
827 case TargetOpcode::G_CTLZ_ZERO_UNDEF:
828 return selectExtInst(ResVReg, ResType,
I, CL::clz);
830 case TargetOpcode::G_INTRINSIC_ROUND:
831 return selectExtInst(ResVReg, ResType,
I, CL::round, GL::Round);
832 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
833 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
834 case TargetOpcode::G_INTRINSIC_TRUNC:
835 return selectExtInst(ResVReg, ResType,
I, CL::trunc, GL::Trunc);
836 case TargetOpcode::G_FRINT:
837 case TargetOpcode::G_FNEARBYINT:
838 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
840 case TargetOpcode::G_SMULH:
841 return selectExtInst(ResVReg, ResType,
I, CL::s_mul_hi);
842 case TargetOpcode::G_UMULH:
843 return selectExtInst(ResVReg, ResType,
I, CL::u_mul_hi);
845 case TargetOpcode::G_SADDSAT:
846 return selectExtInst(ResVReg, ResType,
I, CL::s_add_sat);
847 case TargetOpcode::G_UADDSAT:
848 return selectExtInst(ResVReg, ResType,
I, CL::u_add_sat);
849 case TargetOpcode::G_SSUBSAT:
850 return selectExtInst(ResVReg, ResType,
I, CL::s_sub_sat);
851 case TargetOpcode::G_USUBSAT:
852 return selectExtInst(ResVReg, ResType,
I, CL::u_sub_sat);
854 case TargetOpcode::G_FFREXP:
855 return selectFrexp(ResVReg, ResType,
I);
857 case TargetOpcode::G_UADDO:
858 return selectOverflowArith(ResVReg, ResType,
I,
859 ResType->
getOpcode() == SPIRV::OpTypeVector
860 ? SPIRV::OpIAddCarryV
861 : SPIRV::OpIAddCarryS);
862 case TargetOpcode::G_USUBO:
863 return selectOverflowArith(ResVReg, ResType,
I,
864 ResType->
getOpcode() == SPIRV::OpTypeVector
865 ? SPIRV::OpISubBorrowV
866 : SPIRV::OpISubBorrowS);
867 case TargetOpcode::G_UMULO:
868 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpUMulExtended);
869 case TargetOpcode::G_SMULO:
870 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpSMulExtended);
872 case TargetOpcode::G_SEXT:
873 return selectExt(ResVReg, ResType,
I,
true);
874 case TargetOpcode::G_ANYEXT:
875 case TargetOpcode::G_ZEXT:
876 return selectExt(ResVReg, ResType,
I,
false);
877 case TargetOpcode::G_TRUNC:
878 return selectTrunc(ResVReg, ResType,
I);
879 case TargetOpcode::G_FPTRUNC:
880 case TargetOpcode::G_FPEXT:
881 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpFConvert);
883 case TargetOpcode::G_PTRTOINT:
884 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertPtrToU);
885 case TargetOpcode::G_INTTOPTR:
886 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertUToPtr);
887 case TargetOpcode::G_BITCAST:
888 return selectBitcast(ResVReg, ResType,
I);
889 case TargetOpcode::G_ADDRSPACE_CAST:
890 return selectAddrSpaceCast(ResVReg, ResType,
I);
891 case TargetOpcode::G_PTR_ADD: {
893 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
897 assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
898 (*II).getOpcode() == TargetOpcode::COPY ||
899 (*II).getOpcode() == SPIRV::OpVariable) &&
902 bool IsGVInit =
false;
904 UseIt =
MRI->use_instr_begin(
I.getOperand(0).getReg()),
905 UseEnd =
MRI->use_instr_end();
906 UseIt != UseEnd; UseIt = std::next(UseIt)) {
907 if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
908 (*UseIt).getOpcode() == SPIRV::OpVariable) {
918 if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
921 Register NewVReg =
MRI->createGenericVirtualRegister(
MRI->getType(GV));
922 MRI->setRegClass(NewVReg,
MRI->getRegClass(GV));
931 "incompatible result and operand types in a bitcast");
933 MachineInstrBuilder MIB =
934 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitcast))
941 ? SPIRV::OpInBoundsAccessChain
942 : SPIRV::OpInBoundsPtrAccessChain))
946 .
addUse(
I.getOperand(2).getReg())
949 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
953 static_cast<uint32_t
>(SPIRV::Opcode::InBoundsPtrAccessChain))
955 .
addUse(
I.getOperand(2).getReg())
963 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
966 .
addImm(
static_cast<uint32_t
>(
967 SPIRV::Opcode::InBoundsPtrAccessChain))
970 .
addUse(
I.getOperand(2).getReg());
974 case TargetOpcode::G_ATOMICRMW_OR:
975 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicOr);
976 case TargetOpcode::G_ATOMICRMW_ADD:
977 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicIAdd);
978 case TargetOpcode::G_ATOMICRMW_AND:
979 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicAnd);
980 case TargetOpcode::G_ATOMICRMW_MAX:
981 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMax);
982 case TargetOpcode::G_ATOMICRMW_MIN:
983 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMin);
984 case TargetOpcode::G_ATOMICRMW_SUB:
985 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicISub);
986 case TargetOpcode::G_ATOMICRMW_XOR:
987 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicXor);
988 case TargetOpcode::G_ATOMICRMW_UMAX:
989 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMax);
990 case TargetOpcode::G_ATOMICRMW_UMIN:
991 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMin);
992 case TargetOpcode::G_ATOMICRMW_XCHG:
993 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicExchange);
994 case TargetOpcode::G_ATOMIC_CMPXCHG:
995 return selectAtomicCmpXchg(ResVReg, ResType,
I);
997 case TargetOpcode::G_ATOMICRMW_FADD:
998 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT);
999 case TargetOpcode::G_ATOMICRMW_FSUB:
1001 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT,
1003 case TargetOpcode::G_ATOMICRMW_FMIN:
1004 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMinEXT);
1005 case TargetOpcode::G_ATOMICRMW_FMAX:
1006 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMaxEXT);
1008 case TargetOpcode::G_FENCE:
1009 return selectFence(
I);
1011 case TargetOpcode::G_STACKSAVE:
1012 return selectStackSave(ResVReg, ResType,
I);
1013 case TargetOpcode::G_STACKRESTORE:
1014 return selectStackRestore(
I);
1016 case TargetOpcode::G_UNMERGE_VALUES:
1022 case TargetOpcode::G_TRAP:
1023 case TargetOpcode::G_UBSANTRAP:
1024 case TargetOpcode::DBG_LABEL:
1026 case TargetOpcode::G_DEBUGTRAP:
1027 return selectDebugTrap(ResVReg, ResType,
I);
1034bool SPIRVInstructionSelector::selectDebugTrap(
Register ResVReg,
1036 MachineInstr &
I)
const {
1037 unsigned Opcode = SPIRV::OpNop;
1039 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
1043bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1046 GL::GLSLExtInst GLInst)
const {
1048 SPIRV::InstructionSet::InstructionSet::GLSL_std_450)) {
1049 std::string DiagMsg;
1050 raw_string_ostream OS(DiagMsg);
1051 I.print(OS,
true,
false,
false,
false);
1052 DiagMsg +=
" is only supported with the GLSL extended instruction set.\n";
1055 return selectExtInst(ResVReg, ResType,
I,
1056 {{SPIRV::InstructionSet::GLSL_std_450, GLInst}});
1059bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1062 CL::OpenCLExtInst CLInst)
const {
1063 return selectExtInst(ResVReg, ResType,
I,
1064 {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
1067bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1070 CL::OpenCLExtInst CLInst,
1071 GL::GLSLExtInst GLInst)
const {
1072 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
1073 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
1074 return selectExtInst(ResVReg, ResType,
I, ExtInsts);
1077bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1082 for (
const auto &Ex : Insts) {
1083 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1084 uint32_t Opcode = Ex.second;
1087 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1090 .
addImm(
static_cast<uint32_t
>(Set))
1093 const unsigned NumOps =
I.getNumOperands();
1096 I.getOperand(Index).getType() ==
1097 MachineOperand::MachineOperandType::MO_IntrinsicID)
1100 MIB.
add(
I.getOperand(Index));
1106bool SPIRVInstructionSelector::selectExtInstForLRound(
1108 CL::OpenCLExtInst CLInst, GL::GLSLExtInst GLInst)
const {
1109 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
1110 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
1111 return selectExtInstForLRound(ResVReg, ResType,
I, ExtInsts);
1114bool SPIRVInstructionSelector::selectExtInstForLRound(
1117 for (
const auto &Ex : Insts) {
1118 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1119 uint32_t Opcode = Ex.second;
1122 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1125 .
addImm(
static_cast<uint32_t
>(Set))
1127 const unsigned NumOps =
I.getNumOperands();
1130 I.getOperand(Index).getType() ==
1131 MachineOperand::MachineOperandType::MO_IntrinsicID)
1134 MIB.
add(
I.getOperand(Index));
1142bool SPIRVInstructionSelector::selectFrexp(
Register ResVReg,
1144 MachineInstr &
I)
const {
1145 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CL::frexp},
1146 {SPIRV::InstructionSet::GLSL_std_450, GL::Frexp}};
1147 for (
const auto &Ex : ExtInsts) {
1148 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1149 uint32_t Opcode = Ex.second;
1153 MachineIRBuilder MIRBuilder(
I);
1156 PointeeTy, MIRBuilder, SPIRV::StorageClass::Function);
1161 auto MIB =
BuildMI(*It->getParent(), It, It->getDebugLoc(),
1162 TII.get(SPIRV::OpVariable))
1165 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
1169 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1172 .
addImm(
static_cast<uint32_t
>(Ex.first))
1174 .
add(
I.getOperand(2))
1179 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1180 .
addDef(
I.getOperand(1).getReg())
1189bool SPIRVInstructionSelector::selectOpWithSrcs(
Register ResVReg,
1192 std::vector<Register> Srcs,
1193 unsigned Opcode)
const {
1194 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
1203bool SPIRVInstructionSelector::selectUnOp(
Register ResVReg,
1206 unsigned Opcode)
const {
1208 Register SrcReg =
I.getOperand(1).getReg();
1211 MRI->def_instr_begin(SrcReg);
1212 DefIt !=
MRI->def_instr_end(); DefIt = std::next(DefIt)) {
1213 if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1214 (*DefIt).getOpcode() == SPIRV::OpVariable) {
1220 uint32_t SpecOpcode = 0;
1222 case SPIRV::OpConvertPtrToU:
1223 SpecOpcode =
static_cast<uint32_t
>(SPIRV::Opcode::ConvertPtrToU);
1225 case SPIRV::OpConvertUToPtr:
1226 SpecOpcode =
static_cast<uint32_t
>(SPIRV::Opcode::ConvertUToPtr);
1230 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1231 TII.get(SPIRV::OpSpecConstantOp))
1239 return selectOpWithSrcs(ResVReg, ResType,
I, {
I.getOperand(1).getReg()},
1243bool SPIRVInstructionSelector::selectBitcast(
Register ResVReg,
1245 MachineInstr &
I)
const {
1246 Register OpReg =
I.getOperand(1).getReg();
1250 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
1258 if (
MemOp->isVolatile())
1259 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1260 if (
MemOp->isNonTemporal())
1261 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1262 if (
MemOp->getAlign().value())
1263 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
1269 if (ST->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing)) {
1270 if (
auto *MD =
MemOp->getAAInfo().Scope) {
1274 static_cast<uint32_t>(SPIRV::MemoryOperand::AliasScopeINTELMask);
1276 if (
auto *MD =
MemOp->getAAInfo().NoAlias) {
1280 static_cast<uint32_t>(SPIRV::MemoryOperand::NoAliasINTELMask);
1284 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
1286 if (SpvMemOp &
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
1298 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1300 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1302 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None))
1306bool SPIRVInstructionSelector::selectLoad(
Register ResVReg,
1308 MachineInstr &
I)
const {
1315 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1316 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1318 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1320 MRI->createVirtualRegister(
MRI->getRegClass(HandleReg));
1322 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1326 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1327 return generateImageReadOrFetch(ResVReg, ResType, NewHandleReg, IdxReg,
1328 I.getDebugLoc(),
I);
1332 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1336 if (!
I.getNumMemOperands()) {
1337 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1339 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1342 MachineIRBuilder MIRBuilder(
I);
1348bool SPIRVInstructionSelector::selectStore(MachineInstr &
I)
const {
1350 Register StoreVal =
I.getOperand(0 + OpOffset).getReg();
1356 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1357 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1359 MRI->createVirtualRegister(
MRI->getRegClass(HandleReg));
1362 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1366 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1367 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1368 auto BMI =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1369 TII.get(SPIRV::OpImageWrite))
1375 if (sampledTypeIsSignedInteger(LLVMHandleType))
1378 return BMI.constrainAllUses(
TII,
TRI, RBI);
1383 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpStore))
1386 if (!
I.getNumMemOperands()) {
1387 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1389 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1392 MachineIRBuilder MIRBuilder(
I);
1398bool SPIRVInstructionSelector::selectStackSave(
Register ResVReg,
1400 MachineInstr &
I)
const {
1401 if (!STI.
canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1403 "llvm.stacksave intrinsic: this instruction requires the following "
1404 "SPIR-V extension: SPV_INTEL_variable_length_array",
1407 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSaveMemoryINTEL))
1413bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &
I)
const {
1414 if (!STI.
canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1416 "llvm.stackrestore intrinsic: this instruction requires the following "
1417 "SPIR-V extension: SPV_INTEL_variable_length_array",
1419 if (!
I.getOperand(0).isReg())
1422 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpRestoreMemoryINTEL))
1423 .
addUse(
I.getOperand(0).getReg())
1427bool SPIRVInstructionSelector::selectMemOperation(
Register ResVReg,
1428 MachineInstr &
I)
const {
1430 Register SrcReg =
I.getOperand(1).getReg();
1432 if (
I.getOpcode() == TargetOpcode::G_MEMSET) {
1433 MachineIRBuilder MIRBuilder(
I);
1434 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
1437 Type *ValTy = Type::getInt8Ty(
I.getMF()->getFunction().getContext());
1438 Type *ArrTy = ArrayType::get(ValTy, Num);
1440 ArrTy, MIRBuilder, SPIRV::StorageClass::UniformConstant);
1443 ArrTy, MIRBuilder, SPIRV::AccessQualifier::None,
false);
1450 GlobalVariable *GV =
new GlobalVariable(*CurFunction.
getParent(), LLVMArrTy,
1455 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
1458 .
addImm(SPIRV::StorageClass::UniformConstant)
1467 ValTy,
I, SPIRV::StorageClass::UniformConstant);
1469 selectOpWithSrcs(SrcReg, SourceTy,
I, {VarReg}, SPIRV::OpBitcast);
1471 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemorySized))
1472 .
addUse(
I.getOperand(0).getReg())
1474 .
addUse(
I.getOperand(2).getReg());
1475 if (
I.getNumMemOperands()) {
1476 MachineIRBuilder MIRBuilder(
I);
1485bool SPIRVInstructionSelector::selectAtomicRMW(
Register ResVReg,
1489 unsigned NegateOpcode)
const {
1492 const MachineMemOperand *MemOp = *
I.memoperands_begin();
1495 auto ScopeConstant = buildI32Constant(Scope,
I);
1496 Register ScopeReg = ScopeConstant.first;
1497 Result &= ScopeConstant.second;
1505 auto MemSemConstant = buildI32Constant(MemSem ,
I);
1506 Register MemSemReg = MemSemConstant.first;
1507 Result &= MemSemConstant.second;
1509 Register ValueReg =
I.getOperand(2).getReg();
1510 if (NegateOpcode != 0) {
1513 Result &= selectOpWithSrcs(TmpReg, ResType,
I, {ValueReg}, NegateOpcode);
1518 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(NewOpcode))
1528bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &
I)
const {
1529 unsigned ArgI =
I.getNumOperands() - 1;
1531 I.getOperand(ArgI).isReg() ?
I.getOperand(ArgI).getReg() :
Register(0);
1534 if (!DefType || DefType->
getOpcode() != SPIRV::OpTypeVector)
1536 "cannot select G_UNMERGE_VALUES with a non-vector argument");
1542 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1543 Register ResVReg =
I.getOperand(i).getReg();
1547 ResType = ScalarType;
1553 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1557 .
addImm(
static_cast<int64_t
>(i));
1563bool SPIRVInstructionSelector::selectFence(MachineInstr &
I)
const {
1566 auto MemSemConstant = buildI32Constant(MemSem,
I);
1567 Register MemSemReg = MemSemConstant.first;
1568 bool Result = MemSemConstant.second;
1570 uint32_t
Scope =
static_cast<uint32_t
>(
1572 auto ScopeConstant = buildI32Constant(Scope,
I);
1573 Register ScopeReg = ScopeConstant.first;
1574 Result &= ScopeConstant.second;
1577 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpMemoryBarrier))
1583bool SPIRVInstructionSelector::selectOverflowArith(
Register ResVReg,
1586 unsigned Opcode)
const {
1587 Type *ResTy =
nullptr;
1591 "Not enough info to select the arithmetic with overflow instruction");
1594 "with overflow instruction");
1600 MachineIRBuilder MIRBuilder(
I);
1602 ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
1603 assert(
I.getNumDefs() > 1 &&
"Not enought operands");
1609 Register ZeroReg = buildZerosVal(ResType,
I);
1612 MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
1614 if (ResName.
size() > 0)
1619 BuildMI(BB, MIRBuilder.getInsertPt(),
I.getDebugLoc(),
TII.get(Opcode))
1622 for (
unsigned i =
I.getNumDefs(); i <
I.getNumOperands(); ++i)
1623 MIB.
addUse(
I.getOperand(i).getReg());
1628 MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
1629 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1631 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1632 .
addDef(i == 1 ? HigherVReg :
I.getOperand(i).getReg())
1640 .
addDef(
I.getOperand(1).getReg())
1647bool SPIRVInstructionSelector::selectAtomicCmpXchg(
Register ResVReg,
1649 MachineInstr &
I)
const {
1657 const MachineMemOperand *MemOp = *
I.memoperands_begin();
1660 auto ScopeConstant = buildI32Constant(Scope,
I);
1661 ScopeReg = ScopeConstant.first;
1662 Result &= ScopeConstant.second;
1664 unsigned ScSem =
static_cast<uint32_t
>(
1667 unsigned MemSemEq =
static_cast<uint32_t
>(
getMemSemantics(AO)) | ScSem;
1668 auto MemSemEqConstant = buildI32Constant(MemSemEq,
I);
1669 MemSemEqReg = MemSemEqConstant.first;
1670 Result &= MemSemEqConstant.second;
1672 unsigned MemSemNeq =
static_cast<uint32_t
>(
getMemSemantics(FO)) | ScSem;
1673 if (MemSemEq == MemSemNeq)
1674 MemSemNeqReg = MemSemEqReg;
1676 auto MemSemNeqConstant = buildI32Constant(MemSemEq,
I);
1677 MemSemNeqReg = MemSemNeqConstant.first;
1678 Result &= MemSemNeqConstant.second;
1681 ScopeReg =
I.getOperand(5).getReg();
1682 MemSemEqReg =
I.getOperand(6).getReg();
1683 MemSemNeqReg =
I.getOperand(7).getReg();
1687 Register Val =
I.getOperand(4).getReg();
1692 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpAtomicCompareExchange))
1719 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpCompositeInsert))
1730 case SPIRV::StorageClass::DeviceOnlyINTEL:
1731 case SPIRV::StorageClass::HostOnlyINTEL:
1740 bool IsGRef =
false;
1741 bool IsAllowedRefs =
1742 llvm::all_of(
MRI->use_instructions(ResVReg), [&IsGRef](
auto const &It) {
1743 unsigned Opcode = It.getOpcode();
1744 if (Opcode == SPIRV::OpConstantComposite ||
1745 Opcode == SPIRV::OpVariable ||
1746 isSpvIntrinsic(It, Intrinsic::spv_init_global))
1747 return IsGRef = true;
1748 return Opcode == SPIRV::OpName;
1750 return IsAllowedRefs && IsGRef;
// getUcharPtrTypeReg: returns the register holding the SPIR-V type for a
// pointer-to-i8 in storage class SC. The visible fragment builds the LLVM i8
// type from the current function's LLVMContext; the enclosing call that wraps
// it into a SPIR-V pointer-type register is missing from this garbled listing.
// NOTE(review): the leading "1753"/"1756" tokens are fused original source
// line numbers from the extraction, not code.
1753Register SPIRVInstructionSelector::getUcharPtrTypeReg(
1754 MachineInstr &
I, SPIRV::StorageClass::StorageClass SC)
const {
1756 Type::getInt8Ty(
I.getMF()->getFunction().getContext()),
I, SC));
// buildSpecConstantOp: emits an OpSpecConstantOp at I's position (same basic
// block, same debug location) and returns the in-progress MachineInstrBuilder
// so the caller can finish attaching operands. Dest and Opcode appear in the
// signature; the operand-appending lines are missing from this truncated
// listing, so only the BuildMI call itself is documented here.
1760SPIRVInstructionSelector::buildSpecConstantOp(MachineInstr &
I,
Register Dest,
1762 uint32_t Opcode)
const {
1763 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1764 TII.get(SPIRV::OpSpecConstantOp))
1772SPIRVInstructionSelector::buildConstGenericPtr(MachineInstr &
I,
Register SrcPtr,
1776 Register Tmp =
MRI->createVirtualRegister(&SPIRV::pIDRegClass);
1778 SPIRV::StorageClass::Generic),
1780 MachineFunction *MF =
I.getParent()->getParent();
1782 MachineInstrBuilder MIB = buildSpecConstantOp(
1784 static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric));
1794bool SPIRVInstructionSelector::selectAddrSpaceCast(
Register ResVReg,
1796 MachineInstr &
I)
const {
1800 Register SrcPtr =
I.getOperand(1).getReg();
1804 if (SrcPtrTy->
getOpcode() != SPIRV::OpTypePointer ||
1805 ResType->
getOpcode() != SPIRV::OpTypePointer)
1806 return BuildCOPY(ResVReg, SrcPtr,
I);
1816 unsigned SpecOpcode =
1818 ?
static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric)
1821 ? static_cast<uint32_t>(
SPIRV::Opcode::GenericCastToPtr)
1828 return buildSpecConstantOp(
I, ResVReg, SrcPtr,
1829 getUcharPtrTypeReg(
I, DstSC), SpecOpcode)
1830 .constrainAllUses(
TII,
TRI, RBI);
1832 MachineInstrBuilder MIB = buildConstGenericPtr(
I, SrcPtr, SrcPtrTy);
1834 buildSpecConstantOp(
1836 getUcharPtrTypeReg(
I, DstSC),
1837 static_cast<uint32_t
>(SPIRV::Opcode::GenericCastToPtr))
1838 .constrainAllUses(
TII,
TRI, RBI);
1844 return BuildCOPY(ResVReg, SrcPtr,
I);
1846 if ((SrcSC == SPIRV::StorageClass::Function &&
1847 DstSC == SPIRV::StorageClass::Private) ||
1848 (DstSC == SPIRV::StorageClass::Function &&
1849 SrcSC == SPIRV::StorageClass::Private))
1850 return BuildCOPY(ResVReg, SrcPtr,
I);
1854 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1857 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
1878 return selectUnOp(ResVReg, ResType,
I,
1879 SPIRV::OpPtrCastToCrossWorkgroupINTEL);
1881 return selectUnOp(ResVReg, ResType,
I,
1882 SPIRV::OpCrossWorkgroupCastToPtrINTEL);
1884 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1886 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
1896 return SPIRV::OpFOrdEqual;
1898 return SPIRV::OpFOrdGreaterThanEqual;
1900 return SPIRV::OpFOrdGreaterThan;
1902 return SPIRV::OpFOrdLessThanEqual;
1904 return SPIRV::OpFOrdLessThan;
1906 return SPIRV::OpFOrdNotEqual;
1908 return SPIRV::OpOrdered;
1910 return SPIRV::OpFUnordEqual;
1912 return SPIRV::OpFUnordGreaterThanEqual;
1914 return SPIRV::OpFUnordGreaterThan;
1916 return SPIRV::OpFUnordLessThanEqual;
1918 return SPIRV::OpFUnordLessThan;
1920 return SPIRV::OpFUnordNotEqual;
1922 return SPIRV::OpUnordered;
1932 return SPIRV::OpIEqual;
1934 return SPIRV::OpINotEqual;
1936 return SPIRV::OpSGreaterThanEqual;
1938 return SPIRV::OpSGreaterThan;
1940 return SPIRV::OpSLessThanEqual;
1942 return SPIRV::OpSLessThan;
1944 return SPIRV::OpUGreaterThanEqual;
1946 return SPIRV::OpUGreaterThan;
1948 return SPIRV::OpULessThanEqual;
1950 return SPIRV::OpULessThan;
1959 return SPIRV::OpPtrEqual;
1961 return SPIRV::OpPtrNotEqual;
1972 return SPIRV::OpLogicalEqual;
1974 return SPIRV::OpLogicalNotEqual;
// selectAnyOrAll: shared lowering for the spv_any/spv_all intrinsics.
// Expects exactly one register value operand (operand 2, asserted below).
// Fast path: a scalar boolean input needs no reduction — the result is a
// plain copy of the input. Otherwise the input is first compared against
// zero, using OpFOrdNotEqual for float element types and OpINotEqual for
// integers (SpirvNotEqualId), with a float or integer zeros constant chosen
// to match. NOTE(review): this listing drops interior lines, so the final
// OpAny/OpAll reduction emit is not visible in this fragment.
2008bool SPIRVInstructionSelector::selectAnyOrAll(
Register ResVReg,
2011 unsigned OpAnyOrAll)
const {
2012 assert(
I.getNumOperands() == 3);
2013 assert(
I.getOperand(2).isReg());
2015 Register InputRegister =
I.getOperand(2).getReg();
2022 bool IsVectorTy = InputType->
getOpcode() == SPIRV::OpTypeVector;
// Scalar bool input: no comparison or reduction required, just copy.
2023 if (IsBoolTy && !IsVectorTy) {
2024 assert(ResVReg ==
I.getOperand(0).getReg());
2025 return BuildCOPY(ResVReg, InputRegister,
I);
// Pick the "not equal to zero" opcode by element type (float vs. int).
2029 unsigned SpirvNotEqualId =
2030 IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
2037 IsBoolTy ? InputRegister
// Zero constant of the matching element type for the comparison.
2046 IsFloatTy ? buildZerosValF(InputType,
I) : buildZerosVal(InputType,
I);
// selectAll: lowers the spv_all intrinsic by delegating to the shared
// selectAnyOrAll helper with SPIRV::OpAll as the reduction opcode.
2066bool SPIRVInstructionSelector::selectAll(
Register ResVReg,
2068 MachineInstr &
I)
const {
2069 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAll);
// selectAny: lowers the spv_any intrinsic by delegating to the shared
// selectAnyOrAll helper with SPIRV::OpAny as the reduction opcode.
2072bool SPIRVInstructionSelector::selectAny(
Register ResVReg,
2074 MachineInstr &
I)
const {
2075 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAny);
// selectFloatDot: lowers spv_fdot to the SPIR-V OpDot instruction. Asserts
// the intrinsic carries exactly two register value operands (operands 2 and
// 3), which become the two vector factors of OpDot. The visible string
// literal documents the requirement that the operand vectors have at least
// two components.
2079bool SPIRVInstructionSelector::selectFloatDot(
Register ResVReg,
2081 MachineInstr &
I)
const {
2082 assert(
I.getNumOperands() == 4);
2083 assert(
I.getOperand(2).isReg());
2084 assert(
I.getOperand(3).isReg());
2091 "dot product requires a vector of at least 2 components");
2099 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpDot))
2102 .
addUse(
I.getOperand(2).getReg())
2103 .
addUse(
I.getOperand(3).getReg())
2107bool SPIRVInstructionSelector::selectIntegerDot(
Register ResVReg,
2111 assert(
I.getNumOperands() == 4);
2112 assert(
I.getOperand(2).isReg());
2113 assert(
I.getOperand(3).isReg());
2116 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2120 .
addUse(
I.getOperand(2).getReg())
2121 .
addUse(
I.getOperand(3).getReg())
2127bool SPIRVInstructionSelector::selectIntegerDotExpansion(
2129 assert(
I.getNumOperands() == 4);
2130 assert(
I.getOperand(2).isReg());
2131 assert(
I.getOperand(3).isReg());
2135 Register Vec0 =
I.getOperand(2).getReg();
2136 Register Vec1 =
I.getOperand(3).getReg();
2149 "dot product requires a vector of at least 2 components");
2163 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
// selectOpIsInf: lowers spv_isinf to the SPIR-V OpIsInf instruction applied
// to operand 2. The result-type/def operands of the BuildMI chain are not
// visible in this truncated listing.
2186bool SPIRVInstructionSelector::selectOpIsInf(
Register ResVReg,
2188 MachineInstr &
I)
const {
2190 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsInf))
2193 .
addUse(
I.getOperand(2).getReg())
// selectOpIsNan: lowers spv_isnan to the SPIR-V OpIsNan instruction applied
// to operand 2. Mirrors selectOpIsInf; the result-type/def operands of the
// BuildMI chain are not visible in this truncated listing.
2197bool SPIRVInstructionSelector::selectOpIsNan(
Register ResVReg,
2199 MachineInstr &
I)
const {
2201 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsNan))
2204 .
addUse(
I.getOperand(2).getReg())
2208template <
bool Signed>
2209bool SPIRVInstructionSelector::selectDot4AddPacked(
Register ResVReg,
2211 MachineInstr &
I)
const {
2212 assert(
I.getNumOperands() == 5);
2213 assert(
I.getOperand(2).isReg());
2214 assert(
I.getOperand(3).isReg());
2215 assert(
I.getOperand(4).isReg());
2218 Register Acc =
I.getOperand(2).getReg();
2222 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2242template <
bool Signed>
2243bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
2245 assert(
I.getNumOperands() == 5);
2246 assert(
I.getOperand(2).isReg());
2247 assert(
I.getOperand(3).isReg());
2248 assert(
I.getOperand(4).isReg());
2253 Register Acc =
I.getOperand(2).getReg();
2259 Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;
2263 for (
unsigned i = 0; i < 4; i++) {
2265 Register AElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2276 Register BElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2296 Register MaskMul =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2308 i < 3 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
2324bool SPIRVInstructionSelector::selectSaturate(
Register ResVReg,
2326 MachineInstr &
I)
const {
2327 assert(
I.getNumOperands() == 3);
2328 assert(
I.getOperand(2).isReg());
2330 Register VZero = buildZerosValF(ResType,
I);
2331 Register VOne = buildOnesValF(ResType,
I);
2333 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
2336 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2338 .
addUse(
I.getOperand(2).getReg())
2344bool SPIRVInstructionSelector::selectSign(
Register ResVReg,
2346 MachineInstr &
I)
const {
2347 assert(
I.getNumOperands() == 3);
2348 assert(
I.getOperand(2).isReg());
2350 Register InputRegister =
I.getOperand(2).getReg();
2352 auto &
DL =
I.getDebugLoc();
2362 bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;
2364 auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
2366 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass)
2373 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2378 if (NeedsConversion) {
2379 auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
2390bool SPIRVInstructionSelector::selectWaveOpInst(
Register ResVReg,
2393 unsigned Opcode)
const {
2397 auto BMI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2403 for (
unsigned J = 2; J <
I.getNumOperands(); J++) {
2404 BMI.addUse(
I.getOperand(J).getReg());
2410bool SPIRVInstructionSelector::selectWaveActiveCountBits(
2416 bool Result = selectWaveOpInst(BallotReg, BallotType,
I,
2417 SPIRV::OpGroupNonUniformBallot);
2421 TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
2426 .
addImm(SPIRV::GroupOperation::Reduce)
2433bool SPIRVInstructionSelector::selectWaveReduceMax(
Register ResVReg,
2436 bool IsUnsigned)
const {
2437 assert(
I.getNumOperands() == 3);
2438 assert(
I.getOperand(2).isReg());
2440 Register InputRegister =
I.getOperand(2).getReg();
2449 auto IntegerOpcodeType =
2450 IsUnsigned ? SPIRV::OpGroupNonUniformUMax : SPIRV::OpGroupNonUniformSMax;
2451 auto Opcode = IsFloatTy ? SPIRV::OpGroupNonUniformFMax : IntegerOpcodeType;
2452 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2457 .
addImm(SPIRV::GroupOperation::Reduce)
2458 .
addUse(
I.getOperand(2).getReg())
2462bool SPIRVInstructionSelector::selectWaveReduceMin(
Register ResVReg,
2465 bool IsUnsigned)
const {
2466 assert(
I.getNumOperands() == 3);
2467 assert(
I.getOperand(2).isReg());
2469 Register InputRegister =
I.getOperand(2).getReg();
2478 auto IntegerOpcodeType =
2479 IsUnsigned ? SPIRV::OpGroupNonUniformUMin : SPIRV::OpGroupNonUniformSMin;
2480 auto Opcode = IsFloatTy ? SPIRV::OpGroupNonUniformFMin : IntegerOpcodeType;
2481 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2486 .
addImm(SPIRV::GroupOperation::Reduce)
2487 .
addUse(
I.getOperand(2).getReg())
// selectWaveReduceSum: lowers spv_wave_reduce_sum to a group non-uniform
// reduction over the single register value operand (operand 2, asserted) —
// OpGroupNonUniformFAdd for floating-point element types, otherwise
// OpGroupNonUniformIAdd — with GroupOperation::Reduce as the group operation.
2491bool SPIRVInstructionSelector::selectWaveReduceSum(
Register ResVReg,
2493 MachineInstr &
I)
const {
2494 assert(
I.getNumOperands() == 3);
2495 assert(
I.getOperand(2).isReg());
2497 Register InputRegister =
I.getOperand(2).getReg();
// Float inputs reduce with FAdd; everything else uses integer IAdd.
2507 IsFloatTy ? SPIRV::OpGroupNonUniformFAdd : SPIRV::OpGroupNonUniformIAdd;
2508 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2513 .
addImm(SPIRV::GroupOperation::Reduce)
2514 .
addUse(
I.getOperand(2).getReg());
// selectBitreverse: lowers G_BITREVERSE to the SPIR-V OpBitReverse
// instruction on operand 1. The def/result-type operands of the BuildMI
// chain are not visible in this truncated listing.
2517bool SPIRVInstructionSelector::selectBitreverse(
Register ResVReg,
2519 MachineInstr &
I)
const {
2521 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitReverse))
2524 .
addUse(
I.getOperand(1).getReg())
2528bool SPIRVInstructionSelector::selectFreeze(
Register ResVReg,
2530 MachineInstr &
I)
const {
2536 if (!
I.getOperand(0).isReg() || !
I.getOperand(1).isReg())
2538 Register OpReg =
I.getOperand(1).getReg();
2539 if (MachineInstr *Def =
MRI->getVRegDef(OpReg)) {
2540 if (
Def->getOpcode() == TargetOpcode::COPY)
2541 Def =
MRI->getVRegDef(
Def->getOperand(1).getReg());
2543 switch (
Def->getOpcode()) {
2544 case SPIRV::ASSIGN_TYPE:
2545 if (MachineInstr *AssignToDef =
2546 MRI->getVRegDef(
Def->getOperand(1).getReg())) {
2547 if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
2548 Reg =
Def->getOperand(2).getReg();
2551 case SPIRV::OpUndef:
2552 Reg =
Def->getOperand(1).getReg();
2555 unsigned DestOpCode;
2557 DestOpCode = SPIRV::OpConstantNull;
2559 DestOpCode = TargetOpcode::COPY;
2562 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DestOpCode))
2563 .
addDef(
I.getOperand(0).getReg())
2570bool SPIRVInstructionSelector::selectBuildVector(
Register ResVReg,
2572 MachineInstr &
I)
const {
2574 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2576 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2580 if (
I.getNumExplicitOperands() -
I.getNumExplicitDefs() !=
N)
2585 for (
unsigned i =
I.getNumExplicitDefs();
2586 i <
I.getNumExplicitOperands() && IsConst; ++i)
2590 if (!IsConst &&
N < 2)
2592 "There must be at least two constituent operands in a vector");
2595 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2596 TII.get(IsConst ? SPIRV::OpConstantComposite
2597 : SPIRV::OpCompositeConstruct))
2600 for (
unsigned i =
I.getNumExplicitDefs(); i <
I.getNumExplicitOperands(); ++i)
2601 MIB.
addUse(
I.getOperand(i).getReg());
2605bool SPIRVInstructionSelector::selectSplatVector(
Register ResVReg,
2607 MachineInstr &
I)
const {
2609 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2611 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2617 if (!
I.getOperand(
OpIdx).isReg())
2624 if (!IsConst &&
N < 2)
2626 "There must be at least two constituent operands in a vector");
2629 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2630 TII.get(IsConst ? SPIRV::OpConstantComposite
2631 : SPIRV::OpCompositeConstruct))
2634 for (
unsigned i = 0; i <
N; ++i)
2639bool SPIRVInstructionSelector::selectDiscard(
Register ResVReg,
2641 MachineInstr &
I)
const {
2646 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
2648 Opcode = SPIRV::OpDemoteToHelperInvocation;
2650 Opcode = SPIRV::OpKill;
2652 if (MachineInstr *NextI =
I.getNextNode()) {
2654 NextI->removeFromParent();
2659 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
// selectCmp: shared emitter behind selectICmp/selectFCmp. Reads the two
// comparison operands (operands 2 and 3) and emits the pre-chosen SPIR-V
// comparison opcode CmpOpc at I. The visible assert message documents the
// precondition that both operands share the same type.
2663bool SPIRVInstructionSelector::selectCmp(
Register ResVReg,
2666 MachineInstr &
I)
const {
2667 Register Cmp0 =
I.getOperand(2).getReg();
2668 Register Cmp1 =
I.getOperand(3).getReg();
2671 "CMP operands should have the same type");
2672 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(CmpOpc))
// selectICmp: lowers G_ICMP. Reads the comparison predicate (operand 1) and
// one comparison operand (operand 2), then defers to selectCmp with the
// chosen SPIR-V opcode. NOTE(review): the predicate-to-opcode mapping lines
// (presumably choosing between integer, pointer, and bool compare opcodes)
// are missing from this truncated listing.
2681bool SPIRVInstructionSelector::selectICmp(
Register ResVReg,
2683 MachineInstr &
I)
const {
2684 auto Pred =
I.getOperand(1).getPredicate();
2687 Register CmpOperand =
I.getOperand(2).getReg();
2694 return selectCmp(ResVReg, ResType, CmpOpc,
I);
2697std::pair<Register, bool>
2698SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &
I,
2704 auto ConstInt = ConstantInt::get(LLVMTy, Val);
2712 ?
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
2715 :
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
2718 .
addImm(APInt(32, Val).getZExtValue());
2720 GR.
add(ConstInt,
MI);
// selectFCmp: lowers G_FCMP by deferring to selectCmp with a pre-computed
// floating-point comparison opcode CmpOp. NOTE(review): the line that maps
// the FCmp predicate to CmpOp is missing from this truncated listing.
2725bool SPIRVInstructionSelector::selectFCmp(
Register ResVReg,
2727 MachineInstr &
I)
const {
2729 return selectCmp(ResVReg, ResType, CmpOp,
I);
2733 MachineInstr &
I)
const {
2736 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2742 MachineInstr &
I)
const {
2746 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2752 MachineInstr &
I)
const {
2756 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2763 MachineInstr &
I)
const {
2767 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2772bool SPIRVInstructionSelector::selectSelect(
Register ResVReg,
2774 MachineInstr &
I)
const {
2775 Register SelectFirstArg =
I.getOperand(2).getReg();
2776 Register SelectSecondArg =
I.getOperand(3).getReg();
2785 SPIRV::OpTypeVector;
2792 Opcode = IsScalarBool ? SPIRV::OpSelectVFSCond : SPIRV::OpSelectVFVCond;
2793 }
else if (IsPtrTy) {
2794 Opcode = IsScalarBool ? SPIRV::OpSelectVPSCond : SPIRV::OpSelectVPVCond;
2796 Opcode = IsScalarBool ? SPIRV::OpSelectVISCond : SPIRV::OpSelectVIVCond;
2800 Opcode = IsScalarBool ? SPIRV::OpSelectSFSCond : SPIRV::OpSelectVFVCond;
2801 }
else if (IsPtrTy) {
2802 Opcode = IsScalarBool ? SPIRV::OpSelectSPSCond : SPIRV::OpSelectVPVCond;
2804 Opcode = IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
2807 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2810 .
addUse(
I.getOperand(1).getReg())
2816bool SPIRVInstructionSelector::selectSelectDefaultArgs(
Register ResVReg,
2819 bool IsSigned)
const {
2821 Register ZeroReg = buildZerosVal(ResType,
I);
2822 Register OneReg = buildOnesVal(IsSigned, ResType,
I);
2826 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
2827 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2830 .
addUse(
I.getOperand(1).getReg())
2836bool SPIRVInstructionSelector::selectIToF(
Register ResVReg,
2838 MachineInstr &
I,
bool IsSigned,
2839 unsigned Opcode)
const {
2840 Register SrcReg =
I.getOperand(1).getReg();
2846 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
2851 selectSelectDefaultArgs(SrcReg, TmpType,
I,
false);
2853 return selectOpWithSrcs(ResVReg, ResType,
I, {SrcReg}, Opcode);
// selectExt: lowers integer extension (G_SEXT/G_ZEXT). Three cases visible:
//  - boolean sources are lowered via selectSelectDefaultArgs (a select of
//    ones/zeros of the result type, signedness-aware);
//  - when source and result types already match, the extension is a no-op
//    and becomes a plain copy;
//  - otherwise an OpSConvert/OpUConvert unary op is emitted per IsSigned.
2856bool SPIRVInstructionSelector::selectExt(
Register ResVReg,
2858 MachineInstr &
I,
bool IsSigned)
const {
2859 Register SrcReg =
I.getOperand(1).getReg();
2861 return selectSelectDefaultArgs(ResVReg, ResType,
I, IsSigned);
2864 if (SrcType == ResType)
2865 return BuildCOPY(ResVReg, SrcReg,
I);
2867 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2868 return selectUnOp(ResVReg, ResType,
I, Opcode);
2871bool SPIRVInstructionSelector::selectSUCmp(
Register ResVReg,
2874 bool IsSigned)
const {
2875 MachineIRBuilder MIRBuilder(
I);
2876 MachineRegisterInfo *
MRI = MIRBuilder.getMRI();
2891 TII.get(IsSigned ? SPIRV::OpSLessThanEqual
2892 : SPIRV::OpULessThanEqual))
2895 .
addUse(
I.getOperand(1).getReg())
2896 .
addUse(
I.getOperand(2).getReg())
2902 TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
2905 .
addUse(
I.getOperand(1).getReg())
2906 .
addUse(
I.getOperand(2).getReg())
2914 unsigned SelectOpcode =
2915 N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
2920 .
addUse(buildOnesVal(
true, ResType,
I))
2921 .
addUse(buildZerosVal(ResType,
I))
2928 .
addUse(buildOnesVal(
false, ResType,
I))
2932bool SPIRVInstructionSelector::selectIntToBool(
Register IntReg,
2939 bool IsVectorTy = IntTy->
getOpcode() == SPIRV::OpTypeVector;
2940 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
2942 Register One = buildOnesVal(
false, IntTy,
I);
// selectTrunc: lowers integer truncation (G_TRUNC). Truncation to a boolean
// result is routed through selectIntToBool; identical source/result types
// collapse to a plain copy; otherwise OpSConvert/OpUConvert is emitted.
// NOTE(review): the line defining IsSigned is missing from this truncated
// listing — presumably derived from the source type; confirm against the
// original file.
2958bool SPIRVInstructionSelector::selectTrunc(
Register ResVReg,
2960 MachineInstr &
I)
const {
2961 Register IntReg =
I.getOperand(1).getReg();
2964 return selectIntToBool(IntReg, ResVReg,
I, ArgType, ResType);
2965 if (ArgType == ResType)
2966 return BuildCOPY(ResVReg, IntReg,
I);
2968 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2969 return selectUnOp(ResVReg, ResType,
I, Opcode);
2972bool SPIRVInstructionSelector::selectConst(
Register ResVReg,
2974 MachineInstr &
I)
const {
2975 unsigned Opcode =
I.getOpcode();
2976 unsigned TpOpcode = ResType->
getOpcode();
2978 if (TpOpcode == SPIRV::OpTypePointer || TpOpcode == SPIRV::OpTypeEvent) {
2979 assert(Opcode == TargetOpcode::G_CONSTANT &&
2980 I.getOperand(1).getCImm()->isZero());
2981 MachineBasicBlock &DepMBB =
I.getMF()->front();
2984 }
else if (Opcode == TargetOpcode::G_FCONSTANT) {
2991 return Reg == ResVReg ?
true : BuildCOPY(ResVReg,
Reg,
I);
// selectOpUndef: lowers G_IMPLICIT_DEF (or equivalent) by emitting a SPIR-V
// OpUndef at I. The def/result-type operands of the BuildMI chain are not
// visible in this truncated listing.
2994bool SPIRVInstructionSelector::selectOpUndef(
Register ResVReg,
2996 MachineInstr &
I)
const {
2997 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
3003bool SPIRVInstructionSelector::selectInsertVal(
Register ResVReg,
3005 MachineInstr &
I)
const {
3007 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeInsert))
3011 .
addUse(
I.getOperand(3).getReg())
3013 .
addUse(
I.getOperand(2).getReg());
3014 for (
unsigned i = 4; i <
I.getNumOperands(); i++)
3019bool SPIRVInstructionSelector::selectExtractVal(
Register ResVReg,
3021 MachineInstr &
I)
const {
3023 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
3026 .
addUse(
I.getOperand(2).getReg());
3027 for (
unsigned i = 3; i <
I.getNumOperands(); i++)
3032bool SPIRVInstructionSelector::selectInsertElt(
Register ResVReg,
3034 MachineInstr &
I)
const {
3036 return selectInsertVal(ResVReg, ResType,
I);
3038 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorInsertDynamic))
3041 .
addUse(
I.getOperand(2).getReg())
3042 .
addUse(
I.getOperand(3).getReg())
3043 .
addUse(
I.getOperand(4).getReg())
3047bool SPIRVInstructionSelector::selectExtractElt(
Register ResVReg,
3049 MachineInstr &
I)
const {
3051 return selectExtractVal(ResVReg, ResType,
I);
3053 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorExtractDynamic))
3056 .
addUse(
I.getOperand(2).getReg())
3057 .
addUse(
I.getOperand(3).getReg())
3061bool SPIRVInstructionSelector::selectGEP(
Register ResVReg,
3063 MachineInstr &
I)
const {
3064 const bool IsGEPInBounds =
I.getOperand(2).getImm();
3070 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
3071 : SPIRV::OpAccessChain)
3072 : (IsGEPInBounds ?
SPIRV::OpInBoundsPtrAccessChain
3073 :
SPIRV::OpPtrAccessChain);
3075 auto Res =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
3079 .
addUse(
I.getOperand(3).getReg());
3081 const unsigned StartingIndex =
3082 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
3085 for (
unsigned i = StartingIndex; i <
I.getNumExplicitOperands(); ++i)
3086 Res.addUse(
I.getOperand(i).getReg());
3087 return Res.constrainAllUses(
TII,
TRI, RBI);
3091bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
3094 unsigned Lim =
I.getNumExplicitOperands();
3095 for (
unsigned i =
I.getNumExplicitDefs() + 1; i < Lim; ++i) {
3096 Register OpReg =
I.getOperand(i).getReg();
3097 MachineInstr *OpDefine =
MRI->getVRegDef(OpReg);
3099 SmallPtrSet<SPIRVType *, 4> Visited;
3100 if (!OpDefine || !OpType ||
isConstReg(
MRI, OpDefine, Visited) ||
3101 OpDefine->
getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
3108 MachineFunction *MF =
I.getMF();
3120 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3121 TII.get(SPIRV::OpSpecConstantOp))
3124 .
addImm(
static_cast<uint32_t
>(SPIRV::Opcode::Bitcast))
3126 GR.
add(OpDefine, MIB);
3134bool SPIRVInstructionSelector::selectIntrinsic(
Register ResVReg,
3136 MachineInstr &
I)
const {
3140 case Intrinsic::spv_load:
3141 return selectLoad(ResVReg, ResType,
I);
3142 case Intrinsic::spv_store:
3143 return selectStore(
I);
3144 case Intrinsic::spv_extractv:
3145 return selectExtractVal(ResVReg, ResType,
I);
3146 case Intrinsic::spv_insertv:
3147 return selectInsertVal(ResVReg, ResType,
I);
3148 case Intrinsic::spv_extractelt:
3149 return selectExtractElt(ResVReg, ResType,
I);
3150 case Intrinsic::spv_insertelt:
3151 return selectInsertElt(ResVReg, ResType,
I);
3152 case Intrinsic::spv_gep:
3153 return selectGEP(ResVReg, ResType,
I);
3154 case Intrinsic::spv_bitcast: {
3155 Register OpReg =
I.getOperand(2).getReg();
3160 return selectOpWithSrcs(ResVReg, ResType,
I, {OpReg}, SPIRV::OpBitcast);
3162 case Intrinsic::spv_unref_global:
3163 case Intrinsic::spv_init_global: {
3164 MachineInstr *
MI =
MRI->getVRegDef(
I.getOperand(1).getReg());
3165 MachineInstr *Init =
I.getNumExplicitOperands() > 2
3166 ?
MRI->getVRegDef(
I.getOperand(2).getReg())
3169 Register GVarVReg =
MI->getOperand(0).getReg();
3170 bool Res = selectGlobalValue(GVarVReg, *
MI, Init);
3174 if (
MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
3176 MI->removeFromParent();
3180 case Intrinsic::spv_undef: {
3181 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
3186 case Intrinsic::spv_const_composite: {
3188 bool IsNull =
I.getNumExplicitDefs() + 1 ==
I.getNumExplicitOperands();
3194 if (!wrapIntoSpecConstantOp(
I, CompositeArgs))
3196 MachineIRBuilder MIR(
I);
3198 MIR, SPIRV::OpConstantComposite, 3,
3199 SPIRV::OpConstantCompositeContinuedINTEL, CompositeArgs, ResVReg,
3201 for (
auto *Instr : Instructions) {
3202 Instr->setDebugLoc(
I.getDebugLoc());
3208 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
3214 case Intrinsic::spv_assign_name: {
3215 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpName));
3216 MIB.
addUse(
I.getOperand(
I.getNumExplicitDefs() + 1).getReg());
3217 for (
unsigned i =
I.getNumExplicitDefs() + 2;
3218 i <
I.getNumExplicitOperands(); ++i) {
3219 MIB.
addImm(
I.getOperand(i).getImm());
3223 case Intrinsic::spv_switch: {
3224 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSwitch));
3225 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3226 if (
I.getOperand(i).isReg())
3227 MIB.
addReg(
I.getOperand(i).getReg());
3228 else if (
I.getOperand(i).isCImm())
3229 addNumImm(
I.getOperand(i).getCImm()->getValue(), MIB);
3230 else if (
I.getOperand(i).isMBB())
3231 MIB.
addMBB(
I.getOperand(i).getMBB());
3237 case Intrinsic::spv_loop_merge: {
3238 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoopMerge));
3239 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3240 if (
I.getOperand(i).isMBB())
3241 MIB.
addMBB(
I.getOperand(i).getMBB());
3247 case Intrinsic::spv_selection_merge: {
3249 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSelectionMerge));
3250 assert(
I.getOperand(1).isMBB() &&
3251 "operand 1 to spv_selection_merge must be a basic block");
3252 MIB.
addMBB(
I.getOperand(1).getMBB());
3253 MIB.
addImm(getSelectionOperandForImm(
I.getOperand(2).getImm()));
3256 case Intrinsic::spv_cmpxchg:
3257 return selectAtomicCmpXchg(ResVReg, ResType,
I);
3258 case Intrinsic::spv_unreachable:
3259 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUnreachable))
3261 case Intrinsic::spv_alloca:
3262 return selectFrameIndex(ResVReg, ResType,
I);
3263 case Intrinsic::spv_alloca_array:
3264 return selectAllocaArray(ResVReg, ResType,
I);
3265 case Intrinsic::spv_assume:
3267 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAssumeTrueKHR))
3268 .
addUse(
I.getOperand(1).getReg())
3271 case Intrinsic::spv_expect:
3273 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExpectKHR))
3276 .
addUse(
I.getOperand(2).getReg())
3277 .
addUse(
I.getOperand(3).getReg())
3280 case Intrinsic::arithmetic_fence:
3283 TII.get(SPIRV::OpArithmeticFenceEXT))
3286 .
addUse(
I.getOperand(2).getReg())
3289 return BuildCOPY(ResVReg,
I.getOperand(2).getReg(),
I);
3291 case Intrinsic::spv_thread_id:
3297 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
3299 case Intrinsic::spv_thread_id_in_group:
3305 return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
3307 case Intrinsic::spv_group_id:
3313 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg, ResType,
3315 case Intrinsic::spv_flattened_thread_id_in_group:
3322 return loadBuiltinInputID(SPIRV::BuiltIn::LocalInvocationIndex, ResVReg,
3324 case Intrinsic::spv_workgroup_size:
3325 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupSize, ResVReg,
3327 case Intrinsic::spv_global_size:
3328 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalSize, ResVReg, ResType,
3330 case Intrinsic::spv_global_offset:
3331 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalOffset, ResVReg,
3333 case Intrinsic::spv_num_workgroups:
3334 return loadVec3BuiltinInputID(SPIRV::BuiltIn::NumWorkgroups, ResVReg,
3336 case Intrinsic::spv_subgroup_size:
3337 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupSize, ResVReg, ResType,
3339 case Intrinsic::spv_num_subgroups:
3340 return loadBuiltinInputID(SPIRV::BuiltIn::NumSubgroups, ResVReg, ResType,
3342 case Intrinsic::spv_subgroup_id:
3343 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupId, ResVReg, ResType,
I);
3344 case Intrinsic::spv_subgroup_local_invocation_id:
3345 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupLocalInvocationId,
3346 ResVReg, ResType,
I);
3347 case Intrinsic::spv_subgroup_max_size:
3348 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupMaxSize, ResVReg, ResType,
3350 case Intrinsic::spv_fdot:
3351 return selectFloatDot(ResVReg, ResType,
I);
3352 case Intrinsic::spv_udot:
3353 case Intrinsic::spv_sdot:
3354 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3356 return selectIntegerDot(ResVReg, ResType,
I,
3357 IID == Intrinsic::spv_sdot);
3358 return selectIntegerDotExpansion(ResVReg, ResType,
I);
3359 case Intrinsic::spv_dot4add_i8packed:
3360 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3362 return selectDot4AddPacked<true>(ResVReg, ResType,
I);
3363 return selectDot4AddPackedExpansion<true>(ResVReg, ResType,
I);
3364 case Intrinsic::spv_dot4add_u8packed:
3365 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3367 return selectDot4AddPacked<false>(ResVReg, ResType,
I);
3368 return selectDot4AddPackedExpansion<false>(ResVReg, ResType,
I);
3369 case Intrinsic::spv_all:
3370 return selectAll(ResVReg, ResType,
I);
3371 case Intrinsic::spv_any:
3372 return selectAny(ResVReg, ResType,
I);
3373 case Intrinsic::spv_cross:
3374 return selectExtInst(ResVReg, ResType,
I, CL::cross, GL::Cross);
3375 case Intrinsic::spv_distance:
3376 return selectExtInst(ResVReg, ResType,
I, CL::distance, GL::Distance);
3377 case Intrinsic::spv_lerp:
3378 return selectExtInst(ResVReg, ResType,
I, CL::mix, GL::FMix);
3379 case Intrinsic::spv_length:
3380 return selectExtInst(ResVReg, ResType,
I, CL::length, GL::Length);
3381 case Intrinsic::spv_degrees:
3382 return selectExtInst(ResVReg, ResType,
I, CL::degrees, GL::Degrees);
3383 case Intrinsic::spv_faceforward:
3384 return selectExtInst(ResVReg, ResType,
I, GL::FaceForward);
3385 case Intrinsic::spv_frac:
3386 return selectExtInst(ResVReg, ResType,
I, CL::fract, GL::Fract);
3387 case Intrinsic::spv_isinf:
3388 return selectOpIsInf(ResVReg, ResType,
I);
3389 case Intrinsic::spv_isnan:
3390 return selectOpIsNan(ResVReg, ResType,
I);
3391 case Intrinsic::spv_normalize:
3392 return selectExtInst(ResVReg, ResType,
I, CL::normalize, GL::Normalize);
3393 case Intrinsic::spv_refract:
3394 return selectExtInst(ResVReg, ResType,
I, GL::Refract);
3395 case Intrinsic::spv_reflect:
3396 return selectExtInst(ResVReg, ResType,
I, GL::Reflect);
3397 case Intrinsic::spv_rsqrt:
3398 return selectExtInst(ResVReg, ResType,
I, CL::rsqrt, GL::InverseSqrt);
3399 case Intrinsic::spv_sign:
3400 return selectSign(ResVReg, ResType,
I);
3401 case Intrinsic::spv_smoothstep:
3402 return selectExtInst(ResVReg, ResType,
I, CL::smoothstep, GL::SmoothStep);
3403 case Intrinsic::spv_firstbituhigh:
3404 return selectFirstBitHigh(ResVReg, ResType,
I,
false);
3405 case Intrinsic::spv_firstbitshigh:
3406 return selectFirstBitHigh(ResVReg, ResType,
I,
true);
3407 case Intrinsic::spv_firstbitlow:
3408 return selectFirstBitLow(ResVReg, ResType,
I);
3409 case Intrinsic::spv_group_memory_barrier_with_group_sync: {
3411 auto MemSemConstant =
3412 buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent,
I);
3413 Register MemSemReg = MemSemConstant.first;
3414 Result &= MemSemConstant.second;
3415 auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup,
I);
3416 Register ScopeReg = ScopeConstant.first;
3417 Result &= ScopeConstant.second;
3420 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpControlBarrier))
3426 case Intrinsic::spv_generic_cast_to_ptr_explicit: {
3427 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 1).getReg();
3428 SPIRV::StorageClass::StorageClass ResSC =
3432 "Generic storage class");
3434 TII.get(SPIRV::OpGenericCastToPtrExplicit))
3441 case Intrinsic::spv_lifetime_start:
3442 case Intrinsic::spv_lifetime_end: {
3443 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
3444 : SPIRV::OpLifetimeStop;
3445 int64_t
Size =
I.getOperand(
I.getNumExplicitDefs() + 1).getImm();
3446 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 2).getReg();
3454 case Intrinsic::spv_saturate:
3455 return selectSaturate(ResVReg, ResType,
I);
3456 case Intrinsic::spv_nclamp:
3457 return selectExtInst(ResVReg, ResType,
I, CL::fclamp, GL::NClamp);
3458 case Intrinsic::spv_uclamp:
3459 return selectExtInst(ResVReg, ResType,
I, CL::u_clamp, GL::UClamp);
3460 case Intrinsic::spv_sclamp:
3461 return selectExtInst(ResVReg, ResType,
I, CL::s_clamp, GL::SClamp);
3462 case Intrinsic::spv_wave_active_countbits:
3463 return selectWaveActiveCountBits(ResVReg, ResType,
I);
3464 case Intrinsic::spv_wave_all:
3465 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAll);
3466 case Intrinsic::spv_wave_any:
3467 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAny);
3468 case Intrinsic::spv_wave_is_first_lane:
3469 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformElect);
3470 case Intrinsic::spv_wave_reduce_umax:
3471 return selectWaveReduceMax(ResVReg, ResType,
I,
true);
3472 case Intrinsic::spv_wave_reduce_max:
3473 return selectWaveReduceMax(ResVReg, ResType,
I,
false);
3474 case Intrinsic::spv_wave_reduce_umin:
3475 return selectWaveReduceMin(ResVReg, ResType,
I,
true);
3476 case Intrinsic::spv_wave_reduce_min:
3477 return selectWaveReduceMin(ResVReg, ResType,
I,
false);
3478 case Intrinsic::spv_wave_reduce_sum:
3479 return selectWaveReduceSum(ResVReg, ResType,
I);
3480 case Intrinsic::spv_wave_readlane:
3481 return selectWaveOpInst(ResVReg, ResType,
I,
3482 SPIRV::OpGroupNonUniformShuffle);
3483 case Intrinsic::spv_step:
3484 return selectExtInst(ResVReg, ResType,
I, CL::step, GL::Step);
3485 case Intrinsic::spv_radians:
3486 return selectExtInst(ResVReg, ResType,
I, CL::radians, GL::Radians);
3490 case Intrinsic::instrprof_increment:
3491 case Intrinsic::instrprof_increment_step:
3492 case Intrinsic::instrprof_value_profile:
3495 case Intrinsic::spv_value_md:
3497 case Intrinsic::spv_resource_handlefrombinding: {
3498 return selectHandleFromBinding(ResVReg, ResType,
I);
3500 case Intrinsic::spv_resource_counterhandlefrombinding:
3501 return selectCounterHandleFromBinding(ResVReg, ResType,
I);
3502 case Intrinsic::spv_resource_updatecounter:
3503 return selectUpdateCounter(ResVReg, ResType,
I);
3504 case Intrinsic::spv_resource_store_typedbuffer: {
3505 return selectImageWriteIntrinsic(
I);
3507 case Intrinsic::spv_resource_load_typedbuffer: {
3508 return selectReadImageIntrinsic(ResVReg, ResType,
I);
3510 case Intrinsic::spv_resource_getpointer: {
3511 return selectResourceGetPointer(ResVReg, ResType,
I);
3513 case Intrinsic::spv_discard: {
3514 return selectDiscard(ResVReg, ResType,
I);
3516 case Intrinsic::spv_resource_nonuniformindex: {
3517 return selectResourceNonUniformIndex(ResVReg, ResType,
I);
3519 case Intrinsic::spv_unpackhalf2x16: {
3520 return selectExtInst(ResVReg, ResType,
I, GL::UnpackHalf2x16);
3524 std::string DiagMsg;
3525 raw_string_ostream OS(DiagMsg);
3527 DiagMsg =
"Intrinsic selection not implemented: " + DiagMsg;
// selectHandleFromBinding: lowers spv_resource_handlefrombinding. Only the
// check that distinguishes image-typed results (ResType is OpTypeImage) is
// visible in this fragment; the two lowering paths it selects between are
// missing from this truncated listing.
3534bool SPIRVInstructionSelector::selectHandleFromBinding(
Register &ResVReg,
3536 MachineInstr &
I)
const {
3539 if (ResType->
getOpcode() == SPIRV::OpTypeImage)
3546bool SPIRVInstructionSelector::selectCounterHandleFromBinding(
3549 assert(Intr.getIntrinsicID() ==
3550 Intrinsic::spv_resource_counterhandlefrombinding);
3553 Register MainHandleReg = Intr.getOperand(2).getReg();
3555 assert(MainHandleDef->getIntrinsicID() ==
3556 Intrinsic::spv_resource_handlefrombinding);
3560 uint32_t ArraySize =
getIConstVal(MainHandleDef->getOperand(4).getReg(),
MRI);
3561 Register IndexReg = MainHandleDef->getOperand(5).getReg();
3562 std::string CounterName =
3567 MachineIRBuilder MIRBuilder(
I);
3568 Register CounterVarReg = buildPointerToResource(
3570 Binding, ArraySize, IndexReg, CounterName, MIRBuilder);
3572 return BuildCOPY(ResVReg, CounterVarReg,
I);
3575bool SPIRVInstructionSelector::selectUpdateCounter(
Register &ResVReg,
3577 MachineInstr &
I)
const {
3579 assert(Intr.getIntrinsicID() == Intrinsic::spv_resource_updatecounter);
3581 Register CounterHandleReg = Intr.getOperand(2).getReg();
3582 Register IncrReg = Intr.getOperand(3).getReg();
3590 assert(CounterVarPointeeType &&
3591 CounterVarPointeeType->
getOpcode() == SPIRV::OpTypeStruct &&
3592 "Counter variable must be a struct");
3594 SPIRV::StorageClass::StorageBuffer &&
3595 "Counter variable must be in the storage buffer storage class");
3597 "Counter variable must have exactly 1 member in the struct");
3601 "Counter variable struct must have a single i32 member");
3605 MachineIRBuilder MIRBuilder(
I);
3607 Type::getInt32Ty(
I.getMF()->getFunction().getContext());
3610 LLVMIntType, MIRBuilder, SPIRV::StorageClass::StorageBuffer);
3612 auto Zero = buildI32Constant(0,
I);
3618 if (!
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3619 TII.get(SPIRV::OpAccessChain))
3622 .
addUse(CounterHandleReg)
3630 auto Scope = buildI32Constant(SPIRV::Scope::Device,
I);
3633 auto Semantics = buildI32Constant(SPIRV::MemorySemantics::None,
I);
3634 if (!Semantics.second)
3638 auto Incr = buildI32Constant(
static_cast<uint32_t
>(IncrVal),
I);
3643 if (!
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAtomicIAdd))
3654 return BuildCOPY(ResVReg, AtomicRes,
I);
3662 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIAddS))
3669bool SPIRVInstructionSelector::selectReadImageIntrinsic(
3678 Register ImageReg =
I.getOperand(2).getReg();
3680 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
3686 Register IdxReg =
I.getOperand(3).getReg();
3688 MachineInstr &Pos =
I;
3690 return generateImageReadOrFetch(ResVReg, ResType, NewImageReg, IdxReg, Loc,
3694bool SPIRVInstructionSelector::generateImageReadOrFetch(
3699 "ImageReg is not an image type.");
3701 bool IsSignedInteger =
3706 bool IsFetch = (SampledOp.getImm() == 1);
3709 if (ResultSize == 4) {
3712 TII.get(IsFetch ? SPIRV::OpImageFetch : SPIRV::OpImageRead))
3718 if (IsSignedInteger)
3723 SPIRVType *ReadType = widenTypeToVec4(ResType, Pos);
3727 TII.get(IsFetch ? SPIRV::OpImageFetch : SPIRV::OpImageRead))
3732 if (IsSignedInteger)
3738 if (ResultSize == 1) {
3740 TII.get(SPIRV::OpCompositeExtract))
3747 return extractSubvector(ResVReg, ResType, ReadReg, Pos);
3750bool SPIRVInstructionSelector::selectResourceGetPointer(
3752 Register ResourcePtr =
I.getOperand(2).getReg();
3754 if (RegType->
getOpcode() == SPIRV::OpTypeImage) {
3763 MachineIRBuilder MIRBuilder(
I);
3765 Register IndexReg =
I.getOperand(3).getReg();
3768 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3769 TII.get(SPIRV::OpAccessChain))
3778bool SPIRVInstructionSelector::selectResourceNonUniformIndex(
3780 Register ObjReg =
I.getOperand(2).getReg();
3781 if (!BuildCOPY(ResVReg, ObjReg,
I))
3791 decorateUsesAsNonUniform(ResVReg);
3795void SPIRVInstructionSelector::decorateUsesAsNonUniform(
3798 while (WorkList.
size() > 0) {
3802 bool IsDecorated =
false;
3803 for (MachineInstr &Use :
MRI->use_instructions(CurrentReg)) {
3804 if (
Use.getOpcode() == SPIRV::OpDecorate &&
3805 Use.getOperand(1).getImm() == SPIRV::Decoration::NonUniformEXT) {
3811 if (
Use.getOperand(0).isReg() &&
Use.getOperand(0).isDef()) {
3813 if (ResultReg == CurrentReg)
3821 SPIRV::Decoration::NonUniformEXT, {});
3826bool SPIRVInstructionSelector::extractSubvector(
3828 MachineInstr &InsertionPoint)
const {
3830 [[maybe_unused]] uint64_t InputSize =
3833 assert(InputSize > 1 &&
"The input must be a vector.");
3834 assert(ResultSize > 1 &&
"The result must be a vector.");
3835 assert(ResultSize < InputSize &&
3836 "Cannot extract more element than there are in the input.");
3839 const TargetRegisterClass *ScalarRegClass = GR.
getRegClass(ScalarType);
3840 for (uint64_t
I = 0;
I < ResultSize;
I++) {
3841 Register ComponentReg =
MRI->createVirtualRegister(ScalarRegClass);
3844 TII.get(SPIRV::OpCompositeExtract))
3855 MachineInstrBuilder MIB =
BuildMI(*InsertionPoint.
getParent(), InsertionPoint,
3857 TII.get(SPIRV::OpCompositeConstruct))
3861 for (
Register ComponentReg : ComponentRegisters)
3862 MIB.
addUse(ComponentReg);
3866bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
3867 MachineInstr &
I)
const {
3874 Register ImageReg =
I.getOperand(1).getReg();
3876 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
3882 Register CoordinateReg =
I.getOperand(2).getReg();
3883 Register DataReg =
I.getOperand(3).getReg();
3886 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3887 TII.get(SPIRV::OpImageWrite))
3894Register SPIRVInstructionSelector::buildPointerToResource(
3895 const SPIRVType *SpirvResType, SPIRV::StorageClass::StorageClass SC,
3896 uint32_t Set, uint32_t
Binding, uint32_t ArraySize,
Register IndexReg,
3897 StringRef Name, MachineIRBuilder MIRBuilder)
const {
3899 if (ArraySize == 1) {
3903 "SpirvResType did not have an explicit layout.");
3908 const Type *VarType = ArrayType::get(
const_cast<Type *
>(ResType), ArraySize);
3912 VarPointerType, Set,
Binding, Name, MIRBuilder);
3927bool SPIRVInstructionSelector::selectFirstBitSet16(
3929 unsigned ExtendOpcode,
unsigned BitSetOpcode)
const {
3931 bool Result = selectOpWithSrcs(ExtReg, ResType,
I, {
I.getOperand(2).getReg()},
3935 selectFirstBitSet32(ResVReg, ResType,
I, ExtReg, BitSetOpcode);
3938bool SPIRVInstructionSelector::selectFirstBitSet32(
3940 Register SrcReg,
unsigned BitSetOpcode)
const {
3941 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
3944 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
3950bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
3952 Register SrcReg,
unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
3959 assert(ComponentCount < 5 &&
"Vec 5+ will generate invalid SPIR-V ops");
3961 MachineIRBuilder MIRBuilder(
I);
3969 std::vector<Register> PartialRegs;
3972 unsigned CurrentComponent = 0;
3973 for (; CurrentComponent + 1 < ComponentCount; CurrentComponent += 2) {
3979 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3980 TII.get(SPIRV::OpVectorShuffle))
3985 .
addImm(CurrentComponent)
3986 .
addImm(CurrentComponent + 1);
3994 if (!selectFirstBitSet64(SubVecBitSetReg, Vec2ResType,
I, BitSetResult,
3995 BitSetOpcode, SwapPrimarySide))
3998 PartialRegs.push_back(SubVecBitSetReg);
4002 if (CurrentComponent != ComponentCount) {
4008 if (!selectOpWithSrcs(FinalElemReg, I64Type,
I, {SrcReg, ConstIntLastIdx},
4009 SPIRV::OpVectorExtractDynamic))
4015 if (!selectFirstBitSet64(FinalElemBitSetReg,
BaseType,
I, FinalElemReg,
4016 BitSetOpcode, SwapPrimarySide))
4019 PartialRegs.push_back(FinalElemBitSetReg);
4024 return selectOpWithSrcs(ResVReg, ResType,
I, std::move(PartialRegs),
4025 SPIRV::OpCompositeConstruct);
4028bool SPIRVInstructionSelector::selectFirstBitSet64(
4030 Register SrcReg,
unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
4043 if (ComponentCount > 2) {
4044 return selectFirstBitSet64Overflow(ResVReg, ResType,
I, SrcReg,
4045 BitSetOpcode, SwapPrimarySide);
4049 MachineIRBuilder MIRBuilder(
I);
4051 BaseType, 2 * ComponentCount, MIRBuilder,
false);
4055 if (!selectOpWithSrcs(BitcastReg, PostCastType,
I, {SrcReg},
4061 if (!selectFirstBitSet32(FBSReg, PostCastType,
I, BitcastReg, BitSetOpcode))
4068 bool IsScalarRes = ResType->
getOpcode() != SPIRV::OpTypeVector;
4071 if (!selectOpWithSrcs(HighReg, ResType,
I, {FBSReg, ConstIntZero},
4072 SPIRV::OpVectorExtractDynamic))
4074 if (!selectOpWithSrcs(LowReg, ResType,
I, {FBSReg, ConstIntOne},
4075 SPIRV::OpVectorExtractDynamic))
4079 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
4080 TII.get(SPIRV::OpVectorShuffle))
4088 for (
unsigned J = 0; J < ComponentCount * 2; J += 2) {
4095 MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
4096 TII.get(SPIRV::OpVectorShuffle))
4104 for (
unsigned J = 1; J < ComponentCount * 2; J += 2) {
4125 SelectOp = SPIRV::OpSelectSISCond;
4126 AddOp = SPIRV::OpIAddS;
4134 SelectOp = SPIRV::OpSelectVIVCond;
4135 AddOp = SPIRV::OpIAddV;
4145 if (SwapPrimarySide) {
4146 PrimaryReg = LowReg;
4147 SecondaryReg = HighReg;
4148 PrimaryShiftReg = Reg0;
4149 SecondaryShiftReg = Reg32;
4154 if (!selectOpWithSrcs(BReg, BoolType,
I, {PrimaryReg, NegOneReg},
4160 if (!selectOpWithSrcs(TmpReg, ResType,
I, {
BReg, SecondaryReg, PrimaryReg},
4166 if (!selectOpWithSrcs(ValReg, ResType,
I,
4167 {
BReg, SecondaryShiftReg, PrimaryShiftReg}, SelectOp))
4170 return selectOpWithSrcs(ResVReg, ResType,
I, {ValReg, TmpReg}, AddOp);
4173bool SPIRVInstructionSelector::selectFirstBitHigh(
Register ResVReg,
4176 bool IsSigned)
const {
4178 Register OpReg =
I.getOperand(2).getReg();
4181 unsigned ExtendOpcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
4182 unsigned BitSetOpcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;
4186 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
4188 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
4190 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
4194 "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
4198bool SPIRVInstructionSelector::selectFirstBitLow(
Register ResVReg,
4200 MachineInstr &
I)
const {
4202 Register OpReg =
I.getOperand(2).getReg();
4207 unsigned ExtendOpcode = SPIRV::OpUConvert;
4208 unsigned BitSetOpcode = GL::FindILsb;
4212 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
4214 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
4216 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
4223bool SPIRVInstructionSelector::selectAllocaArray(
Register ResVReg,
4225 MachineInstr &
I)
const {
4229 bool Res =
BuildMI(BB,
I,
I.getDebugLoc(),
4230 TII.get(SPIRV::OpVariableLengthArrayINTEL))
4233 .
addUse(
I.getOperand(2).getReg())
4236 unsigned Alignment =
I.getOperand(3).getImm();
4242bool SPIRVInstructionSelector::selectFrameIndex(
Register ResVReg,
4244 MachineInstr &
I)
const {
4248 bool Res =
BuildMI(*It->getParent(), It, It->getDebugLoc(),
4249 TII.get(SPIRV::OpVariable))
4252 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
4255 unsigned Alignment =
I.getOperand(2).getImm();
4262bool SPIRVInstructionSelector::selectBranch(MachineInstr &
I)
const {
4267 const MachineInstr *PrevI =
I.getPrevNode();
4269 if (PrevI !=
nullptr && PrevI->
getOpcode() == TargetOpcode::G_BRCOND) {
4270 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
4273 .
addMBB(
I.getOperand(0).getMBB())
4277 .
addMBB(
I.getOperand(0).getMBB())
4281bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &
I)
const {
4292 const MachineInstr *NextI =
I.getNextNode();
4294 if (NextI !=
nullptr && NextI->
getOpcode() == SPIRV::OpBranchConditional)
4300 MachineBasicBlock *NextMBB =
I.getMF()->getBlockNumbered(NextMBBNum);
4301 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
4302 .
addUse(
I.getOperand(0).getReg())
4303 .
addMBB(
I.getOperand(1).getMBB())
4308bool SPIRVInstructionSelector::selectPhi(
Register ResVReg,
4310 MachineInstr &
I)
const {
4311 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpPhi))
4314 const unsigned NumOps =
I.getNumOperands();
4315 for (
unsigned i = 1; i <
NumOps; i += 2) {
4316 MIB.
addUse(
I.getOperand(i + 0).getReg());
4317 MIB.
addMBB(
I.getOperand(i + 1).getMBB());
4325bool SPIRVInstructionSelector::selectGlobalValue(
4326 Register ResVReg, MachineInstr &
I,
const MachineInstr *Init)
const {
4328 MachineIRBuilder MIRBuilder(
I);
4329 const GlobalValue *GV =
I.
getOperand(1).getGlobal();
4332 std::string GlobalIdent;
4334 unsigned &
ID = UnnamedGlobalIDs[GV];
4336 ID = UnnamedGlobalIDs.size();
4337 GlobalIdent =
"__unnamed_" + Twine(
ID).str();
4364 GVFun ? SPIRV::StorageClass::CodeSectionINTEL
4371 MachineRegisterInfo *
MRI = MIRBuilder.
getMRI();
4374 MRI->setRegClass(FuncVReg, &SPIRV::pIDRegClass);
4375 MachineInstrBuilder MIB1 =
4376 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
4379 MachineInstrBuilder MIB2 =
4381 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
4385 GR.
add(ConstVal, MIB2);
4391 MachineInstrBuilder MIB3 =
4392 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
4395 GR.
add(ConstVal, MIB3);
4398 assert(NewReg != ResVReg);
4399 return BuildCOPY(ResVReg, NewReg,
I);
4409 const std::optional<SPIRV::LinkageType::LinkageType> LnkType =
4418 GlobalVar->isConstant(), LnkType, MIRBuilder,
true);
4422bool SPIRVInstructionSelector::selectLog10(
Register ResVReg,
4424 MachineInstr &
I)
const {
4426 return selectExtInst(ResVReg, ResType,
I, CL::log10);
4434 MachineIRBuilder MIRBuilder(
I);
4440 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
4443 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
4445 .
add(
I.getOperand(1))
4450 ResType->
getOpcode() == SPIRV::OpTypeFloat);
4453 ResType->
getOpcode() == SPIRV::OpTypeVector
4460 auto Opcode = ResType->
getOpcode() == SPIRV::OpTypeVector
4461 ? SPIRV::OpVectorTimesScalar
4471bool SPIRVInstructionSelector::selectModf(
Register ResVReg,
4473 MachineInstr &
I)
const {
4489 MachineIRBuilder MIRBuilder(
I);
4492 ResType, MIRBuilder, SPIRV::StorageClass::Function);
4504 MachineBasicBlock &EntryBB =
I.getMF()->front();
4508 BuildMI(EntryBB, VarPos,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
4511 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function));
4517 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
4520 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::OpenCL_std))
4523 .
add(
I.getOperand(
I.getNumExplicitDefs()))
4527 Register IntegralPartReg =
I.getOperand(1).getReg();
4528 if (IntegralPartReg.
isValid()) {
4530 auto LoadMIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4539 assert(
false &&
"GLSL::Modf is deprecated.");
4550bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
4551 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
4552 const SPIRVType *ResType, MachineInstr &
I)
const {
4553 MachineIRBuilder MIRBuilder(
I);
4557 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
4569 SPIRV::StorageClass::Input,
nullptr,
true, std::nullopt, MIRBuilder,
4573 MachineRegisterInfo *
MRI = MIRBuilder.
getMRI();
4574 Register LoadedRegister =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
4580 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4587 assert(
I.getOperand(2).isReg());
4588 const uint32_t ThreadId =
foldImm(
I.getOperand(2),
MRI);
4592 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
4602bool SPIRVInstructionSelector::loadBuiltinInputID(
4603 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
4604 const SPIRVType *ResType, MachineInstr &
I)
const {
4605 MachineIRBuilder MIRBuilder(
I);
4607 ResType, MIRBuilder, SPIRV::StorageClass::Input);
4622 SPIRV::StorageClass::Input,
nullptr,
true, std::nullopt, MIRBuilder,
4626 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4635 MachineInstr &
I)
const {
4636 MachineIRBuilder MIRBuilder(
I);
4637 if (
Type->getOpcode() != SPIRV::OpTypeVector)
4641 if (VectorSize == 4)
4649bool SPIRVInstructionSelector::loadHandleBeforePosition(
4651 MachineInstr &Pos)
const {
4654 Intrinsic::spv_resource_handlefrombinding);
4662 bool IsStructuredBuffer = ResType->
getOpcode() == SPIRV::OpTypePointer;
4663 MachineIRBuilder MIRBuilder(HandleDef);
4665 SPIRV::StorageClass::StorageClass SC = SPIRV::StorageClass::UniformConstant;
4667 if (IsStructuredBuffer) {
4672 Register VarReg = buildPointerToResource(VarType, SC, Set,
Binding, ArraySize,
4673 IndexReg, Name, MIRBuilder);
4677 uint32_t LoadOpcode =
4678 IsStructuredBuffer ? SPIRV::OpCopyObject : SPIRV::OpLoad;
4681 TII.get(LoadOpcode))
4689InstructionSelector *
4693 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
DXIL Resource Implicit Binding
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static StringRef getName(Value *V)
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static APFloat getOneFP(const Type *LLVMFloatTy)
static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC)
static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg)
static bool mayApplyGenericSelection(unsigned Opcode)
static APFloat getZeroFP(const Type *LLVMFloatTy)
std::vector< std::pair< SPIRV::InstructionSet::InstructionSet, uint32_t > > ExtInstList
static unsigned getBoolCmpOpcode(unsigned PredNum)
static unsigned getICmpOpcode(unsigned PredNum)
static void addMemoryOperands(MachineMemOperand *MemOp, MachineInstrBuilder &MIB, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry &GR)
static bool isConstReg(MachineRegisterInfo *MRI, MachineInstr *OpDef, SmallPtrSet< SPIRVType *, 4 > &Visited)
static unsigned getPtrCmpOpcode(unsigned Pred)
bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static const fltSemantics & IEEEsingle()
static const fltSemantics & IEEEdouble()
static const fltSemantics & IEEEhalf()
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Represents a call to an intrinsic.
Intrinsic::ID getIntrinsicID() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
@ InternalLinkage
Rename collisions when linking (static functions).
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
@ MOVolatile
The memory access is volatile.
@ MONonTemporal
The memory access is non-temporal.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
defusechain_instr_iterator< true, false, false, true > use_instr_iterator
use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the specified register,...
defusechain_instr_iterator< false, true, false, true > def_instr_iterator
def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the specified register,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Analysis providing profile information.
Holds all the information related to register banks.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
Register getOrCreateConstInt(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
SPIRVType * getResultType(Register VReg, MachineFunction *MF=nullptr)
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder, bool EmitIR)
MachineInstr * getOrAddMemAliasingINTELInst(MachineIRBuilder &MIRBuilder, const MDNode *AliasingListMD)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, const MachineFunction &MF)
Register getOrCreateUndef(MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, const std::optional< SPIRV::LinkageType::LinkageType > &LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
SPIRVType * changePointerStorageClass(SPIRVType *PtrType, SPIRV::StorageClass::StorageClass SC, MachineInstr &I)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
bool isBitcastCompatible(const SPIRVType *Type1, const SPIRVType *Type2) const
unsigned getScalarOrVectorComponentCount(Register VReg) const
bool isScalarOrVectorSigned(const SPIRVType *Type) const
Register getOrCreateGlobalVariableWithBinding(const SPIRVType *VarType, uint32_t Set, uint32_t Binding, StringRef Name, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
unsigned getPointerSize() const
SPIRVType * getOrCreateSPIRVPointerType(const Type *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
SPIRVType * getPointeeType(SPIRVType *PtrType)
void invalidateMachineInstr(MachineInstr *MI)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
bool findValueAttrs(const MachineInstr *Key, Type *&Ty, StringRef &Name)
void addGlobalObject(const Value *V, const MachineFunction *MF, Register R)
SPIRVType * getScalarOrVectorComponentType(Register VReg) const
void recordFunctionPointer(const MachineOperand *MO, const Function *F)
bool isAggregateType(SPIRVType *Type) const
const TargetRegisterClass * getRegClass(SPIRVType *SpvType) const
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder, bool EmitIR)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
MachineFunction * setCurrentFunc(MachineFunction &MF)
Register getOrCreateConstVector(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
Type * getDeducedGlobalValueType(const GlobalValue *Global)
LLT getRegType(SPIRVType *SpvType) const
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
Register getOrCreateConstFP(APFloat Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
const SPIRVType * retrieveScalarOrVectorIntType(const SPIRVType *Type) const
bool erase(const MachineInstr *MI)
bool add(SPIRV::IRHandle Handle, const MachineInstr *MI)
Register find(SPIRV::IRHandle Handle, const MachineFunction *MF)
bool isPhysicalSPIRV() const
bool isAtLeastSPIRVVer(VersionTuple VerToCompareTo) const
bool canUseExtInstSet(SPIRV::InstructionSet::InstructionSet E) const
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
The instances of the Type class are immutable: once they are created, they are never changed.
@ HalfTyID
16-bit floating point type
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isStructTy() const
True if this is an instance of StructType.
TypeID getTypeID() const
Return the type id for the type.
Value * getOperand(unsigned i) const
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
LLVM_C_ABI LLVMTypeRef LLVMIntType(unsigned NumBits)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface o...
NodeAddr< DefNode * > Def
NodeAddr< InstrNode * > Instr
NodeAddr< UseNode * > Use
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isTypeFoldingSupported(unsigned Opcode)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing the effect of MI in a DIExpression.
LLVM_ABI bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands to the instruction's register class.
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
unsigned getArrayComponentCount(const MachineRegisterInfo *MRI, const MachineInstr *ResType)
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
MachineBasicBlock::iterator getFirstValidInstructionInsertPoint(MachineBasicBlock &BB)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I)
Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
Type * toTypedPointer(Type *Ty)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
const MachineInstr SPIRVType
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector&lt;T, 0&gt;).
MachineInstr * passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI)
bool isa(const From &Val)
isa&lt;X&gt; - Return true if the parameter to the template is an instance of one of the template type arguments.
std::optional< SPIRV::LinkageType::LinkageType > getSpirvLinkageTypeFor(const SPIRVSubtarget &ST, const GlobalValue &GV)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
SPIRV::StorageClass::StorageClass addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI)
AtomicOrdering
Atomic ordering for LLVM's memory model.
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
InstructionSelector * createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget, const RegisterBankInfo &RBI)
std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI)
int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
DWARFExpression::Operation Op
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool hasInitializer(const GlobalVariable *GV)
MachineInstr * getVRegDef(MachineRegisterInfo &MRI, Register Reg)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
LLVM_ABI bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...