32#include "llvm/IR/IntrinsicsSPIRV.h"
36#define DEBUG_TYPE "spirv-isel"
43 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
// Maps an i32 immediate (taken from a selection-control hint) to the SPIR-V
// SelectionControl operand value used on OpSelectionMerge.
// NOTE(review): this extract elides the guarding `if` conditions for the
// first two returns (original lines 49 and 51/53) — presumably they compare
// Imm against the flatten / dont-flatten hint encodings; confirm against the
// full file before relying on this.
47llvm::SPIRV::SelectionControl::SelectionControl
48getSelectionOperandForImm(
    int Imm) {
50 return SPIRV::SelectionControl::Flatten;
52 return SPIRV::SelectionControl::DontFlatten;
// Fallback when no hint matches.
54 return SPIRV::SelectionControl::None;
58#define GET_GLOBALISEL_PREDICATE_BITSET
59#include "SPIRVGenGlobalISel.inc"
60#undef GET_GLOBALISEL_PREDICATE_BITSET
87#define GET_GLOBALISEL_PREDICATES_DECL
88#include "SPIRVGenGlobalISel.inc"
89#undef GET_GLOBALISEL_PREDICATES_DECL
91#define GET_GLOBALISEL_TEMPORARIES_DECL
92#include "SPIRVGenGlobalISel.inc"
93#undef GET_GLOBALISEL_TEMPORARIES_DECL
115 unsigned BitSetOpcode)
const;
119 unsigned BitSetOpcode)
const;
123 unsigned BitSetOpcode,
bool SwapPrimarySide)
const;
127 unsigned BitSetOpcode,
128 bool SwapPrimarySide)
const;
135 unsigned Opcode)
const;
138 unsigned Opcode)
const;
155 unsigned NegateOpcode = 0)
const;
215 template <
bool Signed>
218 template <
bool Signed>
236 bool IsSigned,
unsigned Opcode)
const;
238 bool IsSigned)
const;
244 bool IsSigned)
const;
283 GL::GLSLExtInst GLInst)
const;
288 GL::GLSLExtInst GLInst)
const;
310 bool selectCounterHandleFromBinding(
Register &ResVReg,
319 bool selectResourceNonUniformIndex(
Register &ResVReg,
329 std::pair<Register, bool>
331 const SPIRVType *ResType =
nullptr)
const;
343 SPIRV::StorageClass::StorageClass SC)
const;
350 SPIRV::StorageClass::StorageClass SC,
362 bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
365 bool loadBuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
370 void decorateUsesAsNonUniform(
Register &NonUniformReg)
const;
// Returns true when the handle's sampled type is a signed integer: the
// target-extension type is "spirv.SignedImage" and its first type parameter
// is an integer type.
// NOTE(review): the extract elides the statement that defines `TET`
// (original line 374, presumably a cast of HandleType to TargetExtType) and
// the body/early-return of the "spirv.Image" branch (original lines
// 376-377) — confirm against the full file.
373bool sampledTypeIsSignedInteger(
    const llvm::Type *HandleType) {
375 if (
TET->getTargetExtName() ==
"spirv.Image") {
// Anything that is not a plain image is asserted to be a signed image.
378 assert(
TET->getTargetExtName() ==
"spirv.SignedImage");
379 return TET->getTypeParameter(0)->isIntegerTy();
383#define GET_GLOBALISEL_IMPL
384#include "SPIRVGenGlobalISel.inc"
385#undef GET_GLOBALISEL_IMPL
391 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
394#include
"SPIRVGenGlobalISel.inc"
397#include
"SPIRVGenGlobalISel.inc"
409 InstructionSelector::setupMF(MF, VT, CoverageInfo, PSI, BFI);
413void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
414 if (HasVRegsReset == &MF)
419 for (
unsigned I = 0,
E =
MRI.getNumVirtRegs();
I !=
E; ++
I) {
421 LLT RegType =
MRI.getType(
Reg);
429 for (
const auto &
MBB : MF) {
430 for (
const auto &
MI :
MBB) {
433 if (
MI.getOpcode() != SPIRV::ASSIGN_TYPE)
437 LLT DstType =
MRI.getType(DstReg);
439 LLT SrcType =
MRI.getType(SrcReg);
440 if (DstType != SrcType)
441 MRI.setType(DstReg,
MRI.getType(SrcReg));
443 const TargetRegisterClass *DstRC =
MRI.getRegClassOrNull(DstReg);
444 const TargetRegisterClass *SrcRC =
MRI.getRegClassOrNull(SrcReg);
445 if (DstRC != SrcRC && SrcRC)
446 MRI.setRegClass(DstReg, SrcRC);
462 case TargetOpcode::G_CONSTANT:
463 case TargetOpcode::G_FCONSTANT:
465 case TargetOpcode::G_INTRINSIC:
466 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
467 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
469 Intrinsic::spv_const_composite;
470 case TargetOpcode::G_BUILD_VECTOR:
471 case TargetOpcode::G_SPLAT_VECTOR: {
482 case SPIRV::OpConstantTrue:
483 case SPIRV::OpConstantFalse:
484 case SPIRV::OpConstantI:
485 case SPIRV::OpConstantF:
486 case SPIRV::OpConstantComposite:
487 case SPIRV::OpConstantCompositeContinuedINTEL:
488 case SPIRV::OpConstantSampler:
489 case SPIRV::OpConstantNull:
491 case SPIRV::OpConstantFunctionPointerINTEL:
507 for (
const auto &MO :
MI.all_defs()) {
509 if (
Reg.isPhysical() || !
MRI.use_nodbg_empty(
Reg))
512 if (
MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE ||
MI.isFakeUse() ||
513 MI.isLifetimeMarker())
517 if (
MI.mayStore() ||
MI.isCall() ||
518 (
MI.mayLoad() &&
MI.hasOrderedMemoryRef()) ||
MI.isPosition() ||
519 MI.isDebugInstr() ||
MI.isTerminator() ||
MI.isJumpTableDebugInfo())
524bool SPIRVInstructionSelector::select(MachineInstr &
I) {
525 resetVRegsType(*
I.getParent()->getParent());
527 assert(
I.getParent() &&
"Instruction should be in a basic block!");
528 assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
533 if (Opcode == SPIRV::ASSIGN_TYPE) {
534 Register DstReg =
I.getOperand(0).getReg();
535 Register SrcReg =
I.getOperand(1).getReg();
536 auto *
Def =
MRI->getVRegDef(SrcReg);
538 Def->getOpcode() != TargetOpcode::G_CONSTANT &&
539 Def->getOpcode() != TargetOpcode::G_FCONSTANT) {
541 if (
Def->getOpcode() == TargetOpcode::G_SELECT) {
542 Register SelectDstReg =
Def->getOperand(0).getReg();
546 Def->removeFromParent();
547 MRI->replaceRegWith(DstReg, SelectDstReg);
549 I.removeFromParent();
551 Res = selectImpl(
I, *CoverageInfo);
553 if (!Res &&
Def->getOpcode() != TargetOpcode::G_CONSTANT) {
554 dbgs() <<
"Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
558 assert(Res ||
Def->getOpcode() == TargetOpcode::G_CONSTANT);
565 MRI->setRegClass(SrcReg,
MRI->getRegClass(DstReg));
566 MRI->replaceRegWith(SrcReg, DstReg);
568 I.removeFromParent();
570 }
else if (
I.getNumDefs() == 1) {
577 if (DeadMIs.contains(&
I)) {
587 if (
I.getNumOperands() !=
I.getNumExplicitOperands()) {
588 LLVM_DEBUG(
errs() <<
"Generic instr has unexpected implicit operands\n");
594 bool HasDefs =
I.getNumDefs() > 0;
597 assert(!HasDefs || ResType ||
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
598 I.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
599 if (spvSelect(ResVReg, ResType,
I)) {
601 for (
unsigned i = 0; i <
I.getNumDefs(); ++i)
604 I.removeFromParent();
612 case TargetOpcode::G_CONSTANT:
613 case TargetOpcode::G_FCONSTANT:
615 case TargetOpcode::G_SADDO:
616 case TargetOpcode::G_SSUBO:
623 MachineInstr &
I)
const {
624 const TargetRegisterClass *DstRC =
MRI->getRegClassOrNull(DestReg);
625 const TargetRegisterClass *SrcRC =
MRI->getRegClassOrNull(SrcReg);
626 if (DstRC != SrcRC && SrcRC)
627 MRI->setRegClass(DestReg, SrcRC);
628 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
629 TII.get(TargetOpcode::COPY))
635bool SPIRVInstructionSelector::spvSelect(
Register ResVReg,
637 MachineInstr &
I)
const {
638 const unsigned Opcode =
I.getOpcode();
640 return selectImpl(
I, *CoverageInfo);
642 case TargetOpcode::G_CONSTANT:
643 case TargetOpcode::G_FCONSTANT:
644 return selectConst(ResVReg, ResType,
I);
645 case TargetOpcode::G_GLOBAL_VALUE:
646 return selectGlobalValue(ResVReg,
I);
647 case TargetOpcode::G_IMPLICIT_DEF:
648 return selectOpUndef(ResVReg, ResType,
I);
649 case TargetOpcode::G_FREEZE:
650 return selectFreeze(ResVReg, ResType,
I);
652 case TargetOpcode::G_INTRINSIC:
653 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
654 case TargetOpcode::G_INTRINSIC_CONVERGENT:
655 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
656 return selectIntrinsic(ResVReg, ResType,
I);
657 case TargetOpcode::G_BITREVERSE:
658 return selectBitreverse(ResVReg, ResType,
I);
660 case TargetOpcode::G_BUILD_VECTOR:
661 return selectBuildVector(ResVReg, ResType,
I);
662 case TargetOpcode::G_SPLAT_VECTOR:
663 return selectSplatVector(ResVReg, ResType,
I);
665 case TargetOpcode::G_SHUFFLE_VECTOR: {
666 MachineBasicBlock &BB = *
I.getParent();
667 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
670 .
addUse(
I.getOperand(1).getReg())
671 .
addUse(
I.getOperand(2).getReg());
672 for (
auto V :
I.getOperand(3).getShuffleMask())
676 case TargetOpcode::G_MEMMOVE:
677 case TargetOpcode::G_MEMCPY:
678 case TargetOpcode::G_MEMSET:
679 return selectMemOperation(ResVReg,
I);
681 case TargetOpcode::G_ICMP:
682 return selectICmp(ResVReg, ResType,
I);
683 case TargetOpcode::G_FCMP:
684 return selectFCmp(ResVReg, ResType,
I);
686 case TargetOpcode::G_FRAME_INDEX:
687 return selectFrameIndex(ResVReg, ResType,
I);
689 case TargetOpcode::G_LOAD:
690 return selectLoad(ResVReg, ResType,
I);
691 case TargetOpcode::G_STORE:
692 return selectStore(
I);
694 case TargetOpcode::G_BR:
695 return selectBranch(
I);
696 case TargetOpcode::G_BRCOND:
697 return selectBranchCond(
I);
699 case TargetOpcode::G_PHI:
700 return selectPhi(ResVReg, ResType,
I);
702 case TargetOpcode::G_FPTOSI:
703 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
704 case TargetOpcode::G_FPTOUI:
705 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
707 case TargetOpcode::G_FPTOSI_SAT:
708 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
709 case TargetOpcode::G_FPTOUI_SAT:
710 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
712 case TargetOpcode::G_SITOFP:
713 return selectIToF(ResVReg, ResType,
I,
true, SPIRV::OpConvertSToF);
714 case TargetOpcode::G_UITOFP:
715 return selectIToF(ResVReg, ResType,
I,
false, SPIRV::OpConvertUToF);
717 case TargetOpcode::G_CTPOP:
718 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitCount);
719 case TargetOpcode::G_SMIN:
720 return selectExtInst(ResVReg, ResType,
I, CL::s_min, GL::SMin);
721 case TargetOpcode::G_UMIN:
722 return selectExtInst(ResVReg, ResType,
I, CL::u_min, GL::UMin);
724 case TargetOpcode::G_SMAX:
725 return selectExtInst(ResVReg, ResType,
I, CL::s_max, GL::SMax);
726 case TargetOpcode::G_UMAX:
727 return selectExtInst(ResVReg, ResType,
I, CL::u_max, GL::UMax);
729 case TargetOpcode::G_SCMP:
730 return selectSUCmp(ResVReg, ResType,
I,
true);
731 case TargetOpcode::G_UCMP:
732 return selectSUCmp(ResVReg, ResType,
I,
false);
733 case TargetOpcode::G_LROUND:
734 case TargetOpcode::G_LLROUND: {
736 MRI->createVirtualRegister(
MRI->getRegClass(ResVReg),
"lround");
737 MRI->setRegClass(regForLround, &SPIRV::iIDRegClass);
739 regForLround, *(
I.getParent()->getParent()));
741 I, CL::round, GL::Round);
743 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConvertFToS))
749 case TargetOpcode::G_STRICT_FMA:
750 case TargetOpcode::G_FMA:
751 return selectExtInst(ResVReg, ResType,
I, CL::fma, GL::Fma);
753 case TargetOpcode::G_STRICT_FLDEXP:
754 return selectExtInst(ResVReg, ResType,
I, CL::ldexp);
756 case TargetOpcode::G_FPOW:
757 return selectExtInst(ResVReg, ResType,
I, CL::pow, GL::Pow);
758 case TargetOpcode::G_FPOWI:
759 return selectExtInst(ResVReg, ResType,
I, CL::pown);
761 case TargetOpcode::G_FEXP:
762 return selectExtInst(ResVReg, ResType,
I, CL::exp, GL::Exp);
763 case TargetOpcode::G_FEXP2:
764 return selectExtInst(ResVReg, ResType,
I, CL::exp2, GL::Exp2);
765 case TargetOpcode::G_FMODF:
766 return selectModf(ResVReg, ResType,
I);
768 case TargetOpcode::G_FLOG:
769 return selectExtInst(ResVReg, ResType,
I, CL::log, GL::Log);
770 case TargetOpcode::G_FLOG2:
771 return selectExtInst(ResVReg, ResType,
I, CL::log2, GL::Log2);
772 case TargetOpcode::G_FLOG10:
773 return selectLog10(ResVReg, ResType,
I);
775 case TargetOpcode::G_FABS:
776 return selectExtInst(ResVReg, ResType,
I, CL::fabs, GL::FAbs);
777 case TargetOpcode::G_ABS:
778 return selectExtInst(ResVReg, ResType,
I, CL::s_abs, GL::SAbs);
780 case TargetOpcode::G_FMINNUM:
781 case TargetOpcode::G_FMINIMUM:
782 return selectExtInst(ResVReg, ResType,
I, CL::fmin, GL::NMin);
783 case TargetOpcode::G_FMAXNUM:
784 case TargetOpcode::G_FMAXIMUM:
785 return selectExtInst(ResVReg, ResType,
I, CL::fmax, GL::NMax);
787 case TargetOpcode::G_FCOPYSIGN:
788 return selectExtInst(ResVReg, ResType,
I, CL::copysign);
790 case TargetOpcode::G_FCEIL:
791 return selectExtInst(ResVReg, ResType,
I, CL::ceil, GL::Ceil);
792 case TargetOpcode::G_FFLOOR:
793 return selectExtInst(ResVReg, ResType,
I, CL::floor, GL::Floor);
795 case TargetOpcode::G_FCOS:
796 return selectExtInst(ResVReg, ResType,
I, CL::cos, GL::Cos);
797 case TargetOpcode::G_FSIN:
798 return selectExtInst(ResVReg, ResType,
I, CL::sin, GL::Sin);
799 case TargetOpcode::G_FTAN:
800 return selectExtInst(ResVReg, ResType,
I, CL::tan, GL::Tan);
801 case TargetOpcode::G_FACOS:
802 return selectExtInst(ResVReg, ResType,
I, CL::acos, GL::Acos);
803 case TargetOpcode::G_FASIN:
804 return selectExtInst(ResVReg, ResType,
I, CL::asin, GL::Asin);
805 case TargetOpcode::G_FATAN:
806 return selectExtInst(ResVReg, ResType,
I, CL::atan, GL::Atan);
807 case TargetOpcode::G_FATAN2:
808 return selectExtInst(ResVReg, ResType,
I, CL::atan2, GL::Atan2);
809 case TargetOpcode::G_FCOSH:
810 return selectExtInst(ResVReg, ResType,
I, CL::cosh, GL::Cosh);
811 case TargetOpcode::G_FSINH:
812 return selectExtInst(ResVReg, ResType,
I, CL::sinh, GL::Sinh);
813 case TargetOpcode::G_FTANH:
814 return selectExtInst(ResVReg, ResType,
I, CL::tanh, GL::Tanh);
816 case TargetOpcode::G_STRICT_FSQRT:
817 case TargetOpcode::G_FSQRT:
818 return selectExtInst(ResVReg, ResType,
I, CL::sqrt, GL::Sqrt);
820 case TargetOpcode::G_CTTZ:
821 case TargetOpcode::G_CTTZ_ZERO_UNDEF:
822 return selectExtInst(ResVReg, ResType,
I, CL::ctz);
823 case TargetOpcode::G_CTLZ:
824 case TargetOpcode::G_CTLZ_ZERO_UNDEF:
825 return selectExtInst(ResVReg, ResType,
I, CL::clz);
827 case TargetOpcode::G_INTRINSIC_ROUND:
828 return selectExtInst(ResVReg, ResType,
I, CL::round, GL::Round);
829 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
830 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
831 case TargetOpcode::G_INTRINSIC_TRUNC:
832 return selectExtInst(ResVReg, ResType,
I, CL::trunc, GL::Trunc);
833 case TargetOpcode::G_FRINT:
834 case TargetOpcode::G_FNEARBYINT:
835 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
837 case TargetOpcode::G_SMULH:
838 return selectExtInst(ResVReg, ResType,
I, CL::s_mul_hi);
839 case TargetOpcode::G_UMULH:
840 return selectExtInst(ResVReg, ResType,
I, CL::u_mul_hi);
842 case TargetOpcode::G_SADDSAT:
843 return selectExtInst(ResVReg, ResType,
I, CL::s_add_sat);
844 case TargetOpcode::G_UADDSAT:
845 return selectExtInst(ResVReg, ResType,
I, CL::u_add_sat);
846 case TargetOpcode::G_SSUBSAT:
847 return selectExtInst(ResVReg, ResType,
I, CL::s_sub_sat);
848 case TargetOpcode::G_USUBSAT:
849 return selectExtInst(ResVReg, ResType,
I, CL::u_sub_sat);
851 case TargetOpcode::G_FFREXP:
852 return selectFrexp(ResVReg, ResType,
I);
854 case TargetOpcode::G_UADDO:
855 return selectOverflowArith(ResVReg, ResType,
I,
856 ResType->
getOpcode() == SPIRV::OpTypeVector
857 ? SPIRV::OpIAddCarryV
858 : SPIRV::OpIAddCarryS);
859 case TargetOpcode::G_USUBO:
860 return selectOverflowArith(ResVReg, ResType,
I,
861 ResType->
getOpcode() == SPIRV::OpTypeVector
862 ? SPIRV::OpISubBorrowV
863 : SPIRV::OpISubBorrowS);
864 case TargetOpcode::G_UMULO:
865 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpUMulExtended);
866 case TargetOpcode::G_SMULO:
867 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpSMulExtended);
869 case TargetOpcode::G_SEXT:
870 return selectExt(ResVReg, ResType,
I,
true);
871 case TargetOpcode::G_ANYEXT:
872 case TargetOpcode::G_ZEXT:
873 return selectExt(ResVReg, ResType,
I,
false);
874 case TargetOpcode::G_TRUNC:
875 return selectTrunc(ResVReg, ResType,
I);
876 case TargetOpcode::G_FPTRUNC:
877 case TargetOpcode::G_FPEXT:
878 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpFConvert);
880 case TargetOpcode::G_PTRTOINT:
881 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertPtrToU);
882 case TargetOpcode::G_INTTOPTR:
883 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertUToPtr);
884 case TargetOpcode::G_BITCAST:
885 return selectBitcast(ResVReg, ResType,
I);
886 case TargetOpcode::G_ADDRSPACE_CAST:
887 return selectAddrSpaceCast(ResVReg, ResType,
I);
888 case TargetOpcode::G_PTR_ADD: {
890 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
894 assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
895 (*II).getOpcode() == TargetOpcode::COPY ||
896 (*II).getOpcode() == SPIRV::OpVariable) &&
899 bool IsGVInit =
false;
901 UseIt =
MRI->use_instr_begin(
I.getOperand(0).getReg()),
902 UseEnd =
MRI->use_instr_end();
903 UseIt != UseEnd; UseIt = std::next(UseIt)) {
904 if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
905 (*UseIt).getOpcode() == SPIRV::OpVariable) {
915 if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
918 Register NewVReg =
MRI->createGenericVirtualRegister(
MRI->getType(GV));
919 MRI->setRegClass(NewVReg,
MRI->getRegClass(GV));
928 "incompatible result and operand types in a bitcast");
930 MachineInstrBuilder MIB =
931 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitcast))
938 ? SPIRV::OpInBoundsAccessChain
939 : SPIRV::OpInBoundsPtrAccessChain))
943 .
addUse(
I.getOperand(2).getReg())
946 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
950 static_cast<uint32_t
>(SPIRV::Opcode::InBoundsPtrAccessChain))
952 .
addUse(
I.getOperand(2).getReg())
960 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
963 .
addImm(
static_cast<uint32_t
>(
964 SPIRV::Opcode::InBoundsPtrAccessChain))
967 .
addUse(
I.getOperand(2).getReg());
971 case TargetOpcode::G_ATOMICRMW_OR:
972 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicOr);
973 case TargetOpcode::G_ATOMICRMW_ADD:
974 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicIAdd);
975 case TargetOpcode::G_ATOMICRMW_AND:
976 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicAnd);
977 case TargetOpcode::G_ATOMICRMW_MAX:
978 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMax);
979 case TargetOpcode::G_ATOMICRMW_MIN:
980 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMin);
981 case TargetOpcode::G_ATOMICRMW_SUB:
982 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicISub);
983 case TargetOpcode::G_ATOMICRMW_XOR:
984 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicXor);
985 case TargetOpcode::G_ATOMICRMW_UMAX:
986 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMax);
987 case TargetOpcode::G_ATOMICRMW_UMIN:
988 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMin);
989 case TargetOpcode::G_ATOMICRMW_XCHG:
990 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicExchange);
991 case TargetOpcode::G_ATOMIC_CMPXCHG:
992 return selectAtomicCmpXchg(ResVReg, ResType,
I);
994 case TargetOpcode::G_ATOMICRMW_FADD:
995 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT);
996 case TargetOpcode::G_ATOMICRMW_FSUB:
998 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT,
1000 case TargetOpcode::G_ATOMICRMW_FMIN:
1001 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMinEXT);
1002 case TargetOpcode::G_ATOMICRMW_FMAX:
1003 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMaxEXT);
1005 case TargetOpcode::G_FENCE:
1006 return selectFence(
I);
1008 case TargetOpcode::G_STACKSAVE:
1009 return selectStackSave(ResVReg, ResType,
I);
1010 case TargetOpcode::G_STACKRESTORE:
1011 return selectStackRestore(
I);
1013 case TargetOpcode::G_UNMERGE_VALUES:
1019 case TargetOpcode::G_TRAP:
1020 case TargetOpcode::G_UBSANTRAP:
1021 case TargetOpcode::DBG_LABEL:
1023 case TargetOpcode::G_DEBUGTRAP:
1024 return selectDebugTrap(ResVReg, ResType,
I);
// Lowers llvm.debugtrap (G_DEBUGTRAP) — the visible default emits OpNop so
// that execution can continue past the trap point.
// NOTE(review): the extract elides original line(s) around 1035 (possibly an
// extension-dependent choice of a real trap opcode) and the MIB finalization
// after BuildMI (constrainAllUses) — confirm against the full file.
1031bool SPIRVInstructionSelector::selectDebugTrap(
    Register ResVReg,
1033 MachineInstr &
I)
    const {
// OpNop: a no-op placeholder instruction in core SPIR-V.
1034 unsigned Opcode = SPIRV::OpNop;
1036 return BuildMI(BB,
I,
I.getDebugLoc(),
    TII.get(Opcode))
// GLSL-only overload of selectExtInst: reports an error (built from the
// printed instruction text) when the GLSL.std.450 extended instruction set
// is not in use, then forwards to the list-taking overload with a single
// {GLSL_std_450, GLInst} entry.
// NOTE(review): the extract elides the condition being tested at original
// line 1044-1045 (presumably !STI.canUseExtInstSet(GLSL_std_450)) and the
// error-reporting call around line 1050 — confirm against the full file.
1040bool SPIRVInstructionSelector::selectExtInst(
    Register ResVReg,
1043 GL::GLSLExtInst GLInst)
    const {
1045 SPIRV::InstructionSet::InstructionSet::GLSL_std_450)) {
// Build a diagnostic message by printing the offending instruction.
1046 std::string DiagMsg;
1047 raw_string_ostream OS(DiagMsg);
1048 I.print(OS,
true,
false,
false,
false);
1049 DiagMsg +=
" is only supported with the GLSL extended instruction set.\n";
1052 return selectExtInst(ResVReg, ResType,
I,
1053 {{SPIRV::InstructionSet::GLSL_std_450, GLInst}});
// OpenCL-only overload of selectExtInst: forwards to the list-taking
// overload with a single {OpenCL_std, CLInst} entry.
1056bool SPIRVInstructionSelector::selectExtInst(
    Register ResVReg,
1059 CL::OpenCLExtInst CLInst)
    const {
1060 return selectExtInst(ResVReg, ResType,
I,
1061 {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
// Dual-set overload of selectExtInst: offers both the OpenCL.std and
// GLSL.std.450 encodings of the operation; the list-taking overload picks
// whichever instruction set the target can use.
1064bool SPIRVInstructionSelector::selectExtInst(
    Register ResVReg,
1067 CL::OpenCLExtInst CLInst,
1068 GL::GLSLExtInst GLInst)
    const {
1069 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
1070 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
1071 return selectExtInst(ResVReg, ResType,
I, ExtInsts);
1074bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
1079 for (
const auto &Ex : Insts) {
1080 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1081 uint32_t Opcode = Ex.second;
1084 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1087 .
addImm(
static_cast<uint32_t
>(Set))
1090 const unsigned NumOps =
I.getNumOperands();
1093 I.getOperand(Index).getType() ==
1094 MachineOperand::MachineOperandType::MO_IntrinsicID)
1097 MIB.
add(
I.getOperand(Index));
// Dual-set wrapper used by the G_LROUND/G_LLROUND lowering: builds the
// {OpenCL.std, GLSL.std.450} candidate list and forwards to the list-taking
// selectExtInstForLRound overload.
1103bool SPIRVInstructionSelector::selectExtInstForLRound(
1105 CL::OpenCLExtInst CLInst, GL::GLSLExtInst GLInst)
    const {
1106 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
1107 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
1108 return selectExtInstForLRound(ResVReg, ResType,
I, ExtInsts);
1111bool SPIRVInstructionSelector::selectExtInstForLRound(
1114 for (
const auto &Ex : Insts) {
1115 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1116 uint32_t Opcode = Ex.second;
1119 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1122 .
addImm(
static_cast<uint32_t
>(Set))
1124 const unsigned NumOps =
I.getNumOperands();
1127 I.getOperand(Index).getType() ==
1128 MachineOperand::MachineOperandType::MO_IntrinsicID)
1131 MIB.
add(
I.getOperand(Index));
1139bool SPIRVInstructionSelector::selectFrexp(
Register ResVReg,
1141 MachineInstr &
I)
const {
1142 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CL::frexp},
1143 {SPIRV::InstructionSet::GLSL_std_450, GL::Frexp}};
1144 for (
const auto &Ex : ExtInsts) {
1145 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
1146 uint32_t Opcode = Ex.second;
1150 MachineIRBuilder MIRBuilder(
I);
1153 PointeeTy, MIRBuilder, SPIRV::StorageClass::Function);
1158 auto MIB =
BuildMI(*It->getParent(), It, It->getDebugLoc(),
1159 TII.get(SPIRV::OpVariable))
1162 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
1166 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1169 .
addImm(
static_cast<uint32_t
>(Ex.first))
1171 .
add(
I.getOperand(2))
1176 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1177 .
addDef(
I.getOperand(1).getReg())
1186bool SPIRVInstructionSelector::selectOpWithSrcs(
Register ResVReg,
1189 std::vector<Register> Srcs,
1190 unsigned Opcode)
const {
1191 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
1200bool SPIRVInstructionSelector::selectUnOp(
Register ResVReg,
1203 unsigned Opcode)
const {
1205 Register SrcReg =
I.getOperand(1).getReg();
1208 MRI->def_instr_begin(SrcReg);
1209 DefIt !=
MRI->def_instr_end(); DefIt = std::next(DefIt)) {
1210 if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1211 (*DefIt).getOpcode() == SPIRV::OpVariable) {
1217 uint32_t SpecOpcode = 0;
1219 case SPIRV::OpConvertPtrToU:
1220 SpecOpcode =
static_cast<uint32_t
>(SPIRV::Opcode::ConvertPtrToU);
1222 case SPIRV::OpConvertUToPtr:
1223 SpecOpcode =
static_cast<uint32_t
>(SPIRV::Opcode::ConvertUToPtr);
1227 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1228 TII.get(SPIRV::OpSpecConstantOp))
1236 return selectOpWithSrcs(ResVReg, ResType,
I, {
I.getOperand(1).getReg()},
1240bool SPIRVInstructionSelector::selectBitcast(
Register ResVReg,
1242 MachineInstr &
I)
const {
1243 Register OpReg =
I.getOperand(1).getReg();
1247 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
1255 if (
MemOp->isVolatile())
1256 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1257 if (
MemOp->isNonTemporal())
1258 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1259 if (
MemOp->getAlign().value())
1260 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
1266 if (ST->canUseExtension(SPIRV::Extension::SPV_INTEL_memory_access_aliasing)) {
1267 if (
auto *MD =
MemOp->getAAInfo().Scope) {
1271 static_cast<uint32_t>(SPIRV::MemoryOperand::AliasScopeINTELMask);
1273 if (
auto *MD =
MemOp->getAAInfo().NoAlias) {
1277 static_cast<uint32_t>(SPIRV::MemoryOperand::NoAliasINTELMask);
1281 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
1283 if (SpvMemOp &
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
1295 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1297 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1299 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None))
1303bool SPIRVInstructionSelector::selectLoad(
Register ResVReg,
1305 MachineInstr &
I)
const {
1312 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1313 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1315 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1317 MRI->createVirtualRegister(
MRI->getRegClass(HandleReg));
1319 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1323 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1324 return generateImageRead(ResVReg, ResType, NewHandleReg, IdxReg,
1325 I.getDebugLoc(),
I);
1329 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1333 if (!
I.getNumMemOperands()) {
1334 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1336 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1339 MachineIRBuilder MIRBuilder(
I);
1345bool SPIRVInstructionSelector::selectStore(MachineInstr &
I)
const {
1347 Register StoreVal =
I.getOperand(0 + OpOffset).getReg();
1353 IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
1354 Register HandleReg = IntPtrDef->getOperand(2).getReg();
1356 MRI->createVirtualRegister(
MRI->getRegClass(HandleReg));
1359 if (!loadHandleBeforePosition(NewHandleReg, HandleType, *HandleDef,
I)) {
1363 Register IdxReg = IntPtrDef->getOperand(3).getReg();
1364 if (HandleType->
getOpcode() == SPIRV::OpTypeImage) {
1365 auto BMI =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1366 TII.get(SPIRV::OpImageWrite))
1372 if (sampledTypeIsSignedInteger(LLVMHandleType))
1375 return BMI.constrainAllUses(
TII,
TRI, RBI);
1380 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpStore))
1383 if (!
I.getNumMemOperands()) {
1384 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1386 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1389 MachineIRBuilder MIRBuilder(
I);
// Lowers llvm.stacksave (G_STACKSAVE) to OpSaveMemoryINTEL; requires the
// SPV_INTEL_variable_length_array extension and reports an error otherwise.
// NOTE(review): the extract elides the error-reporting call that wraps the
// message literal (original lines 1399-1403) and the MIB finalization after
// BuildMI — confirm against the full file.
1395bool SPIRVInstructionSelector::selectStackSave(
    Register ResVReg,
1397 MachineInstr &
I)
    const {
1398 if (!STI.
canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1400 "llvm.stacksave intrinsic: this instruction requires the following "
1401 "SPIR-V extension: SPV_INTEL_variable_length_array",
1404 return BuildMI(BB,
I,
I.getDebugLoc(),
    TII.get(SPIRV::OpSaveMemoryINTEL))
// Lowers llvm.stackrestore (G_STACKRESTORE) to OpRestoreMemoryINTEL; like
// selectStackSave this requires SPV_INTEL_variable_length_array.
// NOTE(review): the extract elides the error-reporting call wrapping the
// message literal (original lines 1412-1415), the body of the !isReg() guard
// (original lines 1417-1418), and the MIB finalization — confirm against the
// full file.
1410bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &
I)
    const {
1411 if (!STI.
canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1413 "llvm.stackrestore intrinsic: this instruction requires the following "
1414 "SPIR-V extension: SPV_INTEL_variable_length_array",
// Guard: the restored pointer must be a register operand.
1416 if (!
I.getOperand(0).isReg())
1419 return BuildMI(BB,
I,
I.getDebugLoc(),
    TII.get(SPIRV::OpRestoreMemoryINTEL))
1420 .
addUse(
I.getOperand(0).getReg())
1424bool SPIRVInstructionSelector::selectMemOperation(
Register ResVReg,
1425 MachineInstr &
I)
const {
1427 Register SrcReg =
I.getOperand(1).getReg();
1429 if (
I.getOpcode() == TargetOpcode::G_MEMSET) {
1430 MachineIRBuilder MIRBuilder(
I);
1431 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
1434 Type *ValTy = Type::getInt8Ty(
I.getMF()->getFunction().getContext());
1435 Type *ArrTy = ArrayType::get(ValTy, Num);
1437 ArrTy, MIRBuilder, SPIRV::StorageClass::UniformConstant);
1440 ArrTy, MIRBuilder, SPIRV::AccessQualifier::None,
false);
1447 GlobalVariable *GV =
new GlobalVariable(*CurFunction.
getParent(), LLVMArrTy,
1452 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
1455 .
addImm(SPIRV::StorageClass::UniformConstant)
1464 ValTy,
I, SPIRV::StorageClass::UniformConstant);
1466 selectOpWithSrcs(SrcReg, SourceTy,
I, {VarReg}, SPIRV::OpBitcast);
1468 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemorySized))
1469 .
addUse(
I.getOperand(0).getReg())
1471 .
addUse(
I.getOperand(2).getReg());
1472 if (
I.getNumMemOperands()) {
1473 MachineIRBuilder MIRBuilder(
I);
1482bool SPIRVInstructionSelector::selectAtomicRMW(
Register ResVReg,
1486 unsigned NegateOpcode)
const {
1489 const MachineMemOperand *MemOp = *
I.memoperands_begin();
1492 auto ScopeConstant = buildI32Constant(Scope,
I);
1493 Register ScopeReg = ScopeConstant.first;
1494 Result &= ScopeConstant.second;
1502 auto MemSemConstant = buildI32Constant(MemSem ,
I);
1503 Register MemSemReg = MemSemConstant.first;
1504 Result &= MemSemConstant.second;
1506 Register ValueReg =
I.getOperand(2).getReg();
1507 if (NegateOpcode != 0) {
1510 Result &= selectOpWithSrcs(TmpReg, ResType,
I, {ValueReg}, NegateOpcode);
1515 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(NewOpcode))
1525bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &
I)
const {
1526 unsigned ArgI =
I.getNumOperands() - 1;
1528 I.getOperand(ArgI).isReg() ?
I.getOperand(ArgI).getReg() :
Register(0);
1531 if (!DefType || DefType->
getOpcode() != SPIRV::OpTypeVector)
1533 "cannot select G_UNMERGE_VALUES with a non-vector argument");
1539 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1540 Register ResVReg =
I.getOperand(i).getReg();
1544 ResType = ScalarType;
1550 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1554 .
addImm(
static_cast<int64_t
>(i));
1560bool SPIRVInstructionSelector::selectFence(MachineInstr &
I)
const {
1563 auto MemSemConstant = buildI32Constant(MemSem,
I);
1564 Register MemSemReg = MemSemConstant.first;
1565 bool Result = MemSemConstant.second;
1567 uint32_t
Scope =
static_cast<uint32_t
>(
1569 auto ScopeConstant = buildI32Constant(Scope,
I);
1570 Register ScopeReg = ScopeConstant.first;
1571 Result &= ScopeConstant.second;
1574 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpMemoryBarrier))
1580bool SPIRVInstructionSelector::selectOverflowArith(
Register ResVReg,
1583 unsigned Opcode)
const {
1584 Type *ResTy =
nullptr;
1588 "Not enough info to select the arithmetic with overflow instruction");
1591 "with overflow instruction");
1597 MachineIRBuilder MIRBuilder(
I);
1599 ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
1600 assert(
I.getNumDefs() > 1 &&
"Not enought operands");
1606 Register ZeroReg = buildZerosVal(ResType,
I);
1609 MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
1611 if (ResName.
size() > 0)
1616 BuildMI(BB, MIRBuilder.getInsertPt(),
I.getDebugLoc(),
TII.get(Opcode))
1619 for (
unsigned i =
I.getNumDefs(); i <
I.getNumOperands(); ++i)
1620 MIB.
addUse(
I.getOperand(i).getReg());
1625 MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
1626 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1628 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1629 .
addDef(i == 1 ? HigherVReg :
I.getOperand(i).getReg())
1637 .
addDef(
I.getOperand(1).getReg())
1644bool SPIRVInstructionSelector::selectAtomicCmpXchg(
Register ResVReg,
1646 MachineInstr &
I)
const {
1654 const MachineMemOperand *MemOp = *
I.memoperands_begin();
1657 auto ScopeConstant = buildI32Constant(Scope,
I);
1658 ScopeReg = ScopeConstant.first;
1659 Result &= ScopeConstant.second;
1661 unsigned ScSem =
static_cast<uint32_t
>(
1664 unsigned MemSemEq =
static_cast<uint32_t
>(
getMemSemantics(AO)) | ScSem;
1665 auto MemSemEqConstant = buildI32Constant(MemSemEq,
I);
1666 MemSemEqReg = MemSemEqConstant.first;
1667 Result &= MemSemEqConstant.second;
1669 unsigned MemSemNeq =
static_cast<uint32_t
>(
getMemSemantics(FO)) | ScSem;
1670 if (MemSemEq == MemSemNeq)
1671 MemSemNeqReg = MemSemEqReg;
1673 auto MemSemNeqConstant = buildI32Constant(MemSemEq,
I);
1674 MemSemNeqReg = MemSemNeqConstant.first;
1675 Result &= MemSemNeqConstant.second;
1678 ScopeReg =
I.getOperand(5).getReg();
1679 MemSemEqReg =
I.getOperand(6).getReg();
1680 MemSemNeqReg =
I.getOperand(7).getReg();
1684 Register Val =
I.getOperand(4).getReg();
1689 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpAtomicCompareExchange))
1716 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpCompositeInsert))
1727 case SPIRV::StorageClass::DeviceOnlyINTEL:
1728 case SPIRV::StorageClass::HostOnlyINTEL:
1737 bool IsGRef =
false;
1738 bool IsAllowedRefs =
1739 llvm::all_of(
MRI->use_instructions(ResVReg), [&IsGRef](
auto const &It) {
1740 unsigned Opcode = It.getOpcode();
1741 if (Opcode == SPIRV::OpConstantComposite ||
1742 Opcode == SPIRV::OpVariable ||
1743 isSpvIntrinsic(It, Intrinsic::spv_init_global))
1744 return IsGRef = true;
1745 return Opcode == SPIRV::OpName;
1747 return IsAllowedRefs && IsGRef;
1750Register SPIRVInstructionSelector::getUcharPtrTypeReg(
1751 MachineInstr &
I, SPIRV::StorageClass::StorageClass SC)
const {
1753 Type::getInt8Ty(
I.getMF()->getFunction().getContext()),
I, SC));
1757SPIRVInstructionSelector::buildSpecConstantOp(MachineInstr &
I,
Register Dest,
1759 uint32_t Opcode)
const {
1760 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1761 TII.get(SPIRV::OpSpecConstantOp))
1769SPIRVInstructionSelector::buildConstGenericPtr(MachineInstr &
I,
Register SrcPtr,
1773 Register Tmp =
MRI->createVirtualRegister(&SPIRV::pIDRegClass);
1775 SPIRV::StorageClass::Generic),
1777 MachineFunction *MF =
I.getParent()->getParent();
1779 MachineInstrBuilder MIB = buildSpecConstantOp(
1781 static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric));
1791bool SPIRVInstructionSelector::selectAddrSpaceCast(
Register ResVReg,
1793 MachineInstr &
I)
const {
1797 Register SrcPtr =
I.getOperand(1).getReg();
1801 if (SrcPtrTy->
getOpcode() != SPIRV::OpTypePointer ||
1802 ResType->
getOpcode() != SPIRV::OpTypePointer)
1803 return BuildCOPY(ResVReg, SrcPtr,
I);
1813 unsigned SpecOpcode =
1815 ?
static_cast<uint32_t
>(SPIRV::Opcode::PtrCastToGeneric)
1818 ? static_cast<uint32_t>(
SPIRV::Opcode::GenericCastToPtr)
1825 return buildSpecConstantOp(
I, ResVReg, SrcPtr,
1826 getUcharPtrTypeReg(
I, DstSC), SpecOpcode)
1827 .constrainAllUses(
TII,
TRI, RBI);
1829 MachineInstrBuilder MIB = buildConstGenericPtr(
I, SrcPtr, SrcPtrTy);
1831 buildSpecConstantOp(
1833 getUcharPtrTypeReg(
I, DstSC),
1834 static_cast<uint32_t
>(SPIRV::Opcode::GenericCastToPtr))
1835 .constrainAllUses(
TII,
TRI, RBI);
1841 return BuildCOPY(ResVReg, SrcPtr,
I);
1843 if ((SrcSC == SPIRV::StorageClass::Function &&
1844 DstSC == SPIRV::StorageClass::Private) ||
1845 (DstSC == SPIRV::StorageClass::Function &&
1846 SrcSC == SPIRV::StorageClass::Private))
1847 return BuildCOPY(ResVReg, SrcPtr,
I);
1851 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1854 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
1875 return selectUnOp(ResVReg, ResType,
I,
1876 SPIRV::OpPtrCastToCrossWorkgroupINTEL);
1878 return selectUnOp(ResVReg, ResType,
I,
1879 SPIRV::OpCrossWorkgroupCastToPtrINTEL);
1881 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1883 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
1893 return SPIRV::OpFOrdEqual;
1895 return SPIRV::OpFOrdGreaterThanEqual;
1897 return SPIRV::OpFOrdGreaterThan;
1899 return SPIRV::OpFOrdLessThanEqual;
1901 return SPIRV::OpFOrdLessThan;
1903 return SPIRV::OpFOrdNotEqual;
1905 return SPIRV::OpOrdered;
1907 return SPIRV::OpFUnordEqual;
1909 return SPIRV::OpFUnordGreaterThanEqual;
1911 return SPIRV::OpFUnordGreaterThan;
1913 return SPIRV::OpFUnordLessThanEqual;
1915 return SPIRV::OpFUnordLessThan;
1917 return SPIRV::OpFUnordNotEqual;
1919 return SPIRV::OpUnordered;
1929 return SPIRV::OpIEqual;
1931 return SPIRV::OpINotEqual;
1933 return SPIRV::OpSGreaterThanEqual;
1935 return SPIRV::OpSGreaterThan;
1937 return SPIRV::OpSLessThanEqual;
1939 return SPIRV::OpSLessThan;
1941 return SPIRV::OpUGreaterThanEqual;
1943 return SPIRV::OpUGreaterThan;
1945 return SPIRV::OpULessThanEqual;
1947 return SPIRV::OpULessThan;
1956 return SPIRV::OpPtrEqual;
1958 return SPIRV::OpPtrNotEqual;
1969 return SPIRV::OpLogicalEqual;
1971 return SPIRV::OpLogicalNotEqual;
2005bool SPIRVInstructionSelector::selectAnyOrAll(
Register ResVReg,
2008 unsigned OpAnyOrAll)
const {
2009 assert(
I.getNumOperands() == 3);
2010 assert(
I.getOperand(2).isReg());
2012 Register InputRegister =
I.getOperand(2).getReg();
2019 bool IsVectorTy = InputType->
getOpcode() == SPIRV::OpTypeVector;
2020 if (IsBoolTy && !IsVectorTy) {
2021 assert(ResVReg ==
I.getOperand(0).getReg());
2022 return BuildCOPY(ResVReg, InputRegister,
I);
2026 unsigned SpirvNotEqualId =
2027 IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
2034 IsBoolTy ? InputRegister
2043 IsFloatTy ? buildZerosValF(InputType,
I) : buildZerosVal(InputType,
I);
2063bool SPIRVInstructionSelector::selectAll(
Register ResVReg,
2065 MachineInstr &
I)
const {
2066 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAll);
2069bool SPIRVInstructionSelector::selectAny(
Register ResVReg,
2071 MachineInstr &
I)
const {
2072 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAny);
2076bool SPIRVInstructionSelector::selectFloatDot(
Register ResVReg,
2078 MachineInstr &
I)
const {
2079 assert(
I.getNumOperands() == 4);
2080 assert(
I.getOperand(2).isReg());
2081 assert(
I.getOperand(3).isReg());
2088 "dot product requires a vector of at least 2 components");
2096 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpDot))
2099 .
addUse(
I.getOperand(2).getReg())
2100 .
addUse(
I.getOperand(3).getReg())
2104bool SPIRVInstructionSelector::selectIntegerDot(
Register ResVReg,
2108 assert(
I.getNumOperands() == 4);
2109 assert(
I.getOperand(2).isReg());
2110 assert(
I.getOperand(3).isReg());
2113 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2117 .
addUse(
I.getOperand(2).getReg())
2118 .
addUse(
I.getOperand(3).getReg())
2124bool SPIRVInstructionSelector::selectIntegerDotExpansion(
2126 assert(
I.getNumOperands() == 4);
2127 assert(
I.getOperand(2).isReg());
2128 assert(
I.getOperand(3).isReg());
2132 Register Vec0 =
I.getOperand(2).getReg();
2133 Register Vec1 =
I.getOperand(3).getReg();
2146 "dot product requires a vector of at least 2 components");
2160 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2183bool SPIRVInstructionSelector::selectOpIsInf(
Register ResVReg,
2185 MachineInstr &
I)
const {
2187 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsInf))
2190 .
addUse(
I.getOperand(2).getReg())
2194bool SPIRVInstructionSelector::selectOpIsNan(
Register ResVReg,
2196 MachineInstr &
I)
const {
2198 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIsNan))
2201 .
addUse(
I.getOperand(2).getReg())
2205template <
bool Signed>
2206bool SPIRVInstructionSelector::selectDot4AddPacked(
Register ResVReg,
2208 MachineInstr &
I)
const {
2209 assert(
I.getNumOperands() == 5);
2210 assert(
I.getOperand(2).isReg());
2211 assert(
I.getOperand(3).isReg());
2212 assert(
I.getOperand(4).isReg());
2215 Register Acc =
I.getOperand(2).getReg();
2219 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
2239template <
bool Signed>
2240bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
2242 assert(
I.getNumOperands() == 5);
2243 assert(
I.getOperand(2).isReg());
2244 assert(
I.getOperand(3).isReg());
2245 assert(
I.getOperand(4).isReg());
2250 Register Acc =
I.getOperand(2).getReg();
2256 Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;
2260 for (
unsigned i = 0; i < 4; i++) {
2262 Register AElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2273 Register BElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2293 Register MaskMul =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2305 i < 3 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
2321bool SPIRVInstructionSelector::selectSaturate(
Register ResVReg,
2323 MachineInstr &
I)
const {
2324 assert(
I.getNumOperands() == 3);
2325 assert(
I.getOperand(2).isReg());
2327 Register VZero = buildZerosValF(ResType,
I);
2328 Register VOne = buildOnesValF(ResType,
I);
2330 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
2333 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2335 .
addUse(
I.getOperand(2).getReg())
2341bool SPIRVInstructionSelector::selectSign(
Register ResVReg,
2343 MachineInstr &
I)
const {
2344 assert(
I.getNumOperands() == 3);
2345 assert(
I.getOperand(2).isReg());
2347 Register InputRegister =
I.getOperand(2).getReg();
2349 auto &
DL =
I.getDebugLoc();
2359 bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;
2361 auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
2363 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass)
2370 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
2375 if (NeedsConversion) {
2376 auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
2387bool SPIRVInstructionSelector::selectWaveOpInst(
Register ResVReg,
2390 unsigned Opcode)
const {
2394 auto BMI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2400 for (
unsigned J = 2; J <
I.getNumOperands(); J++) {
2401 BMI.addUse(
I.getOperand(J).getReg());
2407bool SPIRVInstructionSelector::selectWaveActiveCountBits(
2413 bool Result = selectWaveOpInst(BallotReg, BallotType,
I,
2414 SPIRV::OpGroupNonUniformBallot);
2418 TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
2423 .
addImm(SPIRV::GroupOperation::Reduce)
2430bool SPIRVInstructionSelector::selectWaveReduceMax(
Register ResVReg,
2433 bool IsUnsigned)
const {
2434 assert(
I.getNumOperands() == 3);
2435 assert(
I.getOperand(2).isReg());
2437 Register InputRegister =
I.getOperand(2).getReg();
2446 auto IntegerOpcodeType =
2447 IsUnsigned ? SPIRV::OpGroupNonUniformUMax : SPIRV::OpGroupNonUniformSMax;
2448 auto Opcode = IsFloatTy ? SPIRV::OpGroupNonUniformFMax : IntegerOpcodeType;
2449 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2454 .
addImm(SPIRV::GroupOperation::Reduce)
2455 .
addUse(
I.getOperand(2).getReg())
2459bool SPIRVInstructionSelector::selectWaveReduceSum(
Register ResVReg,
2461 MachineInstr &
I)
const {
2462 assert(
I.getNumOperands() == 3);
2463 assert(
I.getOperand(2).isReg());
2465 Register InputRegister =
I.getOperand(2).getReg();
2475 IsFloatTy ? SPIRV::OpGroupNonUniformFAdd : SPIRV::OpGroupNonUniformIAdd;
2476 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2481 .
addImm(SPIRV::GroupOperation::Reduce)
2482 .
addUse(
I.getOperand(2).getReg());
2485bool SPIRVInstructionSelector::selectBitreverse(
Register ResVReg,
2487 MachineInstr &
I)
const {
2489 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitReverse))
2492 .
addUse(
I.getOperand(1).getReg())
2496bool SPIRVInstructionSelector::selectFreeze(
Register ResVReg,
2498 MachineInstr &
I)
const {
2504 if (!
I.getOperand(0).isReg() || !
I.getOperand(1).isReg())
2506 Register OpReg =
I.getOperand(1).getReg();
2507 if (MachineInstr *Def =
MRI->getVRegDef(OpReg)) {
2508 if (
Def->getOpcode() == TargetOpcode::COPY)
2509 Def =
MRI->getVRegDef(
Def->getOperand(1).getReg());
2511 switch (
Def->getOpcode()) {
2512 case SPIRV::ASSIGN_TYPE:
2513 if (MachineInstr *AssignToDef =
2514 MRI->getVRegDef(
Def->getOperand(1).getReg())) {
2515 if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
2516 Reg =
Def->getOperand(2).getReg();
2519 case SPIRV::OpUndef:
2520 Reg =
Def->getOperand(1).getReg();
2523 unsigned DestOpCode;
2525 DestOpCode = SPIRV::OpConstantNull;
2527 DestOpCode = TargetOpcode::COPY;
2530 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DestOpCode))
2531 .
addDef(
I.getOperand(0).getReg())
2538bool SPIRVInstructionSelector::selectBuildVector(
Register ResVReg,
2540 MachineInstr &
I)
const {
2542 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2544 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2548 if (
I.getNumExplicitOperands() -
I.getNumExplicitDefs() !=
N)
2553 for (
unsigned i =
I.getNumExplicitDefs();
2554 i <
I.getNumExplicitOperands() && IsConst; ++i)
2558 if (!IsConst &&
N < 2)
2560 "There must be at least two constituent operands in a vector");
2563 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2564 TII.get(IsConst ? SPIRV::OpConstantComposite
2565 : SPIRV::OpCompositeConstruct))
2568 for (
unsigned i =
I.getNumExplicitDefs(); i <
I.getNumExplicitOperands(); ++i)
2569 MIB.
addUse(
I.getOperand(i).getReg());
2573bool SPIRVInstructionSelector::selectSplatVector(
Register ResVReg,
2575 MachineInstr &
I)
const {
2577 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2579 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2585 if (!
I.getOperand(
OpIdx).isReg())
2592 if (!IsConst &&
N < 2)
2594 "There must be at least two constituent operands in a vector");
2597 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2598 TII.get(IsConst ? SPIRV::OpConstantComposite
2599 : SPIRV::OpCompositeConstruct))
2602 for (
unsigned i = 0; i <
N; ++i)
2607bool SPIRVInstructionSelector::selectDiscard(
Register ResVReg,
2609 MachineInstr &
I)
const {
2614 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
2616 Opcode = SPIRV::OpDemoteToHelperInvocation;
2618 Opcode = SPIRV::OpKill;
2620 if (MachineInstr *NextI =
I.getNextNode()) {
2622 NextI->removeFromParent();
2627 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2631bool SPIRVInstructionSelector::selectCmp(
Register ResVReg,
2634 MachineInstr &
I)
const {
2635 Register Cmp0 =
I.getOperand(2).getReg();
2636 Register Cmp1 =
I.getOperand(3).getReg();
2639 "CMP operands should have the same type");
2640 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(CmpOpc))
2649bool SPIRVInstructionSelector::selectICmp(
Register ResVReg,
2651 MachineInstr &
I)
const {
2652 auto Pred =
I.getOperand(1).getPredicate();
2655 Register CmpOperand =
I.getOperand(2).getReg();
2662 return selectCmp(ResVReg, ResType, CmpOpc,
I);
2665std::pair<Register, bool>
2666SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &
I,
2672 auto ConstInt = ConstantInt::get(LLVMTy, Val);
2680 ?
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
2683 :
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
2686 .
addImm(APInt(32, Val).getZExtValue());
2688 GR.
add(ConstInt,
MI);
2693bool SPIRVInstructionSelector::selectFCmp(
Register ResVReg,
2695 MachineInstr &
I)
const {
2697 return selectCmp(ResVReg, ResType, CmpOp,
I);
2701 MachineInstr &
I)
const {
2704 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2710 MachineInstr &
I)
const {
2714 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2720 MachineInstr &
I)
const {
2724 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2731 MachineInstr &
I)
const {
2735 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2740bool SPIRVInstructionSelector::selectSelect(
Register ResVReg,
2742 MachineInstr &
I)
const {
2743 Register SelectFirstArg =
I.getOperand(2).getReg();
2744 Register SelectSecondArg =
I.getOperand(3).getReg();
2753 SPIRV::OpTypeVector;
2760 Opcode = IsScalarBool ? SPIRV::OpSelectVFSCond : SPIRV::OpSelectVFVCond;
2761 }
else if (IsPtrTy) {
2762 Opcode = IsScalarBool ? SPIRV::OpSelectVPSCond : SPIRV::OpSelectVPVCond;
2764 Opcode = IsScalarBool ? SPIRV::OpSelectVISCond : SPIRV::OpSelectVIVCond;
2768 Opcode = IsScalarBool ? SPIRV::OpSelectSFSCond : SPIRV::OpSelectVFVCond;
2769 }
else if (IsPtrTy) {
2770 Opcode = IsScalarBool ? SPIRV::OpSelectSPSCond : SPIRV::OpSelectVPVCond;
2772 Opcode = IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
2775 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2778 .
addUse(
I.getOperand(1).getReg())
2784bool SPIRVInstructionSelector::selectSelectDefaultArgs(
Register ResVReg,
2787 bool IsSigned)
const {
2789 Register ZeroReg = buildZerosVal(ResType,
I);
2790 Register OneReg = buildOnesVal(IsSigned, ResType,
I);
2794 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
2795 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2798 .
addUse(
I.getOperand(1).getReg())
2804bool SPIRVInstructionSelector::selectIToF(
Register ResVReg,
2806 MachineInstr &
I,
bool IsSigned,
2807 unsigned Opcode)
const {
2808 Register SrcReg =
I.getOperand(1).getReg();
2814 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
2819 selectSelectDefaultArgs(SrcReg, TmpType,
I,
false);
2821 return selectOpWithSrcs(ResVReg, ResType,
I, {SrcReg}, Opcode);
2824bool SPIRVInstructionSelector::selectExt(
Register ResVReg,
2826 MachineInstr &
I,
bool IsSigned)
const {
2827 Register SrcReg =
I.getOperand(1).getReg();
2829 return selectSelectDefaultArgs(ResVReg, ResType,
I, IsSigned);
2832 if (SrcType == ResType)
2833 return BuildCOPY(ResVReg, SrcReg,
I);
2835 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2836 return selectUnOp(ResVReg, ResType,
I, Opcode);
2839bool SPIRVInstructionSelector::selectSUCmp(
Register ResVReg,
2842 bool IsSigned)
const {
2843 MachineIRBuilder MIRBuilder(
I);
2844 MachineRegisterInfo *
MRI = MIRBuilder.getMRI();
2859 TII.get(IsSigned ? SPIRV::OpSLessThanEqual
2860 : SPIRV::OpULessThanEqual))
2863 .
addUse(
I.getOperand(1).getReg())
2864 .
addUse(
I.getOperand(2).getReg())
2870 TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
2873 .
addUse(
I.getOperand(1).getReg())
2874 .
addUse(
I.getOperand(2).getReg())
2882 unsigned SelectOpcode =
2883 N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
2888 .
addUse(buildOnesVal(
true, ResType,
I))
2889 .
addUse(buildZerosVal(ResType,
I))
2896 .
addUse(buildOnesVal(
false, ResType,
I))
2900bool SPIRVInstructionSelector::selectIntToBool(
Register IntReg,
2907 bool IsVectorTy = IntTy->
getOpcode() == SPIRV::OpTypeVector;
2908 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
2910 Register One = buildOnesVal(
false, IntTy,
I);
2926bool SPIRVInstructionSelector::selectTrunc(
Register ResVReg,
2928 MachineInstr &
I)
const {
2929 Register IntReg =
I.getOperand(1).getReg();
2932 return selectIntToBool(IntReg, ResVReg,
I, ArgType, ResType);
2933 if (ArgType == ResType)
2934 return BuildCOPY(ResVReg, IntReg,
I);
2936 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2937 return selectUnOp(ResVReg, ResType,
I, Opcode);
2940bool SPIRVInstructionSelector::selectConst(
Register ResVReg,
2942 MachineInstr &
I)
const {
2943 unsigned Opcode =
I.getOpcode();
2944 unsigned TpOpcode = ResType->
getOpcode();
2946 if (TpOpcode == SPIRV::OpTypePointer || TpOpcode == SPIRV::OpTypeEvent) {
2947 assert(Opcode == TargetOpcode::G_CONSTANT &&
2948 I.getOperand(1).getCImm()->isZero());
2949 MachineBasicBlock &DepMBB =
I.getMF()->front();
2952 }
else if (Opcode == TargetOpcode::G_FCONSTANT) {
2959 return Reg == ResVReg ?
true : BuildCOPY(ResVReg,
Reg,
I);
2962bool SPIRVInstructionSelector::selectOpUndef(
Register ResVReg,
2964 MachineInstr &
I)
const {
2965 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
2971bool SPIRVInstructionSelector::selectInsertVal(
Register ResVReg,
2973 MachineInstr &
I)
const {
2975 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeInsert))
2979 .
addUse(
I.getOperand(3).getReg())
2981 .
addUse(
I.getOperand(2).getReg());
2982 for (
unsigned i = 4; i <
I.getNumOperands(); i++)
2987bool SPIRVInstructionSelector::selectExtractVal(
Register ResVReg,
2989 MachineInstr &
I)
const {
2991 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2994 .
addUse(
I.getOperand(2).getReg());
2995 for (
unsigned i = 3; i <
I.getNumOperands(); i++)
3000bool SPIRVInstructionSelector::selectInsertElt(
Register ResVReg,
3002 MachineInstr &
I)
const {
3004 return selectInsertVal(ResVReg, ResType,
I);
3006 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorInsertDynamic))
3009 .
addUse(
I.getOperand(2).getReg())
3010 .
addUse(
I.getOperand(3).getReg())
3011 .
addUse(
I.getOperand(4).getReg())
3015bool SPIRVInstructionSelector::selectExtractElt(
Register ResVReg,
3017 MachineInstr &
I)
const {
3019 return selectExtractVal(ResVReg, ResType,
I);
3021 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorExtractDynamic))
3024 .
addUse(
I.getOperand(2).getReg())
3025 .
addUse(
I.getOperand(3).getReg())
3029bool SPIRVInstructionSelector::selectGEP(
Register ResVReg,
3031 MachineInstr &
I)
const {
3032 const bool IsGEPInBounds =
I.getOperand(2).getImm();
3038 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
3039 : SPIRV::OpAccessChain)
3040 : (IsGEPInBounds ?
SPIRV::OpInBoundsPtrAccessChain
3041 :
SPIRV::OpPtrAccessChain);
3043 auto Res =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
3047 .
addUse(
I.getOperand(3).getReg());
3049 const unsigned StartingIndex =
3050 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
3053 for (
unsigned i = StartingIndex; i <
I.getNumExplicitOperands(); ++i)
3054 Res.addUse(
I.getOperand(i).getReg());
3055 return Res.constrainAllUses(
TII,
TRI, RBI);
3059bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
3062 unsigned Lim =
I.getNumExplicitOperands();
3063 for (
unsigned i =
I.getNumExplicitDefs() + 1; i < Lim; ++i) {
3064 Register OpReg =
I.getOperand(i).getReg();
3065 MachineInstr *OpDefine =
MRI->getVRegDef(OpReg);
3067 SmallPtrSet<SPIRVType *, 4> Visited;
3068 if (!OpDefine || !OpType ||
isConstReg(
MRI, OpDefine, Visited) ||
3069 OpDefine->
getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
3076 MachineFunction *MF =
I.getMF();
3088 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3089 TII.get(SPIRV::OpSpecConstantOp))
3092 .
addImm(
static_cast<uint32_t
>(SPIRV::Opcode::Bitcast))
3094 GR.
add(OpDefine, MIB);
3102bool SPIRVInstructionSelector::selectIntrinsic(
Register ResVReg,
3104 MachineInstr &
I)
const {
3108 case Intrinsic::spv_load:
3109 return selectLoad(ResVReg, ResType,
I);
3110 case Intrinsic::spv_store:
3111 return selectStore(
I);
3112 case Intrinsic::spv_extractv:
3113 return selectExtractVal(ResVReg, ResType,
I);
3114 case Intrinsic::spv_insertv:
3115 return selectInsertVal(ResVReg, ResType,
I);
3116 case Intrinsic::spv_extractelt:
3117 return selectExtractElt(ResVReg, ResType,
I);
3118 case Intrinsic::spv_insertelt:
3119 return selectInsertElt(ResVReg, ResType,
I);
3120 case Intrinsic::spv_gep:
3121 return selectGEP(ResVReg, ResType,
I);
3122 case Intrinsic::spv_unref_global:
3123 case Intrinsic::spv_init_global: {
3124 MachineInstr *
MI =
MRI->getVRegDef(
I.getOperand(1).getReg());
3125 MachineInstr *Init =
I.getNumExplicitOperands() > 2
3126 ?
MRI->getVRegDef(
I.getOperand(2).getReg())
3129 Register GVarVReg =
MI->getOperand(0).getReg();
3130 bool Res = selectGlobalValue(GVarVReg, *
MI, Init);
3134 if (
MI->getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
3136 MI->removeFromParent();
3140 case Intrinsic::spv_undef: {
3141 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
3146 case Intrinsic::spv_const_composite: {
3148 bool IsNull =
I.getNumExplicitDefs() + 1 ==
I.getNumExplicitOperands();
3154 if (!wrapIntoSpecConstantOp(
I, CompositeArgs))
3156 MachineIRBuilder MIR(
I);
3158 MIR, SPIRV::OpConstantComposite, 3,
3159 SPIRV::OpConstantCompositeContinuedINTEL, CompositeArgs, ResVReg,
3161 for (
auto *Instr : Instructions) {
3162 Instr->setDebugLoc(
I.getDebugLoc());
3168 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
3174 case Intrinsic::spv_assign_name: {
3175 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpName));
3176 MIB.
addUse(
I.getOperand(
I.getNumExplicitDefs() + 1).getReg());
3177 for (
unsigned i =
I.getNumExplicitDefs() + 2;
3178 i <
I.getNumExplicitOperands(); ++i) {
3179 MIB.
addImm(
I.getOperand(i).getImm());
3183 case Intrinsic::spv_switch: {
3184 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSwitch));
3185 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3186 if (
I.getOperand(i).isReg())
3187 MIB.
addReg(
I.getOperand(i).getReg());
3188 else if (
I.getOperand(i).isCImm())
3189 addNumImm(
I.getOperand(i).getCImm()->getValue(), MIB);
3190 else if (
I.getOperand(i).isMBB())
3191 MIB.
addMBB(
I.getOperand(i).getMBB());
3197 case Intrinsic::spv_loop_merge: {
3198 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoopMerge));
3199 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
3200 if (
I.getOperand(i).isMBB())
3201 MIB.
addMBB(
I.getOperand(i).getMBB());
3207 case Intrinsic::spv_selection_merge: {
3209 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSelectionMerge));
3210 assert(
I.getOperand(1).isMBB() &&
3211 "operand 1 to spv_selection_merge must be a basic block");
3212 MIB.
addMBB(
I.getOperand(1).getMBB());
3213 MIB.
addImm(getSelectionOperandForImm(
I.getOperand(2).getImm()));
3216 case Intrinsic::spv_cmpxchg:
3217 return selectAtomicCmpXchg(ResVReg, ResType,
I);
3218 case Intrinsic::spv_unreachable:
3219 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUnreachable))
3221 case Intrinsic::spv_alloca:
3222 return selectFrameIndex(ResVReg, ResType,
I);
3223 case Intrinsic::spv_alloca_array:
3224 return selectAllocaArray(ResVReg, ResType,
I);
3225 case Intrinsic::spv_assume:
3227 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAssumeTrueKHR))
3228 .
addUse(
I.getOperand(1).getReg())
3231 case Intrinsic::spv_expect:
3233 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExpectKHR))
3236 .
addUse(
I.getOperand(2).getReg())
3237 .
addUse(
I.getOperand(3).getReg())
3240 case Intrinsic::arithmetic_fence:
3243 TII.get(SPIRV::OpArithmeticFenceEXT))
3246 .
addUse(
I.getOperand(2).getReg())
3249 return BuildCOPY(ResVReg,
I.getOperand(2).getReg(),
I);
3251 case Intrinsic::spv_thread_id:
3257 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
3259 case Intrinsic::spv_thread_id_in_group:
3265 return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
3267 case Intrinsic::spv_group_id:
3273 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg, ResType,
3275 case Intrinsic::spv_flattened_thread_id_in_group:
3282 return loadBuiltinInputID(SPIRV::BuiltIn::LocalInvocationIndex, ResVReg,
3284 case Intrinsic::spv_workgroup_size:
3285 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupSize, ResVReg,
3287 case Intrinsic::spv_global_size:
3288 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalSize, ResVReg, ResType,
3290 case Intrinsic::spv_global_offset:
3291 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalOffset, ResVReg,
3293 case Intrinsic::spv_num_workgroups:
3294 return loadVec3BuiltinInputID(SPIRV::BuiltIn::NumWorkgroups, ResVReg,
3296 case Intrinsic::spv_subgroup_size:
3297 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupSize, ResVReg, ResType,
3299 case Intrinsic::spv_num_subgroups:
3300 return loadBuiltinInputID(SPIRV::BuiltIn::NumSubgroups, ResVReg, ResType,
3302 case Intrinsic::spv_subgroup_id:
3303 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupId, ResVReg, ResType,
I);
3304 case Intrinsic::spv_subgroup_local_invocation_id:
3305 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupLocalInvocationId,
3306 ResVReg, ResType,
I);
3307 case Intrinsic::spv_subgroup_max_size:
3308 return loadBuiltinInputID(SPIRV::BuiltIn::SubgroupMaxSize, ResVReg, ResType,
3310 case Intrinsic::spv_fdot:
3311 return selectFloatDot(ResVReg, ResType,
I);
3312 case Intrinsic::spv_udot:
3313 case Intrinsic::spv_sdot:
3314 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3316 return selectIntegerDot(ResVReg, ResType,
I,
3317 IID == Intrinsic::spv_sdot);
3318 return selectIntegerDotExpansion(ResVReg, ResType,
I);
3319 case Intrinsic::spv_dot4add_i8packed:
3320 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3322 return selectDot4AddPacked<true>(ResVReg, ResType,
I);
3323 return selectDot4AddPackedExpansion<true>(ResVReg, ResType,
I);
3324 case Intrinsic::spv_dot4add_u8packed:
3325 if (STI.
canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
3327 return selectDot4AddPacked<false>(ResVReg, ResType,
I);
3328 return selectDot4AddPackedExpansion<false>(ResVReg, ResType,
I);
3329 case Intrinsic::spv_all:
3330 return selectAll(ResVReg, ResType,
I);
3331 case Intrinsic::spv_any:
3332 return selectAny(ResVReg, ResType,
I);
3333 case Intrinsic::spv_cross:
3334 return selectExtInst(ResVReg, ResType,
I, CL::cross, GL::Cross);
3335 case Intrinsic::spv_distance:
3336 return selectExtInst(ResVReg, ResType,
I, CL::distance, GL::Distance);
3337 case Intrinsic::spv_lerp:
3338 return selectExtInst(ResVReg, ResType,
I, CL::mix, GL::FMix);
3339 case Intrinsic::spv_length:
3340 return selectExtInst(ResVReg, ResType,
I, CL::length, GL::Length);
3341 case Intrinsic::spv_degrees:
3342 return selectExtInst(ResVReg, ResType,
I, CL::degrees, GL::Degrees);
3343 case Intrinsic::spv_faceforward:
3344 return selectExtInst(ResVReg, ResType,
I, GL::FaceForward);
3345 case Intrinsic::spv_frac:
3346 return selectExtInst(ResVReg, ResType,
I, CL::fract, GL::Fract);
3347 case Intrinsic::spv_isinf:
3348 return selectOpIsInf(ResVReg, ResType,
I);
3349 case Intrinsic::spv_isnan:
3350 return selectOpIsNan(ResVReg, ResType,
I);
3351 case Intrinsic::spv_normalize:
3352 return selectExtInst(ResVReg, ResType,
I, CL::normalize, GL::Normalize);
3353 case Intrinsic::spv_refract:
3354 return selectExtInst(ResVReg, ResType,
I, GL::Refract);
3355 case Intrinsic::spv_reflect:
3356 return selectExtInst(ResVReg, ResType,
I, GL::Reflect);
3357 case Intrinsic::spv_rsqrt:
3358 return selectExtInst(ResVReg, ResType,
I, CL::rsqrt, GL::InverseSqrt);
3359 case Intrinsic::spv_sign:
3360 return selectSign(ResVReg, ResType,
I);
3361 case Intrinsic::spv_smoothstep:
3362 return selectExtInst(ResVReg, ResType,
I, CL::smoothstep, GL::SmoothStep);
3363 case Intrinsic::spv_firstbituhigh:
3364 return selectFirstBitHigh(ResVReg, ResType,
I,
false);
3365 case Intrinsic::spv_firstbitshigh:
3366 return selectFirstBitHigh(ResVReg, ResType,
I,
true);
3367 case Intrinsic::spv_firstbitlow:
3368 return selectFirstBitLow(ResVReg, ResType,
I);
3369 case Intrinsic::spv_group_memory_barrier_with_group_sync: {
3371 auto MemSemConstant =
3372 buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent,
I);
3373 Register MemSemReg = MemSemConstant.first;
3374 Result &= MemSemConstant.second;
3375 auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup,
I);
3376 Register ScopeReg = ScopeConstant.first;
3377 Result &= ScopeConstant.second;
3380 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpControlBarrier))
3386 case Intrinsic::spv_generic_cast_to_ptr_explicit: {
3387 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 1).getReg();
3388 SPIRV::StorageClass::StorageClass ResSC =
3392 "Generic storage class");
3394 TII.get(SPIRV::OpGenericCastToPtrExplicit))
3401 case Intrinsic::spv_lifetime_start:
3402 case Intrinsic::spv_lifetime_end: {
3403 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
3404 : SPIRV::OpLifetimeStop;
3405 int64_t
Size =
I.getOperand(
I.getNumExplicitDefs() + 1).getImm();
3406 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 2).getReg();
3414 case Intrinsic::spv_saturate:
3415 return selectSaturate(ResVReg, ResType,
I);
3416 case Intrinsic::spv_nclamp:
3417 return selectExtInst(ResVReg, ResType,
I, CL::fclamp, GL::NClamp);
3418 case Intrinsic::spv_uclamp:
3419 return selectExtInst(ResVReg, ResType,
I, CL::u_clamp, GL::UClamp);
3420 case Intrinsic::spv_sclamp:
3421 return selectExtInst(ResVReg, ResType,
I, CL::s_clamp, GL::SClamp);
3422 case Intrinsic::spv_wave_active_countbits:
3423 return selectWaveActiveCountBits(ResVReg, ResType,
I);
3424 case Intrinsic::spv_wave_all:
3425 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAll);
3426 case Intrinsic::spv_wave_any:
3427 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAny);
3428 case Intrinsic::spv_wave_is_first_lane:
3429 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformElect);
3430 case Intrinsic::spv_wave_reduce_umax:
3431 return selectWaveReduceMax(ResVReg, ResType,
I,
true);
3432 case Intrinsic::spv_wave_reduce_max:
3433 return selectWaveReduceMax(ResVReg, ResType,
I,
false);
3434 case Intrinsic::spv_wave_reduce_sum:
3435 return selectWaveReduceSum(ResVReg, ResType,
I);
3436 case Intrinsic::spv_wave_readlane:
3437 return selectWaveOpInst(ResVReg, ResType,
I,
3438 SPIRV::OpGroupNonUniformShuffle);
3439 case Intrinsic::spv_step:
3440 return selectExtInst(ResVReg, ResType,
I, CL::step, GL::Step);
3441 case Intrinsic::spv_radians:
3442 return selectExtInst(ResVReg, ResType,
I, CL::radians, GL::Radians);
3446 case Intrinsic::instrprof_increment:
3447 case Intrinsic::instrprof_increment_step:
3448 case Intrinsic::instrprof_value_profile:
3451 case Intrinsic::spv_value_md:
3453 case Intrinsic::spv_resource_handlefrombinding: {
3454 return selectHandleFromBinding(ResVReg, ResType,
I);
3456 case Intrinsic::spv_resource_counterhandlefrombinding:
3457 return selectCounterHandleFromBinding(ResVReg, ResType,
I);
3458 case Intrinsic::spv_resource_updatecounter:
3459 return selectUpdateCounter(ResVReg, ResType,
I);
3460 case Intrinsic::spv_resource_store_typedbuffer: {
3461 return selectImageWriteIntrinsic(
I);
3463 case Intrinsic::spv_resource_load_typedbuffer: {
3464 return selectReadImageIntrinsic(ResVReg, ResType,
I);
3466 case Intrinsic::spv_resource_getpointer: {
3467 return selectResourceGetPointer(ResVReg, ResType,
I);
3469 case Intrinsic::spv_discard: {
3470 return selectDiscard(ResVReg, ResType,
I);
3472 case Intrinsic::spv_resource_nonuniformindex: {
3473 return selectResourceNonUniformIndex(ResVReg, ResType,
I);
3476 std::string DiagMsg;
3477 raw_string_ostream OS(DiagMsg);
3479 DiagMsg =
"Intrinsic selection not implemented: " + DiagMsg;
3486bool SPIRVInstructionSelector::selectHandleFromBinding(
Register &ResVReg,
3488 MachineInstr &
I)
const {
3491 if (ResType->
getOpcode() == SPIRV::OpTypeImage)
3498bool SPIRVInstructionSelector::selectCounterHandleFromBinding(
3501 assert(Intr.getIntrinsicID() ==
3502 Intrinsic::spv_resource_counterhandlefrombinding);
3505 Register MainHandleReg = Intr.getOperand(2).getReg();
3507 assert(MainHandleDef->getIntrinsicID() ==
3508 Intrinsic::spv_resource_handlefrombinding);
3512 uint32_t ArraySize =
getIConstVal(MainHandleDef->getOperand(4).getReg(),
MRI);
3513 Register IndexReg = MainHandleDef->getOperand(5).getReg();
3514 std::string CounterName =
3519 MachineIRBuilder MIRBuilder(
I);
3520 Register CounterVarReg = buildPointerToResource(
3522 Binding, ArraySize, IndexReg, CounterName, MIRBuilder);
3524 return BuildCOPY(ResVReg, CounterVarReg,
I);
3527bool SPIRVInstructionSelector::selectUpdateCounter(
Register &ResVReg,
3529 MachineInstr &
I)
const {
3531 assert(Intr.getIntrinsicID() == Intrinsic::spv_resource_updatecounter);
3533 Register CounterHandleReg = Intr.getOperand(2).getReg();
3534 Register IncrReg = Intr.getOperand(3).getReg();
3542 assert(CounterVarPointeeType &&
3543 CounterVarPointeeType->
getOpcode() == SPIRV::OpTypeStruct &&
3544 "Counter variable must be a struct");
3546 SPIRV::StorageClass::StorageBuffer &&
3547 "Counter variable must be in the storage buffer storage class");
3549 "Counter variable must have exactly 1 member in the struct");
3553 "Counter variable struct must have a single i32 member");
3557 MachineIRBuilder MIRBuilder(
I);
3559 Type::getInt32Ty(
I.getMF()->getFunction().getContext());
3562 LLVMIntType, MIRBuilder, SPIRV::StorageClass::StorageBuffer);
3564 auto Zero = buildI32Constant(0,
I);
3570 if (!
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3571 TII.get(SPIRV::OpAccessChain))
3574 .
addUse(CounterHandleReg)
3582 auto Scope = buildI32Constant(SPIRV::Scope::Device,
I);
3585 auto Semantics = buildI32Constant(SPIRV::MemorySemantics::None,
I);
3586 if (!Semantics.second)
3590 auto Incr = buildI32Constant(
static_cast<uint32_t
>(IncrVal),
I);
3595 if (!
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAtomicIAdd))
3606 return BuildCOPY(ResVReg, AtomicRes,
I);
3614 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpIAddS))
3621bool SPIRVInstructionSelector::selectReadImageIntrinsic(
3630 Register ImageReg =
I.getOperand(2).getReg();
3632 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
3638 Register IdxReg =
I.getOperand(3).getReg();
3640 MachineInstr &Pos =
I;
3642 return generateImageRead(ResVReg, ResType, NewImageReg, IdxReg, Loc, Pos);
3645bool SPIRVInstructionSelector::generateImageRead(
Register &ResVReg,
3649 MachineInstr &Pos)
const {
3652 "ImageReg is not an image type.");
3653 bool IsSignedInteger =
3657 if (ResultSize == 4) {
3664 if (IsSignedInteger)
3669 SPIRVType *ReadType = widenTypeToVec4(ResType, Pos);
3676 if (IsSignedInteger)
3682 if (ResultSize == 1) {
3684 TII.get(SPIRV::OpCompositeExtract))
3691 return extractSubvector(ResVReg, ResType, ReadReg, Pos);
3694bool SPIRVInstructionSelector::selectResourceGetPointer(
3696 Register ResourcePtr =
I.getOperand(2).getReg();
3698 if (RegType->
getOpcode() == SPIRV::OpTypeImage) {
3707 MachineIRBuilder MIRBuilder(
I);
3709 Register IndexReg =
I.getOperand(3).getReg();
3712 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3713 TII.get(SPIRV::OpAccessChain))
3722bool SPIRVInstructionSelector::selectResourceNonUniformIndex(
3724 Register ObjReg =
I.getOperand(2).getReg();
3725 if (!BuildCOPY(ResVReg, ObjReg,
I))
3735 decorateUsesAsNonUniform(ResVReg);
3739void SPIRVInstructionSelector::decorateUsesAsNonUniform(
3742 while (WorkList.
size() > 0) {
3746 bool IsDecorated =
false;
3747 for (MachineInstr &Use :
MRI->use_instructions(CurrentReg)) {
3748 if (
Use.getOpcode() == SPIRV::OpDecorate &&
3749 Use.getOperand(1).getImm() == SPIRV::Decoration::NonUniformEXT) {
3755 if (
Use.getOperand(0).isReg() &&
Use.getOperand(0).isDef()) {
3757 if (ResultReg == CurrentReg)
3765 SPIRV::Decoration::NonUniformEXT, {});
3771bool SPIRVInstructionSelector::extractSubvector(
3773 MachineInstr &InsertionPoint)
const {
3775 [[maybe_unused]] uint64_t InputSize =
3778 assert(InputSize > 1 &&
"The input must be a vector.");
3779 assert(ResultSize > 1 &&
"The result must be a vector.");
3780 assert(ResultSize < InputSize &&
3781 "Cannot extract more element than there are in the input.");
3784 const TargetRegisterClass *ScalarRegClass = GR.
getRegClass(ScalarType);
3785 for (uint64_t
I = 0;
I < ResultSize;
I++) {
3786 Register ComponentReg =
MRI->createVirtualRegister(ScalarRegClass);
3789 TII.get(SPIRV::OpCompositeExtract))
3800 MachineInstrBuilder MIB =
BuildMI(*InsertionPoint.
getParent(), InsertionPoint,
3802 TII.get(SPIRV::OpCompositeConstruct))
3806 for (
Register ComponentReg : ComponentRegisters)
3807 MIB.
addUse(ComponentReg);
3811bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
3812 MachineInstr &
I)
const {
3819 Register ImageReg =
I.getOperand(1).getReg();
3821 Register NewImageReg =
MRI->createVirtualRegister(
MRI->getRegClass(ImageReg));
3827 Register CoordinateReg =
I.getOperand(2).getReg();
3828 Register DataReg =
I.getOperand(3).getReg();
3831 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3832 TII.get(SPIRV::OpImageWrite))
3839Register SPIRVInstructionSelector::buildPointerToResource(
3840 const SPIRVType *SpirvResType, SPIRV::StorageClass::StorageClass SC,
3841 uint32_t Set, uint32_t
Binding, uint32_t ArraySize,
Register IndexReg,
3842 StringRef Name, MachineIRBuilder MIRBuilder)
const {
3844 if (ArraySize == 1) {
3848 "SpirvResType did not have an explicit layout.");
3853 const Type *VarType = ArrayType::get(
const_cast<Type *
>(ResType), ArraySize);
3857 VarPointerType, Set,
Binding, Name, MIRBuilder);
3872bool SPIRVInstructionSelector::selectFirstBitSet16(
3874 unsigned ExtendOpcode,
unsigned BitSetOpcode)
const {
3876 bool Result = selectOpWithSrcs(ExtReg, ResType,
I, {
I.getOperand(2).getReg()},
3880 selectFirstBitSet32(ResVReg, ResType,
I, ExtReg, BitSetOpcode);
3883bool SPIRVInstructionSelector::selectFirstBitSet32(
3885 Register SrcReg,
unsigned BitSetOpcode)
const {
3886 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
3889 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
3895bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
3897 Register SrcReg,
unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
3904 assert(ComponentCount < 5 &&
"Vec 5+ will generate invalid SPIR-V ops");
3906 MachineIRBuilder MIRBuilder(
I);
3914 std::vector<Register> PartialRegs;
3917 unsigned CurrentComponent = 0;
3918 for (; CurrentComponent + 1 < ComponentCount; CurrentComponent += 2) {
3924 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3925 TII.get(SPIRV::OpVectorShuffle))
3930 .
addImm(CurrentComponent)
3931 .
addImm(CurrentComponent + 1);
3939 if (!selectFirstBitSet64(SubVecBitSetReg, Vec2ResType,
I, BitSetResult,
3940 BitSetOpcode, SwapPrimarySide))
3943 PartialRegs.push_back(SubVecBitSetReg);
3947 if (CurrentComponent != ComponentCount) {
3953 if (!selectOpWithSrcs(FinalElemReg, I64Type,
I, {SrcReg, ConstIntLastIdx},
3954 SPIRV::OpVectorExtractDynamic))
3960 if (!selectFirstBitSet64(FinalElemBitSetReg,
BaseType,
I, FinalElemReg,
3961 BitSetOpcode, SwapPrimarySide))
3964 PartialRegs.push_back(FinalElemBitSetReg);
3969 return selectOpWithSrcs(ResVReg, ResType,
I, std::move(PartialRegs),
3970 SPIRV::OpCompositeConstruct);
3973bool SPIRVInstructionSelector::selectFirstBitSet64(
3975 Register SrcReg,
unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
3988 if (ComponentCount > 2) {
3989 return selectFirstBitSet64Overflow(ResVReg, ResType,
I, SrcReg,
3990 BitSetOpcode, SwapPrimarySide);
3994 MachineIRBuilder MIRBuilder(
I);
3996 BaseType, 2 * ComponentCount, MIRBuilder,
false);
4000 if (!selectOpWithSrcs(BitcastReg, PostCastType,
I, {SrcReg},
4006 if (!selectFirstBitSet32(FBSReg, PostCastType,
I, BitcastReg, BitSetOpcode))
4013 bool IsScalarRes = ResType->
getOpcode() != SPIRV::OpTypeVector;
4016 if (!selectOpWithSrcs(HighReg, ResType,
I, {FBSReg, ConstIntZero},
4017 SPIRV::OpVectorExtractDynamic))
4019 if (!selectOpWithSrcs(LowReg, ResType,
I, {FBSReg, ConstIntOne},
4020 SPIRV::OpVectorExtractDynamic))
4024 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
4025 TII.get(SPIRV::OpVectorShuffle))
4033 for (
unsigned J = 0; J < ComponentCount * 2; J += 2) {
4040 MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
4041 TII.get(SPIRV::OpVectorShuffle))
4049 for (
unsigned J = 1; J < ComponentCount * 2; J += 2) {
4070 SelectOp = SPIRV::OpSelectSISCond;
4071 AddOp = SPIRV::OpIAddS;
4079 SelectOp = SPIRV::OpSelectVIVCond;
4080 AddOp = SPIRV::OpIAddV;
4090 if (SwapPrimarySide) {
4091 PrimaryReg = LowReg;
4092 SecondaryReg = HighReg;
4093 PrimaryShiftReg = Reg0;
4094 SecondaryShiftReg = Reg32;
4099 if (!selectOpWithSrcs(BReg, BoolType,
I, {PrimaryReg, NegOneReg},
4105 if (!selectOpWithSrcs(TmpReg, ResType,
I, {
BReg, SecondaryReg, PrimaryReg},
4111 if (!selectOpWithSrcs(ValReg, ResType,
I,
4112 {
BReg, SecondaryShiftReg, PrimaryShiftReg}, SelectOp))
4115 return selectOpWithSrcs(ResVReg, ResType,
I, {ValReg, TmpReg}, AddOp);
4118bool SPIRVInstructionSelector::selectFirstBitHigh(
Register ResVReg,
4121 bool IsSigned)
const {
4123 Register OpReg =
I.getOperand(2).getReg();
4126 unsigned ExtendOpcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
4127 unsigned BitSetOpcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;
4131 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
4133 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
4135 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
4139 "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
4143bool SPIRVInstructionSelector::selectFirstBitLow(
Register ResVReg,
4145 MachineInstr &
I)
const {
4147 Register OpReg =
I.getOperand(2).getReg();
4152 unsigned ExtendOpcode = SPIRV::OpUConvert;
4153 unsigned BitSetOpcode = GL::FindILsb;
4157 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
4159 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
4161 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
4168bool SPIRVInstructionSelector::selectAllocaArray(
Register ResVReg,
4170 MachineInstr &
I)
const {
4174 bool Res =
BuildMI(BB,
I,
I.getDebugLoc(),
4175 TII.get(SPIRV::OpVariableLengthArrayINTEL))
4178 .
addUse(
I.getOperand(2).getReg())
4181 unsigned Alignment =
I.getOperand(3).getImm();
4187bool SPIRVInstructionSelector::selectFrameIndex(
Register ResVReg,
4189 MachineInstr &
I)
const {
4193 bool Res =
BuildMI(*It->getParent(), It, It->getDebugLoc(),
4194 TII.get(SPIRV::OpVariable))
4197 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function))
4200 unsigned Alignment =
I.getOperand(2).getImm();
4207bool SPIRVInstructionSelector::selectBranch(MachineInstr &
I)
const {
4212 const MachineInstr *PrevI =
I.getPrevNode();
4214 if (PrevI !=
nullptr && PrevI->
getOpcode() == TargetOpcode::G_BRCOND) {
4215 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
4218 .
addMBB(
I.getOperand(0).getMBB())
4222 .
addMBB(
I.getOperand(0).getMBB())
4226bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &
I)
const {
4237 const MachineInstr *NextI =
I.getNextNode();
4239 if (NextI !=
nullptr && NextI->
getOpcode() == SPIRV::OpBranchConditional)
4245 MachineBasicBlock *NextMBB =
I.getMF()->getBlockNumbered(NextMBBNum);
4246 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
4247 .
addUse(
I.getOperand(0).getReg())
4248 .
addMBB(
I.getOperand(1).getMBB())
4253bool SPIRVInstructionSelector::selectPhi(
Register ResVReg,
4255 MachineInstr &
I)
const {
4256 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpPhi))
4259 const unsigned NumOps =
I.getNumOperands();
4260 for (
unsigned i = 1; i <
NumOps; i += 2) {
4261 MIB.
addUse(
I.getOperand(i + 0).getReg());
4262 MIB.
addMBB(
I.getOperand(i + 1).getMBB());
4270bool SPIRVInstructionSelector::selectGlobalValue(
4271 Register ResVReg, MachineInstr &
I,
const MachineInstr *Init)
const {
4273 MachineIRBuilder MIRBuilder(
I);
4274 const GlobalValue *GV =
I.
getOperand(1).getGlobal();
4277 std::string GlobalIdent;
4279 unsigned &
ID = UnnamedGlobalIDs[GV];
4281 ID = UnnamedGlobalIDs.size();
4282 GlobalIdent =
"__unnamed_" + Twine(
ID).str();
4309 GVFun ? SPIRV::StorageClass::CodeSectionINTEL
4316 MachineRegisterInfo *
MRI = MIRBuilder.
getMRI();
4319 MRI->setRegClass(FuncVReg, &SPIRV::pIDRegClass);
4320 MachineInstrBuilder MIB1 =
4321 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
4324 MachineInstrBuilder MIB2 =
4326 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
4330 GR.
add(ConstVal, MIB2);
4336 MachineInstrBuilder MIB3 =
4337 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
4340 GR.
add(ConstVal, MIB3);
4343 assert(NewReg != ResVReg);
4344 return BuildCOPY(ResVReg, NewReg,
I);
4356 SPIRV::LinkageType::LinkageType LnkType =
4358 ? SPIRV::LinkageType::Import
4361 ? SPIRV::LinkageType::LinkOnceODR
4362 : SPIRV::LinkageType::Export);
4370 GlobalVar->isConstant(), HasLnkTy, LnkType, MIRBuilder,
true);
4374bool SPIRVInstructionSelector::selectLog10(
Register ResVReg,
4376 MachineInstr &
I)
const {
4378 return selectExtInst(ResVReg, ResType,
I, CL::log10);
4386 MachineIRBuilder MIRBuilder(
I);
4392 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
4395 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::GLSL_std_450))
4397 .
add(
I.getOperand(1))
4402 ResType->
getOpcode() == SPIRV::OpTypeFloat);
4405 ResType->
getOpcode() == SPIRV::OpTypeVector
4412 auto Opcode = ResType->
getOpcode() == SPIRV::OpTypeVector
4413 ? SPIRV::OpVectorTimesScalar
4423bool SPIRVInstructionSelector::selectModf(
Register ResVReg,
4425 MachineInstr &
I)
const {
4441 MachineIRBuilder MIRBuilder(
I);
4444 ResType, MIRBuilder, SPIRV::StorageClass::Function);
4456 MachineBasicBlock &EntryBB =
I.getMF()->front();
4460 BuildMI(EntryBB, VarPos,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
4463 .
addImm(
static_cast<uint32_t
>(SPIRV::StorageClass::Function));
4469 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
4472 .
addImm(
static_cast<uint32_t
>(SPIRV::InstructionSet::OpenCL_std))
4475 .
add(
I.getOperand(
I.getNumExplicitDefs()))
4479 Register IntegralPartReg =
I.getOperand(1).getReg();
4480 if (IntegralPartReg.
isValid()) {
4482 auto LoadMIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4491 assert(
false &&
"GLSL::Modf is deprecated.");
4502bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
4503 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
4504 const SPIRVType *ResType, MachineInstr &
I)
const {
4505 MachineIRBuilder MIRBuilder(
I);
4509 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
4521 SPIRV::StorageClass::Input,
nullptr,
true,
false,
4522 SPIRV::LinkageType::Import, MIRBuilder,
false);
4525 MachineRegisterInfo *
MRI = MIRBuilder.
getMRI();
4526 Register LoadedRegister =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
4532 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4539 assert(
I.getOperand(2).isReg());
4540 const uint32_t ThreadId =
foldImm(
I.getOperand(2),
MRI);
4544 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
4554bool SPIRVInstructionSelector::loadBuiltinInputID(
4555 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
4556 const SPIRVType *ResType, MachineInstr &
I)
const {
4557 MachineIRBuilder MIRBuilder(
I);
4559 ResType, MIRBuilder, SPIRV::StorageClass::Input);
4574 SPIRV::StorageClass::Input,
nullptr,
true,
false,
4575 SPIRV::LinkageType::Import, MIRBuilder,
false);
4578 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
4587 MachineInstr &
I)
const {
4588 MachineIRBuilder MIRBuilder(
I);
4589 if (
Type->getOpcode() != SPIRV::OpTypeVector)
4593 if (VectorSize == 4)
4601bool SPIRVInstructionSelector::loadHandleBeforePosition(
4603 MachineInstr &Pos)
const {
4606 Intrinsic::spv_resource_handlefrombinding);
4614 bool IsStructuredBuffer = ResType->
getOpcode() == SPIRV::OpTypePointer;
4615 MachineIRBuilder MIRBuilder(HandleDef);
4617 SPIRV::StorageClass::StorageClass SC = SPIRV::StorageClass::UniformConstant;
4619 if (IsStructuredBuffer) {
4624 Register VarReg = buildPointerToResource(VarType, SC, Set,
Binding, ArraySize,
4625 IndexReg, Name, MIRBuilder);
4629 uint32_t LoadOpcode =
4630 IsStructuredBuffer ? SPIRV::OpCopyObject : SPIRV::OpLoad;
4633 TII.get(LoadOpcode))
4641InstructionSelector *
4645 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
DXIL Resource Implicit Binding
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static StringRef getName(Value *V)
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static APFloat getOneFP(const Type *LLVMFloatTy)
static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC)
static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg)
static bool mayApplyGenericSelection(unsigned Opcode)
static APFloat getZeroFP(const Type *LLVMFloatTy)
std::vector< std::pair< SPIRV::InstructionSet::InstructionSet, uint32_t > > ExtInstList
static unsigned getBoolCmpOpcode(unsigned PredNum)
static unsigned getICmpOpcode(unsigned PredNum)
static void addMemoryOperands(MachineMemOperand *MemOp, MachineInstrBuilder &MIB, MachineIRBuilder &MIRBuilder, SPIRVGlobalRegistry &GR)
static bool isConstReg(MachineRegisterInfo *MRI, MachineInstr *OpDef, SmallPtrSet< SPIRVType *, 4 > &Visited)
static unsigned getPtrCmpOpcode(unsigned Pred)
bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
BaseType
A given derived pointer can have multiple base pointers through phi/selects.
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
Represents a call to an intrinsic.
Intrinsic::ID getIntrinsicID() const
bool hasPrivateLinkage() const
bool hasHiddenVisibility() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
bool hasInternalLinkage() const
bool hasLinkOnceODRLinkage() const
@ InternalLinkage
Rename collisions when linking (static functions).
static LLVM_ABI IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
LLVM_ABI iterator getFirstNonPHI()
Returns a pointer to the first instruction in this block that is not a PHINode instruction.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Helper class to build MachineInstr.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
LLVM_ABI unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
@ MOVolatile
The memory access is volatile.
@ MONonTemporal
The memory access is non-temporal.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
defusechain_instr_iterator< true, false, false, true > use_instr_iterator
use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the specified register,...
defusechain_instr_iterator< false, true, false, true > def_instr_iterator
def_instr_iterator/def_instr_begin/def_instr_end - Walk all defs of the specified register,...
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Analysis providing profile information.
Holds all the information related to register banks.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SPIRVType * getSPIRVTypeForVReg(Register VReg, const MachineFunction *MF=nullptr) const
Register getOrCreateConstInt(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
SPIRVType * getResultType(Register VReg, MachineFunction *MF=nullptr)
SPIRVType * getOrCreateSPIRVBoolType(MachineIRBuilder &MIRBuilder, bool EmitIR)
MachineInstr * getOrAddMemAliasingINTELInst(MachineIRBuilder &MIRBuilder, const MDNode *AliasingListMD)
void assignSPIRVTypeToVReg(SPIRVType *Type, Register VReg, const MachineFunction &MF)
Register getOrCreateUndef(MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
SPIRVType * changePointerStorageClass(SPIRVType *PtrType, SPIRV::StorageClass::StorageClass SC, MachineInstr &I)
const Type * getTypeForSPIRVType(const SPIRVType *Ty) const
bool isBitcastCompatible(const SPIRVType *Type1, const SPIRVType *Type2) const
unsigned getScalarOrVectorComponentCount(Register VReg) const
bool isScalarOrVectorSigned(const SPIRVType *Type) const
Register getOrCreateGlobalVariableWithBinding(const SPIRVType *VarType, uint32_t Set, uint32_t Binding, StringRef Name, MachineIRBuilder &MIRBuilder)
SPIRVType * getOrCreateSPIRVType(const Type *Type, MachineInstr &I, SPIRV::AccessQualifier::AccessQualifier AQ, bool EmitIR)
unsigned getPointerSize() const
SPIRVType * getOrCreateSPIRVPointerType(const Type *BaseType, MachineIRBuilder &MIRBuilder, SPIRV::StorageClass::StorageClass SC)
Register buildConstantFP(APFloat Val, MachineIRBuilder &MIRBuilder, SPIRVType *SpvType=nullptr)
SPIRVType * getPointeeType(SPIRVType *PtrType)
void invalidateMachineInstr(MachineInstr *MI)
Register getSPIRVTypeID(const SPIRVType *SpirvType) const
bool isScalarOfType(Register VReg, unsigned TypeOpcode) const
Register buildGlobalVariable(Register Reg, SPIRVType *BaseType, StringRef Name, const GlobalValue *GV, SPIRV::StorageClass::StorageClass Storage, const MachineInstr *Init, bool IsConst, bool HasLinkageTy, SPIRV::LinkageType::LinkageType LinkageType, MachineIRBuilder &MIRBuilder, bool IsInstSelector)
bool findValueAttrs(const MachineInstr *Key, Type *&Ty, StringRef &Name)
void addGlobalObject(const Value *V, const MachineFunction *MF, Register R)
SPIRVType * getScalarOrVectorComponentType(Register VReg) const
void recordFunctionPointer(const MachineOperand *MO, const Function *F)
bool isAggregateType(SPIRVType *Type) const
const TargetRegisterClass * getRegClass(SPIRVType *SpvType) const
SPIRVType * getOrCreateSPIRVVectorType(SPIRVType *BaseType, unsigned NumElements, MachineIRBuilder &MIRBuilder, bool EmitIR)
bool isScalarOrVectorOfType(Register VReg, unsigned TypeOpcode) const
Register getOrCreateConstIntArray(uint64_t Val, size_t Num, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII)
MachineFunction * setCurrentFunc(MachineFunction &MF)
Register getOrCreateConstVector(uint64_t Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
SPIRVType * getOrCreateSPIRVIntegerType(unsigned BitWidth, MachineIRBuilder &MIRBuilder)
Type * getDeducedGlobalValueType(const GlobalValue *Global)
LLT getRegType(SPIRVType *SpvType) const
SPIRV::StorageClass::StorageClass getPointerStorageClass(Register VReg) const
Register getOrCreateConstFP(APFloat Val, MachineInstr &I, SPIRVType *SpvType, const SPIRVInstrInfo &TII, bool ZeroAsNull=true)
Register getOrCreateConstNullPtr(MachineIRBuilder &MIRBuilder, SPIRVType *SpvType)
unsigned getScalarOrVectorBitWidth(const SPIRVType *Type) const
const SPIRVType * retrieveScalarOrVectorIntType(const SPIRVType *Type) const
bool erase(const MachineInstr *MI)
bool add(SPIRV::IRHandle Handle, const MachineInstr *MI)
Register find(SPIRV::IRHandle Handle, const MachineFunction *MF)
bool isPhysicalSPIRV() const
bool isAtLeastSPIRVVer(VersionTuple VerToCompareTo) const
bool canUseExtInstSet(SPIRV::InstructionSet::InstructionSet E) const
bool isLogicalSPIRV() const
bool canUseExtension(SPIRV::Extension::Extension E) const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
static LLVM_ABI StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
The instances of the Type class are immutable: once they are created, they are never changed.
@ HalfTyID
16-bit floating point type
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
bool isStructTy() const
True if this is an instance of StructType.
TypeID getTypeID() const
Return the type id for the type.
Value * getOperand(unsigned i) const
LLVM_ABI StringRef getName() const
Return a constant reference to the value's name.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
LLVM_C_ABI LLVMTypeRef LLVMIntType(unsigned NumBits)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface o...
NodeAddr< DefNode * > Def
NodeAddr< InstrNode * > Instr
NodeAddr< UseNode * > Use
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
int64_t getIConstValSext(Register ConstReg, const MachineRegisterInfo *MRI)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
bool isTypeFoldingSupported(unsigned Opcode)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
LLVM_ABI void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing the effect of MI in a DIExpression.
LLVM_ABI bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands to the instruction's register class.
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
unsigned getArrayComponentCount(const MachineRegisterInfo *MRI, const MachineInstr *ResType)
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SmallVector< MachineInstr *, 4 > createContinuedInstructions(MachineIRBuilder &MIRBuilder, unsigned Opcode, unsigned MinWC, unsigned ContinuedOpcode, ArrayRef< Register > Args, Register ReturnRegister, Register TypeID)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
MachineBasicBlock::iterator getFirstValidInstructionInsertPoint(MachineBasicBlock &BB)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I)
Register createVirtualRegister(SPIRVType *SpvType, SPIRVGlobalRegistry *GR, MachineRegisterInfo *MRI, const MachineFunction &MF)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
Type * toTypedPointer(Type *Ty)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
const MachineInstr SPIRVType
constexpr bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector&lt;T, 0&gt;).
MachineInstr * passCopy(MachineInstr *Def, const MachineRegisterInfo *MRI)
bool isa(const From &Val)
isa&lt;X&gt; - Return true if the parameter to the template is an instance of one of the template type arguments.
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
SPIRV::StorageClass::StorageClass addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI)
AtomicOrdering
Atomic ordering for LLVM's memory model.
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
InstructionSelector * createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget, const RegisterBankInfo &RBI)
std::string getStringValueFromReg(Register Reg, MachineRegisterInfo &MRI)
int64_t foldImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
DWARFExpression::Operation Op
constexpr unsigned BitWidth
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
bool hasInitializer(const GlobalVariable *GV)
MachineInstr * getVRegDef(MachineRegisterInfo &MRI, Register Reg)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
LLVM_ABI bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have other side effects.
static LLVM_ABI const fltSemantics & IEEEsingle() LLVM_READNONE
static LLVM_ABI const fltSemantics & IEEEdouble() LLVM_READNONE
static LLVM_ABI const fltSemantics & IEEEhalf() LLVM_READNONE