#include "llvm/IR/IntrinsicsSPIRV.h"

#define DEBUG_TYPE "spirv-isel"

namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
llvm::SPIRV::SelectionControl::SelectionControl
getSelectionOperandForImm(int Imm) {
  if (Imm == 2)
    return SPIRV::SelectionControl::Flatten;
  if (Imm == 1)
    return SPIRV::SelectionControl::DontFlatten;
  if (Imm == 0)
    return SPIRV::SelectionControl::None;
  llvm_unreachable("Invalid immediate");
}
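// A minimal sketch (assumed, not verbatim from this file) of how the immediate
// reaches the operand emitted above, via the structurizer's merge intrinsic:
//   llvm.spv.selection.merge(..., i32 2) -> SelectionControl::Flatten
//   llvm.spv.selection.merge(..., i32 1) -> SelectionControl::DontFlatten
//   llvm.spv.selection.merge(..., i32 0) -> SelectionControl::None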
#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
                           unsigned BitSetOpcode) const;

                           unsigned BitSetOpcode) const;

                           unsigned BitSetOpcode,
                           bool SwapPrimarySide) const;

                           unsigned BitSetOpcode,
                           bool SwapPrimarySide) const;

                           unsigned Opcode) const;

                           unsigned Opcode) const;

                           unsigned NegateOpcode = 0) const;
  template <bool Signed>
  bool selectDot4AddPacked(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;
  template <bool Signed>
  bool selectDot4AddPackedExpansion(Register ResVReg, const SPIRVType *ResType,
                                    MachineInstr &I) const;

                 bool IsSigned) const;

                 bool IsSigned, unsigned Opcode) const;

                 bool IsSigned) const;

                 bool IsSigned) const;
  [[maybe_unused]] bool selectExtInst(Register ResVReg,
                                      const SPIRVType *ResType,
                                      MachineInstr &I,
                                      GL::GLSLExtInst GLInst) const;

                     GL::GLSLExtInst GLInst) const;

  std::pair<Register, bool>
  buildI32Constant(uint32_t Val, MachineInstr &I,
                   const SPIRVType *ResType = nullptr) const;

  Register getUcharPtrTypeReg(MachineInstr &I,
                              SPIRV::StorageClass::StorageClass SC) const;
                              Register IndexReg, bool IsNonUniform,

  bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
                              Register ResVReg, const SPIRVType *ResType,
                              MachineInstr &I) const;
#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
  if (HasVRegsReset == &MF)
    return;

  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    LLT RegType = MRI.getType(Reg);
  for (const auto &MBB : MF) {
    for (const auto &MI : MBB) {
      if (MI.getOpcode() != SPIRV::ASSIGN_TYPE)
        continue;
      Register DstReg = MI.getOperand(0).getReg();
      LLT DstType = MRI.getType(DstReg);
      Register SrcReg = MI.getOperand(1).getReg();
      LLT SrcType = MRI.getType(SrcReg);
      if (DstType != SrcType)
        MRI.setType(DstReg, MRI.getType(SrcReg));

      const TargetRegisterClass *DstRC = MRI.getRegClassOrNull(DstReg);
      const TargetRegisterClass *SrcRC = MRI.getRegClassOrNull(SrcReg);
      if (DstRC != SrcRC && SrcRC)
        MRI.setRegClass(DstReg, SrcRC);
    }
  }
  for (const auto &MO : MI.all_defs()) {
    Register Reg = MO.getReg();
    if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE || MI.isFakeUse() ||
      MI.isLifetimeMarker())
    return false;

  if (MI.mayStore() || MI.isCall() ||
      (MI.mayLoad() && MI.hasOrderedMemoryRef()) || MI.isPosition() ||
      MI.isDebugInstr() || MI.isTerminator() || MI.isJumpTableDebugInfo())
    return false;
  resetVRegsType(*I.getParent()->getParent());

  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");
  if (Opcode == SPIRV::ASSIGN_TYPE) {
    Register DstReg = I.getOperand(0).getReg();
    Register SrcReg = I.getOperand(1).getReg();
    auto *Def = MRI->getVRegDef(SrcReg);
    bool Res = selectImpl(I, *CoverageInfo);
    if (!Res && Def->getOpcode() != TargetOpcode::G_CONSTANT) {
      dbgs() << "Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
    }
    assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);

    MRI->setRegClass(SrcReg, MRI->getRegClass(DstReg));
    MRI->replaceRegWith(SrcReg, DstReg);
    GR.invalidateMachineInstr(&I);
    I.removeFromParent();
    return true;
  } else if (I.getNumDefs() == 1) {
    if (DeadMIs.contains(&I)) {
      GR.invalidateMachineInstr(&I);
      I.removeFromParent();
      return true;
    }

    if (I.getNumOperands() != I.getNumExplicitOperands()) {
      LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
      return false;
    }
    bool HasDefs = I.getNumDefs() > 0;
    Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
    SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
    assert(!HasDefs || ResType ||
           I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
    if (spvSelect(ResVReg, ResType, I)) {
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(64));
      GR.invalidateMachineInstr(&I);
      I.removeFromParent();
      return true;
    }
    return false;
  }
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return false;
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_SSUBO:
    return true;
  }

bool SPIRVInstructionSelector::BuildCOPY(Register DestReg, Register SrcReg,
                                         MachineInstr &I) const {
  const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(DestReg);
  const TargetRegisterClass *SrcRC = MRI->getRegClassOrNull(SrcReg);
  if (DstRC != SrcRC && SrcRC)
    MRI->setRegClass(DestReg, SrcRC);
  return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::COPY))
      .addDef(DestReg)
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (mayApplyGenericSelection(Opcode))
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectBuildVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);
  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);
  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_SCMP:
    return selectSUCmp(ResVReg, ResType, I, true);
  case TargetOpcode::G_UCMP:
    return selectSUCmp(ResVReg, ResType, I, false);
  case TargetOpcode::G_STRICT_FMA:
  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_STRICT_FLDEXP:
    return selectExtInst(ResVReg, ResType, I, CL::ldexp);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);
  case TargetOpcode::G_FTAN:
    return selectExtInst(ResVReg, ResType, I, CL::tan, GL::Tan);
  case TargetOpcode::G_FACOS:
    return selectExtInst(ResVReg, ResType, I, CL::acos, GL::Acos);
  case TargetOpcode::G_FASIN:
    return selectExtInst(ResVReg, ResType, I, CL::asin, GL::Asin);
  case TargetOpcode::G_FATAN:
    return selectExtInst(ResVReg, ResType, I, CL::atan, GL::Atan);
  case TargetOpcode::G_FATAN2:
    return selectExtInst(ResVReg, ResType, I, CL::atan2, GL::Atan2);
  case TargetOpcode::G_FCOSH:
    return selectExtInst(ResVReg, ResType, I, CL::cosh, GL::Cosh);
  case TargetOpcode::G_FSINH:
    return selectExtInst(ResVReg, ResType, I, CL::sinh, GL::Sinh);
  case TargetOpcode::G_FTANH:
    return selectExtInst(ResVReg, ResType, I, CL::tanh, GL::Tanh);

  case TargetOpcode::G_STRICT_FSQRT:
  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_add_sat);
  case TargetOpcode::G_UADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_add_sat);
  case TargetOpcode::G_SSUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_sub_sat);
  case TargetOpcode::G_USUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);

  case TargetOpcode::G_UADDO:
    return selectOverflowArith(ResVReg, ResType, I,
                               ResType->getOpcode() == SPIRV::OpTypeVector
                                   ? SPIRV::OpIAddCarryV
                                   : SPIRV::OpIAddCarryS);
  case TargetOpcode::G_USUBO:
    return selectOverflowArith(ResVReg, ResType, I,
                               ResType->getOpcode() == SPIRV::OpTypeVector
                                   ? SPIRV::OpISubBorrowV
                                   : SPIRV::OpISubBorrowS);
  case TargetOpcode::G_UMULO:
    return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpUMulExtended);
  case TargetOpcode::G_SMULO:
    return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpSMulExtended);
  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());

    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));

    bool IsGVInit = false;
    for (MachineRegisterInfo::use_instr_iterator
             UseIt = MRI->use_instr_begin(I.getOperand(0).getReg()),
             UseEnd = MRI->use_instr_end();
         UseIt != UseEnd; UseIt = std::next(UseIt)) {
      if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          (*UseIt).getOpcode() == SPIRV::OpVariable) {
        IsGVInit = true;
        break;
      }
    }

    SPIRVType *GVType = GR.getSPIRVTypeForVReg(GV);
    SPIRVType *GVPointeeType = GR.getPointeeType(GVType);
    SPIRVType *ResPointeeType = GR.getPointeeType(ResType);
    if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
      Register NewVReg = MRI->createGenericVirtualRegister(MRI->getType(GV));
      MRI->setRegClass(NewVReg, MRI->getRegClass(GV));

      if (!GR.isBitcastCompatible(ResType, GVType))
        report_fatal_error(
            "incompatible result and operand types in a bitcast");
      Register ResTypeReg = GR.getSPIRVTypeID(ResType);
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitcast))

                     TII.get(STI.isVulkanEnv()
                                 ? SPIRV::OpInBoundsAccessChain
                                 : SPIRV::OpInBoundsPtrAccessChain))
          .addUse(I.getOperand(2).getReg())

      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addImm(static_cast<uint32_t>(SPIRV::Opcode::InBoundsPtrAccessChain))
          .addUse(I.getOperand(2).getReg())

    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(I.getOperand(2).getReg());
  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with a negated value
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);
  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  case TargetOpcode::G_TRAP:
  case TargetOpcode::G_DEBUGTRAP:
  case TargetOpcode::G_UBSANTRAP:
  case TargetOpcode::DBG_LABEL:
    return true;
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             GL::GLSLExtInst GLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::GLSL_std_450, GLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}
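// Illustrative dispatch example: for G_FMA the wrapper above is called with
// both {OpenCL_std, CL::fma} and {GLSL_std_450, GL::Fma}; the list-based
// overload that follows emits whichever extended instruction set the current
// target environment reports as usable.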
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      unsigned Index = 1;
      if (Index < NumOps &&
          I.getOperand(Index).getType() ==
              MachineOperand::MachineOperandType::MO_IntrinsicID)
        Index = 2;
      for (; Index < NumOps; ++Index)
        MIB.add(I.getOperand(Index));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}
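// Sketch of the emitted form (register names illustrative): selecting fmax on
// an OpenCL-flavoured target produces
//   %res = OpExtInst %f32 %opencl_std_set fmax %a %b
// where %opencl_std_set is the imported "OpenCL.std" instruction set id.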
bool SPIRVInstructionSelector::selectOpWithSrcs(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I,
                                                std::vector<Register> Srcs,
                                                unsigned Opcode) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (Register SReg : Srcs)
    MIB.addUse(SReg);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  if (STI.isOpenCLEnv() && I.getOperand(1).isReg()) {
    Register SrcReg = I.getOperand(1).getReg();
    bool IsGV = false;
    for (MachineRegisterInfo::def_instr_iterator DefIt =
             MRI->def_instr_begin(SrcReg);
         DefIt != MRI->def_instr_end(); DefIt = std::next(DefIt)) {
      if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
        IsGV = true;
        break;
      }
    }
    if (IsGV) {
      uint32_t SpecOpcode = 0;
      switch (Opcode) {
      case SPIRV::OpConvertPtrToU:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
        break;
      case SPIRV::OpConvertUToPtr:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
        break;
      }
      if (SpecOpcode)
        return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(SpecOpcode)
            .addUse(SrcReg)
            .constrainAllUses(TII, TRI, RBI);
    }
  }
  return selectOpWithSrcs(ResVReg, ResType, I, {I.getOperand(1).getReg()},
                          Opcode);
}
bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType =
      OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }

  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
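// Example (illustrative): a volatile access with an explicit alignment of 16
// yields the mask Volatile|Aligned (0x1 | 0x2), followed by the literal 16 as
// the extra memory-operand word.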
bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();

  auto *PtrDef = getVRegDef(*MRI, Ptr);
  auto *IntPtrDef = dyn_cast<GIntrinsic>(PtrDef);
  if (IntPtrDef &&
      IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
    Register ImageReg = IntPtrDef->getOperand(2).getReg();
    Register NewImageReg =
        MRI->createVirtualRegister(MRI->getRegClass(ImageReg));
    auto *ImageDef = cast<GIntrinsic>(getVRegDef(*MRI, ImageReg));
    if (!loadHandleBeforePosition(NewImageReg, GR.getSPIRVTypeForVReg(ImageReg),
                                  *ImageDef, I))
      return false;

    Register IdxReg = IntPtrDef->getOperand(3).getReg();
    return generateImageRead(ResVReg, ResType, NewImageReg, IdxReg,
                             I.getDebugLoc(), I);
  }

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();

  auto *PtrDef = getVRegDef(*MRI, Ptr);
  auto *IntPtrDef = dyn_cast<GIntrinsic>(PtrDef);
  if (IntPtrDef &&
      IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
    Register ImageReg = IntPtrDef->getOperand(2).getReg();
    Register NewImageReg =
        MRI->createVirtualRegister(MRI->getRegClass(ImageReg));
    auto *ImageDef = cast<GIntrinsic>(getVRegDef(*MRI, ImageReg));
    if (!loadHandleBeforePosition(NewImageReg, GR.getSPIRVTypeForVReg(ImageReg),
                                  *ImageDef, I))
      return false;

    Register IdxReg = IntPtrDef->getOperand(3).getReg();
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(SPIRV::OpImageWrite))
        .addUse(NewImageReg)
        .addUse(IdxReg)
        .addUse(StoreVal)
        .constrainAllUses(TII, TRI, RBI);
  }

  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
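    // Sketch of the common non-image path above (registers illustrative):
    //   OpStore %ptr %value [MemoryAccess mask from the MachineMemOperand]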
bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());

    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);

    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);

    GR.add(GV, GR.CurMF, VarReg);
    GR.addGlobalObject(GV, GR.CurMF, VarReg);

    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant);

    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    selectOpWithSrcs(SrcReg, SourceTy, I, {VarReg}, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  bool Result = true;
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getMemScope(
      GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
  auto ScopeConstant = buildI32Constant(Scope, I);
  Register ScopeReg = ScopeConstant.first;
  Result &= ScopeConstant.second;

  Register Ptr = I.getOperand(1).getReg();
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  auto MemSemConstant = buildI32Constant(MemSem, I);
  Register MemSemReg = MemSemConstant.first;
  Result &= MemSemConstant.second;

  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with a negated value operand was requested.
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    Result &= selectOpWithSrcs(TmpReg, ResType, I, {ValueReg}, NegateOpcode);
    ValueReg = TmpReg;
  }

  return Result &&
         BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
             .addDef(ResVReg)
             .addUse(GR.getSPIRVTypeID(ResType))
             .addUse(Ptr)
             .addUse(ScopeReg)
             .addUse(MemSemReg)
             .addUse(ValueReg)
             .constrainAllUses(TII, TRI, RBI);
}
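// Resulting instruction shape (illustrative), e.g. for G_ATOMICRMW_ADD:
//   %res = OpAtomicIAdd %int %ptr %scope %semantics %value
// where %scope and %semantics are the i32 constants built above.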
bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions; fix this now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
      MRI->setType(ResVReg,
                   LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}
bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  auto MemSemConstant = buildI32Constant(MemSem, I);
  Register MemSemReg = MemSemConstant.first;
  bool Result = MemSemConstant.second;
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(
      getMemScope(GR.CurMF->getFunction().getContext(), Ord));
  auto ScopeConstant = buildI32Constant(Scope, I);
  Register ScopeReg = ScopeConstant.first;
  Result &= ScopeConstant.second;
  MachineBasicBlock &BB = *I.getParent();
  return Result &&
         BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
             .addUse(ScopeReg)
             .addUse(MemSemReg)
             .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I,
                                                   unsigned Opcode) const {
  Type *ResTy = nullptr;
  StringRef ResName;
  if (!GR.findValueAttrs(&I, ResTy, ResName))
    report_fatal_error(
        "Not enough info to select the arithmetic with overflow instruction");
  if (!ResTy || !ResTy->isStructTy())
    report_fatal_error("Expect struct type result for the arithmetic "
                       "with overflow instruction");

  Type *ResElemTy = cast<StructType>(ResTy)->getElementType(0);

  MachineIRBuilder MIRBuilder(I);
  SPIRVType *StructType = GR.getOrCreateSPIRVType(
      ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  assert(I.getNumDefs() > 1 && "Not enough operands");
  SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
  unsigned N = GR.getScalarOrVectorComponentCount(ResType);
  if (N > 1)
    BoolType = GR.getOrCreateSPIRVVectorType(BoolType, N, I, TII);
  Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);
  Register ZeroReg = buildZerosVal(ResType, I);

  MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);

  if (ResName.size() > 0)
    buildOpName(StructVReg, ResName, MIRBuilder);

  MachineBasicBlock &BB = *I.getParent();
  auto MIB =
      BuildMI(BB, MIRBuilder.getInsertPt(), I.getDebugLoc(), TII.get(Opcode))
          .addDef(StructVReg)
          .addUse(GR.getSPIRVTypeID(StructType));
  for (unsigned i = I.getNumDefs(); i < I.getNumOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());

  MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
        .addDef(i == 1 ? HigherVReg : I.getOperand(i).getReg())
        .addUse(GR.getSPIRVTypeID(ResType))
        .addUse(StructVReg)
        .addImm(i);
  }

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
      .addDef(I.getOperand(1).getReg())
      .addUse(BoolTypeReg)
      .addUse(HigherVReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  bool Result = true;
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope = static_cast<uint32_t>(getMemScope(
        GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
    auto ScopeConstant = buildI32Constant(Scope, I);
    ScopeReg = ScopeConstant.first;
    Result &= ScopeConstant.second;

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    auto MemSemEqConstant = buildI32Constant(MemSemEq, I);
    MemSemEqReg = MemSemEqConstant.first;
    Result &= MemSemEqConstant.second;
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    if (MemSemEq == MemSemNeq)
      MemSemNeqReg = MemSemEqReg;
    else {
      auto MemSemNeqConstant = buildI32Constant(MemSemNeq, I);
      MemSemNeqReg = MemSemNeqConstant.first;
      Result &= MemSemNeqConstant.second;
    }
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  Result &=
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}

static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg) {
  bool IsGRef = false;
  bool IsAllowedRefs =
      std::all_of(MRI->use_instr_begin(ResVReg), MRI->use_instr_end(),
                  [&IsGRef](auto const &It) {
                    unsigned Opcode = It.getOpcode();
                    if (Opcode == SPIRV::OpConstantComposite ||
                        Opcode == SPIRV::OpVariable ||
                        isSpvIntrinsic(It, Intrinsic::spv_init_global))
                      return IsGRef = true;
                    return Opcode == SPIRV::OpName;
                  });
  return IsAllowedRefs && IsGRef;
}
Register SPIRVInstructionSelector::getUcharPtrTypeReg(
    MachineInstr &I, SPIRV::StorageClass::StorageClass SC) const {
  return GR.getSPIRVTypeID(GR.getOrCreateSPIRVPointerType(
      GR.getOrCreateSPIRVIntegerType(8, I, TII), I, TII, SC));
}

MachineInstrBuilder SPIRVInstructionSelector::buildSpecConstantOp(
    MachineInstr &I, Register Dest, Register Src, Register DestType,
    uint32_t Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(SPIRV::OpSpecConstantOp))
      .addDef(Dest)
      .addUse(DestType)
      .addImm(Opcode)
      .addUse(Src);
}

  SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
      GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
  Register Tmp = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
  MRI->setType(Tmp, LLT::pointer(storageClassToAddressSpace(
                                     SPIRV::StorageClass::Generic),
                                 GR.getPointerSize()));
  MachineFunction *MF = I.getParent()->getParent();
  GR.assignSPIRVTypeToVReg(GenericPtrTy, Tmp, *MF);
  MachineInstrBuilder MIB = buildSpecConstantOp(
      I, Tmp, SrcPtr, GR.getSPIRVTypeID(GenericPtrTy),
      static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric));
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);

  // Don't generate a cast for a null that may be represented by OpTypeInt.
  if (SrcPtrTy->getOpcode() != SPIRV::OpTypePointer ||
      ResType->getOpcode() != SPIRV::OpTypePointer)
    return BuildCOPY(ResVReg, SrcPtr, I);

  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtrTy);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResType);

  if (isASCastInGVar(MRI, ResVReg)) {
    unsigned SpecOpcode =
        DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC)
            ? static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric)
            : (SrcSC == SPIRV::StorageClass::Generic &&
                       isGenericCastablePtr(DstSC)
                   ? static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr)
                   : 0);
    if (SpecOpcode)
      return buildSpecConstantOp(I, ResVReg, SrcPtr,
                                 getUcharPtrTypeReg(I, DstSC), SpecOpcode)
          .constrainAllUses(TII, TRI, RBI);
    // `ConstGenericPtr` names the intermediate generic pointer built in the
    // elided lines above (identifier assumed for readability).
    return buildSpecConstantOp(
               I, ResVReg, ConstGenericPtr, getUcharPtrTypeReg(I, DstSC),
               static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr))
        .constrainAllUses(TII, TRI, RBI);
  }

  // Don't generate a cast between identical storage classes.
  if (SrcSC == DstSC)
    return BuildCOPY(ResVReg, SrcPtr, I);

  if ((SrcSC == SPIRV::StorageClass::Function &&
       DstSC == SPIRV::StorageClass::Private) ||
      (DstSC == SPIRV::StorageClass::Function &&
       SrcSC == SPIRV::StorageClass::Private))
    return BuildCOPY(ResVReg, SrcPtr, I);

  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);

  // Casting between two eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpPtrCastToGeneric))
            .addDef(Tmp)
            .addUse(GR.getSPIRVTypeID(GenericPtrTy))
            .addUse(SrcPtr)
            .constrainAllUses(TII, TRI, RBI);
    return Result &&
           BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpGenericCastToPtr))
               .addDef(ResVReg)
               .addUse(GR.getSPIRVTypeID(ResType))
               .addUse(Tmp)
               .constrainAllUses(TII, TRI, RBI);
  }

  // Casts that rely on the SPV_INTEL_usm_storage_classes storage classes.
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::Generic)
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  if (SrcSC == SPIRV::StorageClass::Generic && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}
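// Example (illustrative): `icmp eq ptr %a, %b` is routed to getPtrCmpOpcode
// and becomes OpPtrEqual, while `icmp eq i32 %a, %b` becomes OpIEqual.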
bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I,
                                              unsigned OpAnyOrAll) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
  bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
  if (IsBoolTy && !IsVectorTy) {
    assert(ResVReg == I.getOperand(0).getReg());
    return BuildCOPY(ResVReg, InputRegister, I);
  }

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  unsigned SpirvNotEqualId =
      IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
  SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
  SPIRVType *SpvBoolTy = SpvBoolScalarTy;
  Register NotEqualReg = ResVReg;

  if (IsVectorTy) {
    NotEqualReg = IsBoolTy ? InputRegister
                           : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    const unsigned NumElts = GR.getScalarOrVectorComponentCount(InputType);
    SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
  }

  if (!IsBoolTy) {
    Register ConstZeroReg =
        IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);

    BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
        .addDef(NotEqualReg)
        .addUse(GR.getSPIRVTypeID(SpvBoolTy))
        .addUse(InputRegister)
        .addUse(ConstZeroReg)
        .constrainAllUses(TII, TRI, RBI);
  }

  if (!IsVectorTy)
    return true;

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
      .addUse(NotEqualReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectAll(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
}

bool SPIRVInstructionSelector::selectAny(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
}
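// Sketch (illustrative): `any(<4 x i1> %v)` lowers to `OpAny %bool %v`; a
// scalar i1 input is simply copied through by selectAnyOrAll above.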
bool SPIRVInstructionSelector::selectFloatDot(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I) const {
  assert(I.getNumOperands() == 4);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());

  SPIRVType *VecType = GR.getSPIRVTypeForVReg(I.getOperand(2).getReg());

  assert(VecType->getOpcode() == SPIRV::OpTypeVector &&
         GR.getScalarOrVectorComponentCount(VecType) > 1 &&
         "dot product requires a vector of at least 2 components");

  SPIRVType *EltType =
      GR.getSPIRVTypeForVReg(VecType->getOperand(1).getReg());

  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpDot))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectIntegerDot(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I,
                                                bool Signed) const {
  assert(I.getNumOperands() == 4);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  MachineBasicBlock &BB = *I.getParent();

  auto DotOp = Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(DotOp))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
// Expand an integer dot product by piecewise multiplying the vectors and
// summing the components.
bool SPIRVInstructionSelector::selectIntegerDotExpansion(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  assert(I.getNumOperands() == 4);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  MachineBasicBlock &BB = *I.getParent();

  // Multiply the vectors, then sum the results.
  Register Vec0 = I.getOperand(2).getReg();
  Register Vec1 = I.getOperand(3).getReg();
  Register TmpVec = MRI->createVirtualRegister(GR.getRegClass(ResType));
  SPIRVType *VecType = GR.getSPIRVTypeForVReg(Vec0);

  bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulV))
                    .addDef(TmpVec)
                    .addUse(GR.getSPIRVTypeID(VecType))
                    .addUse(Vec0)
                    .addUse(Vec1)
                    .constrainAllUses(TII, TRI, RBI);

  assert(VecType->getOpcode() == SPIRV::OpTypeVector &&
         GR.getScalarOrVectorComponentCount(VecType) > 1 &&
         "dot product requires a vector of at least 2 components");

  Register Res = MRI->createVirtualRegister(GR.getRegClass(ResType));
  Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                .addDef(Res)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(TmpVec)
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);

  for (unsigned i = 1; i < GR.getScalarOrVectorComponentCount(VecType); i++) {
    Register Elt = MRI->createVirtualRegister(GR.getRegClass(ResType));

    Result &=
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(Elt)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(TmpVec)
            .addImm(i)
            .constrainAllUses(TII, TRI, RBI);

    Register Sum = i < GR.getScalarOrVectorComponentCount(VecType) - 1
                       ? MRI->createVirtualRegister(GR.getRegClass(ResType))
                       : ResVReg;

    Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
                  .addDef(Sum)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(Res)
                  .addUse(Elt)
                  .constrainAllUses(TII, TRI, RBI);
    Res = Sum;
  }

  return Result;
}
template <bool Signed>
bool SPIRVInstructionSelector::selectDot4AddPacked(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  assert(I.getNumOperands() == 5);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  assert(I.getOperand(4).isReg());
  MachineBasicBlock &BB = *I.getParent();

  auto DotOp = Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
  Register Dot = MRI->createVirtualRegister(GR.getRegClass(ResType));
  bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(DotOp))
                    .addDef(Dot)
                    .addUse(GR.getSPIRVTypeID(ResType))
                    .addUse(I.getOperand(2).getReg())
                    .addUse(I.getOperand(3).getReg())
                    .constrainAllUses(TII, TRI, RBI);

  return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
                       .addDef(ResVReg)
                       .addUse(GR.getSPIRVTypeID(ResType))
                       .addUse(Dot)
                       .addUse(I.getOperand(4).getReg())
                       .constrainAllUses(TII, TRI, RBI);
}
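// Sketch (illustrative): dot4add_i8packed(%a, %b, %acc) becomes an OpSDot (or
// OpUDot) of the two packed i32 operands, followed by the OpIAddS with %acc
// emitted above.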
template <bool Signed>
bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  assert(I.getNumOperands() == 5);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  assert(I.getOperand(4).isReg());
  MachineBasicBlock &BB = *I.getParent();

  bool Result = true;

  // Acc = C
  Register Acc = I.getOperand(4).getReg();
  SPIRVType *EltType = GR.getOrCreateSPIRVIntegerType(8, I, TII);
  auto ExtractOp =
      Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;

  // Extract the i8 element, multiply, and add it to the accumulator.
  for (unsigned i = 0; i < 4; i++) {
    // A[i]
    Register AElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
                  .addDef(AElt)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(I.getOperand(2).getReg())
                  .addUse(GR.getOrCreateConstInt(i * 8, I, EltType, TII))
                  .addUse(GR.getOrCreateConstInt(8, I, EltType, TII))
                  .constrainAllUses(TII, TRI, RBI);

    // B[i]
    Register BElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
                  .addDef(BElt)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(I.getOperand(3).getReg())
                  .addUse(GR.getOrCreateConstInt(i * 8, I, EltType, TII))
                  .addUse(GR.getOrCreateConstInt(8, I, EltType, TII))
                  .constrainAllUses(TII, TRI, RBI);

    // A[i] * B[i]
    Register Mul = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulS))
                  .addDef(Mul)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(AElt)
                  .addUse(BElt)
                  .constrainAllUses(TII, TRI, RBI);

    // Discard the 24 highest bits so the stored i32 register is i8 equivalent.
    Register MaskMul = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result &=
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitFieldUExtract))
            .addDef(MaskMul)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(Mul)
            .addUse(GR.getOrCreateConstInt(0, I, EltType, TII))
            .addUse(GR.getOrCreateConstInt(8, I, EltType, TII))
            .constrainAllUses(TII, TRI, RBI);

    // Acc = Acc + A[i] * B[i]
    Register Sum =
        i < 3 ? MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
    Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
                  .addDef(Sum)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(Acc)
                  .addUse(MaskMul)
                  .constrainAllUses(TII, TRI, RBI);

    Acc = Sum;
  }

  return Result;
}
bool SPIRVInstructionSelector::selectSaturate(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register VZero = buildZerosValF(ResType, I);
  Register VOne = buildOnesValF(ResType, I);

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::FClamp)
      .addUse(I.getOperand(2).getReg())
      .addUse(VZero)
      .addUse(VOne)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectSign(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
  auto &DL = I.getDebugLoc();

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);

  unsigned SignBitWidth = GR.getScalarOrVectorBitWidth(InputType);
  unsigned ResBitWidth = GR.getScalarOrVectorBitWidth(ResType);

  bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;

  auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
  Register SignReg = NeedsConversion
                         ? MRI->createVirtualRegister(&SPIRV::IDRegClass)
                         : ResVReg;

  bool Result =
      BuildMI(BB, I, DL, TII.get(SPIRV::OpExtInst))
          .addDef(SignReg)
          .addUse(GR.getSPIRVTypeID(InputType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
          .addImm(SignOpcode)
          .addUse(InputRegister)
          .constrainAllUses(TII, TRI, RBI);

  if (NeedsConversion) {
    auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
    Result &= BuildMI(BB, I, DL, TII.get(ConvertOpcode))
                  .addDef(ResVReg)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(SignReg)
                  .constrainAllUses(TII, TRI, RBI);
  }

  return Result;
}
bool SPIRVInstructionSelector::selectWaveOpInst(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I,
                                                unsigned Opcode) const {
  MachineBasicBlock &BB = *I.getParent();
  SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);

  auto BMI = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I,
                                                IntTy, TII));

  for (unsigned J = 2; J < I.getNumOperands(); J++) {
    BMI.addUse(I.getOperand(J).getReg());
  }
  return BMI.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectWaveActiveCountBits(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
  SPIRVType *BallotType = GR.getOrCreateSPIRVVectorType(IntTy, 4, I, TII);
  Register BallotReg = MRI->createVirtualRegister(GR.getRegClass(BallotType));
  bool Result = selectWaveOpInst(BallotReg, BallotType, I,
                                 SPIRV::OpGroupNonUniformBallot);

  MachineBasicBlock &BB = *I.getParent();
  Result &= BuildMI(BB, I, I.getDebugLoc(),
                    TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I,
                                               IntTy, TII))
                .addImm(SPIRV::GroupOperation::Reduce)
                .addUse(BallotReg)
                .constrainAllUses(TII, TRI, RBI);

  return Result;
}
bool SPIRVInstructionSelector::selectWaveReduceSum(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Retrieve the operation to use based on the input type.
  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  auto Opcode =
      IsFloatTy ? SPIRV::OpGroupNonUniformFAdd : SPIRV::OpGroupNonUniformIAdd;
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy, TII))
      .addImm(SPIRV::GroupOperation::Reduce)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
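// Sketch (illustrative): WaveActiveSum(%x) on floats becomes
//   %r = OpGroupNonUniformFAdd %type %subgroup_scope Reduce %x
// with the integer opcode substituted for integer inputs.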
bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
    Register Reg;
    switch (Def->getOpcode()) {
    case SPIRV::ASSIGN_TYPE:
      if (MachineInstr *AssignToDef =
              MRI->getVRegDef(Def->getOperand(1).getReg())) {
        if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
          Reg = Def->getOperand(2).getReg();
      }
      break;
    case SPIRV::OpUndef:
      Reg = Def->getOperand(1).getReg();
      break;
    }
    unsigned DestOpCode;
    if (Reg.isValid()) {
      DestOpCode = SPIRV::OpConstantNull;
    } else {
      DestOpCode = TargetOpcode::COPY;
      Reg = OpReg;
    }
    return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
        .addDef(I.getOperand(0).getReg())
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  return false;
}
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&

  unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT

  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&

  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return true;
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return cast<GIntrinsic>(*OpDef).getIntrinsicID() ==
           Intrinsic::spv_const_composite;
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_SPLAT_VECTOR: {
bool SPIRVInstructionSelector::selectBuildVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0;
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray)
    N = getArrayComponentCount(MRI, ResType);

  if (I.getNumExplicitOperands() - I.getNumExplicitDefs() != N)

  // Check whether we may construct a constant vector.
  bool IsConst = true;
  for (unsigned i = I.getNumExplicitDefs();
       i < I.getNumExplicitOperands() && IsConst; ++i)
    if (!isConstReg(MRI, I.getOperand(i).getReg()))
      IsConst = false;

  if (!IsConst && N < 2)
    report_fatal_error(
        "There must be at least two constituent operands in a vector");

  MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0;
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray)
    N = getArrayComponentCount(MRI, ResType);

  unsigned OpIdx = I.getNumExplicitDefs();
  if (!I.getOperand(OpIdx).isReg())

  // Check whether we may construct a constant vector.
  Register OpReg = I.getOperand(OpIdx).getReg();
  bool IsConst = isConstReg(MRI, OpReg);

  if (!IsConst && N < 2)
    report_fatal_error(
        "There must be at least two constituent operands in a vector");

  MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = 0; i < N; ++i)
    MIB.addUse(OpReg);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectDiscard(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  unsigned Opcode;

  if (STI.canUseExtension(
          SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
      STI.isAtLeastSPIRVVer(llvm::VersionTuple(1, 6))) {
    Opcode = SPIRV::OpDemoteToHelperInvocation;
  } else {
    Opcode = SPIRV::OpKill;
    // OpKill must be the last operation of any basic block.
    if (MachineInstr *NextI = I.getNextNode()) {
      GR.invalidateMachineInstr(NextI);
      NextI->removeFromParent();
    }
  }

  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);

  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);

std::pair<Register, bool>
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Find a constant in DT or build a new one.
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
  bool Result = true;
  if (!NewReg.isValid()) {
    NewReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
    GR.add(ConstInt, GR.CurMF, NewReg);
    MachineInstr *MI;
    MachineBasicBlock &BB = *I.getParent();
    if (Val == 0) {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty));
    } else {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty))
               .addImm(APInt(32, Val).getZExtValue());
    }
    Result &= constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  }
  return {NewReg, Result};
}
bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}

Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  bool ZeroAsNull = STI.isOpenCLEnv();
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
}

Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
                                                  MachineInstr &I) const {
  bool ZeroAsNull = STI.isOpenCLEnv();
  APFloat VZero = getZeroFP(GR.getTypeForSPIRVType(ResType));
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
}

Register SPIRVInstructionSelector::buildOnesValF(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  bool ZeroAsNull = STI.isOpenCLEnv();
  APFloat VOne = getOneFP(GR.getTypeForSPIRVType(ResType));
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(VOne, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstFP(VOne, I, ResType, TII, ZeroAsNull);
}
Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One =
      AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(One.getZExtValue(), I, ResType, TII);
  return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
}

bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(),
                                SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = GR.getScalarOrVectorComponentCount(ResType);
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectOpWithSrcs(ResVReg, ResType, I, {SrcReg}, Opcode);
}

bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  Register SrcReg = I.getOperand(1).getReg();
  if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);

  SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
  if (SrcType == ResType)
    return BuildCOPY(ResVReg, SrcReg, I);

  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}
bool SPIRVInstructionSelector::selectSUCmp(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I,
                                           bool IsSigned) const {
  MachineIRBuilder MIRBuilder(I);
  MachineBasicBlock &BB = *I.getParent();
  // Ensure we have a bool type.
  SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
  unsigned N = GR.getScalarOrVectorComponentCount(ResType);
  if (N > 1)
    BoolType = GR.getOrCreateSPIRVVectorType(BoolType, N, I, TII);
  Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);

  // Build less-than-equal and less-than.
  Register IsLessEqReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  GR.assignSPIRVTypeToVReg(ResType, IsLessEqReg, MIRBuilder.getMF());
  bool Result = BuildMI(BB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? SPIRV::OpSLessThanEqual
                                         : SPIRV::OpULessThanEqual))
                    .addDef(IsLessEqReg)
                    .addUse(BoolTypeReg)
                    .addUse(I.getOperand(1).getReg())
                    .addUse(I.getOperand(2).getReg())
                    .constrainAllUses(TII, TRI, RBI);
  Register IsLessReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  GR.assignSPIRVTypeToVReg(ResType, IsLessReg, MIRBuilder.getMF());
  Result &= BuildMI(BB, I, I.getDebugLoc(),
                    TII.get(IsSigned ? SPIRV::OpSLessThan
                                     : SPIRV::OpULessThan))
                .addDef(IsLessReg)
                .addUse(BoolTypeReg)
                .addUse(I.getOperand(1).getReg())
                .addUse(I.getOperand(2).getReg())
                .constrainAllUses(TII, TRI, RBI);

  // Build the selects.
  Register ResTypeReg = GR.getSPIRVTypeID(ResType);
  Register NegOneOrZeroReg =
      MRI->createVirtualRegister(GR.getRegClass(ResType));
  GR.assignSPIRVTypeToVReg(ResType, NegOneOrZeroReg, MIRBuilder.getMF());
  unsigned SelectOpcode =
      N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
  Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SelectOpcode))
                .addDef(NegOneOrZeroReg)
                .addUse(ResTypeReg)
                .addUse(IsLessReg)
                .addUse(buildOnesVal(true, ResType, I)) // -1
                .addUse(buildZerosVal(ResType, I))
                .constrainAllUses(TII, TRI, RBI);
  return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SelectOpcode))
                       .addDef(ResVReg)
                       .addUse(ResTypeReg)
                       .addUse(IsLessEqReg)
                       .addUse(NegOneOrZeroReg) // -1 or 0
                       .addUse(buildOnesVal(false, ResType, I))
                       .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               MachineInstr &I,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy) const {
  // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register Zero = buildZerosVal(IntTy, I);
  Register One = buildOnesVal(false, IntTy, I);
  MachineBasicBlock &BB = *I.getParent();
  bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                    .addDef(BitIntReg)
                    .addUse(GR.getSPIRVTypeID(IntTy))
                    .addUse(IntReg)
                    .addUse(One)
                    .constrainAllUses(TII, TRI, RBI);
  return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
                       .addDef(ResVReg)
                       .addUse(GR.getSPIRVTypeID(BoolTy))
                       .addUse(BitIntReg)
                       .addUse(Zero)
                       .constrainAllUses(TII, TRI, RBI);
}
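// Sketch (illustrative): truncating i32 %x to i1 emits
//   %t = OpBitwiseAnd %int %x %one
//   %b = OpINotEqual %bool %t %zero
// i.e. the low bit is isolated and then compared against zero.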
bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  Register IntReg = I.getOperand(1).getReg();
  const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
    return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
  if (ArgType == ResType)
    return BuildCOPY(ResVReg, IntReg, I);
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}
bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           const APInt &Imm,
                                           MachineInstr &I) const {
  unsigned TyOpcode = ResType->getOpcode();
  assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
  MachineBasicBlock &BB = *I.getParent();
  if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
      Imm.isZero())
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .constrainAllUses(TII, TRI, RBI);
  if (TyOpcode == SPIRV::OpTypeInt) {
    assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
    Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
    return Reg == ResVReg ? true : BuildCOPY(ResVReg, Reg, I);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  addNumImm(Imm, MIB);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

static bool isImm(const MachineOperand &MO, const MachineRegisterInfo *MRI) {
  assert(MO.isReg());
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() == SPIRV::ASSIGN_TYPE) {
    assert(TypeInst->getOperand(1).isReg());
    MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
    return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
  }
  return TypeInst->getOpcode() == SPIRV::OpConstantI;
}

static int64_t foldImm(const MachineOperand &MO,
                       const MachineRegisterInfo *MRI) {
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() == SPIRV::OpConstantI)
    return TypeInst->getOperand(2).getImm();
bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // object to insert
                 .addUse(I.getOperand(3).getReg())
                 // composite to insert into
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 4; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg());
  for (unsigned i = 3; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (isImm(I.getOperand(4), MRI))
    return selectInsertVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  if (isImm(I.getOperand(3), MRI))
    return selectExtractVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(),
                 TII.get(SPIRV::OpVectorExtractDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
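// Note: both paths above fold a constant index into OpCompositeInsert /
// OpCompositeExtract; only genuinely dynamic indices need the
// OpVectorInsertDynamic / OpVectorExtractDynamic forms.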
bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const bool IsGEPInBounds = I.getOperand(2).getImm();

  // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator
  // relies on PtrAccessChain, so we try not to deviate. For Vulkan, however,
  // we have to use Op[InBounds]AccessChain.
  const unsigned Opcode = STI.isVulkanEnv()
                              ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
                                               : SPIRV::OpAccessChain)
                              : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
                                               : SPIRV::OpPtrAccessChain);

  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to get a pointer to.
                 .addUse(I.getOperand(3).getReg());
  // Adding indices.
  const unsigned StartingIndex =
      (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
          ? 5
          : 4;
  for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}
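// Note: on Vulkan targets a GEP becomes Op[InBounds]AccessChain, and the first
// GEP index (the pointer offset, unsupported there) is skipped by starting at
// operand 5; the OpenCL path keeps it via Op[InBounds]PtrAccessChain.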
bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
    MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
  bool Result = true;
  unsigned Lim = I.getNumExplicitOperands();
  for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
    Register OpReg = I.getOperand(i).getReg();
    MachineInstr *OpDefine = MRI->getVRegDef(OpReg);
    SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
    SmallPtrSet<SPIRVType *, 4> Visited;
    if (!OpDefine || !OpType || isConstReg(MRI, OpDefine, Visited) ||
        OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
        GR.isAggregateType(OpType)) {
      // The case of G_ADDRSPACE_CAST inside spv_const_composite() is handled
      // by selectAddrSpaceCast().
      CompositeArgs.push_back(OpReg);
      continue;
    }
    MachineFunction *MF = I.getMF();
    Register WrapReg = GR.find(OpDefine, MF);
    if (WrapReg.isValid()) {
      CompositeArgs.push_back(WrapReg);
      continue;
    }
    // Create a new register for the wrapper.
    WrapReg = MRI->createVirtualRegister(GR.getRegClass(OpType));
    GR.add(OpDefine, MF, WrapReg);
    CompositeArgs.push_back(WrapReg);
    GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
    MachineBasicBlock &BB = *I.getParent();
    Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                 .addDef(WrapReg)
                 .addUse(GR.getSPIRVTypeID(OpType))
bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
  switch (IID) {
  case Intrinsic::spv_load:
    return selectLoad(ResVReg, ResType, I);
  case Intrinsic::spv_store:
    return selectStore(I);
  case Intrinsic::spv_extractv:
    return selectExtractVal(ResVReg, ResType, I);
  case Intrinsic::spv_insertv:
    return selectInsertVal(ResVReg, ResType, I);
  case Intrinsic::spv_extractelt:
    return selectExtractElt(ResVReg, ResType, I);
  case Intrinsic::spv_insertelt:
    return selectInsertElt(ResVReg, ResType, I);
  case Intrinsic::spv_gep:
    return selectGEP(ResVReg, ResType, I);
  case Intrinsic::spv_unref_global:
  case Intrinsic::spv_init_global: {
    MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
    MachineInstr *Init = I.getNumExplicitOperands() > 2
                             ? MRI->getVRegDef(I.getOperand(2).getReg())
                             : nullptr;
    assert(MI);
    return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
  }
  case Intrinsic::spv_undef: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_const_composite: {
    // If no values are attached, the composite is a null constant.
    bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
    unsigned Opcode = SPIRV::OpConstantNull;
    SmallVector<Register> CompositeArgs;
    if (!IsNull) {
      Opcode = SPIRV::OpConstantComposite;
      if (!wrapIntoSpecConstantOp(I, CompositeArgs))
        return false;
    }
    MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    if (!IsNull)
      for (Register OpReg : CompositeArgs)
        MIB.addUse(OpReg);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_assign_name: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
    MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
    for (unsigned i = I.getNumExplicitDefs() + 2;
         i < I.getNumExplicitOperands(); ++i) {
      MIB.addImm(I.getOperand(i).getImm());
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_switch: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isReg())
        MIB.addReg(I.getOperand(i).getReg());
      else if (I.getOperand(i).isCImm())
        addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
      else if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        llvm_unreachable("Unexpected OpSwitch operand");
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_loop_merge: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpLoopMerge));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      assert(I.getOperand(i).isMBB());
      MIB.addMBB(I.getOperand(i).getMBB());
    }
    MIB.addImm(SPIRV::SelectionControl::None);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_selection_merge: {
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSelectionMerge));
    assert(I.getOperand(1).isMBB() &&
           "operand 1 to spv_selection_merge must be a basic block");
    MIB.addMBB(I.getOperand(1).getMBB());
    MIB.addImm(getSelectionOperandForImm(I.getOperand(2).getImm()));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_cmpxchg:
    return selectAtomicCmpXchg(ResVReg, ResType, I);
  case Intrinsic::spv_unreachable:
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable))
        .constrainAllUses(TII, TRI, RBI);
  case Intrinsic::spv_alloca:
    return selectFrameIndex(ResVReg, ResType, I);
  case Intrinsic::spv_alloca_array:
    return selectAllocaArray(ResVReg, ResType, I);
  case Intrinsic::spv_assume:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
          .addUse(I.getOperand(1).getReg())
          .constrainAllUses(TII, TRI, RBI);
    break;
  case Intrinsic::spv_expect:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg())
          .addUse(I.getOperand(3).getReg())
          .constrainAllUses(TII, TRI, RBI);
    break;
  case Intrinsic::arithmetic_fence:
    if (STI.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
      return BuildMI(BB, I, I.getDebugLoc(),
                     TII.get(SPIRV::OpArithmeticFenceEXT))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg())
          .constrainAllUses(TII, TRI, RBI);
    // Without the extension the fence is a no-op: just forward the value.
    return BuildCOPY(ResVReg, I.getOperand(2).getReg(), I);
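  // The spv_thread_id family maps to vec3 input builtin variables;
  // loadVec3BuiltinInputID emits the variable, the OpLoad, and the component
  // extract.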
  case Intrinsic::spv_thread_id:
    return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
                                  ResType, I);
  case Intrinsic::spv_thread_id_in_group:
    return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
                                  ResType, I);
  case Intrinsic::spv_group_id:
    return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg,
                                  ResType, I);
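  // Dot products use the native instructions when the integer-dot-product
  // extension (or a recent enough SPIR-V version) is available and otherwise
  // fall back to an expansion into multiplies and adds.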
  case Intrinsic::spv_fdot:
    return selectFloatDot(ResVReg, ResType, I);
  case Intrinsic::spv_udot:
  case Intrinsic::spv_sdot:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
        STI.isAtLeastSPIRVVer(VersionTuple(1, 6)))
      return selectIntegerDot(ResVReg, ResType, I,
                              /*Signed=*/IID == Intrinsic::spv_sdot);
    return selectIntegerDotExpansion(ResVReg, ResType, I);
  case Intrinsic::spv_dot4add_i8packed:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
        STI.isAtLeastSPIRVVer(VersionTuple(1, 6)))
      return selectDot4AddPacked<true>(ResVReg, ResType, I);
    return selectDot4AddPackedExpansion<true>(ResVReg, ResType, I);
  case Intrinsic::spv_dot4add_u8packed:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
        STI.isAtLeastSPIRVVer(VersionTuple(1, 6)))
      return selectDot4AddPacked<false>(ResVReg, ResType, I);
    return selectDot4AddPackedExpansion<false>(ResVReg, ResType, I);
  case Intrinsic::spv_all:
    return selectAll(ResVReg, ResType, I);
  case Intrinsic::spv_any:
    return selectAny(ResVReg, ResType, I);
  case Intrinsic::spv_cross:
    return selectExtInst(ResVReg, ResType, I, CL::cross, GL::Cross);
  case Intrinsic::spv_distance:
    return selectExtInst(ResVReg, ResType, I, CL::distance, GL::Distance);
  case Intrinsic::spv_lerp:
    return selectExtInst(ResVReg, ResType, I, CL::mix, GL::FMix);
  case Intrinsic::spv_length:
    return selectExtInst(ResVReg, ResType, I, CL::length, GL::Length);
  case Intrinsic::spv_degrees:
    return selectExtInst(ResVReg, ResType, I, CL::degrees, GL::Degrees);
  case Intrinsic::spv_frac:
    return selectExtInst(ResVReg, ResType, I, CL::fract, GL::Fract);
  case Intrinsic::spv_normalize:
    return selectExtInst(ResVReg, ResType, I, CL::normalize, GL::Normalize);
  case Intrinsic::spv_rsqrt:
    return selectExtInst(ResVReg, ResType, I, CL::rsqrt, GL::InverseSqrt);
  case Intrinsic::spv_sign:
    return selectSign(ResVReg, ResType, I);
  case Intrinsic::spv_firstbituhigh: // There is no CL equivalent of FindUMsb
    return selectFirstBitHigh(ResVReg, ResType, I, /*IsSigned=*/false);
  case Intrinsic::spv_firstbitshigh: // There is no CL equivalent of FindSMsb
    return selectFirstBitHigh(ResVReg, ResType, I, /*IsSigned=*/true);
  case Intrinsic::spv_firstbitlow: // There is no CL equivalent of FindILsb
    return selectFirstBitLow(ResVReg, ResType, I);
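  // OpControlBarrier operands are execution scope, memory scope, and memory
  // semantics; for a group barrier with sync both scopes are the workgroup.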
  case Intrinsic::spv_group_memory_barrier_with_group_sync: {
    bool Result = true;
    auto MemSemConstant =
        buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent, I);
    Register MemSemReg = MemSemConstant.first;
    Result &= MemSemConstant.second;
    auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup, I);
    Register ScopeReg = ScopeConstant.first;
    Result &= ScopeConstant.second;
    return Result &&
           BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpControlBarrier))
               .addUse(ScopeReg)
               .addUse(ScopeReg)
               .addUse(MemSemReg)
               .constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_lifetime_start:
  case Intrinsic::spv_lifetime_end: {
    unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
                                                       : SPIRV::OpLifetimeStop;
    int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
    Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
    if (Size == -1)
      Size = 0;
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(Op))
        .addUse(PtrReg)
        .addImm(Size)
        .constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_saturate:
    return selectSaturate(ResVReg, ResType, I);
  case Intrinsic::spv_nclamp:
    return selectExtInst(ResVReg, ResType, I, CL::fclamp, GL::NClamp);
  case Intrinsic::spv_uclamp:
    return selectExtInst(ResVReg, ResType, I, CL::u_clamp, GL::UClamp);
  case Intrinsic::spv_sclamp:
    return selectExtInst(ResVReg, ResType, I, CL::s_clamp, GL::SClamp);
  case Intrinsic::spv_wave_active_countbits:
    return selectWaveActiveCountBits(ResVReg, ResType, I);
  case Intrinsic::spv_wave_all:
    return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformAll);
  case Intrinsic::spv_wave_any:
    return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformAny);
  case Intrinsic::spv_wave_is_first_lane:
    return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformElect);
  case Intrinsic::spv_wave_reduce_sum:
    return selectWaveReduceSum(ResVReg, ResType, I);
  case Intrinsic::spv_wave_readlane:
    return selectWaveOpInst(ResVReg, ResType, I,
                            SPIRV::OpGroupNonUniformShuffle);
  case Intrinsic::spv_step:
    return selectExtInst(ResVReg, ResType, I, CL::step, GL::Step);
  case Intrinsic::spv_radians:
    return selectExtInst(ResVReg, ResType, I, CL::radians, GL::Radians);
  // Discard internal intrinsics.
  case Intrinsic::instrprof_increment:
  case Intrinsic::instrprof_increment_step:
  case Intrinsic::instrprof_value_profile:
    break;
  case Intrinsic::spv_value_md:
    break;
  case Intrinsic::spv_resource_handlefrombinding: {
    return selectHandleFromBinding(ResVReg, ResType, I);
  }
  case Intrinsic::spv_resource_store_typedbuffer: {
    return selectImageWriteIntrinsic(I);
  }
  case Intrinsic::spv_resource_load_typedbuffer: {
    return selectReadImageIntrinsic(ResVReg, ResType, I);
  }
  case Intrinsic::spv_resource_getpointer: {
    return selectResourceGetPointer(ResVReg, ResType, I);
  }
  case Intrinsic::spv_discard: {
    return selectDiscard(ResVReg, ResType, I);
  }
  default: {
    std::string DiagMsg;
    raw_string_ostream OS(DiagMsg);
    I.print(OS);
    DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
    report_fatal_error(DiagMsg.c_str(), false);
  }
  }
  return true;
}

bool SPIRVInstructionSelector::selectHandleFromBinding(
    Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  // The handle is not materialized here; loading is deferred to the
  // intrinsics that consume it, so the OpLoad lands in the consuming basic
  // block (see loadHandleBeforePosition).
  return true;
}
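// Typed-buffer loads re-materialize the handle at the use site via
// loadHandleBeforePosition so the OpLoad of the handle sits in the same basic
// block as the OpImageRead that consumes it.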
bool SPIRVInstructionSelector::selectReadImageIntrinsic(
    Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  // If the load of the image is in a different basic block, reload it here so
  // the load stays with its use.
  Register ImageReg = I.getOperand(2).getReg();
  auto *ImageDef = cast<GIntrinsic>(getVRegDef(*MRI, ImageReg));
  Register NewImageReg =
      MRI->createVirtualRegister(MRI->getRegClass(ImageReg));
  if (!loadHandleBeforePosition(NewImageReg, GR.getSPIRVTypeForVReg(ImageReg),
                                *ImageDef, I))
    return false;

  Register IdxReg = I.getOperand(3).getReg();
  DebugLoc Loc = I.getDebugLoc();
  MachineInstr &Pos = I;

  return generateImageRead(ResVReg, ResType, NewImageReg, IdxReg, Loc, Pos);
}

bool SPIRVInstructionSelector::generateImageRead(Register &ResVReg,
                                                 const SPIRVType *ResType,
                                                 Register ImageReg,
                                                 Register IdxReg, DebugLoc Loc,
                                                 MachineInstr &Pos) const {
  uint64_t ResultSize = GR.getScalarOrVectorComponentCount(ResType);
  if (ResultSize == 4) {
    return BuildMI(*Pos.getParent(), Pos, Loc, TII.get(SPIRV::OpImageRead))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .addUse(ImageReg)
        .addUse(IdxReg)
        .constrainAllUses(TII, TRI, RBI);
  }

  SPIRVType *ReadType = widenTypeToVec4(ResType, Pos);
  Register ReadReg = MRI->createVirtualRegister(GR.getRegClass(ReadType));
  bool Succeed =
      BuildMI(*Pos.getParent(), Pos, Loc, TII.get(SPIRV::OpImageRead))
          .addDef(ReadReg)
          .addUse(GR.getSPIRVTypeID(ReadType))
          .addUse(ImageReg)
          .addUse(IdxReg)
          .constrainAllUses(TII, TRI, RBI);
  if (!Succeed)
    return false;

  // If the result size is 1, extract the element from the vec4.
  if (ResultSize == 1) {
    return BuildMI(*Pos.getParent(), Pos, Loc,
                   TII.get(SPIRV::OpCompositeExtract))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .addUse(ReadReg)
        .addImm(0)
        .constrainAllUses(TII, TRI, RBI);
  }
  return extractSubvector(ResVReg, ResType, ReadReg, Pos);
}
bool SPIRVInstructionSelector::selectResourceGetPointer(
    Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  Register ResourcePtr = I.getOperand(2).getReg();
  SPIRVType *RegType = GR.getResultType(ResourcePtr);
  assert(RegType->getOpcode() == SPIRV::OpTypeImage &&
         "Can only handle texel buffers for now.");
  // For texel buffers the index becomes part of the OpImageRead/OpImageWrite,
  // so nothing is emitted here; the intrinsic is folded into the access.
  return true;
}
bool SPIRVInstructionSelector::extractSubvector(
    Register &ResVReg, const SPIRVType *ResType, Register &ReadReg,
    MachineInstr &InsertionPoint) const {
  SPIRVType *InputType = GR.getResultType(ReadReg);
  [[maybe_unused]] uint64_t InputSize =
      GR.getScalarOrVectorComponentCount(InputType);
  uint64_t ResultSize = GR.getScalarOrVectorComponentCount(ResType);
  assert(InputSize > 1 && "The input must be a vector.");
  assert(ResultSize > 1 && "The result must be a vector.");
  assert(ResultSize < InputSize &&
         "Cannot extract more element than there are in the input.");
  SmallVector<Register> ComponentRegisters;
  SPIRVType *ScalarType = GR.getScalarOrVectorComponentType(ResType);
  const TargetRegisterClass *ScalarRegClass = GR.getRegClass(ScalarType);
  for (uint64_t I = 0; I < ResultSize; I++) {
    Register ComponentReg = MRI->createVirtualRegister(ScalarRegClass);
    bool Succeed = BuildMI(*InsertionPoint.getParent(), InsertionPoint,
                           InsertionPoint.getDebugLoc(),
                           TII.get(SPIRV::OpCompositeExtract))
                       .addDef(ComponentReg)
                       .addUse(GR.getSPIRVTypeID(ScalarType))
                       .addUse(ReadReg)
                       .addImm(I)
                       .constrainAllUses(TII, TRI, RBI);
    if (!Succeed)
      return false;
    ComponentRegisters.emplace_back(ComponentReg);
  }

  MachineInstrBuilder MIB =
      BuildMI(*InsertionPoint.getParent(), InsertionPoint,
              InsertionPoint.getDebugLoc(),
              TII.get(SPIRV::OpCompositeConstruct))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType));
  for (Register ComponentReg : ComponentRegisters)
    MIB.addUse(ComponentReg);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
    MachineInstr &I) const {
  // If the load of the image is in a different basic block, reload it here so
  // the load stays with its use.
  Register ImageReg = I.getOperand(1).getReg();
  auto *ImageDef = cast<GIntrinsic>(getVRegDef(*MRI, ImageReg));
  Register NewImageReg =
      MRI->createVirtualRegister(MRI->getRegClass(ImageReg));
  if (!loadHandleBeforePosition(NewImageReg, GR.getSPIRVTypeForVReg(ImageReg),
                                *ImageDef, I))
    return false;

  Register CoordinateReg = I.getOperand(2).getReg();
  Register DataReg = I.getOperand(3).getReg();
  assert(GR.getResultType(DataReg)->getOpcode() == SPIRV::OpTypeVector);
  assert(GR.getScalarOrVectorComponentCount(GR.getResultType(DataReg)) == 4);
  return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(SPIRV::OpImageWrite))
      .addUse(NewImageReg)
      .addUse(CoordinateReg)
      .addUse(DataReg)
      .constrainAllUses(TII, TRI, RBI);
}
Register SPIRVInstructionSelector::buildPointerToResource(
    const SPIRVType *ResType, uint32_t Set, uint32_t Binding,
    uint32_t ArraySize, Register IndexReg, bool IsNonUniform,
    MachineIRBuilder MIRBuilder) const {
  if (ArraySize == 1)
    return GR.getOrCreateGlobalVariableWithBinding(ResType, Set, Binding,
                                                   MIRBuilder);
  const SPIRVType *VarType = GR.getOrCreateSPIRVArrayType(
      ResType, ArraySize, *MIRBuilder.getInsertPt(), TII);
  Register VarReg = GR.getOrCreateGlobalVariableWithBinding(
      VarType, Set, Binding, MIRBuilder);
  SPIRVType *ResPointerType = GR.getOrCreateSPIRVPointerType(
      ResType, MIRBuilder, SPIRV::StorageClass::UniformConstant);
  Register AcReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  if (IsNonUniform) {
    // It is unclear which value needs to be marked non-uniform, so both the
    // index and the access chain are decorated.
    buildOpDecorate(IndexReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
    buildOpDecorate(AcReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
  }
  MIRBuilder.buildInstr(SPIRV::OpAccessChain)
      .addDef(AcReg)
      .addUse(GR.getSPIRVTypeID(ResPointerType))
      .addUse(VarReg)
      .addUse(IndexReg);
  return AcReg;
}
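// The firstbit{high,low} lowerings are built on the GLSL FindUMsb/FindSMsb/
// FindILsb instructions, which are only defined for 32-bit operands: 16-bit
// sources are widened first and 64-bit sources are split into 32-bit halves.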
bool SPIRVInstructionSelector::selectFirstBitSet16(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
    unsigned ExtendOpcode, unsigned BitSetOpcode) const {
  Register ExtReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  bool Result = selectOpWithSrcs(ExtReg, ResType, I,
                                 {I.getOperand(2).getReg()}, ExtendOpcode);
  return Result &&
         selectFirstBitSet32(ResVReg, ResType, I, ExtReg, BitSetOpcode);
}
bool SPIRVInstructionSelector::selectFirstBitSet32(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
    Register SrcReg, unsigned BitSetOpcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(BitSetOpcode)
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}
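// SPIR-V vectors have at most 4 components, so a 3- or 4-element vector of
// i64 cannot be bitcast to a single twice-as-wide i32 vector. The overflow
// path below shuffles out i64x2 sub-vectors, lowers each one recursively, and
// then reassembles the partial results.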
bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
    Register SrcReg, unsigned BitSetOpcode, bool SwapPrimarySide) const {
  // SPIR-V only allows vectors of 2, 3 or 4 components. Calling the
  // non-overflow path with a larger vector would create registers with
  // invalid vector sizes, so split the input here instead.
  unsigned ComponentCount = GR.getScalarOrVectorComponentCount(ResType);
  assert(ComponentCount < 5 && "Vec 5+ will generate invalid SPIR-V ops");

  MachineIRBuilder MIRBuilder(I);
  SPIRVType *BaseType = GR.retrieveScalarOrVectorIntType(ResType);
  SPIRVType *I64Type = GR.getOrCreateSPIRVIntegerType(64, MIRBuilder);
  SPIRVType *I64x2Type = GR.getOrCreateSPIRVVectorType(I64Type, 2, MIRBuilder);
  SPIRVType *Vec2ResType =
      GR.getOrCreateSPIRVVectorType(BaseType, 2, MIRBuilder);

  std::vector<Register> PartialRegs;

  // Loops 0, 2, 4, ... but stops one iteration early when ComponentCount is
  // odd.
  unsigned CurrentComponent = 0;
  for (; CurrentComponent + 1 < ComponentCount; CurrentComponent += 2) {
    // This register holds the firstbitX result for the i64x2 vector extracted
    // from SrcReg.
    Register BitSetResult =
        MRI->createVirtualRegister(GR.getRegClass(I64x2Type));
    auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpVectorShuffle))
                   .addDef(BitSetResult)
                   .addUse(GR.getSPIRVTypeID(I64x2Type))
                   .addUse(SrcReg)
                   .addUse(SrcReg)
                   .addImm(CurrentComponent)
                   .addImm(CurrentComponent + 1);
    if (!MIB.constrainAllUses(TII, TRI, RBI))
      return false;

    Register SubVecBitSetReg =
        MRI->createVirtualRegister(GR.getRegClass(Vec2ResType));
    if (!selectFirstBitSet64(SubVecBitSetReg, Vec2ResType, I, BitSetResult,
                             BitSetOpcode, SwapPrimarySide))
      return false;

    PartialRegs.push_back(SubVecBitSetReg);
  }

  // On odd component counts, handle the last component separately.
  if (CurrentComponent != ComponentCount) {
    bool ZeroAsNull = STI.isOpenCLEnv();
    Register FinalElemReg = MRI->createVirtualRegister(GR.getRegClass(I64Type));
    Register ConstIntLastIdx = GR.getOrCreateConstInt(
        ComponentCount - 1, I, BaseType, TII, ZeroAsNull);
    if (!selectOpWithSrcs(FinalElemReg, I64Type, I, {SrcReg, ConstIntLastIdx},
                          SPIRV::OpVectorExtractDynamic))
      return false;

    Register FinalElemBitSetReg =
        MRI->createVirtualRegister(GR.getRegClass(BaseType));
    if (!selectFirstBitSet64(FinalElemBitSetReg, BaseType, I, FinalElemReg,
                             BitSetOpcode, SwapPrimarySide))
      return false;

    PartialRegs.push_back(FinalElemBitSetReg);
  }

  // Join the resulting registers back into the return type in order
  // (i.e. i32x2, i32x2, i32x1 -> i32x5).
  return selectOpWithSrcs(ResVReg, ResType, I, PartialRegs,
                          SPIRV::OpCompositeConstruct);
}
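// For 64-bit sources: bitcast each i64 to two i32s, run the 32-bit lowering
// on all the halves at once, then select between the high and low results and
// add 32 when the answer came from the primary (by default high) word.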
bool SPIRVInstructionSelector::selectFirstBitSet64(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
    Register SrcReg, unsigned BitSetOpcode, bool SwapPrimarySide) const {
  unsigned ComponentCount = GR.getScalarOrVectorComponentCount(ResType);
  SPIRVType *BaseType = GR.retrieveScalarOrVectorIntType(ResType);
  bool ZeroAsNull = STI.isOpenCLEnv();
  Register ConstIntZero =
      GR.getOrCreateConstInt(0, I, BaseType, TII, ZeroAsNull);
  Register ConstIntOne =
      GR.getOrCreateConstInt(1, I, BaseType, TII, ZeroAsNull);

  // SPIR-V does not support vectors of size > 4, so split larger i64 vectors
  // and process each piece separately.
  if (ComponentCount > 2) {
    return selectFirstBitSet64Overflow(ResVReg, ResType, I, SrcReg,
                                       BitSetOpcode, SwapPrimarySide);
  }

  // 1. Split each int64 into 2 pieces using a bitcast.
  MachineIRBuilder MIRBuilder(I);
  SPIRVType *PostCastType =
      GR.getOrCreateSPIRVVectorType(BaseType, 2 * ComponentCount, MIRBuilder);
  Register BitcastReg =
      MRI->createVirtualRegister(GR.getRegClass(PostCastType));
  if (!selectOpWithSrcs(BitcastReg, PostCastType, I, {SrcReg},
                        SPIRV::OpBitcast))
    return false;

  // 2. Find the first set bit in each of the pieces from #1.
  Register FBSReg = MRI->createVirtualRegister(GR.getRegClass(PostCastType));
  if (!selectFirstBitSet32(FBSReg, PostCastType, I, BitcastReg, BitSetOpcode))
    return false;

  // 3. Split the result vector into high bits and low bits.
  Register HighReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  Register LowReg = MRI->createVirtualRegister(GR.getRegClass(ResType));

  bool IsScalarRes = ResType->getOpcode() != SPIRV::OpTypeVector;
  if (IsScalarRes) {
    // If the result is scalar, do a vector extract.
    if (!selectOpWithSrcs(HighReg, ResType, I, {FBSReg, ConstIntZero},
                          SPIRV::OpVectorExtractDynamic))
      return false;
    if (!selectOpWithSrcs(LowReg, ResType, I, {FBSReg, ConstIntOne},
                          SPIRV::OpVectorExtractDynamic))
      return false;
  } else {
    // If the result is a vector, do a shufflevector.
    auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpVectorShuffle))
                   .addDef(HighReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(FBSReg)
                   // Per the spec, repeat the vector if only one is needed.
                   .addUse(FBSReg);
    // The high bits are stored at the even indexes of FBSReg.
    for (unsigned J = 0; J < ComponentCount * 2; J += 2) {
      MIB.addImm(J);
    }
    if (!MIB.constrainAllUses(TII, TRI, RBI))
      return false;

    MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                  TII.get(SPIRV::OpVectorShuffle))
              .addDef(LowReg)
              .addUse(GR.getSPIRVTypeID(ResType))
              .addUse(FBSReg)
              .addUse(FBSReg);
    // The low bits are stored at the odd indexes of FBSReg.
    for (unsigned J = 1; J < ComponentCount * 2; J += 2) {
      MIB.addImm(J);
    }
    if (!MIB.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  // 4. When the primary bits are -1, use the secondary bits instead.
  SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
  Register NegOneReg;
  Register Reg0;
  Register Reg32;
  unsigned SelectOp;
  unsigned AddOp;
  if (IsScalarRes) {
    NegOneReg =
        GR.getOrCreateConstInt((unsigned)-1, I, ResType, TII, ZeroAsNull);
    Reg0 = GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
    Reg32 = GR.getOrCreateConstInt(32, I, ResType, TII, ZeroAsNull);
    SelectOp = SPIRV::OpSelectSISCond;
    AddOp = SPIRV::OpIAddS;
  } else {
    BoolType =
        GR.getOrCreateSPIRVVectorType(BoolType, ComponentCount, MIRBuilder);
    NegOneReg =
        GR.getOrCreateConstVector((unsigned)-1, I, ResType, TII, ZeroAsNull);
    Reg0 = GR.getOrCreateConstVector(0, I, ResType, TII, ZeroAsNull);
    Reg32 = GR.getOrCreateConstVector(32, I, ResType, TII, ZeroAsNull);
    SelectOp = SPIRV::OpSelectVIVCond;
    AddOp = SPIRV::OpIAddV;
  }

  // By default the emitted opcodes check for the set bit from the MSB side;
  // SwapPrimarySide checks from the LSB side instead.
  Register PrimaryReg = HighReg;
  Register SecondaryReg = LowReg;
  Register PrimaryShiftReg = Reg32;
  Register SecondaryShiftReg = Reg0;
  if (SwapPrimarySide) {
    PrimaryReg = LowReg;
    SecondaryReg = HighReg;
    PrimaryShiftReg = Reg0;
    SecondaryShiftReg = Reg32;
  }

  // Check if the primary bits are == -1.
  Register BReg = MRI->createVirtualRegister(GR.getRegClass(BoolType));
  if (!selectOpWithSrcs(BReg, BoolType, I, {PrimaryReg, NegOneReg},
                        SPIRV::OpIEqual))
    return false;

  // Select the secondary bits if true in BReg, otherwise the primary bits.
  Register TmpReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  if (!selectOpWithSrcs(TmpReg, ResType, I, {BReg, SecondaryReg, PrimaryReg},
                        SelectOp))
    return false;

  // 5. Add 32 when the high bits were used, and 0 for the low bits.
  Register ValReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  if (!selectOpWithSrcs(ValReg, ResType, I,
                        {BReg, SecondaryShiftReg, PrimaryShiftReg}, SelectOp))
    return false;

  return selectOpWithSrcs(ResVReg, ResType, I, {ValReg, TmpReg}, AddOp);
}
bool SPIRVInstructionSelector::selectFirstBitHigh(Register ResVReg,
                                                  const SPIRVType *ResType,
                                                  MachineInstr &I,
                                                  bool IsSigned) const {
  // The GLSL FindUMsb/FindSMsb instructions only support 32-bit integers.
  Register OpReg = I.getOperand(2).getReg();
  SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
  unsigned ExtendOpcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  unsigned BitSetOpcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;

  switch (GR.getScalarOrVectorBitWidth(OpType)) {
  case 16:
    return selectFirstBitSet16(ResVReg, ResType, I, ExtendOpcode,
                               BitSetOpcode);
  case 32:
    return selectFirstBitSet32(ResVReg, ResType, I, OpReg, BitSetOpcode);
  case 64:
    return selectFirstBitSet64(ResVReg, ResType, I, OpReg, BitSetOpcode,
                               /*SwapPrimarySide=*/false);
  default:
    report_fatal_error(
        "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
  }
}
bool SPIRVInstructionSelector::selectFirstBitLow(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // The GLSL FindILsb instruction only supports 32-bit integers. The sign of
  // the extension does not matter here, so zero-extend.
  Register OpReg = I.getOperand(2).getReg();
  SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
  unsigned ExtendOpcode = SPIRV::OpUConvert;
  unsigned BitSetOpcode = GL::FindILsb;

  switch (GR.getScalarOrVectorBitWidth(OpType)) {
  case 16:
    return selectFirstBitSet16(ResVReg, ResType, I, ExtendOpcode,
                               BitSetOpcode);
  case 32:
    return selectFirstBitSet32(ResVReg, ResType, I, OpReg, BitSetOpcode);
  case 64:
    return selectFirstBitSet64(ResVReg, ResType, I, OpReg, BitSetOpcode,
                               /*SwapPrimarySide=*/true);
  default:
    report_fatal_error("spv_firstbitlow only supports 16,32,64 bits.");
  }
}
bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // There was an allocation-size parameter to the alloca that is not 1.
  MachineBasicBlock &BB = *I.getParent();
  bool Res = BuildMI(BB, I, I.getDebugLoc(),
                     TII.get(SPIRV::OpVariableLengthArrayINTEL))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(2).getReg())
                 .constrainAllUses(TII, TRI, RBI);
  if (!STI.isVulkanEnv()) {
    unsigned Alignment = I.getOperand(3).getImm();
    buildOpDecorate(ResVReg, I, TII, SPIRV::Decoration::Alignment,
                    {Alignment});
  }
  return Res;
}
bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  // All OpVariable instructions in a function must be the first instructions
  // in the first block, so reposition the insertion point if needed.
  auto It = getOpVariableMBBIt(I);
  bool Res = BuildMI(*It->getParent(), It, It->getDebugLoc(),
                     TII.get(SPIRV::OpVariable))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
                 .constrainAllUses(TII, TRI, RBI);
  if (!STI.isVulkanEnv()) {
    unsigned Alignment = I.getOperand(2).getImm();
    buildOpDecorate(ResVReg, *It, TII, SPIRV::Decoration::Alignment,
                    {Alignment});
  }
  return Res;
}
bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so we can generate the OpBranchConditional here if the previous
  // instruction was a G_BRCOND.
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(),
                   TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // For an explicit conditional branch with no fallthrough, both a G_BR and a
  // G_BRCOND are used, and the OpBranchConditional was already generated in
  // selectBranch above; in that case simply return. Otherwise LLVM is relying
  // on implicit fallthrough to the next block, so create an
  // OpBranchConditional whose "false" target is the block LLVM would fall
  // through to.
  const MachineInstr *NextI = I.getNextNode();
  // Check if this has already been successfully selected.
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;

  MachineBasicBlock &MBB = *I.getParent();
  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}
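// G_PHI is emitted as OpPhi with (value, predecessor block) operand pairs.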
bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  const unsigned NumOps = I.getNumOperands();
  for (unsigned i = 1; i < NumOps; i += 2) {
    MIB.addUse(I.getOperand(i + 0).getReg());
    MIB.addMBB(I.getOperand(i + 1).getMBB());
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectGlobalValue(
    Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
  // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
  MachineIRBuilder MIRBuilder(I);
  const GlobalValue *GV = I.getOperand(1).getGlobal();
  Type *GVType = toTypedPointer(GR.getDeducedGlobalValueType(GV));
  SPIRVType *PointerBaseType;
  if (GVType->isArrayTy()) {
    SPIRVType *ArrayElementType =
        GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
                                SPIRV::AccessQualifier::ReadWrite, false);
    PointerBaseType = GR.getOrCreateSPIRVArrayType(
        ArrayElementType, GVType->getArrayNumElements(), I, TII);
  } else {
    PointerBaseType = GR.getOrCreateSPIRVType(
        GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  }

  std::string GlobalIdent;
  if (!GV->hasName()) {
    unsigned &ID = UnnamedGlobalIDs[GV];
    if (ID == 0)
      ID = UnnamedGlobalIDs.size();
    GlobalIdent = "__unnamed_" + Twine(ID).str();
  } else {
    GlobalIdent = GV->getGlobalIdentifier();
  }

  if (isa<Function>(GV)) {
    // A function used as an operand becomes OpConstantFunctionPointerINTEL
    // when SPV_INTEL_function_pointers is available, and OpConstantNull
    // otherwise.
    const Constant *ConstVal = GV;
    MachineBasicBlock &BB = *I.getParent();
    Register NewReg = GR.find(ConstVal, GR.CurMF);
    if (!NewReg.isValid()) {
      Register NewReg = ResVReg;
      GR.add(ConstVal, GR.CurMF, NewReg);
      const Function *GVFun =
          STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
              ? dyn_cast<Function>(GV)
              : nullptr;
      SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
          PointerBaseType, I, TII,
          GVFun ? SPIRV::StorageClass::CodeSectionINTEL
                : addressSpaceToStorageClass(GV->getAddressSpace(), STI));
      if (GVFun) {
        // References via function pointers generate a placeholder register
        // that is resolved later, during module analysis.
        Register ResTypeReg = GR.getSPIRVTypeID(ResType);
        Register FuncVReg =
            MRI->createGenericVirtualRegister(GR.getRegType(ResType));
        MRI->setRegClass(FuncVReg, &SPIRV::pIDRegClass);
        MachineInstrBuilder MIB1 =
            BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
                .addDef(FuncVReg)
                .addUse(ResTypeReg);
        MachineInstrBuilder MIB2 =
            BuildMI(BB, I, I.getDebugLoc(),
                    TII.get(SPIRV::OpConstantFunctionPointerINTEL))
                .addDef(NewReg)
                .addUse(ResTypeReg)
                .addUse(FuncVReg);
        return MIB1.constrainAllUses(TII, TRI, RBI) &&
               MIB2.constrainAllUses(TII, TRI, RBI);
      }
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
          .addDef(NewReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .constrainAllUses(TII, TRI, RBI);
    }
    assert(NewReg != ResVReg);
    return BuildCOPY(ResVReg, NewReg, I);
  }

  auto GlobalVar = cast<GlobalVariable>(GV);
  // Skip the empty declaration of a global that has an initializer until the
  // declaration that carries the initializer arrives.
  if (hasInitializer(GlobalVar) && !Init)
    return true;

  bool HasLnkTy = !GV->hasInternalLinkage() && !GV->hasPrivateLinkage();
  SPIRV::LinkageType::LinkageType LnkType =
      GV->isDeclarationForLinker()
          ? SPIRV::LinkageType::Import
          : (GV->hasLinkOnceODRLinkage() &&
                     STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
                 ? SPIRV::LinkageType::LinkOnceODR
                 : SPIRV::LinkageType::Export);

  SPIRV::StorageClass::StorageClass StorageClass =
      addressSpaceToStorageClass(GV->getAddressSpace(), STI);
  SPIRVType *ResType =
      GR.getOrCreateSPIRVPointerType(PointerBaseType, I, TII, StorageClass);
  Register Reg = GR.buildGlobalVariable(
      ResVReg, ResType, GlobalIdent, GV, StorageClass, Init,
      GlobalVar->isConstant(), HasLnkTy, LnkType, MIRBuilder, true);
  return Reg.isValid();
}
bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
    return selectExtInst(ResVReg, ResType, I, CL::log10);
  }

  // There is no log10 instruction in the GLSL Extended Instruction set, so it
  // is implemented as: log10(x) = log2(x) * (1 / log2(10)) = log2(x) * 0.30103
  MachineIRBuilder MIRBuilder(I);
  MachineBasicBlock &BB = *I.getParent();

  // Build log2(x).
  Register VarReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  bool Result =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
          .addDef(VarReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
          .addImm(GL::Log2)
          .add(I.getOperand(1))
          .constrainAllUses(TII, TRI, RBI);

  // Build the 0.30103 scale factor.
  assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
         ResType->getOpcode() == SPIRV::OpTypeFloat);
  const SPIRVType *SpirvScalarType =
      ResType->getOpcode() == SPIRV::OpTypeVector
          ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
          : ResType;
  Register ScaleReg =
      GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);

  // Multiply log2(x) by 0.30103 to get the log10(x) result.
  auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
                    ? SPIRV::OpVectorTimesScalar
                    : SPIRV::OpFMulS;
  return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                       .addDef(ResVReg)
                       .addUse(GR.getSPIRVTypeID(ResType))
                       .addUse(VarReg)
                       .addUse(ScaleReg)
                       .constrainAllUses(TII, TRI, RBI);
}
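// Builds the Input-storage-class builtin variable (GlobalInvocationId and
// friends), loads the v3uint value from it, and extracts the component
// requested by the intrinsic's index operand.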
bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
    SPIRV::BuiltIn::BuiltIn BuiltInValue, Register ResVReg,
    const SPIRVType *ResType, MachineInstr &I) const {
  MachineIRBuilder MIRBuilder(I);
  const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
  const SPIRVType *Vec3Ty =
      GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
  const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
      Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);

  // Create a register for the builtin input variable.
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::iIDRegClass);
  MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 64));
  GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Build the global variable with the necessary decorations for the builtin.
  Register Variable = GR.buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltInValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, true, true,
      SPIRV::LinkageType::Import, MIRBuilder, false);

  // Create a register for the loaded value.
  Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLT::fixed_vector(3, 32));
  GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());

  // Load the v3uint value from the global variable.
  bool Result =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
          .addDef(LoadedRegister)
          .addUse(GR.getSPIRVTypeID(Vec3Ty))
          .addUse(Variable)
          .constrainAllUses(TII, TRI, RBI);

  // The requested component index must be a constant immediate.
  assert(I.getOperand(2).isReg());
  const uint32_t ThreadId = foldImm(I.getOperand(2), MRI);

  // Extract the input ID from the loaded vector value.
  MachineBasicBlock &BB = *I.getParent();
  auto MIB =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(LoadedRegister)
          .addImm(ThreadId);
  return Result && MIB.constrainAllUses(TII, TRI, RBI);
}
SPIRVType *SPIRVInstructionSelector::widenTypeToVec4(const SPIRVType *Type,
                                                     MachineInstr &I) const {
  MachineIRBuilder MIRBuilder(I);
  if (Type->getOpcode() != SPIRV::OpTypeVector)
    return GR.getOrCreateSPIRVVectorType(Type, 4, MIRBuilder);

  uint64_t VectorSize = Type->getOperand(2).getImm();
  if (VectorSize == 4)
    return const_cast<SPIRVType *>(Type);

  Register ScalarTypeReg = Type->getOperand(1).getReg();
  const SPIRVType *ScalarType = GR.getSPIRVTypeForVReg(ScalarTypeReg);
  return GR.getOrCreateSPIRVVectorType(ScalarType, 4, MIRBuilder);
}
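// Re-creates a resource handle right before Pos: finds or builds the
// binding's global variable (buildPointerToResource) and emits an OpLoad of
// it at the use site.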
bool SPIRVInstructionSelector::loadHandleBeforePosition(
    Register &HandleReg, const SPIRVType *ResType, GIntrinsic &HandleDef,
    MachineInstr &Pos) const {
  assert(HandleDef.getIntrinsicID() ==
         Intrinsic::spv_resource_handlefrombinding);
  // Set/Binding/ArraySize/IndexReg/IsNonUniform are decoded from HandleDef's
  // operands (decoding elided in this excerpt).
  Register VarReg = buildPointerToResource(ResType, Set, Binding, ArraySize,
                                           IndexReg, IsNonUniform, MIRBuilder);
  return BuildMI(*Pos.getParent(), Pos, HandleDef.getDebugLoc(),
                 TII.get(SPIRV::OpLoad))
      .addDef(HandleReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(VarReg)
      .constrainAllUses(TII, TRI, RBI);
}

InstructionSelector *llvm::createSPIRVInstructionSelector(
    const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget,
    const RegisterBankInfo &RBI) {
  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
}