#include "llvm/IR/IntrinsicsSPIRV.h"

#define DEBUG_TYPE "spirv-isel"

namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

llvm::SPIRV::SelectionControl::SelectionControl
getSelectionOperandForImm(int Imm) {
  if (Imm == 2)
    return SPIRV::SelectionControl::Flatten;
  if (Imm == 1)
    return SPIRV::SelectionControl::DontFlatten;
  if (Imm == 0)
    return SPIRV::SelectionControl::None;
  llvm_unreachable("Invalid immediate");
}

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
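// Note: the TableGen'erated file SPIRVGenGlobalISel.inc is deliberately
// included several times; each GET_GLOBALISEL_* guard expands a different
// slice of the generated selector (predicate bitset, predicate declarations,
// temporaries, and later the implementation and initializers). A minimal
// sketch of the idiom:
//
//   #define GET_GLOBALISEL_PREDICATES_DECL   // request one slice
//   #include "SPIRVGenGlobalISel.inc"        // expands only that slice
//   #undef GET_GLOBALISEL_PREDICATES_DECL    // reset before the next include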
  bool selectFirstBitSet16(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I, unsigned ExtendOpcode,
                           unsigned BitSetOpcode) const;
  bool selectFirstBitSet32(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I, Register SrcReg,
                           unsigned BitSetOpcode) const;
  bool selectFirstBitSet64(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I, Register SrcReg,
                           unsigned BitSetOpcode, bool SwapPrimarySide) const;
  bool selectFirstBitSet64Overflow(Register ResVReg, const SPIRVType *ResType,
                                   MachineInstr &I, Register SrcReg,
                                   unsigned BitSetOpcode,
                                   bool SwapPrimarySide) const;

  bool selectOpWithSrcs(Register ResVReg, const SPIRVType *ResType,
                        MachineInstr &I, std::vector<Register> Srcs,
                        unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  template <bool Signed>
  bool selectDot4AddPacked(Register ResVReg, const SPIRVType *ResType,
                           MachineInstr &I) const;
  template <bool Signed>
  bool selectDot4AddPackedExpansion(Register ResVReg, const SPIRVType *ResType,
                                    MachineInstr &I) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I, bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;
  bool selectSUCmp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                   bool IsSigned) const;

  [[maybe_unused]] bool selectExtInst(Register ResVReg,
                                      const SPIRVType *ResType,
                                      MachineInstr &I,
                                      GL::GLSLExtInst GLInst) const;
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;

  std::pair<Register, bool>
  buildI32Constant(uint32_t Val, MachineInstr &I,
                   const SPIRVType *ResType = nullptr) const;

  Register getUcharPtrTypeReg(MachineInstr &I,
                              SPIRV::StorageClass::StorageClass SC) const;

  Register buildPointerToResource(const SPIRVType *ResType, uint32_t Set,
                                  uint32_t Binding, uint32_t ArraySize,
                                  Register IndexReg, bool IsNonUniform,
                                  MachineIRBuilder MIRBuilder) const;
  bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
                              Register ResVReg, const SPIRVType *ResType,
                              MachineInstr &I) const;
#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(
    const SPIRVTargetMachine &TM, const SPIRVSubtarget &ST,
    const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
      MRI(nullptr),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}
void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
  if (HasVRegsReset == &MF)
    return;
  HasVRegsReset = &MF;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    LLT RegType = MRI.getType(Reg);
    // ... (normalize RegType to the canonical scalar/pointer/vector LLT) ...
  }
  for (const auto &MBB : MF) {
    for (const auto &MI : MBB) {
      if (MI.getOpcode() != SPIRV::ASSIGN_TYPE)
        continue;
      Register DstReg = MI.getOperand(0).getReg();
      LLT DstType = MRI.getType(DstReg);
      Register SrcReg = MI.getOperand(1).getReg();
      LLT SrcType = MRI.getType(SrcReg);
      if (DstType != SrcType)
        MRI.setType(DstReg, MRI.getType(SrcReg));

      const TargetRegisterClass *DstRC = MRI.getRegClassOrNull(DstReg);
      const TargetRegisterClass *SrcRC = MRI.getRegClassOrNull(SrcReg);
      if (DstRC != SrcRC && SrcRC)
        MRI.setRegClass(DstReg, SrcRC);
    }
  }
}
static bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI) {
  for (const auto &MO : MI.all_defs()) {
    Register Reg = MO.getReg();
    if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE || MI.isFakeUse() ||
      MI.isLifetimeMarker())
    return false;
  if (MI.mayStore() || MI.isCall() ||
      (MI.mayLoad() && MI.hasOrderedMemoryRef()) || MI.isPosition() ||
      MI.isDebugInstr() || MI.isTerminator() || MI.isJumpTableDebugInfo())
    return false;
  return true;
}
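// Deadness here is deliberately conservative: an instruction is only treated
// as dead when every def is an unused virtual register *and* nothing the
// module could observe would change by dropping it. For example, an add whose
// result has no uses is removable, while a store, a call, or a load with an
// ordered memory reference never is. (This framing is a reading of the checks
// above, not an additional guarantee.)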
bool SPIRVInstructionSelector::select(MachineInstr &I) {
  resetVRegsType(*I.getParent()->getParent());

  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  const unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed anymore.
      Register DstReg = I.getOperand(0).getReg();
      Register SrcReg = I.getOperand(1).getReg();
      auto *Def = MRI->getVRegDef(SrcReg);
      if (isTypeFoldingSupported(Def->getOpcode())) {
        bool Res = selectImpl(I, *CoverageInfo);
        LLVM_DEBUG({
          if (!Res && Def->getOpcode() != TargetOpcode::G_CONSTANT) {
            dbgs() << "Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
            I.print(dbgs());
          }
        });
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->setRegClass(SrcReg, MRI->getRegClass(DstReg));
      MRI->replaceRegWith(SrcReg, DstReg);
      GR.invalidateMachineInstr(&I);
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // ... (normalize the def's LLT) ...
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (DeadMIs.contains(&I)) {
    // If the instruction has already been made dead by folding, erase it.
    salvageDebugInfo(*MRI, I);
    GR.invalidateMachineInstr(&I);
    I.eraseFromParent();
    return true;
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 64 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(64));
    GR.invalidateMachineInstr(&I);
    I.removeFromParent();
    return true;
  }
  return false;
}
static bool mayApplyGenericSelection(unsigned Opcode) {
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return false;
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_SSUBO:
    return true;
  }
  return isTypeFoldingSupported(Opcode);
}

bool SPIRVInstructionSelector::BuildCOPY(Register DestReg, Register SrcReg,
                                         MachineInstr &I) const {
  const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(DestReg);
  const TargetRegisterClass *SrcRC = MRI->getRegClassOrNull(SrcReg);
  if (DstRC != SrcRC && SrcRC)
    MRI->setRegClass(DestReg, SrcRC);
  return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::COPY))
      .addDef(DestReg)
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}
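// BuildCOPY funnels all plain register moves through one place so that the
// source register class can be propagated to the destination before the COPY
// is emitted; otherwise constrainAllUses() could fail on mismatched classes.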
bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (mayApplyGenericSelection(Opcode))
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectBuildVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_SCMP:
    return selectSUCmp(ResVReg, ResType, I, true);
  case TargetOpcode::G_UCMP:
    return selectSUCmp(ResVReg, ResType, I, false);

  case TargetOpcode::G_STRICT_FMA:
  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_STRICT_FLDEXP:
    return selectExtInst(ResVReg, ResType, I, CL::ldexp);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);
  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);
  case TargetOpcode::G_FTAN:
    return selectExtInst(ResVReg, ResType, I, CL::tan, GL::Tan);
  case TargetOpcode::G_FACOS:
    return selectExtInst(ResVReg, ResType, I, CL::acos, GL::Acos);
  case TargetOpcode::G_FASIN:
    return selectExtInst(ResVReg, ResType, I, CL::asin, GL::Asin);
  case TargetOpcode::G_FATAN:
    return selectExtInst(ResVReg, ResType, I, CL::atan, GL::Atan);
  case TargetOpcode::G_FATAN2:
    return selectExtInst(ResVReg, ResType, I, CL::atan2, GL::Atan2);
  case TargetOpcode::G_FCOSH:
    return selectExtInst(ResVReg, ResType, I, CL::cosh, GL::Cosh);
  case TargetOpcode::G_FSINH:
    return selectExtInst(ResVReg, ResType, I, CL::sinh, GL::Sinh);
  case TargetOpcode::G_FTANH:
    return selectExtInst(ResVReg, ResType, I, CL::tanh, GL::Tanh);

  case TargetOpcode::G_STRICT_FSQRT:
  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_add_sat);
  case TargetOpcode::G_UADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_add_sat);
  case TargetOpcode::G_SSUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_sub_sat);
  case TargetOpcode::G_USUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);

  case TargetOpcode::G_UADDO:
    return selectOverflowArith(ResVReg, ResType, I,
                               ResType->getOpcode() == SPIRV::OpTypeVector
                                   ? SPIRV::OpIAddCarryV
                                   : SPIRV::OpIAddCarryS);
  case TargetOpcode::G_USUBO:
    return selectOverflowArith(ResVReg, ResType, I,
                               ResType->getOpcode() == SPIRV::OpTypeVector
                                   ? SPIRV::OpISubBorrowV
                                   : SPIRV::OpISubBorrowS);
  case TargetOpcode::G_UMULO:
    return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpUMulExtended);
  case TargetOpcode::G_SMULO:
    return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpSMulExtended);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);
  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only applied to global variables.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    (void)II;
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    // It may be the initialization of a global variable.
    bool IsGVInit = false;
    for (MachineRegisterInfo::use_instr_iterator
             UseIt = MRI->use_instr_begin(I.getOperand(0).getReg()),
             UseEnd = MRI->use_instr_end();
         UseIt != UseEnd; UseIt = std::next(UseIt)) {
      if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          (*UseIt).getOpcode() == SPIRV::OpVariable) {
        IsGVInit = true;
        break;
      }
    }
    MachineBasicBlock &BB = *I.getParent();
    if (!IsGVInit) {
      SPIRVType *GVType = GR.getSPIRVTypeForVReg(GV);
      SPIRVType *GVPointeeType = GR.getPointeeType(GVType);
      SPIRVType *ResPointeeType = GR.getPointeeType(ResType);
      if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
        // Build a new virtual register that is associated with the required
        // data type, and bitcast the global into it first.
        Register NewVReg = MRI->createGenericVirtualRegister(MRI->getType(GV));
        MRI->setRegClass(NewVReg, MRI->getRegClass(GV));
        if (!GR.isBitcastCompatible(ResType, GVType))
          report_fatal_error(
              "incompatible result and operand types in a bitcast");
        Register ResTypeReg = GR.getSPIRVTypeID(ResType);
        MachineInstrBuilder MIB =
            BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitcast))
                .addDef(NewVReg)
                .addUse(ResTypeReg)
                .addUse(GV);
        return MIB.constrainAllUses(TII, TRI, RBI) &&
               BuildMI(BB, I, I.getDebugLoc(),
                       TII.get(STI.isVulkanEnv()
                                   ? SPIRV::OpInBoundsAccessChain
                                   : SPIRV::OpInBoundsPtrAccessChain))
                   .addDef(ResVReg)
                   .addUse(ResTypeReg)
                   .addUse(NewVReg)
                   .addUse(I.getOperand(2).getReg())
                   .constrainAllUses(TII, TRI, RBI);
      }
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addImm(static_cast<uint32_t>(SPIRV::Opcode::InBoundsPtrAccessChain))
          .addUse(GV)
          .addUse(I.getOperand(2).getReg())
          .constrainAllUses(TII, TRI, RBI);
    }
    // It is the initialization of a global variable: fold the pointer
    // arithmetic into an OpSpecConstantOp with a leading zero index.
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
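  // Note: a G_PTR_ADD folded into a global-variable initializer has no
  // "current block" to execute an access chain in, so the result must be a
  // constant expression. OpSpecConstantOp provides exactly that; e.g. a GEP to
  // element 3 of a global array can be emitted roughly as
  //   %ptr = OpSpecConstantOp %ptr_ty InBoundsPtrAccessChain %gv %zero %three
  // (illustrative SPIR-V; the id names are hypothetical).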
  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with a negated value.
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  // Discard gen opcodes for intrinsics which we do not expect to actually
  // represent code after lowering or which are not implemented but should not
  // crash when found in a customer's LLVM IR input.
  case TargetOpcode::G_TRAP:
  case TargetOpcode::G_DEBUGTRAP:
  case TargetOpcode::G_UBSANTRAP:
  case TargetOpcode::DBG_LABEL:
    return true;

  default:
    return false;
  }
}
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             GL::GLSLExtInst GLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::GLSL_std_450, GLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      unsigned Index = 1;
      if (Index < NumOps &&
          I.getOperand(Index).getType() ==
              MachineOperand::MachineOperandType::MO_IntrinsicID)
        Index = 2;
      for (; Index < NumOps; ++Index)
        MIB.add(I.getOperand(Index));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}
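// All of the overloads above funnel into the list form, which picks the first
// extended-instruction set the target can actually use. For G_FSQRT on a
// GLSL-flavoured target the emitted instruction is, schematically,
//   %res = OpExtInst %f32 %glsl_std_450 Sqrt %x
// while an OpenCL target would instead get the OpenCL_std `sqrt`.
// (Illustrative disassembly; the ids are hypothetical.)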
bool SPIRVInstructionSelector::selectOpWithSrcs(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I,
                                                std::vector<Register> Srcs,
                                                unsigned Opcode) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (Register SReg : Srcs)
    MIB.addUse(SReg);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  if (STI.isOpenCLEnv() && I.getOperand(1).isReg()) {
    Register SrcReg = I.getOperand(1).getReg();
    bool IsGV = false;
    for (MachineRegisterInfo::def_instr_iterator DefIt =
             MRI->def_instr_begin(SrcReg);
         DefIt != MRI->def_instr_end(); DefIt = std::next(DefIt)) {
      if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
        IsGV = true;
        break;
      }
    }
    if (IsGV) {
      uint32_t SpecOpcode = 0;
      switch (Opcode) {
      case SPIRV::OpConvertPtrToU:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
        break;
      case SPIRV::OpConvertUToPtr:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
        break;
      }
      if (SpecOpcode)
        return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(SpecOpcode)
            .addUse(SrcReg)
            .constrainAllUses(TII, TRI, RBI);
    }
  }
  return selectOpWithSrcs(ResVReg, ResType, I, {I.getOperand(1).getReg()},
                          Opcode);
}
bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType =
      OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}
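// The two addMemoryOperands() overloads translate LLVM-side memory attributes
// into SPIR-V's optional memory-operand mask. For example, a volatile,
// 4-byte-aligned MachineMemOperand on a load becomes, in textual SPIR-V,
//   %v = OpLoad %ty %ptr Volatile|Aligned 4
// Only flags with a direct SPIR-V counterpart are mapped; everything else is
// dropped (a sketch of the mapping, not an exhaustive contract).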
bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();

  auto *PtrDef = getVRegDef(*MRI, Ptr);
  auto *IntPtrDef = dyn_cast<GIntrinsic>(PtrDef);
  if (IntPtrDef &&
      IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
    Register ImageReg = IntPtrDef->getOperand(2).getReg();
    Register NewImageReg =
        MRI->createVirtualRegister(MRI->getRegClass(ImageReg));
    auto *ImageDef = cast<GIntrinsic>(getVRegDef(*MRI, ImageReg));
    if (!loadHandleBeforePosition(NewImageReg, GR.getSPIRVTypeForVReg(ImageReg),
                                  *ImageDef, I)) {
      return false;
    }
    Register IdxReg = IntPtrDef->getOperand(3).getReg();
    return generateImageRead(ResVReg, ResType, NewImageReg, IdxReg,
                             I.getDebugLoc(), I);
  }

  auto MIB =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();

  auto *PtrDef = getVRegDef(*MRI, Ptr);
  auto *IntPtrDef = dyn_cast<GIntrinsic>(PtrDef);
  if (IntPtrDef &&
      IntPtrDef->getIntrinsicID() == Intrinsic::spv_resource_getpointer) {
    Register ImageReg = IntPtrDef->getOperand(2).getReg();
    Register NewImageReg =
        MRI->createVirtualRegister(MRI->getRegClass(ImageReg));
    auto *ImageDef = cast<GIntrinsic>(getVRegDef(*MRI, ImageReg));
    if (!loadHandleBeforePosition(NewImageReg, GR.getSPIRVTypeForVReg(ImageReg),
                                  *ImageDef, I)) {
      return false;
    }
    Register IdxReg = IntPtrDef->getOperand(3).getReg();
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(SPIRV::OpImageWrite))
        .addUse(NewImageReg)
        .addUse(IdxReg)
        .addUse(StoreVal)
        .constrainAllUses(TII, TRI, RBI);
  }

  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  bool Result = true;
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConstIntArray(Val, Num, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // Create an internal LLVM global holding the fill pattern; the module
    // takes ownership of it.
    // ... (global-variable and VarReg creation elided) ...
    GR.add(GV, GR.CurMF, VarReg);
    GR.addGlobalObject(GV, GR.CurMF, VarReg);

    Result &=
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
            .addDef(VarReg)
            .addUse(GR.getSPIRVTypeID(VarTy))
            .addImm(SPIRV::StorageClass::UniformConstant)
            .addUse(Const)
            .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
    selectOpWithSrcs(SrcReg, SourceTy, I, {VarReg}, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  Result &= MIB.constrainAllUses(TII, TRI, RBI);
  return Result;
}
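// G_MEMSET is lowered without a dedicated SPIR-V opcode: the fill pattern is
// materialized as a UniformConstant array variable, and the whole operation
// becomes OpCopyMemorySized from that variable, schematically:
//   %src = OpVariable %ptr_arr UniformConstant %pattern
//   OpCopyMemorySized %dst %src8 %len
// where %src8 is the i8* bitcast built just above. (Illustrative SPIR-V; the
// id names are hypothetical.)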
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  bool Result = true;
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope = static_cast<uint32_t>(getMemScope(
      GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
  auto ScopeConstant = buildI32Constant(Scope, I);
  Register ScopeReg = ScopeConstant.first;
  Result &= ScopeConstant.second;

  Register Ptr = I.getOperand(1).getReg();
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  auto MemSemConstant = buildI32Constant(MemSem, I);
  Register MemSemReg = MemSemConstant.first;
  Result &= MemSemConstant.second;

  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with a negated value operand was requested.
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    Result &= selectOpWithSrcs(TmpReg, ResType, I, {ValueReg}, NegateOpcode);
    ValueReg = TmpReg;
  }

  return Result &&
         BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
             .addDef(ResVReg)
             .addUse(GR.getSPIRVTypeID(ResType))
             .addUse(Ptr)
             .addUse(ScopeReg)
             .addUse(MemSemReg)
             .addUse(ValueReg)
             .constrainAllUses(TII, TRI, RBI);
}
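// Scope and memory-semantics operands of SPIR-V atomics are ids, not literal
// immediates, which is why buildI32Constant() is used above. A scalar
// G_ATOMICRMW_ADD therefore becomes, roughly,
//   %old = OpAtomicIAdd %i32 %ptr %scope %semantics %value
// with %scope and %semantics being the i32 constants created here.
// (A sketch of the textual form; the ids are hypothetical.)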
bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions; fix this now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
      MRI->setType(ResVReg,
                   LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}
bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  auto MemSemConstant = buildI32Constant(MemSem, I);
  Register MemSemReg = MemSemConstant.first;
  bool Result = MemSemConstant.second;
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(
      getMemScope(GR.CurMF->getFunction().getContext(), Ord));
  auto ScopeConstant = buildI32Constant(Scope, I);
  Register ScopeReg = ScopeConstant.first;
  Result &= ScopeConstant.second;
  MachineBasicBlock &BB = *I.getParent();
  return Result &&
         BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
             .addUse(ScopeReg)
             .addUse(MemSemReg)
             .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I,
                                                   unsigned Opcode) const {
  Type *ResTy = nullptr;
  StringRef ResName;
  if (!GR.findValueAttrs(&I, ResTy, ResName))
    report_fatal_error(
        "Not enough info to select the arithmetic with overflow instruction");
  if (!ResTy || !ResTy->isStructTy())
    report_fatal_error("Expect struct type result for the arithmetic "
                       "with overflow instruction");
  // "Result Type must be from OpTypeStruct. The struct must have two members,
  // and the two members must be the same type."
  Type *ResElemTy = cast<StructType>(ResTy)->getElementType(0);
  ResTy = StructType::get(ResElemTy, ResElemTy);
  // Build SPIR-V types and the constant(s) if needed.
  MachineIRBuilder MIRBuilder(I);
  SPIRVType *StructType = GR.getOrCreateSPIRVType(
      ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  assert(I.getNumDefs() > 1 && "Not enough operands");
  SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
  unsigned N = GR.getScalarOrVectorComponentCount(ResType);
  if (N > 1)
    BoolType = GR.getOrCreateSPIRVVectorType(BoolType, N, I, TII);
  Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);
  Register ZeroReg = buildZerosVal(ResType, I);
  // A new virtual register to store the result struct.
  Register StructVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
  MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
  // Name the result struct if needed.
  if (ResName.size() > 0)
    buildOpName(StructVReg, ResName, MIRBuilder);
  // Build the arithmetic-with-overflow instruction.
  MachineBasicBlock &BB = *I.getParent();
  auto MIB =
      BuildMI(BB, MIRBuilder.getInsertPt(), I.getDebugLoc(), TII.get(Opcode))
          .addDef(StructVReg)
          .addUse(GR.getSPIRVTypeID(StructType));
  for (unsigned i = I.getNumDefs(); i < I.getNumOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  // Extract the result and the carry/overflow member.
  Register HigherVReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
  MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(i == 1 ? HigherVReg : I.getOperand(i).getReg())
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(StructVReg)
            .addImm(i);
    Result &= MIB.constrainAllUses(TII, TRI, RBI);
  }
  // The overflow flag is the comparison of the high member against zero.
  return Result &&
         BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
             .addDef(I.getOperand(1).getReg())
             .addUse(BoolTypeReg)
             .addUse(HigherVReg)
             .addUse(ZeroReg)
             .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  bool Result = true;
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope = static_cast<uint32_t>(getMemScope(
        GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
    auto ScopeConstant = buildI32Constant(Scope, I);
    ScopeReg = ScopeConstant.first;
    Result &= ScopeConstant.second;

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    auto MemSemEqConstant = buildI32Constant(MemSemEq, I);
    MemSemEqReg = MemSemEqConstant.first;
    Result &= MemSemEqConstant.second;
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    if (MemSemEq == MemSemNeq)
      MemSemNeqReg = MemSemEqReg;
    else {
      auto MemSemNeqConstant = buildI32Constant(MemSemNeq, I);
      MemSemNeqReg = MemSemNeqConstant.first;
      Result &= MemSemNeqConstant.second;
    }
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  Result &=
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  Result &= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  return Result &&
         BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
             .addDef(ResVReg)
             .addUse(GR.getSPIRVTypeID(ResType))
             .addUse(CmpSuccReg)
             .addUse(TmpReg)
             .addImm(1)
             .constrainAllUses(TII, TRI, RBI);
}
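// G_ATOMIC_CMPXCHG produces a {value, success} pair, but SPIR-V's
// OpAtomicCompareExchange only returns the old value. The success flag is
// therefore recomputed with OpIEqual against the expected value, and both
// pieces are packed into the result struct via two OpCompositeInsert
// instructions, starting from an OpUndef of the struct type.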
static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}

// Returns true if ResVReg is referred to only from global vars and OpName's.
static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg) {
  bool IsGRef = false;
  bool IsAllowedRefs =
      std::all_of(MRI->use_instr_begin(ResVReg), MRI->use_instr_end(),
                  [&IsGRef](auto const &It) {
                    unsigned Opcode = It.getOpcode();
                    if (Opcode == SPIRV::OpConstantComposite ||
                        Opcode == SPIRV::OpVariable ||
                        isSpvIntrinsic(It, Intrinsic::spv_init_global))
                      return IsGRef = true;
                    return Opcode == SPIRV::OpName;
                  });
  return IsAllowedRefs && IsGRef;
}
Register SPIRVInstructionSelector::getUcharPtrTypeReg(
    MachineInstr &I, SPIRV::StorageClass::StorageClass SC) const {
  return GR.getSPIRVTypeID(GR.getOrCreateSPIRVPointerType(
      GR.getOrCreateSPIRVIntegerType(8, I, TII), I, TII, SC));
}

MachineInstrBuilder SPIRVInstructionSelector::buildSpecConstantOp(
    MachineInstr &I, Register Dest, Register Src, Register DestType,
    uint32_t Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(SPIRV::OpSpecConstantOp))
      .addDef(Dest)
      .addUse(DestType)
      .addImm(Opcode)
      .addUse(Src);
}

MachineInstrBuilder SPIRVInstructionSelector::buildConstGenericPtr(
    MachineInstr &I, Register SrcPtr, SPIRVType *SrcPtrTy) const {
  SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
      GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
  Register Tmp = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
  MRI->setType(Tmp,
               LLT::pointer(storageClassToAddressSpace(
                                SPIRV::StorageClass::Generic),
                            GR.getPointerSize()));
  MachineFunction *MF = I.getParent()->getParent();
  GR.assignSPIRVTypeToVReg(GenericPtrTy, Tmp, *MF);
  return buildSpecConstantOp(
      I, Tmp, SrcPtr, GR.getSPIRVTypeID(GenericPtrTy),
      static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric));
}
// In SPIR-V address space casting can only happen to and from the Generic
// storage class. We can also only cast Workgroup, CrossWorkgroup, or Function
// pointers to and from Generic pointers.
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);

  // Don't generate a cast for a null that may be represented by OpTypeInt.
  if (SrcPtrTy->getOpcode() != SPIRV::OpTypePointer ||
      ResType->getOpcode() != SPIRV::OpTypePointer)
    return BuildCOPY(ResVReg, SrcPtr, I);

  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtrTy);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResType);

  if (isASCastInGVar(MRI, ResVReg)) {
    // AddrSpaceCast uses within OpVariable and OpConstantComposite
    // instructions are expressed with OpSpecConstantOp.
    unsigned SpecOpcode =
        DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC)
            ? static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric)
            : (SrcSC == SPIRV::StorageClass::Generic &&
                       isGenericCastablePtr(DstSC)
                   ? static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr)
                   : 0);
    if (SpecOpcode)
      return buildSpecConstantOp(I, ResVReg, SrcPtr,
                                 getUcharPtrTypeReg(I, DstSC), SpecOpcode)
          .constrainAllUses(TII, TRI, RBI);
    if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
      MachineInstrBuilder MIB = buildConstGenericPtr(I, SrcPtr, SrcPtrTy);
      return MIB.constrainAllUses(TII, TRI, RBI) &&
             buildSpecConstantOp(
                 I, ResVReg, MIB->getOperand(0).getReg(),
                 getUcharPtrTypeReg(I, DstSC),
                 static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr))
                 .constrainAllUses(TII, TRI, RBI);
    }
  }

  // Don't generate a cast between identical storage classes.
  if (SrcSC == DstSC)
    return BuildCOPY(ResVReg, SrcPtr, I);

  if ((SrcSC == SPIRV::StorageClass::Function &&
       DstSC == SPIRV::StorageClass::Private) ||
      (DstSC == SPIRV::StorageClass::Function &&
       SrcSC == SPIRV::StorageClass::Private))
    return BuildCOPY(ResVReg, SrcPtr, I);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpPtrCastToGeneric))
            .addDef(Tmp)
            .addUse(GR.getSPIRVTypeID(GenericPtrTy))
            .addUse(SrcPtr)
            .constrainAllUses(TII, TRI, RBI);
    return Result &&
           BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpGenericCastToPtr))
               .addDef(ResVReg)
               .addUse(GR.getSPIRVTypeID(ResType))
               .addUse(Tmp)
               .constrainAllUses(TII, TRI, RBI);
  }

  // Check if instructions from the SPV_INTEL_usm_storage_classes extension
  // may be applied.
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::Generic)
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  if (SrcSC == SPIRV::StorageClass::Generic && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);

  return false;
}
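// Summary of the cast matrix handled above, as implemented here: identical
// storage classes and Function<->Private collapse to a COPY; a castable class
// to/from Generic uses OpPtrCastToGeneric/OpGenericCastToPtr; two castable
// classes round-trip through Generic; the remaining INTEL USM cases rely on
// SPV_INTEL_usm_storage_classes. Anything else is rejected.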
static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ: return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE: return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT: return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE: return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT: return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE: return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD: return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ: return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE: return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT: return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE: return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT: return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE: return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO: return SPIRV::OpUnordered;
  default: llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:  return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:  return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE: return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT: return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE: return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT: return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE: return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT: return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE: return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT: return SPIRV::OpULessThan;
  default: llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ: return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE: return SPIRV::OpPtrNotEqual;
  default: llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ: return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE: return SPIRV::OpLogicalNotEqual;
  default: llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}
bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I,
                                              unsigned OpAnyOrAll) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
  bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
  if (IsBoolTy && !IsVectorTy) {
    assert(ResVReg == I.getOperand(0).getReg());
    return BuildCOPY(ResVReg, InputRegister, I);
  }

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  unsigned SpirvNotEqualId =
      IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
  SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
  SPIRVType *SpvBoolTy = SpvBoolScalarTy;
  Register NotEqualReg = ResVReg;

  if (IsVectorTy) {
    NotEqualReg = IsBoolTy ? InputRegister
                           : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    const unsigned NumElts = GR.getScalarOrVectorComponentCount(InputType);
    SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
  }

  bool Result = true;
  if (!IsBoolTy) {
    Register ConstZeroReg =
        IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);

    Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
                  .addDef(NotEqualReg)
                  .addUse(GR.getSPIRVTypeID(SpvBoolTy))
                  .addUse(InputRegister)
                  .addUse(ConstZeroReg)
                  .constrainAllUses(TII, TRI, RBI);
  }

  if (!IsVectorTy)
    return Result;

  return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
                       .addDef(ResVReg)
                       .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
                       .addUse(NotEqualReg)
                       .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectAll(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
}

bool SPIRVInstructionSelector::selectAny(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
}
bool SPIRVInstructionSelector::selectFloatDot(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I) const {
  assert(I.getNumOperands() == 4);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());

  [[maybe_unused]] SPIRVType *VecType =
      GR.getSPIRVTypeForVReg(I.getOperand(2).getReg());

  assert(VecType->getOpcode() == SPIRV::OpTypeVector &&
         GR.getScalarOrVectorComponentCount(VecType) > 1 &&
         "dot product requires a vector of at least 2 components");

  [[maybe_unused]] SPIRVType *EltType =
      GR.getSPIRVTypeForVReg(VecType->getOperand(1).getReg());

  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpDot))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectIntegerDot(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I,
                                                bool Signed) const {
  assert(I.getNumOperands() == 4);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  MachineBasicBlock &BB = *I.getParent();

  auto DotOp = Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(DotOp))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
// Since pre-1.6 SPIR-V has no integer dot implementation,
// expand by piecewise multiplying and adding the results.
bool SPIRVInstructionSelector::selectIntegerDotExpansion(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  assert(I.getNumOperands() == 4);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  MachineBasicBlock &BB = *I.getParent();

  // Multiply the vectors, then sum the results.
  Register Vec0 = I.getOperand(2).getReg();
  Register Vec1 = I.getOperand(3).getReg();
  Register TmpVec = MRI->createVirtualRegister(GR.getRegClass(ResType));
  SPIRVType *VecType = GR.getSPIRVTypeForVReg(Vec0);

  bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulV))
                    .addDef(TmpVec)
                    .addUse(GR.getSPIRVTypeID(VecType))
                    .addUse(Vec0)
                    .addUse(Vec1)
                    .constrainAllUses(TII, TRI, RBI);

  assert(VecType->getOpcode() == SPIRV::OpTypeVector &&
         GR.getScalarOrVectorComponentCount(VecType) > 1 &&
         "dot product requires a vector of at least 2 components");

  Register Res = MRI->createVirtualRegister(GR.getRegClass(ResType));
  Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                .addDef(Res)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(TmpVec)
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);

  for (unsigned i = 1; i < GR.getScalarOrVectorComponentCount(VecType); i++) {
    Register Elt = MRI->createVirtualRegister(GR.getRegClass(ResType));

    Result &=
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(Elt)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(TmpVec)
            .addImm(i)
            .constrainAllUses(TII, TRI, RBI);

    Register Sum = i < GR.getScalarOrVectorComponentCount(VecType) - 1
                       ? MRI->createVirtualRegister(GR.getRegClass(ResType))
                       : ResVReg;

    Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
                  .addDef(Sum)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(Res)
                  .addUse(Elt)
                  .constrainAllUses(TII, TRI, RBI);
    Res = Sum;
  }

  return Result;
}
template <bool Signed>
bool SPIRVInstructionSelector::selectDot4AddPacked(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  assert(I.getNumOperands() == 5);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  assert(I.getOperand(4).isReg());
  MachineBasicBlock &BB = *I.getParent();

  auto DotOp = Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
  Register Dot = MRI->createVirtualRegister(GR.getRegClass(ResType));
  bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(DotOp))
                    .addDef(Dot)
                    .addUse(GR.getSPIRVTypeID(ResType))
                    .addUse(I.getOperand(2).getReg())
                    .addUse(I.getOperand(3).getReg())
                    .constrainAllUses(TII, TRI, RBI);

  return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
                       .addDef(ResVReg)
                       .addUse(GR.getSPIRVTypeID(ResType))
                       .addUse(Dot)
                       .addUse(I.getOperand(4).getReg())
                       .constrainAllUses(TII, TRI, RBI);
}
template <bool Signed>
bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  assert(I.getNumOperands() == 5);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  assert(I.getOperand(4).isReg());
  MachineBasicBlock &BB = *I.getParent();

  bool Result = true;

  // Acc = C
  Register Acc = I.getOperand(4).getReg();
  SPIRVType *EltType = GR.getOrCreateSPIRVIntegerType(8, I, TII);
  auto ExtractOp =
      Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;

  // Extract the i8 element, multiply, and add it to the accumulator.
  for (unsigned i = 0; i < 4; i++) {
    // A[i]
    Register AElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
                  .addDef(AElt)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(I.getOperand(2).getReg())
                  .addUse(GR.getOrCreateConstInt(i * 8, I, EltType, TII))
                  .addUse(GR.getOrCreateConstInt(8, I, EltType, TII))
                  .constrainAllUses(TII, TRI, RBI);

    // B[i]
    Register BElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
                  .addDef(BElt)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(I.getOperand(3).getReg())
                  .addUse(GR.getOrCreateConstInt(i * 8, I, EltType, TII))
                  .addUse(GR.getOrCreateConstInt(8, I, EltType, TII))
                  .constrainAllUses(TII, TRI, RBI);

    // A[i] * B[i]
    Register Mul = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIMulS))
                  .addDef(Mul)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(AElt)
                  .addUse(BElt)
                  .constrainAllUses(TII, TRI, RBI);

    // Discard the 24 highest bits so that the stored i32 register is the i8
    // equivalent.
    Register MaskMul = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(ExtractOp))
                  .addDef(MaskMul)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(Mul)
                  .addUse(GR.getOrCreateConstInt(0, I, EltType, TII))
                  .addUse(GR.getOrCreateConstInt(8, I, EltType, TII))
                  .constrainAllUses(TII, TRI, RBI);

    // Acc = Acc + A[i] * B[i]
    Register Sum =
        i < 3 ? MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
    Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpIAddS))
                  .addDef(Sum)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(Acc)
                  .addUse(MaskMul)
                  .constrainAllUses(TII, TRI, RBI);

    Acc = Sum;
  }

  return Result;
}
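// Each loop iteration above computes Acc += ext(A.byte[i]) * ext(B.byte[i])
// using only core SPIR-V bitfield instructions. As a worked example for the
// unsigned case with A = 0x01020304 and B = 0x01010101, the four extracted
// byte products are 4, 3, 2 and 1, so the expansion adds 10 to the
// accumulator operand.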
/// Transform saturate(x) to clamp(x, 0.0f, 1.0f) as SPIR-V
/// does not have a saturate builtin.
bool SPIRVInstructionSelector::selectSaturate(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register VZero = buildZerosValF(ResType, I);
  Register VOne = buildOnesValF(ResType, I);

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::FClamp)
      .addUse(I.getOperand(2).getReg())
      .addUse(VZero)
      .addUse(VOne)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectSign(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
  auto &DL = I.getDebugLoc();

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);

  unsigned SignBitWidth = GR.getScalarOrVectorBitWidth(InputType);
  unsigned ResBitWidth = GR.getScalarOrVectorBitWidth(ResType);

  bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;

  auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
  Register SignReg = NeedsConversion
                         ? MRI->createVirtualRegister(&SPIRV::IDRegClass)
                         : ResVReg;

  bool Result =
      BuildMI(BB, I, DL, TII.get(SPIRV::OpExtInst))
          .addDef(SignReg)
          .addUse(GR.getSPIRVTypeID(InputType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
          .addImm(SignOpcode)
          .addUse(InputRegister)
          .constrainAllUses(TII, TRI, RBI);

  if (NeedsConversion) {
    auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
    Result &= BuildMI(BB, I, DL, TII.get(ConvertOpcode))
                  .addDef(ResVReg)
                  .addUse(GR.getSPIRVTypeID(ResType))
                  .addUse(SignReg)
                  .constrainAllUses(TII, TRI, RBI);
  }

  return Result;
}
bool SPIRVInstructionSelector::selectWaveOpInst(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I,
                                                unsigned Opcode) const {
  MachineBasicBlock &BB = *I.getParent();
  SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);

  auto BMI = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I,
                                                IntTy, TII));

  for (unsigned J = 2; J < I.getNumOperands(); J++) {
    BMI.addUse(I.getOperand(J).getReg());
  }

  return BMI.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectWaveActiveCountBits(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
  SPIRVType *BallotType = GR.getOrCreateSPIRVVectorType(IntTy, 4, I, TII);
  Register BallotReg = MRI->createVirtualRegister(GR.getRegClass(BallotType));
  bool Result = selectWaveOpInst(BallotReg, BallotType, I,
                                 SPIRV::OpGroupNonUniformBallot);

  MachineBasicBlock &BB = *I.getParent();
  Result &=
      BuildMI(BB, I, I.getDebugLoc(),
              TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy, TII))
          .addImm(SPIRV::GroupOperation::Reduce)
          .addUse(BallotReg)
          .constrainAllUses(TII, TRI, RBI);

  return Result;
}
bool SPIRVInstructionSelector::selectWaveReduceMax(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I,
                                                   bool IsUnsigned) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Retrieve the operation to use based on the input type.
  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  auto IntegerOpcodeType =
      IsUnsigned ? SPIRV::OpGroupNonUniformUMax : SPIRV::OpGroupNonUniformSMax;
  auto Opcode = IsFloatTy ? SPIRV::OpGroupNonUniformFMax : IntegerOpcodeType;
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy, TII))
      .addImm(SPIRV::GroupOperation::Reduce)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectWaveReduceSum(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Retrieve the operation to use based on the input type.
  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  auto Opcode =
      IsFloatTy ? SPIRV::OpGroupNonUniformFAdd : SPIRV::OpGroupNonUniformIAdd;
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy, TII))
      .addImm(SPIRV::GroupOperation::Reduce)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
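// Both wave reductions above follow the same OpGroupNonUniform* shape:
//   %r = OpGroupNonUniform<IAdd|FAdd|SMax|...> %ty %subgroup Reduce %val
// i.e. an execution-scope id (Subgroup), a group-operation literal (Reduce),
// and the per-invocation value; only the opcode varies with the input type.
// (A sketch of the textual form; the ids are hypothetical.)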
bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  // There is no way to implement `freeze` correctly without support from the
  // SPIR-V standard, but we can at least handle the simple (static) case when
  // an undef/poison value is obvious, which keeps selection from crashing.
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
    Register Reg;
    switch (Def->getOpcode()) {
    case SPIRV::ASSIGN_TYPE:
      if (MachineInstr *AssignToDef =
              MRI->getVRegDef(Def->getOperand(1).getReg())) {
        if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
          Reg = Def->getOperand(2).getReg();
      }
      break;
    case SPIRV::OpUndef:
      Reg = Def->getOperand(1).getReg();
      break;
    }
    unsigned DestOpCode;
    if (Reg.isValid()) {
      DestOpCode = SPIRV::OpConstantNull;
    } else {
      DestOpCode = TargetOpcode::COPY;
      Reg = OpReg;
    }
    return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
        .addDef(I.getOperand(0).getReg())
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  return false;
}
static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
                                       const SPIRVType *ResType) {
  MachineInstr *OpDef = MRI->getVRegDef(ResType->getOperand(2).getReg());
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (MachineInstr *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }
  unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
                   ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
                   : 0;
  return N;
}

// Return true if the virtual register represents a constant.
static bool isConstReg(MachineRegisterInfo *MRI, MachineInstr *OpDef,
                       SmallPtrSet<SPIRVType *, 4> &Visited) {
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (MachineInstr *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }
  switch (OpDef->getOpcode()) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return true;
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return cast<GIntrinsic>(*OpDef).getIntrinsicID() ==
           Intrinsic::spv_const_composite;
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_SPLAT_VECTOR: {
    // A composite is constant when all of its constituents are constant.
    for (unsigned i = OpDef->getNumExplicitDefs(); i < OpDef->getNumOperands();
         ++i) {
      if (!isConstReg(MRI, MRI->getVRegDef(OpDef->getOperand(i).getReg()),
                      Visited))
        return false;
    }
    return true;
  }
  }
  return false;
}

static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) {
  if (MachineInstr *OpDef = MRI->getVRegDef(OpReg)) {
    SmallPtrSet<SPIRVType *, 4> Visited;
    return isConstReg(MRI, OpDef, Visited);
  }
  return false;
}
bool SPIRVInstructionSelector::selectBuildVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0;
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray)
    N = getArrayComponentCount(MRI, ResType);
  else
    report_fatal_error("Cannot select G_BUILD_VECTOR with a non-vector result");
  if (I.getNumExplicitOperands() - I.getNumExplicitDefs() != N)
    report_fatal_error("G_BUILD_VECTOR and the result type are inconsistent");

  // Check if we may construct a constant vector.
  bool IsConst = true;
  for (unsigned i = I.getNumExplicitDefs();
       i < I.getNumExplicitOperands() && IsConst; ++i)
    if (!isConstReg(MRI, I.getOperand(i).getReg()))
      IsConst = false;

  if (!IsConst && N < 2)
    report_fatal_error(
        "There must be at least two constituent operands in a vector");

  MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0;
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray)
    N = getArrayComponentCount(MRI, ResType);
  else
    report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");

  unsigned OpIdx = I.getNumExplicitDefs();
  if (!I.getOperand(OpIdx).isReg())
    report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");

  // Check if we may construct a constant vector.
  Register OpReg = I.getOperand(OpIdx).getReg();
  bool IsConst = isConstReg(MRI, OpReg);

  if (!IsConst && N < 2)
    report_fatal_error(
        "There must be at least two constituent operands in a vector");

  MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = 0; i < N; ++i)
    MIB.addUse(OpReg);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectDiscard(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  unsigned Opcode;

  if (STI.canUseExtension(
          SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
      STI.isAtLeastSPIRVVer(llvm::VersionTuple(1, 6))) {
    Opcode = SPIRV::OpDemoteToHelperInvocation;
  } else {
    Opcode = SPIRV::OpKill;
    // OpKill must be the last operation of any basic block.
    if (MachineInstr *NextI = I.getNextNode()) {
      GR.invalidateMachineInstr(NextI);
      NextI->removeFromParent();
    }
  }

  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}
void SPIRVInstructionSelector::renderFImm64(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}
std::pair<Register, bool>
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Find a constant in DT or build a new one.
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
  bool Result = true;
  if (!NewReg.isValid()) {
    NewReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
    GR.add(ConstInt, GR.CurMF, NewReg);
    MachineInstr *MI;
    MachineBasicBlock &BB = *I.getParent();
    if (Val == 0) {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty));
    } else {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty))
               .addImm(APInt(32, Val).getZExtValue());
    }
    Result &= constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  }
  return {NewReg, Result};
}
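// buildI32Constant() deduplicates through the global registry: GR.find()
// returns an existing id for the same ConstantInt, so repeated requests for,
// say, the Subgroup scope constant all resolve to a single OpConstantI in the
// emitted module. The bool half of the returned pair reports whether
// constraining the freshly built instruction (if any) succeeded.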
bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}

Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
  bool ZeroAsNull = STI.isOpenCLEnv();
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
}

Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
                                                  MachineInstr &I) const {
  // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
  bool ZeroAsNull = STI.isOpenCLEnv();
  APFloat VZero = getZeroFP(GR.getTypeForSPIRVType(ResType));
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
}

Register SPIRVInstructionSelector::buildOnesValF(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  bool ZeroAsNull = STI.isOpenCLEnv();
  APFloat VOne = getOneFP(GR.getTypeForSPIRVType(ResType));
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(VOne, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstFP(VOne, I, ResType, TII, ZeroAsNull);
}

Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One =
      AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(One.getZExtValue(), I, ResType, TII);
  return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
}
bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // We can convert bool value directly to float type without OpConvert*ToF,
  // however the translator generates OpSelect+OpConvert*ToF, so we do the
  // same.
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = GR.getScalarOrVectorComponentCount(ResType);
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectOpWithSrcs(ResVReg, ResType, I, {SrcReg}, Opcode);
}
bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  Register SrcReg = I.getOperand(1).getReg();
  if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);

  SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
  if (SrcType == ResType)
    return BuildCOPY(ResVReg, SrcReg, I);

  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}
bool SPIRVInstructionSelector::selectSUCmp(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I,
                                           bool IsSigned) const {
  MachineIRBuilder MIRBuilder(I);
  MachineBasicBlock &BB = *I.getParent();
  // Ensure we have a bool type of matching shape.
  SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
  unsigned N = GR.getScalarOrVectorComponentCount(ResType);
  if (N > 1)
    BoolType = GR.getOrCreateSPIRVVectorType(BoolType, N, I, TII);
  Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);
  // Build less-than-equal and less-than.
  Register IsLessEqReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  GR.assignSPIRVTypeToVReg(ResType, IsLessEqReg, MIRBuilder.getMF());
  bool Result = BuildMI(BB, I, I.getDebugLoc(),
                        TII.get(IsSigned ? SPIRV::OpSLessThanEqual
                                         : SPIRV::OpULessThanEqual))
                    .addDef(IsLessEqReg)
                    .addUse(BoolTypeReg)
                    .addUse(I.getOperand(1).getReg())
                    .addUse(I.getOperand(2).getReg())
                    .constrainAllUses(TII, TRI, RBI);
  Register IsLessReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  GR.assignSPIRVTypeToVReg(ResType, IsLessReg, MIRBuilder.getMF());
  Result &= BuildMI(BB, I, I.getDebugLoc(),
                    TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
                .addDef(IsLessReg)
                .addUse(BoolTypeReg)
                .addUse(I.getOperand(1).getReg())
                .addUse(I.getOperand(2).getReg())
                .constrainAllUses(TII, TRI, RBI);
  // Build the selects.
  Register ResTypeReg = GR.getSPIRVTypeID(ResType);
  Register NegOneOrZeroReg =
      MRI->createVirtualRegister(GR.getRegClass(ResType));
  GR.assignSPIRVTypeToVReg(ResType, NegOneOrZeroReg, MIRBuilder.getMF());
  unsigned SelectOpcode =
      N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
  Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(SelectOpcode))
                .addDef(NegOneOrZeroReg)
                .addUse(ResTypeReg)
                .addUse(IsLessReg)
                .addUse(buildOnesVal(true, ResType, I)) // -1
                .addUse(buildZerosVal(ResType, I))
                .constrainAllUses(TII, TRI, RBI);
  return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SelectOpcode))
                       .addDef(ResVReg)
                       .addUse(ResTypeReg)
                       .addUse(IsLessEqReg)
                       .addUse(NegOneOrZeroReg)
                       .addUse(buildOnesVal(false, ResType, I)) // 1
                       .constrainAllUses(TII, TRI, RBI);
}
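// The two selects implement the three-way result of G_SCMP/G_UCMP:
//   lhs <  rhs  -> -1
//   lhs == rhs  ->  0
//   lhs >  rhs  ->  1
// The first select picks -1 vs 0 from `lhs < rhs`; the second overrides the
// result with 1 whenever `lhs <= rhs` is false.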
bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               MachineInstr &I,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy) const {
  // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register Zero = buildZerosVal(IntTy, I);
  Register One = buildOnesVal(false, IntTy, I);
  MachineBasicBlock &BB = *I.getParent();
  bool Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                    .addDef(BitIntReg)
                    .addUse(GR.getSPIRVTypeID(IntTy))
                    .addUse(IntReg)
                    .addUse(One)
                    .constrainAllUses(TII, TRI, RBI);
  return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
                       .addDef(ResVReg)
                       .addUse(GR.getSPIRVTypeID(BoolTy))
                       .addUse(BitIntReg)
                       .addUse(Zero)
                       .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  Register IntReg = I.getOperand(1).getReg();
  const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
    return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
  if (ArgType == ResType)
    return BuildCOPY(ResVReg, IntReg, I);
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}
bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           const APInt &Imm,
                                           MachineInstr &I) const {
  unsigned TyOpcode = ResType->getOpcode();
  assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
  MachineBasicBlock &BB = *I.getParent();
  if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
      Imm.isZero())
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .constrainAllUses(TII, TRI, RBI);
  if (TyOpcode == SPIRV::OpTypeInt) {
    assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
    Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
    return Reg == ResVReg ? true : BuildCOPY(ResVReg, Reg, I);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  addNumImm(Imm, MIB);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

static bool isImm(const MachineOperand &MO, const MachineRegisterInfo *MRI) {
  assert(MO.isReg());
  const MachineInstr *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() == SPIRV::ASSIGN_TYPE) {
    assert(TypeInst->getOperand(1).isReg());
    MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
    return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
  }
  return TypeInst->getOpcode() == SPIRV::OpConstantI;
}

static int64_t foldImm(const MachineOperand &MO,
                       const MachineRegisterInfo *MRI) {
  const MachineInstr *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() == SPIRV::OpConstantI)
    return TypeInst->getOperand(2).getImm();
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
  return ImmInst->getOperand(1).getCImm()->getZExtValue();
}
bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // object to insert
                 .addUse(I.getOperand(3).getReg())
                 // composite to insert into
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 4; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg());
  for (unsigned i = 3; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (isImm(I.getOperand(4), MRI))
    return selectInsertVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  if (isImm(I.getOperand(3), MRI))
    return selectExtractVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(),
                 TII.get(SPIRV::OpVectorExtractDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const bool IsGEPInBounds = I.getOperand(2).getImm();

  // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator
  // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan,
  // however, we have to use Op[InBounds]AccessChain.
  const unsigned Opcode = STI.isVulkanEnv()
                              ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
                                               : SPIRV::OpAccessChain)
                              : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
                                               : SPIRV::OpPtrAccessChain);

  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to get a pointer to.
                 .addUse(I.getOperand(3).getReg());
  // Adding indices.
  const unsigned StartingIndex =
      (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
          ? 5
          : 4;
  for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}
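// The starting-index split mirrors the operand layouts: OpPtrAccessChain
// takes an extra leading "Element" operand before the member indices, so a
// GEP like `getelementptr i32, ptr %p, i64 1` becomes, schematically,
//   %q = OpPtrAccessChain %ptr_ty %p %one
// on OpenCL-flavoured targets, while Vulkan's OpAccessChain starts directly
// at the member indices. (Illustrative mapping; exact operand positions come
// from the spv_gep lowering.)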
// Maybe wrap a value into OpSpecConstantOp.
bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
    MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
  bool Result = true;
  unsigned Lim = I.getNumExplicitOperands();
  for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
    Register OpReg = I.getOperand(i).getReg();
    MachineInstr *OpDefine = MRI->getVRegDef(OpReg);
    SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
    SmallPtrSet<SPIRVType *, 4> Visited;
    if (!OpDefine || !OpType || isConstReg(MRI, OpDefine, Visited) ||
        OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
        GR.isAggregateType(OpType)) {
      // The case of G_ADDRSPACE_CAST inside spv_const_composite() is processed
      // by selectAddrSpaceCast().
      CompositeArgs.push_back(OpReg);
      continue;
    }
    MachineFunction *MF = I.getMF();
    Register WrapReg = GR.find(OpDefine, MF);
    if (WrapReg.isValid()) {
      CompositeArgs.push_back(WrapReg);
      continue;
    }
    // Create a new register for the wrapper.
    WrapReg = MRI->createVirtualRegister(GR.getRegClass(OpType));
    GR.add(OpDefine, MF, WrapReg);
    CompositeArgs.push_back(WrapReg);
    // Decorate the wrapper register and generate a new instruction.
    GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
    MachineBasicBlock &BB = *I.getParent();
    Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                 .addDef(WrapReg)
                 .addUse(GR.getSPIRVTypeID(OpType))
                 .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
                 .addUse(OpReg)
                 .constrainAllUses(TII, TRI, RBI);
    if (!Result)
      break;
  }
  return Result;
}

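// Note (explanatory, not from the original source): OpConstantComposite may
// only reference other constant instructions. wrapIntoSpecConstantOp above
// therefore rewrites any non-constant member (e.g. the result of a bitcast)
// as an OpSpecConstantOp performing the same operation, which is legal inside
// a constant composite.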
bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
  switch (IID) {
  case Intrinsic::spv_load:
    return selectLoad(ResVReg, ResType, I);
  case Intrinsic::spv_store:
    return selectStore(I);
  case Intrinsic::spv_extractv:
    return selectExtractVal(ResVReg, ResType, I);
  case Intrinsic::spv_insertv:
    return selectInsertVal(ResVReg, ResType, I);
  case Intrinsic::spv_extractelt:
    return selectExtractElt(ResVReg, ResType, I);
  case Intrinsic::spv_insertelt:
    return selectInsertElt(ResVReg, ResType, I);
  case Intrinsic::spv_gep:
    return selectGEP(ResVReg, ResType, I);
  case Intrinsic::spv_unref_global:
  case Intrinsic::spv_init_global: {
    MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
    MachineInstr *Init = I.getNumExplicitOperands() > 2
                             ? MRI->getVRegDef(I.getOperand(2).getReg())
                             : nullptr;
    assert(MI);
    return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
  }
  case Intrinsic::spv_undef: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_const_composite: {
    // If no values are attached, the composite is a null constant.
    bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
    // Select a proper instruction.
    unsigned Opcode = SPIRV::OpConstantNull;
    SmallVector<Register> CompositeArgs;
    if (!IsNull) {
      Opcode = SPIRV::OpConstantComposite;
      if (!wrapIntoSpecConstantOp(I, CompositeArgs))
        return false;
    }
    MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    // Skip the type metadata node we already used for assign.type.
    if (!IsNull)
      for (Register OpReg : CompositeArgs)
        MIB.addUse(OpReg);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_assign_name: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
    MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
    for (unsigned i = I.getNumExplicitDefs() + 2;
         i < I.getNumExplicitOperands(); ++i) {
      MIB.addImm(I.getOperand(i).getImm());
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_switch: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isReg())
        MIB.addReg(I.getOperand(i).getReg());
      else if (I.getOperand(i).isCImm())
        addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
      else if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        llvm_unreachable("Unexpected OpSwitch operand");
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_loop_merge: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpLoopMerge));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      assert(I.getOperand(i).isMBB());
      MIB.addMBB(I.getOperand(i).getMBB());
    }
    MIB.addImm(SPIRV::SelectionControl::None);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_selection_merge: {
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSelectionMerge));
    assert(I.getOperand(1).isMBB() &&
           "operand 1 to spv_selection_merge must be a basic block");
    MIB.addMBB(I.getOperand(1).getMBB());
    MIB.addImm(getSelectionOperandForImm(I.getOperand(2).getImm()));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_cmpxchg:
    return selectAtomicCmpXchg(ResVReg, ResType, I);
  case Intrinsic::spv_unreachable:
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable))
        .constrainAllUses(TII, TRI, RBI);
  case Intrinsic::spv_alloca:
    return selectFrameIndex(ResVReg, ResType, I);
  case Intrinsic::spv_alloca_array:
    return selectAllocaArray(ResVReg, ResType, I);
  case Intrinsic::spv_assume:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
          .addUse(I.getOperand(1).getReg())
          .constrainAllUses(TII, TRI, RBI);
    break;
  case Intrinsic::spv_expect:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg())
          .addUse(I.getOperand(3).getReg())
          .constrainAllUses(TII, TRI, RBI);
    break;
  case Intrinsic::arithmetic_fence:
    if (STI.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
      return BuildMI(BB, I, I.getDebugLoc(),
                     TII.get(SPIRV::OpArithmeticFenceEXT))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg())
          .constrainAllUses(TII, TRI, RBI);
    return BuildCOPY(ResVReg, I.getOperand(2).getReg(), I);
  case Intrinsic::spv_thread_id:
    // The HLSL SV_DispatchThreadID semantic maps to the GlobalInvocationId
    // builtin: load the vec3 input and extract the requested component.
    return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
                                  ResType, I);
  case Intrinsic::spv_thread_id_in_group:
    return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
                                  ResType, I);
  case Intrinsic::spv_group_id:
    return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg, ResType,
                                  I);
  case Intrinsic::spv_fdot:
    return selectFloatDot(ResVReg, ResType, I);
  case Intrinsic::spv_udot:
  case Intrinsic::spv_sdot:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
        STI.isAtLeastSPIRVVer(VersionTuple(1, 6)))
      return selectIntegerDot(ResVReg, ResType, I,
                              IID == Intrinsic::spv_sdot);
    return selectIntegerDotExpansion(ResVReg, ResType, I);
  case Intrinsic::spv_dot4add_i8packed:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
        STI.isAtLeastSPIRVVer(VersionTuple(1, 6)))
      return selectDot4AddPacked<true>(ResVReg, ResType, I);
    return selectDot4AddPackedExpansion<true>(ResVReg, ResType, I);
  case Intrinsic::spv_dot4add_u8packed:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
        STI.isAtLeastSPIRVVer(VersionTuple(1, 6)))
      return selectDot4AddPacked<false>(ResVReg, ResType, I);
    return selectDot4AddPackedExpansion<false>(ResVReg, ResType, I);
  case Intrinsic::spv_all:
    return selectAll(ResVReg, ResType, I);
  case Intrinsic::spv_any:
    return selectAny(ResVReg, ResType, I);
  case Intrinsic::spv_cross:
    return selectExtInst(ResVReg, ResType, I, CL::cross, GL::Cross);
  case Intrinsic::spv_distance:
    return selectExtInst(ResVReg, ResType, I, CL::distance, GL::Distance);
  case Intrinsic::spv_lerp:
    return selectExtInst(ResVReg, ResType, I, CL::mix, GL::FMix);
  case Intrinsic::spv_length:
    return selectExtInst(ResVReg, ResType, I, CL::length, GL::Length);
  case Intrinsic::spv_degrees:
    return selectExtInst(ResVReg, ResType, I, CL::degrees, GL::Degrees);
  case Intrinsic::spv_frac:
    return selectExtInst(ResVReg, ResType, I, CL::fract, GL::Fract);
  case Intrinsic::spv_normalize:
    return selectExtInst(ResVReg, ResType, I, CL::normalize, GL::Normalize);
  case Intrinsic::spv_rsqrt:
    return selectExtInst(ResVReg, ResType, I, CL::rsqrt, GL::InverseSqrt);
  case Intrinsic::spv_sign:
    return selectSign(ResVReg, ResType, I);
  case Intrinsic::spv_firstbituhigh: // There is no CL equivalent of FindUMsb
    return selectFirstBitHigh(ResVReg, ResType, I, /*IsSigned=*/false);
  case Intrinsic::spv_firstbitshigh: // There is no CL equivalent of FindSMsb
    return selectFirstBitHigh(ResVReg, ResType, I, /*IsSigned=*/true);
  case Intrinsic::spv_firstbitlow: // There is no CL equivalent of FindILsb
    return selectFirstBitLow(ResVReg, ResType, I);
  case Intrinsic::spv_group_memory_barrier_with_group_sync: {
    bool Result = true;
    auto MemSemConstant =
        buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent, I);
    Register MemSemReg = MemSemConstant.first;
    Result &= MemSemConstant.second;
    auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup, I);
    Register ScopeReg = ScopeConstant.first;
    Result &= ScopeConstant.second;
    return Result &&
           BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpControlBarrier))
               .addUse(ScopeReg)
               .addUse(ScopeReg)
               .addUse(MemSemReg)
               .constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_lifetime_start:
  case Intrinsic::spv_lifetime_end: {
    unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
                                                       : SPIRV::OpLifetimeStop;
    int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
    Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
    if (Size == -1)
      Size = 0;
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(Op))
        .addUse(PtrReg)
        .addImm(Size)
        .constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_saturate:
    return selectSaturate(ResVReg, ResType, I);
  case Intrinsic::spv_nclamp:
    return selectExtInst(ResVReg, ResType, I, CL::fclamp, GL::NClamp);
  case Intrinsic::spv_uclamp:
    return selectExtInst(ResVReg, ResType, I, CL::u_clamp, GL::UClamp);
  case Intrinsic::spv_sclamp:
    return selectExtInst(ResVReg, ResType, I, CL::s_clamp, GL::SClamp);
  case Intrinsic::spv_wave_active_countbits:
    return selectWaveActiveCountBits(ResVReg, ResType, I);
  case Intrinsic::spv_wave_all:
    return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformAll);
  case Intrinsic::spv_wave_any:
    return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformAny);
  case Intrinsic::spv_wave_is_first_lane:
    return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformElect);
  case Intrinsic::spv_wave_reduce_umax:
    return selectWaveReduceMax(ResVReg, ResType, I, /*IsUnsigned=*/true);
  case Intrinsic::spv_wave_reduce_max:
    return selectWaveReduceMax(ResVReg, ResType, I, /*IsUnsigned=*/false);
  case Intrinsic::spv_wave_reduce_sum:
    return selectWaveReduceSum(ResVReg, ResType, I);
  case Intrinsic::spv_wave_readlane:
    return selectWaveOpInst(ResVReg, ResType, I,
                            SPIRV::OpGroupNonUniformShuffle);
  case Intrinsic::spv_step:
    return selectExtInst(ResVReg, ResType, I, CL::step, GL::Step);
  case Intrinsic::spv_radians:
    return selectExtInst(ResVReg, ResType, I, CL::radians, GL::Radians);
  // Discard intrinsics which we do not expect to actually represent code
  // after lowering or intrinsics which are not implemented but should not
  // crash when found in a customer's LLVM IR input.
  case Intrinsic::instrprof_increment:
  case Intrinsic::instrprof_increment_step:
  case Intrinsic::instrprof_value_profile:
    break;
  // Discard internal intrinsics.
  case Intrinsic::spv_value_md:
    break;
  case Intrinsic::spv_resource_handlefrombinding: {
    return selectHandleFromBinding(ResVReg, ResType, I);
  }
  case Intrinsic::spv_resource_store_typedbuffer: {
    return selectImageWriteIntrinsic(I);
  }
  case Intrinsic::spv_resource_load_typedbuffer: {
    return selectReadImageIntrinsic(ResVReg, ResType, I);
  }
  case Intrinsic::spv_resource_getpointer: {
    return selectResourceGetPointer(ResVReg, ResType, I);
  }
  case Intrinsic::spv_discard: {
    return selectDiscard(ResVReg, ResType, I);
  }
  default: {
    std::string DiagMsg;
    raw_string_ostream OS(DiagMsg);
    I.print(OS);
    DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
    report_fatal_error(DiagMsg.c_str(), false);
  }
  }
  return true;
}

bool SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg,
                                                       const SPIRVType *ResType,
                                                       MachineInstr &I) const {
  return true;
}

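// Note (explanatory, not from the original source): handle creation is a
// no-op at this point. The OpLoad of the actual resource handle is emitted
// lazily, next to each use, by loadHandleBeforePosition() below, because a
// handle loaded in one basic block cannot be reused from another.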
bool SPIRVInstructionSelector::selectReadImageIntrinsic(
    Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  // If the load of the image is in a different basic block, then this will
  // generate invalid code. A proper solution is to move the OpLoad from
  // selectHandleFromBinding here. However, to do that we will need to change
  // the return type of the intrinsic.
  Register ImageReg = I.getOperand(2).getReg();
  auto *ImageDef = cast<GIntrinsic>(getVRegDef(*MRI, ImageReg));
  Register NewImageReg = MRI->createVirtualRegister(MRI->getRegClass(ImageReg));
  if (!loadHandleBeforePosition(NewImageReg, GR.getSPIRVTypeForVReg(ImageReg),
                                *ImageDef, I))
    return false;

  Register IdxReg = I.getOperand(3).getReg();
  DebugLoc Loc = I.getDebugLoc();
  MachineInstr &Pos = I;
  return generateImageRead(ResVReg, ResType, NewImageReg, IdxReg, Loc, Pos);
}

bool SPIRVInstructionSelector::generateImageRead(Register &ResVReg,
                                                 const SPIRVType *ResType,
                                                 Register ImageReg,
                                                 Register IdxReg, DebugLoc Loc,
                                                 MachineInstr &Pos) const {
  uint64_t ResultSize = GR.getScalarOrVectorComponentCount(ResType);
  if (ResultSize == 4) {
    return BuildMI(*Pos.getParent(), Pos, Loc, TII.get(SPIRV::OpImageRead))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .addUse(ImageReg)
        .addUse(IdxReg)
        .constrainAllUses(TII, TRI, RBI);
  }

  SPIRVType *ReadType = widenTypeToVec4(ResType, Pos);
  Register ReadReg = MRI->createVirtualRegister(GR.getRegClass(ReadType));
  bool Succeed =
      BuildMI(*Pos.getParent(), Pos, Loc, TII.get(SPIRV::OpImageRead))
          .addDef(ReadReg)
          .addUse(GR.getSPIRVTypeID(ReadType))
          .addUse(ImageReg)
          .addUse(IdxReg)
          .constrainAllUses(TII, TRI, RBI);
  if (!Succeed)
    return false;

  // If the result size is 1, then the loaded value must be scalarized.
  if (ResultSize == 1) {
    return BuildMI(*Pos.getParent(), Pos, Loc,
                   TII.get(SPIRV::OpCompositeExtract))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .addUse(ReadReg)
        .addImm(0)
        .constrainAllUses(TII, TRI, RBI);
  }
  return extractSubvector(ResVReg, ResType, ReadReg, Pos);
}

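// Note (explanatory, not from the original source): OpImageRead on a texel
// buffer always yields a 4-component value, so a scalar or short-vector
// result type is first widened to vec4, read, and then narrowed again: a
// 1-element result takes the OpCompositeExtract path, a 2- or 3-element
// result goes through extractSubvector().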
bool SPIRVInstructionSelector::selectResourceGetPointer(
    Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  // For now, the operand is an image. This will change once we start handling
  // more resource types.
  Register ResourcePtr = I.getOperand(2).getReg();
  SPIRVType *RegType = GR.getResultType(ResourcePtr);
  assert(RegType->getOpcode() == SPIRV::OpTypeImage &&
         "Can only handle texel buffers for now.");

  // For texel buffers, the index into the image is part of the OpImageRead or
  // OpImageWrite instruction, so there is nothing to do here. This intrinsic
  // is combined with the load or store when that instruction is selected.
  return true;
}

bool SPIRVInstructionSelector::extractSubvector(
    Register &ResVReg, const SPIRVType *ResType, Register &ReadReg,
    MachineInstr &InsertionPoint) const {
  SPIRVType *InputType = GR.getResultType(ReadReg);
  [[maybe_unused]] uint64_t InputSize =
      GR.getScalarOrVectorComponentCount(InputType);
  uint64_t ResultSize = GR.getScalarOrVectorComponentCount(ResType);
  assert(InputSize > 1 && "The input must be a vector.");
  assert(ResultSize > 1 && "The result must be a vector.");
  assert(ResultSize < InputSize &&
         "Cannot extract more element than there are in the input.");
  SmallVector<Register> ComponentRegisters;
  SPIRVType *ScalarType = GR.getScalarOrVectorComponentType(ResType);
  const TargetRegisterClass *ScalarRegClass = GR.getRegClass(ScalarType);
  for (uint64_t Idx = 0; Idx < ResultSize; Idx++) {
    Register ComponentReg = MRI->createVirtualRegister(ScalarRegClass);
    bool Succeed = BuildMI(*InsertionPoint.getParent(), InsertionPoint,
                           InsertionPoint.getDebugLoc(),
                           TII.get(SPIRV::OpCompositeExtract))
                       .addDef(ComponentReg)
                       .addUse(GR.getSPIRVTypeID(ScalarType))
                       .addUse(ReadReg)
                       .addImm(Idx)
                       .constrainAllUses(TII, TRI, RBI);
    if (!Succeed)
      return false;
    ComponentRegisters.emplace_back(ComponentReg);
  }

  MachineInstrBuilder MIB = BuildMI(*InsertionPoint.getParent(), InsertionPoint,
                                    InsertionPoint.getDebugLoc(),
                                    TII.get(SPIRV::OpCompositeConstruct))
                                .addDef(ResVReg)
                                .addUse(GR.getSPIRVTypeID(ResType));
  for (Register ComponentReg : ComponentRegisters)
    MIB.addUse(ComponentReg);
  return MIB.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
    MachineInstr &I) const {
  // If the load of the image is in a different basic block, then this will
  // generate invalid code, so the handle is re-loaded at the write position.
  Register ImageReg = I.getOperand(1).getReg();
  auto *ImageDef = cast<GIntrinsic>(getVRegDef(*MRI, ImageReg));
  Register NewImageReg = MRI->createVirtualRegister(MRI->getRegClass(ImageReg));
  if (!loadHandleBeforePosition(NewImageReg, GR.getSPIRVTypeForVReg(ImageReg),
                                *ImageDef, I))
    return false;

  Register CoordinateReg = I.getOperand(2).getReg();
  Register DataReg = I.getOperand(3).getReg();
  assert(GR.getResultType(DataReg)->getOpcode() == SPIRV::OpTypeVector);
  assert(GR.getScalarOrVectorComponentCount(GR.getResultType(DataReg)) == 4);
  return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(SPIRV::OpImageWrite))
      .addUse(NewImageReg)
      .addUse(CoordinateReg)
      .addUse(DataReg)
      .constrainAllUses(TII, TRI, RBI);
}

Register SPIRVInstructionSelector::buildPointerToResource(
    const SPIRVType *ResType, uint32_t Set, uint32_t Binding,
    uint32_t ArraySize, Register IndexReg, bool IsNonUniform,
    MachineIRBuilder MIRBuilder) const {
  if (ArraySize == 1)
    return GR.getOrCreateGlobalVariableWithBinding(ResType, Set, Binding,
                                                   MIRBuilder);

  const SPIRVType *VarType = GR.getOrCreateSPIRVArrayType(
      ResType, ArraySize, *MIRBuilder.getInsertPt(), TII);
  Register VarReg = GR.getOrCreateGlobalVariableWithBinding(
      VarType, Set, Binding, MIRBuilder);

  SPIRVType *ResPointerType = GR.getOrCreateSPIRVPointerType(
      ResType, MIRBuilder, SPIRV::StorageClass::UniformConstant);

  Register AcReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  if (IsNonUniform) {
    // It is unclear which value needs to be marked as non-uniform, so both
    // the index and the access chain are decorated as non-uniform.
    buildOpDecorate(IndexReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
    buildOpDecorate(AcReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
  }

  MIRBuilder.buildInstr(SPIRV::OpAccessChain)
      .addDef(AcReg)
      .addUse(GR.getSPIRVTypeID(ResPointerType))
      .addUse(VarReg)
      .addUse(IndexReg);
  return AcReg;
}

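// Note (explanatory, not from the original source): a binding with
// ArraySize == 1 maps straight onto a global variable; an array of bindings
// becomes a global array variable plus an OpAccessChain selecting one
// element, with NonUniformEXT decorations applied when divergent indexing is
// possible.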
bool SPIRVInstructionSelector::selectFirstBitSet16(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
    unsigned ExtendOpcode, unsigned BitSetOpcode) const {
  Register ExtReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  bool Result =
      selectOpWithSrcs(ExtReg, ResType, I, {I.getOperand(2).getReg()},
                       ExtendOpcode);
  return Result &&
         selectFirstBitSet32(ResVReg, ResType, I, ExtReg, BitSetOpcode);
}

bool SPIRVInstructionSelector::selectFirstBitSet32(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
    Register SrcReg, unsigned BitSetOpcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(BitSetOpcode)
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}

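// Note (explanatory, not from the original source): FindUMsb, FindSMsb and
// FindILsb only exist in the GLSL.std.450 extended instruction set, so the
// 32-bit case is a single OpExtInst; 16-bit inputs are first widened with
// OpUConvert/OpSConvert, and 64-bit inputs are decomposed into 32-bit halves
// by selectFirstBitSet64 below.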
bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
    Register SrcReg, unsigned BitSetOpcode, bool SwapPrimarySide) const {
  // SPIR-V allows vectors of size 2, 3 and 4 only. Calling with a larger
  // vector requires creating a param register and return register with an
  // invalid vector size. If that is resolved, this function can be used for
  // vectors of any component count.
  unsigned ComponentCount = GR.getScalarOrVectorComponentCount(ResType);
  assert(ComponentCount < 5 && "Vec 5+ will generate invalid SPIR-V ops");

  MachineIRBuilder MIRBuilder(I);
  SPIRVType *BaseType = GR.retrieveScalarOrVectorIntType(ResType);
  SPIRVType *I64Type = GR.getOrCreateSPIRVIntegerType(64, MIRBuilder);
  SPIRVType *I64x2Type = GR.getOrCreateSPIRVVectorType(I64Type, 2, MIRBuilder);
  SPIRVType *Vec2ResType =
      GR.getOrCreateSPIRVVectorType(BaseType, 2, MIRBuilder);

  std::vector<Register> PartialRegs;

  // Loops 0, 2, 4, ... but stops one iteration early when ComponentCount is
  // odd.
  unsigned CurrentComponent = 0;
  for (; CurrentComponent + 1 < ComponentCount; CurrentComponent += 2) {
    // This register holds the firstbitX result for each of the i64x2 vectors
    // extracted from SrcReg.
    Register BitSetResult =
        MRI->createVirtualRegister(GR.getRegClass(I64x2Type));

    auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpVectorShuffle))
                   .addDef(BitSetResult)
                   .addUse(GR.getSPIRVTypeID(I64x2Type))
                   .addUse(SrcReg)
                   .addUse(SrcReg)
                   .addImm(CurrentComponent)
                   .addImm(CurrentComponent + 1);
    if (!MIB.constrainAllUses(TII, TRI, RBI))
      return false;

    Register SubVecBitSetReg =
        MRI->createVirtualRegister(GR.getRegClass(Vec2ResType));
    if (!selectFirstBitSet64(SubVecBitSetReg, Vec2ResType, I, BitSetResult,
                             BitSetOpcode, SwapPrimarySide))
      return false;

    PartialRegs.push_back(SubVecBitSetReg);
  }

  // On odd component counts we need to handle one more component.
  if (CurrentComponent != ComponentCount) {
    bool ZeroAsNull = STI.isOpenCLEnv();
    Register FinalElemReg = MRI->createVirtualRegister(GR.getRegClass(I64Type));
    Register ConstIntLastIdx = GR.getOrCreateConstInt(
        ComponentCount - 1, I, BaseType, TII, ZeroAsNull);

    if (!selectOpWithSrcs(FinalElemReg, I64Type, I, {SrcReg, ConstIntLastIdx},
                          SPIRV::OpVectorExtractDynamic))
      return false;

    Register FinalElemBitSetReg =
        MRI->createVirtualRegister(GR.getRegClass(BaseType));
    if (!selectFirstBitSet64(FinalElemBitSetReg, BaseType, I, FinalElemReg,
                             BitSetOpcode, SwapPrimarySide))
      return false;

    PartialRegs.push_back(FinalElemBitSetReg);
  }

  // Join all the resulting registers back into the return type in order.
  return selectOpWithSrcs(ResVReg, ResType, I, PartialRegs,
                          SPIRV::OpCompositeConstruct);
}

bool SPIRVInstructionSelector::selectFirstBitSet64(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
    Register SrcReg, unsigned BitSetOpcode, bool SwapPrimarySide) const {
  unsigned ComponentCount = GR.getScalarOrVectorComponentCount(ResType);
  SPIRVType *BaseType = GR.retrieveScalarOrVectorIntType(ResType);
  bool ZeroAsNull = STI.isOpenCLEnv();
  Register ConstIntZero =
      GR.getOrCreateConstInt(0, I, BaseType, TII, ZeroAsNull);
  Register ConstIntOne =
      GR.getOrCreateConstInt(1, I, BaseType, TII, ZeroAsNull);

  // SPIR-V doesn't support vectors with more than 4 components. Since the
  // algorithm below converts i64 -> i32x2 and i64x4 -> i32x8, it can only
  // operate on vectors with 2 or fewer components. When larger vectors are
  // seen, split them, recurse, then recombine the results.
  if (ComponentCount > 2) {
    return selectFirstBitSet64Overflow(ResVReg, ResType, I, SrcReg,
                                       BitSetOpcode, SwapPrimarySide);
  }

  // 1. Split int64 into 2 pieces using a bitcast.
  MachineIRBuilder MIRBuilder(I);
  SPIRVType *PostCastType =
      GR.getOrCreateSPIRVVectorType(BaseType, 2 * ComponentCount, MIRBuilder);
  Register BitcastReg =
      MRI->createVirtualRegister(GR.getRegClass(PostCastType));
  if (!selectOpWithSrcs(BitcastReg, PostCastType, I, {SrcReg},
                        SPIRV::OpBitcast))
    return false;

  // 2. Find the first set bit from the primary side for all the pieces in #1.
  Register FBSReg = MRI->createVirtualRegister(GR.getRegClass(PostCastType));
  if (!selectFirstBitSet32(FBSReg, PostCastType, I, BitcastReg, BitSetOpcode))
    return false;

  // 3. Split the result vector into high bits and low bits.
  Register HighReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  Register LowReg = MRI->createVirtualRegister(GR.getRegClass(ResType));

  bool IsScalarRes = ResType->getOpcode() != SPIRV::OpTypeVector;
  if (IsScalarRes) {
    // If the result is scalar, do a vector extract.
    if (!selectOpWithSrcs(HighReg, ResType, I, {FBSReg, ConstIntZero},
                          SPIRV::OpVectorExtractDynamic))
      return false;
    if (!selectOpWithSrcs(LowReg, ResType, I, {FBSReg, ConstIntOne},
                          SPIRV::OpVectorExtractDynamic))
      return false;
  } else {
    // If the result is a vector, do a shufflevector.
    auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpVectorShuffle))
                   .addDef(HighReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(FBSReg)
                   // Per the spec, repeat the vector if only one vec is needed.
                   .addUse(FBSReg);
    // High bits are stored in even indexes. Extract them from FBSReg.
    for (unsigned J = 0; J < ComponentCount * 2; J += 2) {
      MIB.addImm(J);
    }
    if (!MIB.constrainAllUses(TII, TRI, RBI))
      return false;

    MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                  TII.get(SPIRV::OpVectorShuffle))
              .addDef(LowReg)
              .addUse(GR.getSPIRVTypeID(ResType))
              .addUse(FBSReg)
              // Per the spec, repeat the vector if only one vec is needed.
              .addUse(FBSReg);
    // Low bits are stored in odd indexes. Extract them from FBSReg.
    for (unsigned J = 1; J < ComponentCount * 2; J += 2) {
      MIB.addImm(J);
    }
    if (!MIB.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  // 4. Check the result. When the primary bits == -1 use the secondary,
  // otherwise use the primary.
  SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
  Register NegOneReg;
  Register Reg0;
  Register Reg32;
  unsigned SelectOp;
  unsigned AddOp;

  if (IsScalarRes) {
    NegOneReg =
        GR.getOrCreateConstInt((unsigned)-1, I, ResType, TII, ZeroAsNull);
    Reg0 = GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
    Reg32 = GR.getOrCreateConstInt(32, I, ResType, TII, ZeroAsNull);
    SelectOp = SPIRV::OpSelectSISCond;
    AddOp = SPIRV::OpIAddS;
  } else {
    BoolType =
        GR.getOrCreateSPIRVVectorType(BoolType, ComponentCount, MIRBuilder);
    NegOneReg =
        GR.getOrCreateConstVector((unsigned)-1, I, ResType, TII, ZeroAsNull);
    Reg0 = GR.getOrCreateConstVector(0, I, ResType, TII, ZeroAsNull);
    Reg32 = GR.getOrCreateConstVector(32, I, ResType, TII, ZeroAsNull);
    SelectOp = SPIRV::OpSelectVIVCond;
    AddOp = SPIRV::OpIAddV;
  }

  Register PrimaryReg = HighReg;
  Register SecondaryReg = LowReg;
  Register PrimaryShiftReg = Reg32;
  Register SecondaryShiftReg = Reg0;

  // By default the emitted opcodes check for the set bit from the MSB side.
  // Setting SwapPrimarySide checks the set bit from the LSB side.
  if (SwapPrimarySide) {
    PrimaryReg = LowReg;
    SecondaryReg = HighReg;
    PrimaryShiftReg = Reg0;
    SecondaryShiftReg = Reg32;
  }

  // Check if the primary bits are == -1.
  Register BReg = MRI->createVirtualRegister(GR.getRegClass(BoolType));
  if (!selectOpWithSrcs(BReg, BoolType, I, {PrimaryReg, NegOneReg},
                        SPIRV::OpIEqual))
    return false;

  // Select the secondary bits if all primary bits are -1, otherwise primary.
  Register TmpReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  if (!selectOpWithSrcs(TmpReg, ResType, I, {BReg, SecondaryReg, PrimaryReg},
                        SelectOp))
    return false;

  // 5. Add 32 when high bits are used, otherwise 0 for low bits.
  Register ValReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  if (!selectOpWithSrcs(ValReg, ResType, I,
                        {BReg, SecondaryShiftReg, PrimaryShiftReg}, SelectOp))
    return false;

  return selectOpWithSrcs(ResVReg, ResType, I, {ValReg, TmpReg}, AddOp);
}

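// Worked example (illustrative, not from the original source), for
// firstbituhigh on a scalar i64 x = 0x0000'0001'0000'0000:
//   1. OpBitcast to v2i32 gives {high = 1, low = 0}.
//   2. FindUMsb per component gives {0, -1} (-1 meaning "no bit set").
//   3. The primary (high) word is != -1, so the value select keeps 0 and the
//      shift select keeps 32, yielding 32 + 0 = 32, the index of the set bit
//      counted from the LSB.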
bool SPIRVInstructionSelector::selectFirstBitHigh(Register ResVReg,
                                                  const SPIRVType *ResType,
                                                  MachineInstr &I,
                                                  bool IsSigned) const {
  // FindUMsb and FindSMsb intrinsics only support 32 bit integers.
  Register OpReg = I.getOperand(2).getReg();
  SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
  unsigned ExtendOpcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  unsigned BitSetOpcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;

  switch (GR.getScalarOrVectorBitWidth(OpType)) {
  case 16:
    return selectFirstBitSet16(ResVReg, ResType, I, ExtendOpcode,
                               BitSetOpcode);
  case 32:
    return selectFirstBitSet32(ResVReg, ResType, I, OpReg, BitSetOpcode);
  case 64:
    return selectFirstBitSet64(ResVReg, ResType, I, OpReg, BitSetOpcode,
                               /*SwapPrimarySide=*/false);
  default:
    report_fatal_error(
        "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
  }
}

bool SPIRVInstructionSelector::selectFirstBitLow(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // The FindILsb intrinsic only supports 32 bit integers.
  Register OpReg = I.getOperand(2).getReg();
  SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
  // OpUConvert treats the operand bits as an unsigned i16 and zero extends it
  // to an unsigned i32. This leaves all the least significant bits unchanged,
  // so the first set bit from the LSB side doesn't change.
  unsigned ExtendOpcode = SPIRV::OpUConvert;
  unsigned BitSetOpcode = GL::FindILsb;

  switch (GR.getScalarOrVectorBitWidth(OpType)) {
  case 16:
    return selectFirstBitSet16(ResVReg, ResType, I, ExtendOpcode,
                               BitSetOpcode);
  case 32:
    return selectFirstBitSet32(ResVReg, ResType, I, OpReg, BitSetOpcode);
  case 64:
    return selectFirstBitSet64(ResVReg, ResType, I, OpReg, BitSetOpcode,
                               /*SwapPrimarySide=*/true);
  default:
    report_fatal_error("spv_firstbitlow only supports 16,32,64 bits.");
  }
}

bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // There was an allocation size parameter to the allocation instruction that
  // is not 1.
  MachineBasicBlock &BB = *I.getParent();
  bool Res = BuildMI(BB, I, I.getDebugLoc(),
                     TII.get(SPIRV::OpVariableLengthArrayINTEL))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(2).getReg())
                 .constrainAllUses(TII, TRI, RBI);
  if (!STI.isVulkanEnv()) {
    unsigned Alignment = I.getOperand(3).getImm();
    buildOpDecorate(ResVReg, I, TII, SPIRV::Decoration::Alignment, {Alignment});
  }
  return Res;
}

bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  // All OpVariable instructions in a function must be the first instructions
  // in the first block, so move the insertion point there if needed.
  auto It = getOpVariableMBBIt(I);
  bool Res = BuildMI(*It->getParent(), It, It->getDebugLoc(),
                     TII.get(SPIRV::OpVariable))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
                 .constrainAllUses(TII, TRI, RBI);
  if (!STI.isVulkanEnv()) {
    unsigned Alignment = I.getOperand(2).getImm();
    buildOpDecorate(ResVReg, *It, TII, SPIRV::Decoration::Alignment,
                    {Alignment});
  }
  return Res;
}

bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so can generate an OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(),
                   TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. For an
  // explicit conditional branch with no fallthrough, we use both a G_BR and a
  // G_BRCOND to create an OpBranchConditional. We should hit G_BR first, and
  // generate an OpBranchConditional in selectBranch above.
  //
  // If an OpBranchConditional has been generated, we simply return, as the
  // work is already done. Otherwise, LLVM must be relying on implicit
  // fallthrough to the next basic block, so we need to create an
  // OpBranchConditional with an explicit "false" argument pointing to the
  // next basic block that LLVM would fall through to.
  const MachineInstr *NextI = I.getNextNode();
  // Check if this has already been successfully selected.
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;

  // Must be relying on implicit block fallthrough, so generate an
  // OpBranchConditional with the "next" basic block as the "false" target.
  MachineBasicBlock &MBB = *I.getParent();
  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  const unsigned NumOps = I.getNumOperands();
  for (unsigned i = 1; i < NumOps; i += 2) {
    MIB.addUse(I.getOperand(i + 0).getReg());
    MIB.addMBB(I.getOperand(i + 1).getMBB());
  }
  bool Res = MIB.constrainAllUses(TII, TRI, RBI);
  MIB->setDesc(TII.get(TargetOpcode::PHI));
  MIB->removeOperand(1);
  return Res;
}

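// Note (explanatory, not from the original source): after constraining the
// operands, the instruction's descriptor is swapped back to the generic
// TargetOpcode::PHI (and the type-id operand dropped) so that downstream
// machine passes keep treating it as a phi; the SPIR-V writer re-emits it as
// OpPhi.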
bool SPIRVInstructionSelector::selectGlobalValue(
    Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
  // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
  MachineIRBuilder MIRBuilder(I);
  const GlobalValue *GV = I.getOperand(1).getGlobal();
  Type *GVType = toTypedPointer(GR.getDeducedGlobalValueType(GV));
  SPIRVType *PointerBaseType;
  if (GVType->isArrayTy()) {
    SPIRVType *ArrayElementType =
        GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
                                SPIRV::AccessQualifier::ReadWrite, false);
    PointerBaseType = GR.getOrCreateSPIRVArrayType(
        ArrayElementType, GVType->getArrayNumElements(), I, TII);
  } else {
    PointerBaseType = GR.getOrCreateSPIRVType(
        GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  }

  std::string GlobalIdent;
  if (!GV->hasName()) {
    unsigned &ID = UnnamedGlobalIDs[GV];
    if (ID == 0)
      ID = UnnamedGlobalIDs.size();
    GlobalIdent = "__unnamed_" + Twine(ID).str();
  } else {
    GlobalIdent = GV->getName();
  }

  // Behaviour of functions as operands depends on availability of the
  // corresponding extension (SPV_INTEL_function_pointers): with the extension
  // a function "as an operand" becomes a constant function pointer; without
  // it the reference degrades to a null constant.
  if (isa<Function>(GV)) {
    const Constant *ConstVal = GV;
    MachineBasicBlock &BB = *I.getParent();
    Register NewReg = GR.find(ConstVal, GR.CurMF);
    if (!NewReg.isValid()) {
      Register NewReg = ResVReg;
      GR.add(ConstVal, GR.CurMF, NewReg);
      const Function *GVFun =
          STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
              ? dyn_cast<Function>(GV)
              : nullptr;
      SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
          PointerBaseType, I, TII,
          GVFun ? SPIRV::StorageClass::CodeSectionINTEL
                : addressSpaceToStorageClass(GV->getAddressSpace(), STI));
      if (GVFun) {
        // References to a function via function pointers generate virtual
        // registers without a definition, so define a placeholder OpUndef and
        // wire it into OpConstantFunctionPointerINTEL.
        Register ResTypeReg = GR.getSPIRVTypeID(ResType);
        MachineRegisterInfo *MRI = MIRBuilder.getMRI();
        Register FuncVReg =
            MRI->createGenericVirtualRegister(GR.getRegType(ResType));
        MRI->setRegClass(FuncVReg, &SPIRV::pIDRegClass);
        MachineInstrBuilder MIB1 =
            BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
                .addDef(FuncVReg)
                .addUse(ResTypeReg);
        MachineInstrBuilder MIB2 =
            BuildMI(BB, I, I.getDebugLoc(),
                    TII.get(SPIRV::OpConstantFunctionPointerINTEL))
                .addDef(NewReg)
                .addUse(ResTypeReg)
                .addUse(FuncVReg);
        // Map the function pointer to the used Function.
        GR.recordFunctionPointer(&MIB2.getInstr()->getOperand(2), GVFun);
        return MIB1.constrainAllUses(TII, TRI, RBI) &&
               MIB2.constrainAllUses(TII, TRI, RBI);
      }
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
          .addDef(NewReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .constrainAllUses(TII, TRI, RBI);
    }
    assert(NewReg != ResVReg);
    return BuildCOPY(ResVReg, NewReg, I);
  }
  auto GlobalVar = cast<GlobalVariable>(GV);

  bool HasInit = GlobalVar->hasInitializer() &&
                 !isa<UndefValue>(GlobalVar->getInitializer());
  // Skip empty declarations for GVs with initializers until we get the decl
  // with the passed initializer.
  if (HasInit && !Init)
    return true;

  bool HasLnkTy = !GV->hasInternalLinkage() && !GV->hasPrivateLinkage();
  SPIRV::LinkageType::LinkageType LnkType =
      GV->isDeclarationForLinker()
          ? SPIRV::LinkageType::Import
          : (GV->hasLinkOnceODRLinkage() &&
                     STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
                 ? SPIRV::LinkageType::LinkOnceODR
                 : SPIRV::LinkageType::Export);

  SPIRV::StorageClass::StorageClass StorageClass =
      addressSpaceToStorageClass(GV->getAddressSpace(), STI);
  SPIRVType *ResType =
      GR.getOrCreateSPIRVPointerType(PointerBaseType, I, TII, StorageClass);
  Register Reg = GR.buildGlobalVariable(
      ResVReg, ResType, GlobalIdent, GV, StorageClass, Init,
      GlobalVar->isConstant(), HasLnkTy, LnkType, MIRBuilder, true);
  return Reg.isValid();
}

bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
    return selectExtInst(ResVReg, ResType, I, CL::log10);
  }

  // There is no log10 instruction in the GLSL Extended Instruction set, so it
  // is implemented as:
  // log10(x) = log2(x) * (1 / log2(10))
  //          = log2(x) * 0.30103

  MachineIRBuilder MIRBuilder(I);
  MachineBasicBlock &BB = *I.getParent();

  // Build log2(x).
  Register VarReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  bool Result =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
          .addDef(VarReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
          .addImm(GL::Log2)
          .add(I.getOperand(1))
          .constrainAllUses(TII, TRI, RBI);

  // Build 0.30103.
  assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
         ResType->getOpcode() == SPIRV::OpTypeFloat);
  const SPIRVType *SpirvScalarType =
      ResType->getOpcode() == SPIRV::OpTypeVector
          ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
          : ResType;
  Register ScaleReg =
      GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);

  // Multiply log2(x) by 0.30103 to get the log10(x) result.
  auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
                    ? SPIRV::OpVectorTimesScalar
                    : SPIRV::OpFMulS;
  return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                       .addDef(ResVReg)
                       .addUse(GR.getSPIRVTypeID(ResType))
                       .addUse(VarReg)
                       .addUse(ScaleReg)
                       .constrainAllUses(TII, TRI, RBI);
}

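// Derivation (explanatory, not from the original source): by the change of
// base rule, log10(x) = log2(x) / log2(10), and 1 / log2(10) =
// 1 / 3.321928... = 0.30103, which is the constant multiplied in above.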
// Generate the instructions to load a 3-element vector builtin input
// ID/Index, e.g. GlobalInvocationId or LocalInvocationId.
bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
    SPIRV::BuiltIn::BuiltIn BuiltInValue, Register ResVReg,
    const SPIRVType *ResType, MachineInstr &I) const {
  MachineIRBuilder MIRBuilder(I);
  const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
  const SPIRVType *Vec3Ty =
      GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
  const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
      Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);

  // Create a new register for the input ID builtin variable.
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::iIDRegClass);
  MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 64));
  GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Build a global variable with the necessary decorations for the input ID
  // builtin variable.
  Register Variable = GR.buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltInValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, true, true,
      SPIRV::LinkageType::Import, MIRBuilder, false);

  // Create a new register for the loaded value.
  Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());

  // Load the v3uint value from the global variable.
  bool Result =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
          .addDef(LoadedRegister)
          .addUse(GR.getSPIRVTypeID(Vec3Ty))
          .addUse(Variable)
          .constrainAllUses(TII, TRI, RBI);

  // Get the input ID index. The operand is expected to be a constant
  // immediate value, wrapped in a type assignment.
  assert(I.getOperand(2).isReg());
  const uint32_t ThreadId = foldImm(I.getOperand(2), MRI);

  // Extract the input ID from the loaded vector value.
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(LoadedRegister)
                 .addImm(ThreadId);
  return Result && MIB.constrainAllUses(TII, TRI, RBI);
}

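// Illustrative lowering (not from the original source): for HLSL's
// SV_DispatchThreadID.x this produces roughly
//   %var = OpVariable %ptr_Input_v3uint Input  ; BuiltIn GlobalInvocationId
//   %vec = OpLoad %v3uint %var
//   %id  = OpCompositeExtract %uint %vec 0
// with the extracted component chosen by the constant operand folded above.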
SPIRVType *SPIRVInstructionSelector::widenTypeToVec4(const SPIRVType *Type,
                                                     MachineInstr &I) const {
  MachineIRBuilder MIRBuilder(I);
  if (Type->getOpcode() != SPIRV::OpTypeVector)
    return GR.getOrCreateSPIRVVectorType(Type, 4, MIRBuilder);

  uint64_t VectorSize = Type->getOperand(2).getImm();
  if (VectorSize == 4)
    return const_cast<SPIRVType *>(Type);

  Register ScalarTypeReg = Type->getOperand(1).getReg();
  const SPIRVType *ScalarType = GR.getSPIRVTypeForVReg(ScalarTypeReg);
  return GR.getOrCreateSPIRVVectorType(ScalarType, 4, MIRBuilder);
}

bool SPIRVInstructionSelector::loadHandleBeforePosition(
    Register &HandleReg, const SPIRVType *ResType, GIntrinsic &HandleDef,
    MachineInstr &Pos) const {
  assert(HandleDef.getIntrinsicID() ==
         Intrinsic::spv_resource_handlefrombinding);
  uint32_t Set = foldImm(HandleDef.getOperand(2), MRI);
  uint32_t Binding = foldImm(HandleDef.getOperand(3), MRI);
  uint32_t ArraySize = foldImm(HandleDef.getOperand(4), MRI);
  Register IndexReg = HandleDef.getOperand(5).getReg();
  bool IsNonUniform = ArraySize > 1 && foldImm(HandleDef.getOperand(6), MRI);

  MachineIRBuilder MIRBuilder(HandleDef);
  Register VarReg = buildPointerToResource(ResType, Set, Binding, ArraySize,
                                           IndexReg, IsNonUniform, MIRBuilder);

  if (IsNonUniform)
    buildOpDecorate(HandleReg, HandleDef, TII, SPIRV::Decoration::NonUniformEXT,
                    {});

  // The handle is a pointer to the resource, so it must be loaded before use.
  return BuildMI(*Pos.getParent(), Pos, HandleDef.getDebugLoc(),
                 TII.get(SPIRV::OpLoad))
      .addDef(HandleReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(VarReg)
      .constrainAllUses(TII, TRI, RBI);
}

namespace llvm {
InstructionSelector *
createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                               const SPIRVSubtarget &Subtarget,
                               const RegisterBankInfo &RBI) {
  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
}
} // namespace llvm