34#include "llvm/IR/IntrinsicsSPIRV.h"
38#define DEBUG_TYPE "spirv-isel"
41namespace CL = SPIRV::OpenCLExtInst;
42namespace GL = SPIRV::GLSLExtInst;
45 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
49llvm::SPIRV::SelectionControl::SelectionControl
50getSelectionOperandForImm(
int Imm) {
52 return SPIRV::SelectionControl::Flatten;
54 return SPIRV::SelectionControl::DontFlatten;
56 return SPIRV::SelectionControl::None;
60#define GET_GLOBALISEL_PREDICATE_BITSET
61#include "SPIRVGenGlobalISel.inc"
62#undef GET_GLOBALISEL_PREDICATE_BITSET
89#define GET_GLOBALISEL_PREDICATES_DECL
90#include "SPIRVGenGlobalISel.inc"
91#undef GET_GLOBALISEL_PREDICATES_DECL
93#define GET_GLOBALISEL_TEMPORARIES_DECL
94#include "SPIRVGenGlobalISel.inc"
95#undef GET_GLOBALISEL_TEMPORARIES_DECL
117 bool IsSigned)
const;
127 unsigned Opcode)
const;
130 unsigned Opcode)
const;
147 unsigned NegateOpcode = 0)
const;
201 template <
bool Signed>
204 template <
bool Signed>
217 bool IsSigned)
const;
219 bool IsSigned,
unsigned Opcode)
const;
221 bool IsSigned)
const;
227 bool IsSigned)
const;
260 [[maybe_unused]]
bool selectExtInst(
Register ResVReg,
263 GL::GLSLExtInst GLInst)
const;
268 GL::GLSLExtInst GLInst)
const;
295 std::pair<Register, bool>
297 const SPIRVType *ResType =
nullptr)
const;
309 SPIRV::StorageClass::StorageClass SC)
const;
317 Register IndexReg,
bool IsNonUniform,
323 bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
330#define GET_GLOBALISEL_IMPL
331#include "SPIRVGenGlobalISel.inc"
332#undef GET_GLOBALISEL_IMPL
338 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
340#include
"SPIRVGenGlobalISel.inc"
343#include
"SPIRVGenGlobalISel.inc"
353 GR.setCurrentFunc(MF);
354 InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
359 if (HasVRegsReset == &MF)
364 for (
unsigned I = 0, E =
MRI.getNumVirtRegs();
I != E; ++
I) {
366 LLT RegType =
MRI.getType(Reg);
374 for (
const auto &
MBB : MF) {
375 for (
const auto &
MI :
MBB) {
376 if (
MI.getOpcode() != SPIRV::ASSIGN_TYPE)
379 LLT DstType =
MRI.getType(DstReg);
381 LLT SrcType =
MRI.getType(SrcReg);
382 if (DstType != SrcType)
383 MRI.setType(DstReg,
MRI.getType(SrcReg));
387 if (DstRC != SrcRC && SrcRC)
388 MRI.setRegClass(DstReg, SrcRC);
399 for (
const auto &MO :
MI.all_defs()) {
401 if (Reg.isPhysical() || !
MRI.use_nodbg_empty(Reg))
404 if (
MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE ||
MI.isFakeUse() ||
405 MI.isLifetimeMarker())
409 if (
MI.mayStore() ||
MI.isCall() ||
410 (
MI.mayLoad() &&
MI.hasOrderedMemoryRef()) ||
MI.isPosition() ||
411 MI.isDebugInstr() ||
MI.isTerminator() ||
MI.isJumpTableDebugInfo())
417 resetVRegsType(*
I.getParent()->getParent());
419 assert(
I.getParent() &&
"Instruction should be in a basic block!");
420 assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
425 if (Opcode == SPIRV::ASSIGN_TYPE) {
426 Register DstReg =
I.getOperand(0).getReg();
427 Register SrcReg =
I.getOperand(1).getReg();
428 auto *
Def =
MRI->getVRegDef(SrcReg);
430 bool Res = selectImpl(
I, *CoverageInfo);
432 if (!Res &&
Def->getOpcode() != TargetOpcode::G_CONSTANT) {
433 dbgs() <<
"Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
437 assert(Res ||
Def->getOpcode() == TargetOpcode::G_CONSTANT);
444 MRI->setRegClass(SrcReg,
MRI->getRegClass(DstReg));
445 MRI->replaceRegWith(SrcReg, DstReg);
446 GR.invalidateMachineInstr(&
I);
447 I.removeFromParent();
449 }
else if (
I.getNumDefs() == 1) {
456 if (DeadMIs.contains(&
I)) {
461 GR.invalidateMachineInstr(&
I);
466 if (
I.getNumOperands() !=
I.getNumExplicitOperands()) {
467 LLVM_DEBUG(
errs() <<
"Generic instr has unexpected implicit operands\n");
473 bool HasDefs =
I.getNumDefs() > 0;
475 SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) :
nullptr;
476 assert(!HasDefs || ResType ||
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
477 if (spvSelect(ResVReg, ResType,
I)) {
479 for (
unsigned i = 0; i <
I.getNumDefs(); ++i)
481 GR.invalidateMachineInstr(&
I);
482 I.removeFromParent();
490 case TargetOpcode::G_CONSTANT:
492 case TargetOpcode::G_SADDO:
493 case TargetOpcode::G_SSUBO:
503 if (DstRC != SrcRC && SrcRC)
504 MRI->setRegClass(DestReg, SrcRC);
505 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
506 TII.get(TargetOpcode::COPY))
512bool SPIRVInstructionSelector::spvSelect(
Register ResVReg,
515 const unsigned Opcode =
I.getOpcode();
517 return selectImpl(
I, *CoverageInfo);
519 case TargetOpcode::G_CONSTANT:
520 return selectConst(ResVReg, ResType,
I.getOperand(1).getCImm()->getValue(),
522 case TargetOpcode::G_GLOBAL_VALUE:
523 return selectGlobalValue(ResVReg,
I);
524 case TargetOpcode::G_IMPLICIT_DEF:
525 return selectOpUndef(ResVReg, ResType,
I);
526 case TargetOpcode::G_FREEZE:
527 return selectFreeze(ResVReg, ResType,
I);
529 case TargetOpcode::G_INTRINSIC:
530 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
531 case TargetOpcode::G_INTRINSIC_CONVERGENT:
532 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
533 return selectIntrinsic(ResVReg, ResType,
I);
534 case TargetOpcode::G_BITREVERSE:
535 return selectBitreverse(ResVReg, ResType,
I);
537 case TargetOpcode::G_BUILD_VECTOR:
538 return selectBuildVector(ResVReg, ResType,
I);
539 case TargetOpcode::G_SPLAT_VECTOR:
540 return selectSplatVector(ResVReg, ResType,
I);
542 case TargetOpcode::G_SHUFFLE_VECTOR: {
544 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
546 .
addUse(GR.getSPIRVTypeID(ResType))
547 .
addUse(
I.getOperand(1).getReg())
548 .
addUse(
I.getOperand(2).getReg());
549 for (
auto V :
I.getOperand(3).getShuffleMask())
553 case TargetOpcode::G_MEMMOVE:
554 case TargetOpcode::G_MEMCPY:
555 case TargetOpcode::G_MEMSET:
556 return selectMemOperation(ResVReg,
I);
558 case TargetOpcode::G_ICMP:
559 return selectICmp(ResVReg, ResType,
I);
560 case TargetOpcode::G_FCMP:
561 return selectFCmp(ResVReg, ResType,
I);
563 case TargetOpcode::G_FRAME_INDEX:
564 return selectFrameIndex(ResVReg, ResType,
I);
566 case TargetOpcode::G_LOAD:
567 return selectLoad(ResVReg, ResType,
I);
568 case TargetOpcode::G_STORE:
569 return selectStore(
I);
571 case TargetOpcode::G_BR:
572 return selectBranch(
I);
573 case TargetOpcode::G_BRCOND:
574 return selectBranchCond(
I);
576 case TargetOpcode::G_PHI:
577 return selectPhi(ResVReg, ResType,
I);
579 case TargetOpcode::G_FPTOSI:
580 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
581 case TargetOpcode::G_FPTOUI:
582 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
584 case TargetOpcode::G_SITOFP:
585 return selectIToF(ResVReg, ResType,
I,
true, SPIRV::OpConvertSToF);
586 case TargetOpcode::G_UITOFP:
587 return selectIToF(ResVReg, ResType,
I,
false, SPIRV::OpConvertUToF);
589 case TargetOpcode::G_CTPOP:
590 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitCount);
591 case TargetOpcode::G_SMIN:
592 return selectExtInst(ResVReg, ResType,
I, CL::s_min, GL::SMin);
593 case TargetOpcode::G_UMIN:
594 return selectExtInst(ResVReg, ResType,
I, CL::u_min, GL::UMin);
596 case TargetOpcode::G_SMAX:
597 return selectExtInst(ResVReg, ResType,
I, CL::s_max, GL::SMax);
598 case TargetOpcode::G_UMAX:
599 return selectExtInst(ResVReg, ResType,
I, CL::u_max, GL::UMax);
601 case TargetOpcode::G_SCMP:
602 return selectSUCmp(ResVReg, ResType,
I,
true);
603 case TargetOpcode::G_UCMP:
604 return selectSUCmp(ResVReg, ResType,
I,
false);
606 case TargetOpcode::G_STRICT_FMA:
607 case TargetOpcode::G_FMA:
608 return selectExtInst(ResVReg, ResType,
I, CL::fma, GL::Fma);
610 case TargetOpcode::G_STRICT_FLDEXP:
611 return selectExtInst(ResVReg, ResType,
I, CL::ldexp);
613 case TargetOpcode::G_FPOW:
614 return selectExtInst(ResVReg, ResType,
I, CL::pow, GL::Pow);
615 case TargetOpcode::G_FPOWI:
616 return selectExtInst(ResVReg, ResType,
I, CL::pown);
618 case TargetOpcode::G_FEXP:
619 return selectExtInst(ResVReg, ResType,
I, CL::exp, GL::Exp);
620 case TargetOpcode::G_FEXP2:
621 return selectExtInst(ResVReg, ResType,
I, CL::exp2, GL::Exp2);
623 case TargetOpcode::G_FLOG:
624 return selectExtInst(ResVReg, ResType,
I, CL::log, GL::Log);
625 case TargetOpcode::G_FLOG2:
626 return selectExtInst(ResVReg, ResType,
I, CL::log2, GL::Log2);
627 case TargetOpcode::G_FLOG10:
628 return selectLog10(ResVReg, ResType,
I);
630 case TargetOpcode::G_FABS:
631 return selectExtInst(ResVReg, ResType,
I, CL::fabs, GL::FAbs);
632 case TargetOpcode::G_ABS:
633 return selectExtInst(ResVReg, ResType,
I, CL::s_abs, GL::SAbs);
635 case TargetOpcode::G_FMINNUM:
636 case TargetOpcode::G_FMINIMUM:
637 return selectExtInst(ResVReg, ResType,
I, CL::fmin, GL::NMin);
638 case TargetOpcode::G_FMAXNUM:
639 case TargetOpcode::G_FMAXIMUM:
640 return selectExtInst(ResVReg, ResType,
I, CL::fmax, GL::NMax);
642 case TargetOpcode::G_FCOPYSIGN:
643 return selectExtInst(ResVReg, ResType,
I, CL::copysign);
645 case TargetOpcode::G_FCEIL:
646 return selectExtInst(ResVReg, ResType,
I, CL::ceil, GL::Ceil);
647 case TargetOpcode::G_FFLOOR:
648 return selectExtInst(ResVReg, ResType,
I, CL::floor, GL::Floor);
650 case TargetOpcode::G_FCOS:
651 return selectExtInst(ResVReg, ResType,
I, CL::cos, GL::Cos);
652 case TargetOpcode::G_FSIN:
653 return selectExtInst(ResVReg, ResType,
I, CL::sin, GL::Sin);
654 case TargetOpcode::G_FTAN:
655 return selectExtInst(ResVReg, ResType,
I, CL::tan, GL::Tan);
656 case TargetOpcode::G_FACOS:
657 return selectExtInst(ResVReg, ResType,
I, CL::acos, GL::Acos);
658 case TargetOpcode::G_FASIN:
659 return selectExtInst(ResVReg, ResType,
I, CL::asin, GL::Asin);
660 case TargetOpcode::G_FATAN:
661 return selectExtInst(ResVReg, ResType,
I, CL::atan, GL::Atan);
662 case TargetOpcode::G_FATAN2:
663 return selectExtInst(ResVReg, ResType,
I, CL::atan2, GL::Atan2);
664 case TargetOpcode::G_FCOSH:
665 return selectExtInst(ResVReg, ResType,
I, CL::cosh, GL::Cosh);
666 case TargetOpcode::G_FSINH:
667 return selectExtInst(ResVReg, ResType,
I, CL::sinh, GL::Sinh);
668 case TargetOpcode::G_FTANH:
669 return selectExtInst(ResVReg, ResType,
I, CL::tanh, GL::Tanh);
671 case TargetOpcode::G_STRICT_FSQRT:
672 case TargetOpcode::G_FSQRT:
673 return selectExtInst(ResVReg, ResType,
I, CL::sqrt, GL::Sqrt);
675 case TargetOpcode::G_CTTZ:
676 case TargetOpcode::G_CTTZ_ZERO_UNDEF:
677 return selectExtInst(ResVReg, ResType,
I, CL::ctz);
678 case TargetOpcode::G_CTLZ:
679 case TargetOpcode::G_CTLZ_ZERO_UNDEF:
680 return selectExtInst(ResVReg, ResType,
I, CL::clz);
682 case TargetOpcode::G_INTRINSIC_ROUND:
683 return selectExtInst(ResVReg, ResType,
I, CL::round, GL::Round);
684 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
685 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
686 case TargetOpcode::G_INTRINSIC_TRUNC:
687 return selectExtInst(ResVReg, ResType,
I, CL::trunc, GL::Trunc);
688 case TargetOpcode::G_FRINT:
689 case TargetOpcode::G_FNEARBYINT:
690 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
692 case TargetOpcode::G_SMULH:
693 return selectExtInst(ResVReg, ResType,
I, CL::s_mul_hi);
694 case TargetOpcode::G_UMULH:
695 return selectExtInst(ResVReg, ResType,
I, CL::u_mul_hi);
697 case TargetOpcode::G_SADDSAT:
698 return selectExtInst(ResVReg, ResType,
I, CL::s_add_sat);
699 case TargetOpcode::G_UADDSAT:
700 return selectExtInst(ResVReg, ResType,
I, CL::u_add_sat);
701 case TargetOpcode::G_SSUBSAT:
702 return selectExtInst(ResVReg, ResType,
I, CL::s_sub_sat);
703 case TargetOpcode::G_USUBSAT:
704 return selectExtInst(ResVReg, ResType,
I, CL::u_sub_sat);
706 case TargetOpcode::G_UADDO:
707 return selectOverflowArith(ResVReg, ResType,
I,
708 ResType->
getOpcode() == SPIRV::OpTypeVector
709 ? SPIRV::OpIAddCarryV
710 : SPIRV::OpIAddCarryS);
711 case TargetOpcode::G_USUBO:
712 return selectOverflowArith(ResVReg, ResType,
I,
713 ResType->
getOpcode() == SPIRV::OpTypeVector
714 ? SPIRV::OpISubBorrowV
715 : SPIRV::OpISubBorrowS);
716 case TargetOpcode::G_UMULO:
717 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpUMulExtended);
718 case TargetOpcode::G_SMULO:
719 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpSMulExtended);
721 case TargetOpcode::G_SEXT:
722 return selectExt(ResVReg, ResType,
I,
true);
723 case TargetOpcode::G_ANYEXT:
724 case TargetOpcode::G_ZEXT:
725 return selectExt(ResVReg, ResType,
I,
false);
726 case TargetOpcode::G_TRUNC:
727 return selectTrunc(ResVReg, ResType,
I);
728 case TargetOpcode::G_FPTRUNC:
729 case TargetOpcode::G_FPEXT:
730 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpFConvert);
732 case TargetOpcode::G_PTRTOINT:
733 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertPtrToU);
734 case TargetOpcode::G_INTTOPTR:
735 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertUToPtr);
736 case TargetOpcode::G_BITCAST:
737 return selectBitcast(ResVReg, ResType,
I);
738 case TargetOpcode::G_ADDRSPACE_CAST:
739 return selectAddrSpaceCast(ResVReg, ResType,
I);
740 case TargetOpcode::G_PTR_ADD: {
742 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
746 assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
747 (*II).getOpcode() == TargetOpcode::COPY ||
748 (*II).getOpcode() == SPIRV::OpVariable) &&
751 bool IsGVInit =
false;
753 UseIt =
MRI->use_instr_begin(
I.getOperand(0).getReg()),
754 UseEnd =
MRI->use_instr_end();
755 UseIt != UseEnd; UseIt = std::next(UseIt)) {
756 if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
757 (*UseIt).getOpcode() == SPIRV::OpVariable) {
764 SPIRVType *GVType = GR.getSPIRVTypeForVReg(GV);
765 SPIRVType *GVPointeeType = GR.getPointeeType(GVType);
766 SPIRVType *ResPointeeType = GR.getPointeeType(ResType);
767 if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
770 Register NewVReg =
MRI->createGenericVirtualRegister(
MRI->getType(GV));
771 MRI->setRegClass(NewVReg,
MRI->getRegClass(GV));
778 if (!GR.isBitcastCompatible(ResType, GVType))
780 "incompatible result and operand types in a bitcast");
781 Register ResTypeReg = GR.getSPIRVTypeID(ResType);
783 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitcast))
789 TII.get(STI.isVulkanEnv()
790 ? SPIRV::OpInBoundsAccessChain
791 : SPIRV::OpInBoundsPtrAccessChain))
795 .
addUse(
I.getOperand(2).getReg())
798 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
800 .
addUse(GR.getSPIRVTypeID(ResType))
802 static_cast<uint32_t>(SPIRV::Opcode::InBoundsPtrAccessChain))
804 .
addUse(
I.getOperand(2).getReg())
811 Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32,
I,
TII),
I);
812 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
814 .
addUse(GR.getSPIRVTypeID(ResType))
816 SPIRV::Opcode::InBoundsPtrAccessChain))
819 .
addUse(
I.getOperand(2).getReg());
823 case TargetOpcode::G_ATOMICRMW_OR:
824 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicOr);
825 case TargetOpcode::G_ATOMICRMW_ADD:
826 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicIAdd);
827 case TargetOpcode::G_ATOMICRMW_AND:
828 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicAnd);
829 case TargetOpcode::G_ATOMICRMW_MAX:
830 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMax);
831 case TargetOpcode::G_ATOMICRMW_MIN:
832 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMin);
833 case TargetOpcode::G_ATOMICRMW_SUB:
834 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicISub);
835 case TargetOpcode::G_ATOMICRMW_XOR:
836 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicXor);
837 case TargetOpcode::G_ATOMICRMW_UMAX:
838 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMax);
839 case TargetOpcode::G_ATOMICRMW_UMIN:
840 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMin);
841 case TargetOpcode::G_ATOMICRMW_XCHG:
842 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicExchange);
843 case TargetOpcode::G_ATOMIC_CMPXCHG:
844 return selectAtomicCmpXchg(ResVReg, ResType,
I);
846 case TargetOpcode::G_ATOMICRMW_FADD:
847 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT);
848 case TargetOpcode::G_ATOMICRMW_FSUB:
850 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT,
852 case TargetOpcode::G_ATOMICRMW_FMIN:
853 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMinEXT);
854 case TargetOpcode::G_ATOMICRMW_FMAX:
855 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMaxEXT);
857 case TargetOpcode::G_FENCE:
858 return selectFence(
I);
860 case TargetOpcode::G_STACKSAVE:
861 return selectStackSave(ResVReg, ResType,
I);
862 case TargetOpcode::G_STACKRESTORE:
863 return selectStackRestore(
I);
865 case TargetOpcode::G_UNMERGE_VALUES:
871 case TargetOpcode::G_TRAP:
872 case TargetOpcode::G_DEBUGTRAP:
873 case TargetOpcode::G_UBSANTRAP:
874 case TargetOpcode::DBG_LABEL:
882bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
885 GL::GLSLExtInst GLInst)
const {
886 return selectExtInst(ResVReg, ResType,
I,
887 {{SPIRV::InstructionSet::GLSL_std_450, GLInst}});
890bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
893 CL::OpenCLExtInst CLInst)
const {
894 return selectExtInst(ResVReg, ResType,
I,
895 {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
898bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
901 CL::OpenCLExtInst CLInst,
902 GL::GLSLExtInst GLInst)
const {
903 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
904 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
905 return selectExtInst(ResVReg, ResType,
I, ExtInsts);
908bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
913 for (
const auto &Ex : Insts) {
914 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
916 if (STI.canUseExtInstSet(Set)) {
918 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
920 .
addUse(GR.getSPIRVTypeID(ResType))
923 const unsigned NumOps =
I.getNumOperands();
925 if (Index < NumOps &&
926 I.getOperand(Index).getType() ==
927 MachineOperand::MachineOperandType::MO_IntrinsicID)
930 MIB.
add(
I.getOperand(Index));
937bool SPIRVInstructionSelector::selectOpWithSrcs(
Register ResVReg,
940 std::vector<Register> Srcs,
941 unsigned Opcode)
const {
942 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
944 .
addUse(GR.getSPIRVTypeID(ResType));
951bool SPIRVInstructionSelector::selectUnOp(
Register ResVReg,
954 unsigned Opcode)
const {
955 if (STI.isOpenCLEnv() &&
I.getOperand(1).isReg()) {
956 Register SrcReg =
I.getOperand(1).getReg();
959 MRI->def_instr_begin(SrcReg);
960 DefIt !=
MRI->def_instr_end(); DefIt = std::next(DefIt)) {
961 if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
969 case SPIRV::OpConvertPtrToU:
970 SpecOpcode =
static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
972 case SPIRV::OpConvertUToPtr:
973 SpecOpcode =
static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
977 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
978 TII.get(SPIRV::OpSpecConstantOp))
980 .
addUse(GR.getSPIRVTypeID(ResType))
986 return selectOpWithSrcs(ResVReg, ResType,
I, {
I.getOperand(1).
getReg()},
990bool SPIRVInstructionSelector::selectBitcast(
Register ResVReg,
993 Register OpReg =
I.getOperand(1).getReg();
994 SPIRVType *OpType = OpReg.
isValid() ? GR.getSPIRVTypeForVReg(OpReg) :
nullptr;
995 if (!GR.isBitcastCompatible(ResType, OpType))
997 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
1003 if (
MemOp->isVolatile())
1004 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1005 if (
MemOp->isNonTemporal())
1006 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1007 if (
MemOp->getAlign().value())
1008 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
1010 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
1012 if (SpvMemOp &
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
1019 if (Flags & MachineMemOperand::Flags::MOVolatile)
1020 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1021 if (Flags & MachineMemOperand::Flags::MONonTemporal)
1022 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1024 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None))
1028bool SPIRVInstructionSelector::selectLoad(
Register ResVReg,
1031 unsigned OpOffset = isa<GIntrinsic>(
I) ? 1 : 0;
1033 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1035 .
addUse(GR.getSPIRVTypeID(ResType))
1037 if (!
I.getNumMemOperands()) {
1038 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1040 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1048bool SPIRVInstructionSelector::selectStore(
MachineInstr &
I)
const {
1049 unsigned OpOffset = isa<GIntrinsic>(
I) ? 1 : 0;
1050 Register StoreVal =
I.getOperand(0 + OpOffset).getReg();
1053 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpStore))
1056 if (!
I.getNumMemOperands()) {
1057 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1059 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1067bool SPIRVInstructionSelector::selectStackSave(
Register ResVReg,
1070 if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1072 "llvm.stacksave intrinsic: this instruction requires the following "
1073 "SPIR-V extension: SPV_INTEL_variable_length_array",
1076 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSaveMemoryINTEL))
1078 .
addUse(GR.getSPIRVTypeID(ResType))
1082bool SPIRVInstructionSelector::selectStackRestore(
MachineInstr &
I)
const {
1083 if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1085 "llvm.stackrestore intrinsic: this instruction requires the following "
1086 "SPIR-V extension: SPV_INTEL_variable_length_array",
1088 if (!
I.getOperand(0).isReg())
1091 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpRestoreMemoryINTEL))
1092 .
addUse(
I.getOperand(0).getReg())
1096bool SPIRVInstructionSelector::selectMemOperation(
Register ResVReg,
1099 Register SrcReg =
I.getOperand(1).getReg();
1101 if (
I.getOpcode() == TargetOpcode::G_MEMSET) {
1102 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
1105 SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8,
I,
TII);
1106 SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num,
I,
TII);
1108 SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
1109 ArrTy,
I,
TII, SPIRV::StorageClass::UniformConstant);
1119 GR.add(GV, GR.CurMF, VarReg);
1120 GR.addGlobalObject(GV, GR.CurMF, VarReg);
1123 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
1125 .
addUse(GR.getSPIRVTypeID(VarTy))
1126 .
addImm(SPIRV::StorageClass::UniformConstant)
1130 SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
1131 ValTy,
I,
TII, SPIRV::StorageClass::UniformConstant);
1133 selectOpWithSrcs(SrcReg, SourceTy,
I, {VarReg}, SPIRV::OpBitcast);
1135 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemorySized))
1136 .
addUse(
I.getOperand(0).getReg())
1138 .
addUse(
I.getOperand(2).getReg());
1139 if (
I.getNumMemOperands())
1147bool SPIRVInstructionSelector::selectAtomicRMW(
Register ResVReg,
1151 unsigned NegateOpcode)
const {
1156 GR.CurMF->getFunction().getContext(),
MemOp->getSyncScopeID()));
1157 auto ScopeConstant = buildI32Constant(Scope,
I);
1158 Register ScopeReg = ScopeConstant.first;
1159 Result &= ScopeConstant.second;
1167 auto MemSemConstant = buildI32Constant(MemSem ,
I);
1168 Register MemSemReg = MemSemConstant.first;
1169 Result &= MemSemConstant.second;
1171 Register ValueReg =
I.getOperand(2).getReg();
1172 if (NegateOpcode != 0) {
1174 Register TmpReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1175 Result &= selectOpWithSrcs(TmpReg, ResType,
I, {ValueReg}, NegateOpcode);
1180 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(NewOpcode))
1182 .
addUse(GR.getSPIRVTypeID(ResType))
1190bool SPIRVInstructionSelector::selectUnmergeValues(
MachineInstr &
I)
const {
1191 unsigned ArgI =
I.getNumOperands() - 1;
1193 I.getOperand(ArgI).isReg() ?
I.getOperand(ArgI).getReg() :
Register(0);
1195 SrcReg.
isValid() ? GR.getSPIRVTypeForVReg(SrcReg) :
nullptr;
1196 if (!DefType || DefType->
getOpcode() != SPIRV::OpTypeVector)
1198 "cannot select G_UNMERGE_VALUES with a non-vector argument");
1204 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1205 Register ResVReg =
I.getOperand(i).getReg();
1206 SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
1209 ResType = ScalarType;
1210 MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
1211 MRI->setType(ResVReg,
LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
1212 GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
1215 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1217 .
addUse(GR.getSPIRVTypeID(ResType))
1219 .
addImm(
static_cast<int64_t
>(i));
1225bool SPIRVInstructionSelector::selectFence(
MachineInstr &
I)
const {
1228 auto MemSemConstant = buildI32Constant(MemSem,
I);
1229 Register MemSemReg = MemSemConstant.first;
1230 bool Result = MemSemConstant.second;
1233 getMemScope(GR.CurMF->getFunction().getContext(), Ord));
1234 auto ScopeConstant = buildI32Constant(Scope,
I);
1235 Register ScopeReg = ScopeConstant.first;
1236 Result &= ScopeConstant.second;
1239 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpMemoryBarrier))
1245bool SPIRVInstructionSelector::selectOverflowArith(
Register ResVReg,
1248 unsigned Opcode)
const {
1249 Type *ResTy =
nullptr;
1251 if (!GR.findValueAttrs(&
I, ResTy, ResName))
1253 "Not enough info to select the arithmetic with overflow instruction");
1256 "with overflow instruction");
1259 Type *ResElemTy = cast<StructType>(ResTy)->getElementType(0);
1264 ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
1265 assert(
I.getNumDefs() > 1 &&
"Not enought operands");
1267 unsigned N = GR.getScalarOrVectorComponentCount(ResType);
1269 BoolType = GR.getOrCreateSPIRVVectorType(BoolType,
N,
I,
TII);
1270 Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);
1271 Register ZeroReg = buildZerosVal(ResType,
I);
1274 MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
1276 if (ResName.
size() > 0)
1281 BuildMI(BB, MIRBuilder.getInsertPt(),
I.getDebugLoc(),
TII.get(Opcode))
1284 for (
unsigned i =
I.getNumDefs(); i <
I.getNumOperands(); ++i)
1285 MIB.
addUse(
I.getOperand(i).getReg());
1290 MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
1291 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1293 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1294 .
addDef(i == 1 ? HigherVReg :
I.getOperand(i).getReg())
1295 .
addUse(GR.getSPIRVTypeID(ResType))
1302 .
addDef(
I.getOperand(1).getReg())
1309bool SPIRVInstructionSelector::selectAtomicCmpXchg(
Register ResVReg,
1317 if (!isa<GIntrinsic>(
I)) {
1321 GR.CurMF->getFunction().getContext(),
MemOp->getSyncScopeID()));
1322 auto ScopeConstant = buildI32Constant(Scope,
I);
1323 ScopeReg = ScopeConstant.first;
1324 Result &= ScopeConstant.second;
1326 unsigned ScSem =
static_cast<uint32_t>(
1330 auto MemSemEqConstant = buildI32Constant(MemSemEq,
I);
1331 MemSemEqReg = MemSemEqConstant.first;
1332 Result &= MemSemEqConstant.second;
1335 if (MemSemEq == MemSemNeq)
1336 MemSemNeqReg = MemSemEqReg;
1338 auto MemSemNeqConstant = buildI32Constant(MemSemEq,
I);
1339 MemSemNeqReg = MemSemNeqConstant.first;
1340 Result &= MemSemNeqConstant.second;
1343 ScopeReg =
I.getOperand(5).getReg();
1344 MemSemEqReg =
I.getOperand(6).getReg();
1345 MemSemNeqReg =
I.getOperand(7).getReg();
1349 Register Val =
I.getOperand(4).getReg();
1350 SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
1351 Register ACmpRes =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1354 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpAtomicCompareExchange))
1356 .
addUse(GR.getSPIRVTypeID(SpvValTy))
1364 Register CmpSuccReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1368 .
addUse(GR.getSPIRVTypeID(BoolTy))
1372 Register TmpReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1375 .
addUse(GR.getSPIRVTypeID(ResType))
1377 .
addUse(GR.getOrCreateUndef(
I, ResType,
TII))
1381 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpCompositeInsert))
1383 .
addUse(GR.getSPIRVTypeID(ResType))
1392 case SPIRV::StorageClass::Workgroup:
1393 case SPIRV::StorageClass::CrossWorkgroup:
1394 case SPIRV::StorageClass::Function:
1403 case SPIRV::StorageClass::DeviceOnlyINTEL:
1404 case SPIRV::StorageClass::HostOnlyINTEL:
1413 bool IsGRef =
false;
1414 bool IsAllowedRefs =
1415 std::all_of(
MRI->use_instr_begin(ResVReg),
MRI->use_instr_end(),
1416 [&IsGRef](
auto const &It) {
1417 unsigned Opcode = It.getOpcode();
1418 if (Opcode == SPIRV::OpConstantComposite ||
1419 Opcode == SPIRV::OpVariable ||
1420 isSpvIntrinsic(It, Intrinsic::spv_init_global))
1421 return IsGRef = true;
1422 return Opcode == SPIRV::OpName;
1424 return IsAllowedRefs && IsGRef;
1427Register SPIRVInstructionSelector::getUcharPtrTypeReg(
1428 MachineInstr &
I, SPIRV::StorageClass::StorageClass SC)
const {
1429 return GR.getSPIRVTypeID(GR.getOrCreateSPIRVPointerType(
1430 GR.getOrCreateSPIRVIntegerType(8,
I,
TII),
I,
TII, SC));
1437 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1438 TII.get(SPIRV::OpSpecConstantOp))
1448 SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
1449 GR.getPointeeType(SrcPtrTy),
I,
TII, SPIRV::StorageClass::Generic);
1450 Register Tmp =
MRI->createVirtualRegister(&SPIRV::pIDRegClass);
1452 SPIRV::StorageClass::Generic),
1453 GR.getPointerSize()));
1455 GR.assignSPIRVTypeToVReg(GenericPtrTy, Tmp, *MF);
1457 I, Tmp, SrcPtr, GR.getSPIRVTypeID(GenericPtrTy),
1458 static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric));
1468bool SPIRVInstructionSelector::selectAddrSpaceCast(
Register ResVReg,
1474 Register SrcPtr =
I.getOperand(1).getReg();
1475 SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
1478 if (SrcPtrTy->
getOpcode() != SPIRV::OpTypePointer ||
1479 ResType->
getOpcode() != SPIRV::OpTypePointer)
1480 return BuildCOPY(ResVReg, SrcPtr,
I);
1482 SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtrTy);
1483 SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResType);
1490 unsigned SpecOpcode =
1492 ?
static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric)
1493 : (SrcSC == SPIRV::StorageClass::Generic &&
1495 ?
static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr)
1502 return buildSpecConstantOp(
I, ResVReg, SrcPtr,
1503 getUcharPtrTypeReg(
I, DstSC), SpecOpcode)
1504 .constrainAllUses(
TII,
TRI, RBI);
1508 buildSpecConstantOp(
1510 getUcharPtrTypeReg(
I, DstSC),
1511 static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr))
1512 .constrainAllUses(
TII,
TRI, RBI);
1518 return BuildCOPY(ResVReg, SrcPtr,
I);
1520 if ((SrcSC == SPIRV::StorageClass::Function &&
1521 DstSC == SPIRV::StorageClass::Private) ||
1522 (DstSC == SPIRV::StorageClass::Function &&
1523 SrcSC == SPIRV::StorageClass::Private))
1524 return BuildCOPY(ResVReg, SrcPtr,
I);
1528 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1531 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
1534 Register Tmp =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1535 SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
1536 GR.getPointeeType(SrcPtrTy),
I,
TII, SPIRV::StorageClass::Generic);
1539 .
addUse(GR.getSPIRVTypeID(GenericPtrTy))
1544 .
addUse(GR.getSPIRVTypeID(ResType))
1552 return selectUnOp(ResVReg, ResType,
I,
1553 SPIRV::OpPtrCastToCrossWorkgroupINTEL);
1555 return selectUnOp(ResVReg, ResType,
I,
1556 SPIRV::OpCrossWorkgroupCastToPtrINTEL);
1558 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1560 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
1570 return SPIRV::OpFOrdEqual;
1572 return SPIRV::OpFOrdGreaterThanEqual;
1574 return SPIRV::OpFOrdGreaterThan;
1576 return SPIRV::OpFOrdLessThanEqual;
1578 return SPIRV::OpFOrdLessThan;
1580 return SPIRV::OpFOrdNotEqual;
1582 return SPIRV::OpOrdered;
1584 return SPIRV::OpFUnordEqual;
1586 return SPIRV::OpFUnordGreaterThanEqual;
1588 return SPIRV::OpFUnordGreaterThan;
1590 return SPIRV::OpFUnordLessThanEqual;
1592 return SPIRV::OpFUnordLessThan;
1594 return SPIRV::OpFUnordNotEqual;
1596 return SPIRV::OpUnordered;
1606 return SPIRV::OpIEqual;
1608 return SPIRV::OpINotEqual;
1610 return SPIRV::OpSGreaterThanEqual;
1612 return SPIRV::OpSGreaterThan;
1614 return SPIRV::OpSLessThanEqual;
1616 return SPIRV::OpSLessThan;
1618 return SPIRV::OpUGreaterThanEqual;
1620 return SPIRV::OpUGreaterThan;
1622 return SPIRV::OpULessThanEqual;
1624 return SPIRV::OpULessThan;
1633 return SPIRV::OpPtrEqual;
1635 return SPIRV::OpPtrNotEqual;
1646 return SPIRV::OpLogicalEqual;
1648 return SPIRV::OpLogicalNotEqual;
// Selects OpAny/OpAll (the opcode is passed in as OpAnyOrAll) for a reduce-to-
// scalar-bool operation. A scalar bool input is already the answer and is just
// copied; otherwise the input is first compared != zero to produce a bool
// vector, then OpAny/OpAll collapses it to a scalar.
// NOTE(review): several interior lines were dropped in extraction; the
// OpAny/OpAll emission itself is only partially visible — confirm upstream.
1682bool SPIRVInstructionSelector::selectAnyOrAll(
Register ResVReg,
1685 unsigned OpAnyOrAll)
const {
1686 assert(
I.getNumOperands() == 3);
1687 assert(
I.getOperand(2).isReg());
1689 Register InputRegister =
I.getOperand(2).getReg();
1690 SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
1695 bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
1696 bool IsVectorTy = InputType->
getOpcode() == SPIRV::OpTypeVector;
// Scalar bool: nothing to reduce, forward the value.
1697 if (IsBoolTy && !IsVectorTy) {
1698 assert(ResVReg ==
I.getOperand(0).getReg());
1699 return BuildCOPY(ResVReg, InputRegister,
I);
// Non-bool input: pick the != comparison opcode by element type
// (float -> OpFOrdNotEqual, int -> OpINotEqual).
1702 bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
1703 unsigned SpirvNotEqualId =
1704 IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
1705 SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(
I,
TII);
// Bool vectors skip the compare and feed OpAny/OpAll directly.
1710 NotEqualReg = IsBoolTy ? InputRegister
1711 :
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1713 SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts,
I,
TII);
// Zero constant of the input's element kind for the != comparison.
1719 IsFloatTy ? buildZerosValF(InputType,
I) : buildZerosVal(InputType,
I);
1723 .
addUse(GR.getSPIRVTypeID(SpvBoolTy))
1734 .
addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
// Thin wrappers: all-of / any-of reductions.
1739bool SPIRVInstructionSelector::selectAll(
Register ResVReg,
1742 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAll);
1745bool SPIRVInstructionSelector::selectAny(
Register ResVReg,
1748 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAny);
// Float dot product: lowers to a single OpDot on the two vector operands.
// Operands: 0 = result, 2/3 = the input vectors.
1752bool SPIRVInstructionSelector::selectFloatDot(
Register ResVReg,
1755 assert(
I.getNumOperands() == 4);
1756 assert(
I.getOperand(2).isReg());
1757 assert(
I.getOperand(3).isReg());
1760 GR.getSPIRVTypeForVReg(
I.getOperand(2).getReg());
1763 GR.getScalarOrVectorComponentCount(VecType) > 1 &&
1764 "dot product requires a vector of at least 2 components");
1767 GR.getSPIRVTypeForVReg(
VecType->getOperand(1).getReg());
1772 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpDot))
1774 .
addUse(GR.getSPIRVTypeID(ResType))
1775 .
addUse(
I.getOperand(2).getReg())
1776 .
addUse(
I.getOperand(3).getReg())
// Integer dot product via the SPV_KHR_integer_dot_product opcodes:
// OpSDot when Signed, OpUDot otherwise.
1780bool SPIRVInstructionSelector::selectIntegerDot(
Register ResVReg,
1784 assert(
I.getNumOperands() == 4);
1785 assert(
I.getOperand(2).isReg());
1786 assert(
I.getOperand(3).isReg());
1789 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
1792 .
addUse(GR.getSPIRVTypeID(ResType))
1793 .
addUse(
I.getOperand(2).getReg())
1794 .
addUse(
I.getOperand(3).getReg())
// Fallback when the integer-dot-product capability is unavailable:
// elementwise multiply, then extract each lane and accumulate with adds.
// NOTE(review): the multiply/add opcode lines were dropped in extraction.
1800bool SPIRVInstructionSelector::selectIntegerDotExpansion(
1802 assert(
I.getNumOperands() == 4);
1803 assert(
I.getOperand(2).isReg());
1804 assert(
I.getOperand(3).isReg());
1808 Register Vec0 =
I.getOperand(2).getReg();
1809 Register Vec1 =
I.getOperand(3).getReg();
1810 Register TmpVec =
MRI->createVirtualRegister(GR.getRegClass(ResType));
1815 .
addUse(GR.getSPIRVTypeID(VecType))
1821 GR.getScalarOrVectorComponentCount(VecType) > 1 &&
1822 "dot product requires a vector of at least 2 components");
// Seed the accumulator (presumably with lane 0 of the product vector).
1824 Register Res =
MRI->createVirtualRegister(GR.getRegClass(ResType));
1827 .
addUse(GR.getSPIRVTypeID(ResType))
// Fold in lanes 1..N-1.
1832 for (
unsigned i = 1; i < GR.getScalarOrVectorComponentCount(VecType); i++) {
1833 Register Elt =
MRI->createVirtualRegister(GR.getRegClass(ResType));
1836 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1838 .
addUse(GR.getSPIRVTypeID(ResType))
// Last iteration writes straight into ResVReg instead of a temp.
1843 Register Sum = i < GR.getScalarOrVectorComponentCount(VecType) - 1
1844 ?
MRI->createVirtualRegister(GR.getRegClass(ResType))
1849 .
addUse(GR.getSPIRVTypeID(ResType))
// dot4add on 8-bit lanes packed into 32-bit scalars, using the native
// OpSDot/OpUDot (Signed selects which) followed by an add of the accumulator
// operand (operand 4).
1859template <
bool Signed>
1860bool SPIRVInstructionSelector::selectDot4AddPacked(
Register ResVReg,
1863 assert(
I.getNumOperands() == 5);
1864 assert(
I.getOperand(2).isReg());
1865 assert(
I.getOperand(3).isReg());
1866 assert(
I.getOperand(4).isReg());
1869 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
1870 Register Dot =
MRI->createVirtualRegister(GR.getRegClass(ResType));
1873 .
addUse(GR.getSPIRVTypeID(ResType))
1874 .
addUse(
I.getOperand(2).getReg())
1875 .
addUse(
I.getOperand(3).getReg())
// Add the accumulator to the dot result.
1880 .
addUse(GR.getSPIRVTypeID(ResType))
1882 .
addUse(
I.getOperand(4).getReg())
// Expansion for targets without integer dot product: for each of the four
// 8-bit lanes, bit-field-extract the lane from both packed operands
// (sign- or zero-extending per Signed), multiply, mask back to 8 bits, and
// accumulate. The final iteration writes directly to ResVReg.
// NOTE(review): the multiply/add emission lines were dropped in extraction.
1889template <
bool Signed>
1890bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
1892 assert(
I.getNumOperands() == 5);
1893 assert(
I.getOperand(2).isReg());
1894 assert(
I.getOperand(3).isReg());
1895 assert(
I.getOperand(4).isReg());
1901 Register Acc =
I.getOperand(4).getReg();
1902 SPIRVType *EltType = GR.getOrCreateSPIRVIntegerType(8,
I,
TII);
1904 Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;
1907 for (
unsigned i = 0; i < 4; i++) {
// Lane i of the first packed operand: extract bits [i*8, i*8+8).
1909 Register AElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1912 .
addUse(GR.getSPIRVTypeID(ResType))
1913 .
addUse(
I.getOperand(2).getReg())
1914 .
addUse(GR.getOrCreateConstInt(i * 8,
I, EltType,
TII))
1915 .
addUse(GR.getOrCreateConstInt(8,
I, EltType,
TII))
// Lane i of the second packed operand.
1919 Register BElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1922 .
addUse(GR.getSPIRVTypeID(ResType))
1923 .
addUse(
I.getOperand(3).getReg())
1924 .
addUse(GR.getOrCreateConstInt(i * 8,
I, EltType,
TII))
1925 .
addUse(GR.getOrCreateConstInt(8,
I, EltType,
TII))
1932 .
addUse(GR.getSPIRVTypeID(ResType))
// Mask the product back down to its low 8 bits.
1938 Register MaskMul =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1941 .
addUse(GR.getSPIRVTypeID(ResType))
1943 .
addUse(GR.getOrCreateConstInt(0,
I, EltType,
TII))
1944 .
addUse(GR.getOrCreateConstInt(8,
I, EltType,
TII))
// Accumulate; the fourth (last) lane lands in ResVReg.
1949 i < 3 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
1952 .
addUse(GR.getSPIRVTypeID(ResType))
// Saturate: clamp the float operand to [0.0, 1.0] via the GLSL extended
// instruction set (the visible GLSL_std_450 selector; the FClamp id line was
// dropped in extraction — confirm upstream).
1965bool SPIRVInstructionSelector::selectSaturate(
Register ResVReg,
1968 assert(
I.getNumOperands() == 3);
1969 assert(
I.getOperand(2).isReg());
1971 Register VZero = buildZerosValF(ResType,
I);
1972 Register VOne = buildOnesValF(ResType,
I);
1974 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1976 .
addUse(GR.getSPIRVTypeID(ResType))
1977 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
1979 .
addUse(
I.getOperand(2).getReg())
// Sign: GLSL FSign for float inputs, SSign for integer inputs, computed in
// the input's type; when the result type differs (float input, or narrower/
// wider int) a conversion (OpConvertFToS / OpSConvert) is appended.
1985bool SPIRVInstructionSelector::selectSign(
Register ResVReg,
1988 assert(
I.getNumOperands() == 3);
1989 assert(
I.getOperand(2).isReg());
1991 Register InputRegister =
I.getOperand(2).getReg();
1992 SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
1993 auto &
DL =
I.getDebugLoc();
1998 bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
2000 unsigned SignBitWidth = GR.getScalarOrVectorBitWidth(InputType);
2001 unsigned ResBitWidth = GR.getScalarOrVectorBitWidth(ResType);
// A conversion is required when the sign is computed in a different
// type/width than the requested result.
2003 bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;
2005 auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
// When converting, the ext-inst result goes to a temp; otherwise straight
// to ResVReg.
2007 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass)
2013 .
addUse(GR.getSPIRVTypeID(InputType))
2014 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
2019 if (NeedsConversion) {
2020 auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
2023 .
addUse(GR.getSPIRVTypeID(ResType))
// Generic wave/subgroup operation: emits `Opcode` with Subgroup scope as the
// first argument, then forwards the instruction's remaining register operands
// (from index 2 onward).
2031bool SPIRVInstructionSelector::selectWaveOpInst(
Register ResVReg,
2034 unsigned Opcode)
const {
2036 SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32,
I,
TII);
2038 auto BMI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2040 .
addUse(GR.getSPIRVTypeID(ResType))
2041 .
addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup,
I,
2044 for (
unsigned J = 2; J <
I.getNumOperands(); J++) {
2045 BMI.
addUse(
I.getOperand(J).getReg());
// WaveActiveCountBits: first take a subgroup ballot (a <4 x i32> mask of the
// predicate across the wave), then OpGroupNonUniformBallotBitCount with a
// Reduce group operation to count the set bits.
2051bool SPIRVInstructionSelector::selectWaveActiveCountBits(
2054 SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32,
I,
TII);
2055 SPIRVType *BallotType = GR.getOrCreateSPIRVVectorType(IntTy, 4,
I,
TII);
2056 Register BallotReg =
MRI->createVirtualRegister(GR.getRegClass(BallotType));
2057 bool Result = selectWaveOpInst(BallotReg, BallotType,
I,
2058 SPIRV::OpGroupNonUniformBallot);
2063 TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
2065 .
addUse(GR.getSPIRVTypeID(ResType))
2066 .
addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup,
I, IntTy,
TII))
2067 .
addImm(SPIRV::GroupOperation::Reduce)
// Bit reversal: direct 1:1 lowering to OpBitReverse.
2074bool SPIRVInstructionSelector::selectBitreverse(
Register ResVReg,
2078 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitReverse))
2080 .
addUse(GR.getSPIRVTypeID(ResType))
2081 .
addUse(
I.getOperand(1).getReg())
// G_FREEZE lowering: if the frozen value originates from an undef
// (G_IMPLICIT_DEF behind an ASSIGN_TYPE, or OpUndef), materialize a defined
// OpConstantNull instead; otherwise the freeze is a plain COPY of the operand.
2085bool SPIRVInstructionSelector::selectFreeze(
Register ResVReg,
2093 if (!
I.getOperand(0).isReg() || !
I.getOperand(1).isReg())
2095 Register OpReg =
I.getOperand(1).getReg();
2098 switch (
Def->getOpcode()) {
2099 case SPIRV::ASSIGN_TYPE:
// Look through ASSIGN_TYPE to see whether the wrapped def is undef.
2101 MRI->getVRegDef(
Def->getOperand(1).getReg())) {
2102 if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
2103 Reg =
Def->getOperand(2).getReg();
2106 case SPIRV::OpUndef:
2107 Reg =
Def->getOperand(1).getReg();
2110 unsigned DestOpCode;
// A valid Reg means the source was undef: replace with a constant null.
2111 if (
Reg.isValid()) {
2112 DestOpCode = SPIRV::OpConstantNull;
2114 DestOpCode = TargetOpcode::COPY;
2117 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DestOpCode))
2118 .
addDef(
I.getOperand(0).getReg())
// NOTE(review): the fragments below belong to a constant-detection helper
// (presumably isConstReg) whose signature and surrounding switch were dropped
// in extraction; they look through ASSIGN_TYPE wrappers and classify
// G_CONSTANT / G_FCONSTANT / spv_const_composite intrinsics and
// build/splat-vector defs as constants — confirm upstream.
2131 if (OpDef->
getOpcode() == SPIRV::ASSIGN_TYPE &&
2136 unsigned N = OpDef->
getOpcode() == TargetOpcode::G_CONSTANT
2145 if (OpDef->
getOpcode() == SPIRV::ASSIGN_TYPE &&
2157 case TargetOpcode::G_CONSTANT:
2158 case TargetOpcode::G_FCONSTANT:
2160 case TargetOpcode::G_INTRINSIC:
2161 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
2162 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
2163 return cast<GIntrinsic>(*OpDef).getIntrinsicID() ==
2164 Intrinsic::spv_const_composite;
2165 case TargetOpcode::G_BUILD_VECTOR:
2166 case TargetOpcode::G_SPLAT_VECTOR: {
// G_BUILD_VECTOR lowering: component count is taken from the result type
// (vector or array); if every source operand is constant the result is an
// OpConstantComposite, otherwise an OpCompositeConstruct.
2190bool SPIRVInstructionSelector::selectBuildVector(
Register ResVReg,
2194 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2195 N = GR.getScalarOrVectorComponentCount(ResType);
2196 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
// The instruction must supply exactly N constituents.
2200 if (
I.getNumExplicitOperands() -
I.getNumExplicitDefs() !=
N)
// Scan all source operands for constant-ness.
2205 for (
unsigned i =
I.getNumExplicitDefs();
2206 i <
I.getNumExplicitOperands() && IsConst; ++i)
2210 if (!IsConst &&
N < 2)
2212 "There must be at least two constituent operands in a vector");
2214 MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
2215 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2216 TII.get(IsConst ? SPIRV::OpConstantComposite
2217 : SPIRV::OpCompositeConstruct))
2219 .
addUse(GR.getSPIRVTypeID(ResType));
2220 for (
unsigned i =
I.getNumExplicitDefs(); i <
I.getNumExplicitOperands(); ++i)
2221 MIB.
addUse(
I.getOperand(i).getReg());
// G_SPLAT_VECTOR lowering: same constant/non-constant choice as above, but a
// single source operand is repeated N times as the constituents.
2225bool SPIRVInstructionSelector::selectSplatVector(
Register ResVReg,
2229 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2230 N = GR.getScalarOrVectorComponentCount(ResType);
2231 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2236 unsigned OpIdx =
I.getNumExplicitDefs();
2237 if (!
I.getOperand(OpIdx).isReg())
2241 Register OpReg =
I.getOperand(OpIdx).getReg();
2244 if (!IsConst &&
N < 2)
2246 "There must be at least two constituent operands in a vector");
2248 MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
2249 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2250 TII.get(IsConst ? SPIRV::OpConstantComposite
2251 : SPIRV::OpCompositeConstruct))
2253 .
addUse(GR.getSPIRVTypeID(ResType));
2254 for (
unsigned i = 0; i <
N; ++i)
// Discard: prefers OpDemoteToHelperInvocation when the demote-to-helper
// extension/capability is available, else OpKill; OpKill terminates the block,
// so the following instruction (if any) is removed.
2259bool SPIRVInstructionSelector::selectDiscard(
Register ResVReg,
2265 if (STI.canUseExtension(
2266 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
2268 Opcode = SPIRV::OpDemoteToHelperInvocation;
2270 Opcode = SPIRV::OpKill;
// Drop the instruction after OpKill — it would be unreachable.
2273 GR.invalidateMachineInstr(NextI);
2274 NextI->removeFromParent();
2279 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
// Shared comparison emitter: both operands must already have the same SPIR-V
// type; emits the caller-chosen CmpOpc with the two source registers.
2283bool SPIRVInstructionSelector::selectCmp(
Register ResVReg,
2287 Register Cmp0 =
I.getOperand(2).getReg();
2288 Register Cmp1 =
I.getOperand(3).getReg();
2289 assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
2290 GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
2291 "CMP operands should have the same type");
2292 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(CmpOpc))
2294 .
addUse(GR.getSPIRVTypeID(ResType))
// Integer compare: picks the opcode family by operand kind — pointer
// operands get pointer-compare opcodes, bool operands get logical opcodes,
// everything else gets the integer opcodes.
2300bool SPIRVInstructionSelector::selectICmp(
Register ResVReg,
2303 auto Pred =
I.getOperand(1).getPredicate();
2306 Register CmpOperand =
I.getOperand(2).getReg();
2307 if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
2309 else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
2313 return selectCmp(ResVReg, ResType, CmpOpc,
I);
// Custom renderer: appends a G_FCONSTANT's float immediate to the MIB.
2319 assert(
I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
2320 "Expected G_FCONSTANT");
2321 const ConstantFP *FPImm =
I.getOperand(1).getFPImm();
// Custom renderer: appends a G_CONSTANT's integer immediate to the MIB.
2328 assert(
I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2329 "Expected G_CONSTANT");
2330 addNumImm(
I.getOperand(1).getCImm()->getValue(), MIB);
// Materializes a 32-bit integer constant, deduplicating through the global
// registry (GR.find/GR.add); zero uses OpConstantNull, anything else
// OpConstantI. Returns the register and a success flag.
2333std::pair<Register, bool>
2338 ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32,
I,
TII);
2340 auto ConstInt = ConstantInt::get(LLVMTy, Val);
2341 Register NewReg = GR.find(ConstInt, GR.CurMF);
2345 GR.add(ConstInt, GR.CurMF, NewReg);
2349 MI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
2351 .
addUse(GR.getSPIRVTypeID(SpvI32Ty));
2353 MI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
2355 .
addUse(GR.getSPIRVTypeID(SpvI32Ty))
// Float compare: maps the FCmp predicate to an opcode (mapping lines dropped
// in extraction) and defers to selectCmp.
2363bool SPIRVInstructionSelector::selectFCmp(
Register ResVReg,
2367 return selectCmp(ResVReg, ResType, CmpOp,
I);
// Constant builders. ZeroAsNull: OpenCL environments encode zero constants
// as OpConstantNull.
// Integer zero (scalar or vector splat).
2373 bool ZeroAsNull = STI.isOpenCLEnv();
2374 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2375 return GR.getOrCreateConstVector(0UL,
I, ResType,
TII, ZeroAsNull);
2376 return GR.getOrCreateConstInt(0,
I, ResType,
TII, ZeroAsNull);
// Floating-point zero (scalar or vector splat).
2382 bool ZeroAsNull = STI.isOpenCLEnv();
2384 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2385 return GR.getOrCreateConstVector(VZero,
I, ResType,
TII, ZeroAsNull);
2386 return GR.getOrCreateConstFP(VZero,
I, ResType,
TII, ZeroAsNull);
// Floating-point one (scalar or vector splat).
2392 bool ZeroAsNull = STI.isOpenCLEnv();
2394 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2395 return GR.getOrCreateConstVector(VOne,
I, ResType,
TII, ZeroAsNull);
2396 return GR.getOrCreateConstFP(VOne,
I, ResType,
TII, ZeroAsNull);
// Integer all-ones / one builder fragment (signature dropped in extraction).
2402 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
2405 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
// Bool -> numeric select: lowers a bool (operand 1) to 0/1 (or 0/-1 when
// IsSigned) via OpSelect; scalar bools use OpSelectSISCond, vectors
// OpSelectVIVCond.
2410bool SPIRVInstructionSelector::selectSelect(
Register ResVReg,
2413 bool IsSigned)
const {
2415 Register ZeroReg = buildZerosVal(ResType,
I);
2416 Register OneReg = buildOnesVal(IsSigned, ResType,
I);
2418 GR.isScalarOfType(
I.getOperand(1).getReg(), SPIRV::OpTypeBool);
2420 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
2421 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2423 .
addUse(GR.getSPIRVTypeID(ResType))
2424 .
addUse(
I.getOperand(1).getReg())
// Int -> float conversion: a bool source is first widened to an integer via
// selectSelect (0/1), then the caller-supplied convert Opcode is applied.
2430bool SPIRVInstructionSelector::selectIToF(
Register ResVReg,
2433 unsigned Opcode)
const {
2434 Register SrcReg =
I.getOperand(1).getReg();
2437 if (GR.isScalarOrVectorOfType(
I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
2438 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
2440 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
2442 TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts,
I,
TII);
2444 SrcReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2445 selectSelect(SrcReg, TmpType,
I,
false);
2447 return selectOpWithSrcs(ResVReg, ResType,
I, {SrcReg}, Opcode);
// Integer extension: bool sources become a 0/1 (or 0/-1) select; same-type
// sources are a COPY; otherwise OpSConvert/OpUConvert per IsSigned.
2450bool SPIRVInstructionSelector::selectExt(
Register ResVReg,
2453 Register SrcReg =
I.getOperand(1).getReg();
2454 if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
2455 return selectSelect(ResVReg, ResType,
I, IsSigned);
2457 SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
2458 if (SrcType == ResType)
2459 return BuildCOPY(ResVReg, SrcReg,
I);
2461 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2462 return selectUnOp(ResVReg, ResType,
I, Opcode);
// Three-way compare (ucmp/scmp-style): computes a<=b and a<b with the
// signed/unsigned opcodes, then selects between -1, 0 and 1.
2465bool SPIRVInstructionSelector::selectSUCmp(
Register ResVReg,
2468 bool IsSigned)
const {
2474 unsigned N = GR.getScalarOrVectorComponentCount(ResType);
2476 BoolType = GR.getOrCreateSPIRVVectorType(BoolType,
N,
I,
TII);
2477 Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);
// a <= b.
2481 Register IsLessEqReg =
MRI->createVirtualRegister(GR.getRegClass(ResType));
2483 GR.assignSPIRVTypeToVReg(ResType, IsLessEqReg, MIRBuilder.getMF());
2485 TII.get(IsSigned ? SPIRV::OpSLessThanEqual
2486 : SPIRV::OpULessThanEqual))
2489 .
addUse(
I.getOperand(1).getReg())
2490 .
addUse(
I.getOperand(2).getReg())
// a < b.
2492 Register IsLessReg =
MRI->createVirtualRegister(GR.getRegClass(ResType));
2494 GR.assignSPIRVTypeToVReg(ResType, IsLessReg, MIRBuilder.getMF())
2496 TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
2499 .
addUse(
I.getOperand(1).getReg())
2500 .
addUse(
I.getOperand(2).getReg())
// Select -1 when a<b, else 0; then select that or 1 when !(a<=b).
2503 Register ResTypeReg = GR.getSPIRVTypeID(ResType);
2505 MRI->createVirtualRegister(GR.getRegClass(ResType));
2507 GR.assignSPIRVTypeToVReg(ResType, NegOneOrZeroReg, MIRBuilder.getMF());
2508 unsigned SelectOpcode =
2509 N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
2514 .
addUse(buildOnesVal(
true, ResType,
I))
2515 .
addUse(buildZerosVal(ResType,
I))
2522 .
addUse(buildOnesVal(
false, ResType,
I))
// Int -> bool: mask the integer with 1 (OpBitwiseAnd, scalar or vector
// form), then compare the result against zero to produce the bool.
2526bool SPIRVInstructionSelector::selectIntToBool(
Register IntReg,
2532 Register BitIntReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2533 bool IsVectorTy = IntTy->
getOpcode() == SPIRV::OpTypeVector;
2534 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
2536 Register One = buildOnesVal(
false, IntTy,
I);
2540 .
addUse(GR.getSPIRVTypeID(IntTy))
2546 .
addUse(GR.getSPIRVTypeID(BoolTy))
// Truncation: bool results go through selectIntToBool; same-type is a COPY;
// otherwise OpSConvert/OpUConvert by the result type's signedness.
2552bool SPIRVInstructionSelector::selectTrunc(
Register ResVReg,
2555 Register IntReg =
I.getOperand(1).getReg();
2556 const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
2557 if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
2558 return selectIntToBool(IntReg, ResVReg,
I, ArgType, ResType);
2559 if (ArgType == ResType)
2560 return BuildCOPY(ResVReg, IntReg,
I);
2561 bool IsSigned = GR.isScalarOrVectorSigned(ResType);
2562 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2563 return selectUnOp(ResVReg, ResType,
I, Opcode);
// Constant materialization: zero pointers/events become OpConstantNull;
// integer types (<= 64 bits) are deduplicated through the registry and may
// end in a COPY; other cases fall through to an OpConstantI build.
2566bool SPIRVInstructionSelector::selectConst(
Register ResVReg,
2570 unsigned TyOpcode = ResType->
getOpcode();
2571 assert(TyOpcode != SPIRV::OpTypePointer ||
Imm.isZero());
2573 if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
2575 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
2577 .
addUse(GR.getSPIRVTypeID(ResType))
2579 if (TyOpcode == SPIRV::OpTypeInt) {
2580 assert(
Imm.getBitWidth() <= 64 &&
"Unsupported integer width!");
// Registry may hand back an existing register; only COPY when it differs.
2582 return Reg == ResVReg ?
true : BuildCOPY(ResVReg, Reg,
I);
2584 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
2586 .
addUse(GR.getSPIRVTypeID(ResType));
// Undef value: direct OpUndef of the result type.
2593bool SPIRVInstructionSelector::selectOpUndef(
Register ResVReg,
2596 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
2598 .
addUse(GR.getSPIRVTypeID(ResType))
// NOTE(review): fragments of a constant-integer detection helper (presumably
// isImm / isConstIndex); signature lines dropped in extraction — it accepts
// either a G_CONSTANT behind ASSIGN_TYPE or a selected OpConstantI.
2605 if (TypeInst->
getOpcode() == SPIRV::ASSIGN_TYPE) {
2608 return ImmInst->
getOpcode() == TargetOpcode::G_CONSTANT;
2610 return TypeInst->
getOpcode() == SPIRV::OpConstantI;
2615 if (TypeInst->
getOpcode() == SPIRV::OpConstantI)
// Insert-value: OpCompositeInsert of the object (operand 3) into the
// composite (operand 2) at the literal indices that follow.
2622bool SPIRVInstructionSelector::selectInsertVal(
Register ResVReg,
2626 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeInsert))
2628 .
addUse(GR.getSPIRVTypeID(ResType))
2630 .
addUse(
I.getOperand(3).getReg())
2632 .
addUse(
I.getOperand(2).getReg());
2633 for (
unsigned i = 4; i <
I.getNumOperands(); i++)
// Extract-value: OpCompositeExtract from the composite (operand 2) at the
// literal indices that follow.
2638bool SPIRVInstructionSelector::selectExtractVal(
Register ResVReg,
2642 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2644 .
addUse(GR.getSPIRVTypeID(ResType))
2645 .
addUse(
I.getOperand(2).getReg());
2646 for (
unsigned i = 3; i <
I.getNumOperands(); i++)
// Insert-element: constant index delegates to selectInsertVal; a dynamic
// index requires OpVectorInsertDynamic.
2651bool SPIRVInstructionSelector::selectInsertElt(
Register ResVReg,
2655 return selectInsertVal(ResVReg, ResType,
I);
2657 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorInsertDynamic))
2659 .
addUse(GR.getSPIRVTypeID(ResType))
2660 .
addUse(
I.getOperand(2).getReg())
2661 .
addUse(
I.getOperand(3).getReg())
2662 .
addUse(
I.getOperand(4).getReg())
// Extract-element: constant index delegates to selectExtractVal; a dynamic
// index requires OpVectorExtractDynamic.
2666bool SPIRVInstructionSelector::selectExtractElt(
Register ResVReg,
2670 return selectExtractVal(ResVReg, ResType,
I);
2672 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorExtractDynamic))
2674 .
addUse(GR.getSPIRVTypeID(ResType))
2675 .
addUse(
I.getOperand(2).getReg())
2676 .
addUse(
I.getOperand(3).getReg())
// GEP lowering: Vulkan uses the (InBounds)AccessChain forms (first GEP index
// is skipped); other environments use the Ptr variants which take it.
2680bool SPIRVInstructionSelector::selectGEP(
Register ResVReg,
2683 const bool IsGEPInBounds =
I.getOperand(2).getImm();
2688 const unsigned Opcode = STI.isVulkanEnv()
2689 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
2690 : SPIRV::OpAccessChain)
2691 : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
2692 : SPIRV::OpPtrAccessChain);
2694 auto Res =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2696 .
addUse(GR.getSPIRVTypeID(ResType))
2698 .
addUse(
I.getOperand(3).getReg());
// AccessChain skips the leading element index that PtrAccessChain consumes.
2700 const unsigned StartingIndex =
2701 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
2704 for (
unsigned i = StartingIndex; i <
I.getNumExplicitOperands(); ++i)
2705 Res.addUse(
I.getOperand(i).getReg());
2706 return Res.constrainAllUses(
TII,
TRI, RBI);
// Wraps non-constant, non-aggregate operands of a constant composite into
// OpSpecConstantOp so the composite stays a constant expression; wrapped
// registers are cached in the registry to avoid duplicates.
2710bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
2713 unsigned Lim =
I.getNumExplicitOperands();
2714 for (
unsigned i =
I.getNumExplicitDefs() + 1; i < Lim; ++i) {
2715 Register OpReg =
I.getOperand(i).getReg();
2717 SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
// Already-constant operands (and addrspace casts / aggregates) are kept
// as-is.
2719 if (!OpDefine || !OpType ||
isConstReg(
MRI, OpDefine, Visited) ||
2720 OpDefine->
getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
2721 GR.isAggregateType(OpType)) {
2728 Register WrapReg = GR.find(OpDefine, MF);
2734 WrapReg =
MRI->createVirtualRegister(GR.getRegClass(OpType));
2735 GR.add(OpDefine, MF, WrapReg);
2739 GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
2743 .
addUse(GR.getSPIRVTypeID(OpType))
// Central dispatch for target intrinsics (G_INTRINSIC*): routes each
// spv_* / generic intrinsic id to its dedicated selector or emits the SPIR-V
// instruction inline. Unknown intrinsics fall through to a diagnostic at the
// bottom. NOTE(review): many interior lines (case labels, closing braces,
// some operand lists) were dropped in extraction.
2753bool SPIRVInstructionSelector::selectIntrinsic(
Register ResVReg,
2759 case Intrinsic::spv_load:
2760 return selectLoad(ResVReg, ResType,
I);
2761 case Intrinsic::spv_store:
2762 return selectStore(
I);
2763 case Intrinsic::spv_extractv:
2764 return selectExtractVal(ResVReg, ResType,
I);
2765 case Intrinsic::spv_insertv:
2766 return selectInsertVal(ResVReg, ResType,
I);
2767 case Intrinsic::spv_extractelt:
2768 return selectExtractElt(ResVReg, ResType,
I);
2769 case Intrinsic::spv_insertelt:
2770 return selectInsertElt(ResVReg, ResType,
I);
2771 case Intrinsic::spv_gep:
2772 return selectGEP(ResVReg, ResType,
I);
// Global variable init/reference: re-selects the global with its optional
// initializer.
2773 case Intrinsic::spv_unref_global:
2774 case Intrinsic::spv_init_global: {
2777 ?
MRI->getVRegDef(
I.getOperand(2).getReg())
2780 return selectGlobalValue(
MI->getOperand(0).getReg(), *
MI,
Init);
2782 case Intrinsic::spv_undef: {
2783 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
2785 .
addUse(GR.getSPIRVTypeID(ResType));
// Constant composite: no operands means null; otherwise wrap non-constant
// members via wrapIntoSpecConstantOp and build OpConstantComposite.
2788 case Intrinsic::spv_const_composite: {
2790 bool IsNull =
I.getNumExplicitDefs() + 1 ==
I.getNumExplicitOperands();
2792 unsigned Opcode = SPIRV::OpConstantNull;
2795 Opcode = SPIRV::OpConstantComposite;
2796 if (!wrapIntoSpecConstantOp(
I, CompositeArgs))
2799 MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
2800 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2802 .
addUse(GR.getSPIRVTypeID(ResType));
2805 for (
Register OpReg : CompositeArgs)
// Debug name: OpName with the target register plus packed string words.
2810 case Intrinsic::spv_assign_name: {
2811 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpName));
2812 MIB.
addUse(
I.getOperand(
I.getNumExplicitDefs() + 1).getReg());
2813 for (
unsigned i =
I.getNumExplicitDefs() + 2;
2814 i <
I.getNumExplicitOperands(); ++i) {
2815 MIB.
addImm(
I.getOperand(i).getImm());
// Switch: forwards the mixed register/immediate/MBB operand list to
// OpSwitch verbatim.
2819 case Intrinsic::spv_switch: {
2820 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSwitch));
2821 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
2822 if (
I.getOperand(i).isReg())
2823 MIB.
addReg(
I.getOperand(i).getReg());
2824 else if (
I.getOperand(i).isCImm())
2825 addNumImm(
I.getOperand(i).getCImm()->getValue(), MIB);
2826 else if (
I.getOperand(i).isMBB())
2827 MIB.
addMBB(
I.getOperand(i).getMBB());
// Structured control flow merges.
2833 case Intrinsic::spv_loop_merge: {
2834 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoopMerge));
2835 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
2836 assert(
I.getOperand(i).isMBB());
2837 MIB.
addMBB(
I.getOperand(i).getMBB());
2839 MIB.
addImm(SPIRV::SelectionControl::None);
2842 case Intrinsic::spv_selection_merge: {
2844 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSelectionMerge));
2845 assert(
I.getOperand(1).isMBB() &&
2846 "operand 1 to spv_selection_merge must be a basic block");
2847 MIB.
addMBB(
I.getOperand(1).getMBB());
2848 MIB.
addImm(getSelectionOperandForImm(
I.getOperand(2).getImm()));
2851 case Intrinsic::spv_cmpxchg:
2852 return selectAtomicCmpXchg(ResVReg, ResType,
I);
2853 case Intrinsic::spv_unreachable:
2854 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUnreachable))
2856 case Intrinsic::spv_alloca:
2857 return selectFrameIndex(ResVReg, ResType,
I);
2858 case Intrinsic::spv_alloca_array:
2859 return selectAllocaArray(ResVReg, ResType,
I);
// Assume/expect map to the SPV_KHR_expect_assume ops when available and are
// otherwise dropped (assume) or turned into a COPY (expect/fence fallback).
2860 case Intrinsic::spv_assume:
2861 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
2862 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAssumeTrueKHR))
2863 .
addUse(
I.getOperand(1).getReg())
2866 case Intrinsic::spv_expect:
2867 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
2868 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExpectKHR))
2870 .
addUse(GR.getSPIRVTypeID(ResType))
2871 .
addUse(
I.getOperand(2).getReg())
2872 .
addUse(
I.getOperand(3).getReg())
2875 case Intrinsic::arithmetic_fence:
2876 if (STI.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
2878 TII.get(SPIRV::OpArithmeticFenceEXT))
2880 .
addUse(GR.getSPIRVTypeID(ResType))
2881 .
addUse(
I.getOperand(2).getReg())
2884 return BuildCOPY(ResVReg,
I.getOperand(2).getReg(),
I);
// HLSL-style builtin IDs loaded from vec3 builtin input variables.
2886 case Intrinsic::spv_thread_id:
2892 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
2894 case Intrinsic::spv_thread_id_in_group:
2900 return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
2902 case Intrinsic::spv_group_id:
2908 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg, ResType,
// Dot products: native opcodes when the integer-dot-product capability is
// usable, otherwise the expansion paths.
2910 case Intrinsic::spv_fdot:
2911 return selectFloatDot(ResVReg, ResType,
I);
2912 case Intrinsic::spv_udot:
2913 case Intrinsic::spv_sdot:
2914 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
2916 return selectIntegerDot(ResVReg, ResType,
I,
2917 IID == Intrinsic::spv_sdot);
2918 return selectIntegerDotExpansion(ResVReg, ResType,
I);
2919 case Intrinsic::spv_dot4add_i8packed:
2920 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
2922 return selectDot4AddPacked<true>(ResVReg, ResType,
I);
2923 return selectDot4AddPackedExpansion<true>(ResVReg, ResType,
I);
2924 case Intrinsic::spv_dot4add_u8packed:
2925 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
2927 return selectDot4AddPacked<false>(ResVReg, ResType,
I);
2928 return selectDot4AddPackedExpansion<false>(ResVReg, ResType,
I);
2929 case Intrinsic::spv_all:
2930 return selectAll(ResVReg, ResType,
I);
2931 case Intrinsic::spv_any:
2932 return selectAny(ResVReg, ResType,
I);
// Math helpers routed to the OpenCL (CL::) or GLSL (GL::) extended
// instruction set depending on environment.
2933 case Intrinsic::spv_cross:
2934 return selectExtInst(ResVReg, ResType,
I, CL::cross, GL::Cross);
2935 case Intrinsic::spv_distance:
2936 return selectExtInst(ResVReg, ResType,
I, CL::distance, GL::Distance);
2937 case Intrinsic::spv_lerp:
2938 return selectExtInst(ResVReg, ResType,
I, CL::mix, GL::FMix);
2939 case Intrinsic::spv_length:
2940 return selectExtInst(ResVReg, ResType,
I, CL::length, GL::Length);
2941 case Intrinsic::spv_degrees:
2942 return selectExtInst(ResVReg, ResType,
I, CL::degrees, GL::Degrees);
2943 case Intrinsic::spv_frac:
2944 return selectExtInst(ResVReg, ResType,
I, CL::fract, GL::Fract);
2945 case Intrinsic::spv_normalize:
2946 return selectExtInst(ResVReg, ResType,
I, CL::normalize, GL::Normalize);
2947 case Intrinsic::spv_rsqrt:
2948 return selectExtInst(ResVReg, ResType,
I, CL::rsqrt, GL::InverseSqrt);
2949 case Intrinsic::spv_sign:
2950 return selectSign(ResVReg, ResType,
I);
2951 case Intrinsic::spv_firstbituhigh:
2952 return selectFirstBitHigh(ResVReg, ResType,
I,
false);
2953 case Intrinsic::spv_firstbitshigh:
2954 return selectFirstBitHigh(ResVReg, ResType,
I,
true);
// Group barrier with sync: OpControlBarrier with workgroup scope and
// sequentially-consistent semantics constants.
2955 case Intrinsic::spv_group_memory_barrier_with_group_sync: {
2957 auto MemSemConstant =
2958 buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent,
I);
2959 Register MemSemReg = MemSemConstant.first;
2960 Result &= MemSemConstant.second;
2961 auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup,
I);
2962 Register ScopeReg = ScopeConstant.first;
2963 Result &= ScopeConstant.second;
2966 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpControlBarrier))
2972 case Intrinsic::spv_lifetime_start:
2973 case Intrinsic::spv_lifetime_end: {
2974 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
2975 : SPIRV::OpLifetimeStop;
2976 int64_t
Size =
I.getOperand(
I.getNumExplicitDefs() + 1).getImm();
2977 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 2).getReg();
2985 case Intrinsic::spv_saturate:
2986 return selectSaturate(ResVReg, ResType,
I);
2987 case Intrinsic::spv_nclamp:
2988 return selectExtInst(ResVReg, ResType,
I, CL::fclamp, GL::NClamp);
2989 case Intrinsic::spv_uclamp:
2990 return selectExtInst(ResVReg, ResType,
I, CL::u_clamp, GL::UClamp);
2991 case Intrinsic::spv_sclamp:
2992 return selectExtInst(ResVReg, ResType,
I, CL::s_clamp, GL::SClamp);
// Wave/subgroup intrinsics.
2993 case Intrinsic::spv_wave_active_countbits:
2994 return selectWaveActiveCountBits(ResVReg, ResType,
I);
2995 case Intrinsic::spv_wave_all:
2996 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAll);
2997 case Intrinsic::spv_wave_any:
2998 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAny);
2999 case Intrinsic::spv_wave_is_first_lane:
3000 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformElect);
3001 case Intrinsic::spv_wave_readlane:
3002 return selectWaveOpInst(ResVReg, ResType,
I,
3003 SPIRV::OpGroupNonUniformShuffle);
3004 case Intrinsic::spv_step:
3005 return selectExtInst(ResVReg, ResType,
I, CL::step, GL::Step);
3006 case Intrinsic::spv_radians:
3007 return selectExtInst(ResVReg, ResType,
I, CL::radians, GL::Radians);
// Profiling intrinsics are ignored (no SPIR-V equivalent emitted).
3011 case Intrinsic::instrprof_increment:
3012 case Intrinsic::instrprof_increment_step:
3013 case Intrinsic::instrprof_value_profile:
3016 case Intrinsic::spv_value_md:
// Resource (descriptor/typed-buffer) intrinsics.
3018 case Intrinsic::spv_resource_handlefrombinding: {
3019 return selectHandleFromBinding(ResVReg, ResType,
I);
3021 case Intrinsic::spv_resource_store_typedbuffer: {
3022 selectImageWriteIntrinsic(
I);
3025 case Intrinsic::spv_resource_load_typedbuffer: {
3026 selectReadImageIntrinsic(ResVReg, ResType,
I);
3029 case Intrinsic::spv_discard: {
3030 return selectDiscard(ResVReg, ResType,
I);
// Anything unrecognized is reported as unimplemented.
3033 std::string DiagMsg;
3036 DiagMsg =
"Intrinsic selection not implemented: " + DiagMsg;
// Resource handle: builds (or reuses) the global variable for the given
// set/binding, then OpLoads the handle from it. NonUniform decoration is
// applied for dynamically-indexed arrays (operand 6 flag with ArraySize > 1).
3043bool SPIRVInstructionSelector::selectHandleFromBinding(
Register &ResVReg,
3050 Register IndexReg =
I.getOperand(5).getReg();
3051 bool IsNonUniform = ArraySize > 1 &&
foldImm(
I.getOperand(6),
MRI);
3054 Register VarReg = buildPointerToResource(ResType, Set, Binding, ArraySize,
3055 IndexReg, IsNonUniform, MIRBuilder);
3062 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
3064 .
addUse(GR.getSPIRVTypeID(ResType))
// Typed-buffer load: OpImageRead. A 4-component result reads directly;
// otherwise reads into a vec4 temp and extracts (scalar) or sub-vectors the
// requested components. Image and use must share a basic block.
3069void SPIRVInstructionSelector::selectReadImageIntrinsic(
3078 Register ImageReg =
I.getOperand(2).getReg();
3079 assert(
MRI->getVRegDef(ImageReg)->getParent() ==
I.getParent() &&
3080 "The image must be loaded in the same basic block as its use.");
3082 uint64_t ResultSize = GR.getScalarOrVectorComponentCount(ResType);
3083 if (ResultSize == 4) {
3084 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpImageRead))
3086 .
addUse(GR.getSPIRVTypeID(ResType))
3088 .
addUse(
I.getOperand(3).getReg());
// Read the full vec4, then narrow to the requested width.
3092 SPIRVType *ReadType = widenTypeToVec4(ResType,
I);
3093 Register ReadReg =
MRI->createVirtualRegister(GR.getRegClass(ReadType));
3094 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpImageRead))
3096 .
addUse(GR.getSPIRVTypeID(ReadType))
3098 .
addUse(
I.getOperand(3).getReg());
3100 if (ResultSize == 1) {
3102 TII.get(SPIRV::OpCompositeExtract))
3104 .
addUse(GR.getSPIRVTypeID(ResType))
3109 extractSubvector(ResVReg, ResType, ReadReg,
I);
// Narrows a read vector to ResType's component count: extracts each needed
// component then reassembles with OpCompositeConstruct. Asserts 1 < result
// size < input size.
3112void SPIRVInstructionSelector::extractSubvector(
3115 SPIRVType *InputType = GR.getResultType(ReadReg);
3116 [[maybe_unused]]
uint64_t InputSize =
3117 GR.getScalarOrVectorComponentCount(InputType);
3118 uint64_t ResultSize = GR.getScalarOrVectorComponentCount(ResType);
3119 assert(InputSize > 1 &&
"The input must be a vector.");
3120 assert(ResultSize > 1 &&
"The result must be a vector.");
3121 assert(ResultSize < InputSize &&
3122 "Cannot extract more element than there are in the input.");
3124 SPIRVType *ScalarType = GR.getScalarOrVectorComponentType(ResType);
3127 Register ComponentReg =
MRI->createVirtualRegister(ScalarRegClass);
3139 TII.get(SPIRV::OpCompositeConstruct))
3141 .
addUse(GR.getSPIRVTypeID(ResType));
3143 for (
Register ComponentReg : ComponentRegisters)
3144 MIB.
addUse(ComponentReg);
// Typed-buffer store: OpImageWrite(image, coordinate, 4-component data).
// Image and use must share a basic block; data must be a vec4.
3147void SPIRVInstructionSelector::selectImageWriteIntrinsic(
3155 Register ImageReg =
I.getOperand(1).getReg();
3156 assert(
MRI->getVRegDef(ImageReg)->getParent() ==
I.getParent() &&
3157 "The image must be loaded in the same basic block as its use.");
3158 Register CoordinateReg =
I.getOperand(2).getReg();
3159 Register DataReg =
I.getOperand(3).getReg();
3160 assert(GR.getResultType(DataReg)->getOpcode() == SPIRV::OpTypeVector);
3161 assert(GR.getScalarOrVectorComponentCount(GR.getResultType(DataReg)) == 4);
3162 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpImageWrite))
// Builds (or reuses) the global variable behind a set/binding; arrays of
// resources get an array-typed variable plus an OpAccessChain into the
// requested element, with NonUniformEXT decorations when needed.
3168Register SPIRVInstructionSelector::buildPointerToResource(
3173 return GR.getOrCreateGlobalVariableWithBinding(ResType, Set, Binding,
3176 const SPIRVType *VarType = GR.getOrCreateSPIRVArrayType(
3178 Register VarReg = GR.getOrCreateGlobalVariableWithBinding(
3179 VarType, Set, Binding, MIRBuilder);
3181 SPIRVType *ResPointerType = GR.getOrCreateSPIRVPointerType(
3182 ResType, MIRBuilder, SPIRV::StorageClass::UniformConstant);
3184 Register AcReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
3188 buildOpDecorate(IndexReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
3189 buildOpDecorate(AcReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
3194 .
addUse(GR.getSPIRVTypeID(ResPointerType))
// firstbithigh on 16-bit values: extend to 32 bits (OpSConvert/OpUConvert
// per signedness) and reuse the 32-bit path.
3201bool SPIRVInstructionSelector::selectFirstBitHigh16(
Register ResVReg,
3204 bool IsSigned)
const {
3205 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
3207 Register ExtReg =
MRI->createVirtualRegister(GR.getRegClass(ResType));
3209 selectOpWithSrcs(ExtReg, ResType,
I, {
I.getOperand(2).
getReg()}, Opcode);
3210 return Result && selectFirstBitHigh32(ResVReg, ResType,
I, ExtReg, IsSigned);
// firstbithigh on 32-bit values: GLSL FindSMsb/FindUMsb ext-inst.
3213bool SPIRVInstructionSelector::selectFirstBitHigh32(
Register ResVReg,
3217 bool IsSigned)
const {
3218 unsigned Opcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;
3219 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
3221 .
addUse(GR.getSPIRVTypeID(ResType))
3222 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
3228bool SPIRVInstructionSelector::selectFirstBitHigh64(
Register ResVReg,
3231 bool IsSigned)
const {
3232 Register OpReg =
I.getOperand(2).getReg();
3234 unsigned count = GR.getScalarOrVectorComponentCount(ResType);
3235 SPIRVType *baseType = GR.retrieveScalarOrVectorIntType(ResType);
3238 GR.getOrCreateSPIRVVectorType(baseType, 2 *
count, MIRBuilder);
3239 Register bitcastReg =
MRI->createVirtualRegister(GR.getRegClass(postCastT));
3241 selectOpWithSrcs(bitcastReg, postCastT,
I, {OpReg}, SPIRV::OpBitcast);
3244 Register FBHReg =
MRI->createVirtualRegister(GR.getRegClass(postCastT));
3245 Result &= selectFirstBitHigh32(FBHReg, postCastT,
I, bitcastReg, IsSigned);
3248 Register HighReg =
MRI->createVirtualRegister(GR.getRegClass(ResType));
3249 Register LowReg =
MRI->createVirtualRegister(GR.getRegClass(ResType));
3251 bool ZeroAsNull = STI.isOpenCLEnv();
3252 bool isScalarRes = ResType->
getOpcode() != SPIRV::OpTypeVector;
3255 Result &= selectOpWithSrcs(
3256 HighReg, ResType,
I,
3257 {FBHReg, GR.getOrCreateConstInt(0,
I, ResType,
TII, ZeroAsNull)},
3258 SPIRV::OpVectorExtractDynamic);
3259 Result &= selectOpWithSrcs(
3261 {FBHReg, GR.getOrCreateConstInt(1,
I, ResType,
TII, ZeroAsNull)},
3262 SPIRV::OpVectorExtractDynamic);
3264 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3265 TII.get(SPIRV::OpVectorShuffle))
3267 .
addUse(GR.getSPIRVTypeID(ResType))
3272 for (j = 0;
j <
count * 2;
j += 2) {
3278 MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3279 TII.get(SPIRV::OpVectorShuffle))
3281 .
addUse(GR.getSPIRVTypeID(ResType))
3285 for (j = 1;
j <
count * 2;
j += 2) {
3300 GR.getOrCreateConstInt((
unsigned)-1,
I, ResType,
TII, ZeroAsNull);
3301 Reg0 = GR.getOrCreateConstInt(0,
I, ResType,
TII, ZeroAsNull);
3302 Reg32 = GR.getOrCreateConstInt(32,
I, ResType,
TII, ZeroAsNull);
3303 selectOp = SPIRV::OpSelectSISCond;
3304 addOp = SPIRV::OpIAddS;
3306 BoolType = GR.getOrCreateSPIRVVectorType(BoolType,
count, MIRBuilder);
3308 GR.getOrCreateConstVector((
unsigned)-1,
I, ResType,
TII, ZeroAsNull);
3309 Reg0 = GR.getOrCreateConstVector(0,
I, ResType,
TII, ZeroAsNull);
3310 Reg32 = GR.getOrCreateConstVector(32,
I, ResType,
TII, ZeroAsNull);
3311 selectOp = SPIRV::OpSelectVIVCond;
3312 addOp = SPIRV::OpIAddV;
3316 Register BReg =
MRI->createVirtualRegister(GR.getRegClass(BoolType));
3317 Result &= selectOpWithSrcs(BReg, BoolType,
I, {HighReg, NegOneReg},
3321 Register TmpReg =
MRI->createVirtualRegister(GR.getRegClass(ResType));
3323 selectOpWithSrcs(TmpReg, ResType,
I, {BReg, LowReg, HighReg}, selectOp);
3326 Register ValReg =
MRI->createVirtualRegister(GR.getRegClass(ResType));
3327 Result &= selectOpWithSrcs(ValReg, ResType,
I, {BReg, Reg0, Reg32}, selectOp);
3330 selectOpWithSrcs(ResVReg, ResType,
I, {ValReg, TmpReg}, addOp);
3333bool SPIRVInstructionSelector::selectFirstBitHigh(
Register ResVReg,
3336 bool IsSigned)
const {
3338 Register OpReg =
I.getOperand(2).getReg();
3339 SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
3341 switch (GR.getScalarOrVectorBitWidth(OpType)) {
3343 return selectFirstBitHigh16(ResVReg, ResType,
I, IsSigned);
3345 return selectFirstBitHigh32(ResVReg, ResType,
I, OpReg, IsSigned);
3347 return selectFirstBitHigh64(ResVReg, ResType,
I, IsSigned);
3350 "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
3354bool SPIRVInstructionSelector::selectAllocaArray(
Register ResVReg,
3360 bool Res =
BuildMI(BB,
I,
I.getDebugLoc(),
3361 TII.get(SPIRV::OpVariableLengthArrayINTEL))
3363 .
addUse(GR.getSPIRVTypeID(ResType))
3364 .
addUse(
I.getOperand(2).getReg())
3366 if (!STI.isVulkanEnv()) {
3367 unsigned Alignment =
I.getOperand(3).getImm();
3373bool SPIRVInstructionSelector::selectFrameIndex(
Register ResVReg,
3379 bool Res =
BuildMI(*It->getParent(), It, It->getDebugLoc(),
3380 TII.get(SPIRV::OpVariable))
3382 .
addUse(GR.getSPIRVTypeID(ResType))
3385 if (!STI.isVulkanEnv()) {
3386 unsigned Alignment =
I.getOperand(2).getImm();
3393bool SPIRVInstructionSelector::selectBranch(
MachineInstr &
I)
const {
3400 if (PrevI !=
nullptr && PrevI->
getOpcode() == TargetOpcode::G_BRCOND) {
3401 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
3404 .
addMBB(
I.getOperand(0).getMBB())
3408 .
addMBB(
I.getOperand(0).getMBB())
3412bool SPIRVInstructionSelector::selectBranchCond(
MachineInstr &
I)
const {
3425 if (NextI !=
nullptr && NextI->
getOpcode() == SPIRV::OpBranchConditional)
3432 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
3433 .
addUse(
I.getOperand(0).getReg())
3434 .
addMBB(
I.getOperand(1).getMBB())
3439bool SPIRVInstructionSelector::selectPhi(
Register ResVReg,
3442 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpPhi))
3444 .
addUse(GR.getSPIRVTypeID(ResType));
3445 const unsigned NumOps =
I.getNumOperands();
3446 for (
unsigned i = 1; i < NumOps; i += 2) {
3447 MIB.
addUse(
I.getOperand(i + 0).getReg());
3448 MIB.
addMBB(
I.getOperand(i + 1).getMBB());
3456bool SPIRVInstructionSelector::selectGlobalValue(
3466 SPIRV::AccessQualifier::ReadWrite,
false);
3467 PointerBaseType = GR.getOrCreateSPIRVArrayType(
3470 PointerBaseType = GR.getOrCreateSPIRVType(
3471 GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
3474 std::string GlobalIdent;
3476 unsigned &
ID = UnnamedGlobalIDs[GV];
3478 ID = UnnamedGlobalIDs.size();
3479 GlobalIdent =
"__unnamed_" +
Twine(
ID).
str();
3494 if (isa<Function>(GV)) {
3497 Register NewReg = GR.find(ConstVal, GR.CurMF);
3500 GR.add(ConstVal, GR.CurMF, NewReg);
3502 STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
3503 ? dyn_cast<Function>(GV)
3505 SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
3506 PointerBaseType,
I,
TII,
3507 GVFun ? SPIRV::StorageClass::CodeSectionINTEL
3513 Register ResTypeReg = GR.getSPIRVTypeID(ResType);
3516 MRI->createGenericVirtualRegister(GR.getRegType(ResType));
3517 MRI->setRegClass(FuncVReg, &SPIRV::pIDRegClass);
3519 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
3524 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
3533 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
3535 .
addUse(GR.getSPIRVTypeID(ResType))
3538 assert(NewReg != ResVReg);
3539 return BuildCOPY(ResVReg, NewReg,
I);
3541 auto GlobalVar = cast<GlobalVariable>(GV);
3544 bool HasInit =
GlobalVar->hasInitializer() &&
3545 !isa<UndefValue>(
GlobalVar->getInitializer());
3548 if (HasInit && !
Init)
3552 SPIRV::LinkageType::LinkageType LnkType =
3554 ? SPIRV::LinkageType::Import
3556 STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
3557 ? SPIRV::LinkageType::LinkOnceODR
3558 : SPIRV::LinkageType::Export);
3567 GlobalVar->isConstant(), HasLnkTy, LnkType, MIRBuilder,
true);
3568 return Reg.isValid();
3571bool SPIRVInstructionSelector::selectLog10(
Register ResVReg,
3574 if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
3575 return selectExtInst(ResVReg, ResType,
I, CL::log10);
3587 Register VarReg =
MRI->createVirtualRegister(GR.getRegClass(ResType));
3589 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
3591 .
addUse(GR.getSPIRVTypeID(ResType))
3592 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
3594 .
add(
I.getOperand(1))
3599 ResType->
getOpcode() == SPIRV::OpTypeFloat);
3602 ResType->
getOpcode() == SPIRV::OpTypeVector
3606 GR.buildConstantFP(
APFloat(0.30103f), MIRBuilder, SpirvScalarType);
3609 auto Opcode = ResType->
getOpcode() == SPIRV::OpTypeVector
3610 ? SPIRV::OpVectorTimesScalar
3614 .
addUse(GR.getSPIRVTypeID(ResType))
3623bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
3624 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
3627 const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
3629 GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
3630 const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
3631 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
3637 GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.
getMF());
3641 Register Variable = GR.buildGlobalVariable(
3643 SPIRV::StorageClass::Input,
nullptr,
true,
true,
3644 SPIRV::LinkageType::Import, MIRBuilder,
false);
3648 Register LoadedRegister =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
3650 GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.
getMF());
3654 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
3656 .
addUse(GR.getSPIRVTypeID(Vec3Ty))
3661 assert(
I.getOperand(2).isReg());
3666 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
3668 .
addUse(GR.getSPIRVTypeID(ResType))
3677 if (
Type->getOpcode() != SPIRV::OpTypeVector)
3678 return GR.getOrCreateSPIRVVectorType(
Type, 4, MIRBuilder);
3681 if (VectorSize == 4)
3685 const SPIRVType *ScalarType = GR.getSPIRVTypeForVReg(ScalarTypeReg);
3686 return GR.getOrCreateSPIRVVectorType(ScalarType, 4, MIRBuilder);
3694 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
uint64_t IntrinsicInst * II
static StringRef getName(Value *V)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
std::vector< std::pair< SPIRV::InstructionSet::InstructionSet, uint32_t > > ExtInstList
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static APFloat getOneFP(const Type *LLVMFloatTy)
static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC)
static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg)
static bool mayApplyGenericSelection(unsigned Opcode)
static APFloat getZeroFP(const Type *LLVMFloatTy)
static void addMemoryOperands(MachineMemOperand *MemOp, MachineInstrBuilder &MIB)
static unsigned getFCmpOpcode(unsigned PredNum)
bool isTypeFoldingSupported(unsigned Opcode)
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
static unsigned getBoolCmpOpcode(unsigned PredNum)
static unsigned getICmpOpcode(unsigned PredNum)
static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef, SmallPtrSet< SPIRVType *, 4 > &Visited)
static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
static unsigned getPtrCmpOpcode(unsigned Pred)
bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
static unsigned getArrayComponentCount(MachineRegisterInfo *MRI, const SPIRVType *ResType)
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
APInt bitcastToAPInt() const
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
This is an important base class in LLVM.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This class represents an Operation in the Expression.
const Function & getFunction() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
LinkageTypes getLinkage() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
static std::string getGlobalIdentifier(StringRef Name, GlobalValue::LinkageTypes Linkage, StringRef FileName)
Return the modified name for a global value suitable to be used as the key for a global lookup (e....
bool hasAvailableExternallyLinkage() const
@ InternalLinkage
Rename collisions when linking (static functions).
@ LinkOnceODRLinkage
Same, but only replaced by something equivalent.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Helper class to build MachineInstr.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
Register getReg() const
getReg - Returns the register number.
defusechain_iterator - This class provides iterator support for machine operands in the function that...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Analysis providing profile information.
Holds all the information related to register banks.
Wrapper class representing virtual and physical registers.
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
constexpr bool isValid() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
Class to represent struct types.
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
std::string str() const
Return the twine contents as a std::string.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isArrayTy() const
True if this is an instance of ArrayType.
Type * getArrayElementType() const
uint64_t getArrayNumElements() const
@ HalfTyID
16-bit floating point type
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
bool isStructTy() const
True if this is an instance of StructType.
TypeID getTypeID() const
Return the type id for the type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
Represents a version number in the form major[.minor[.subminor[.build]]].
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
Reg
All possible values of the reg field in the ModR/M byte.
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface o...
NodeAddr< DefNode * > Def
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I)
Type * toTypedPointer(Type *Ty)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
SPIRV::StorageClass::StorageClass addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI)
AtomicOrdering
Atomic ordering for LLVM's memory model.
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
InstructionSelector * createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget, const RegisterBankInfo &RBI)
auto count(R &&Range, const E &Element)
Wrapper function around std::count to count the number of times an element Element occurs in the give...
constexpr unsigned BitWidth
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...