34#include "llvm/IR/IntrinsicsSPIRV.h"
38#define DEBUG_TYPE "spirv-isel"
41namespace CL = SPIRV::OpenCLExtInst;
42namespace GL = SPIRV::GLSLExtInst;
45 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
// Maps an integer immediate (taken from a selection-control hint operand)
// to the corresponding SPIR-V SelectionControl mask value.
// NOTE(review): the guarding conditions between the returns are missing
// from this extraction fragment — presumably each return is gated on a
// specific value of Imm; confirm against the intrinsic's encoding before
// relying on this mapping.
49llvm::SPIRV::SelectionControl::SelectionControl
50getSelectionOperandForImm(
int Imm) {
52 return SPIRV::SelectionControl::Flatten;
54 return SPIRV::SelectionControl::DontFlatten;
56 return SPIRV::SelectionControl::None;
60#define GET_GLOBALISEL_PREDICATE_BITSET
61#include "SPIRVGenGlobalISel.inc"
62#undef GET_GLOBALISEL_PREDICATE_BITSET
89#define GET_GLOBALISEL_PREDICATES_DECL
90#include "SPIRVGenGlobalISel.inc"
91#undef GET_GLOBALISEL_PREDICATES_DECL
93#define GET_GLOBALISEL_TEMPORARIES_DECL
94#include "SPIRVGenGlobalISel.inc"
95#undef GET_GLOBALISEL_TEMPORARIES_DECL
117 unsigned BitSetOpcode)
const;
121 unsigned BitSetOpcode)
const;
125 unsigned BitSetOpcode,
bool SwapPrimarySide)
const;
129 unsigned BitSetOpcode,
130 bool SwapPrimarySide)
const;
137 unsigned Opcode)
const;
140 unsigned Opcode)
const;
157 unsigned NegateOpcode = 0)
const;
211 template <
bool Signed>
214 template <
bool Signed>
230 bool IsSigned)
const;
232 bool IsSigned,
unsigned Opcode)
const;
234 bool IsSigned)
const;
240 bool IsSigned)
const;
273 [[maybe_unused]]
bool selectExtInst(
Register ResVReg,
276 GL::GLSLExtInst GLInst)
const;
281 GL::GLSLExtInst GLInst)
const;
308 std::pair<Register, bool>
310 const SPIRVType *ResType =
nullptr)
const;
322 SPIRV::StorageClass::StorageClass SC)
const;
330 Register IndexReg,
bool IsNonUniform,
336 bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
343#define GET_GLOBALISEL_IMPL
344#include "SPIRVGenGlobalISel.inc"
345#undef GET_GLOBALISEL_IMPL
351 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
353#include
"SPIRVGenGlobalISel.inc"
356#include
"SPIRVGenGlobalISel.inc"
366 GR.setCurrentFunc(MF);
367 InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
372 if (HasVRegsReset == &MF)
377 for (
unsigned I = 0, E =
MRI.getNumVirtRegs();
I != E; ++
I) {
379 LLT RegType =
MRI.getType(Reg);
387 for (
const auto &
MBB : MF) {
388 for (
const auto &
MI :
MBB) {
389 if (
MI.getOpcode() != SPIRV::ASSIGN_TYPE)
392 LLT DstType =
MRI.getType(DstReg);
394 LLT SrcType =
MRI.getType(SrcReg);
395 if (DstType != SrcType)
396 MRI.setType(DstReg,
MRI.getType(SrcReg));
400 if (DstRC != SrcRC && SrcRC)
401 MRI.setRegClass(DstReg, SrcRC);
412 for (
const auto &MO :
MI.all_defs()) {
414 if (Reg.isPhysical() || !
MRI.use_nodbg_empty(Reg))
417 if (
MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE ||
MI.isFakeUse() ||
418 MI.isLifetimeMarker())
422 if (
MI.mayStore() ||
MI.isCall() ||
423 (
MI.mayLoad() &&
MI.hasOrderedMemoryRef()) ||
MI.isPosition() ||
424 MI.isDebugInstr() ||
MI.isTerminator() ||
MI.isJumpTableDebugInfo())
430 resetVRegsType(*
I.getParent()->getParent());
432 assert(
I.getParent() &&
"Instruction should be in a basic block!");
433 assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
438 if (Opcode == SPIRV::ASSIGN_TYPE) {
439 Register DstReg =
I.getOperand(0).getReg();
440 Register SrcReg =
I.getOperand(1).getReg();
441 auto *
Def =
MRI->getVRegDef(SrcReg);
443 bool Res = selectImpl(
I, *CoverageInfo);
445 if (!Res &&
Def->getOpcode() != TargetOpcode::G_CONSTANT) {
446 dbgs() <<
"Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
450 assert(Res ||
Def->getOpcode() == TargetOpcode::G_CONSTANT);
457 MRI->setRegClass(SrcReg,
MRI->getRegClass(DstReg));
458 MRI->replaceRegWith(SrcReg, DstReg);
459 GR.invalidateMachineInstr(&
I);
460 I.removeFromParent();
462 }
else if (
I.getNumDefs() == 1) {
469 if (DeadMIs.contains(&
I)) {
474 GR.invalidateMachineInstr(&
I);
479 if (
I.getNumOperands() !=
I.getNumExplicitOperands()) {
480 LLVM_DEBUG(
errs() <<
"Generic instr has unexpected implicit operands\n");
486 bool HasDefs =
I.getNumDefs() > 0;
488 SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) :
nullptr;
489 assert(!HasDefs || ResType ||
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
490 if (spvSelect(ResVReg, ResType,
I)) {
492 for (
unsigned i = 0; i <
I.getNumDefs(); ++i)
494 GR.invalidateMachineInstr(&
I);
495 I.removeFromParent();
503 case TargetOpcode::G_CONSTANT:
505 case TargetOpcode::G_SADDO:
506 case TargetOpcode::G_SSUBO:
516 if (DstRC != SrcRC && SrcRC)
517 MRI->setRegClass(DestReg, SrcRC);
518 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
519 TII.get(TargetOpcode::COPY))
525bool SPIRVInstructionSelector::spvSelect(
Register ResVReg,
528 const unsigned Opcode =
I.getOpcode();
530 return selectImpl(
I, *CoverageInfo);
532 case TargetOpcode::G_CONSTANT:
533 return selectConst(ResVReg, ResType,
I.getOperand(1).getCImm()->getValue(),
535 case TargetOpcode::G_GLOBAL_VALUE:
536 return selectGlobalValue(ResVReg,
I);
537 case TargetOpcode::G_IMPLICIT_DEF:
538 return selectOpUndef(ResVReg, ResType,
I);
539 case TargetOpcode::G_FREEZE:
540 return selectFreeze(ResVReg, ResType,
I);
542 case TargetOpcode::G_INTRINSIC:
543 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
544 case TargetOpcode::G_INTRINSIC_CONVERGENT:
545 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
546 return selectIntrinsic(ResVReg, ResType,
I);
547 case TargetOpcode::G_BITREVERSE:
548 return selectBitreverse(ResVReg, ResType,
I);
550 case TargetOpcode::G_BUILD_VECTOR:
551 return selectBuildVector(ResVReg, ResType,
I);
552 case TargetOpcode::G_SPLAT_VECTOR:
553 return selectSplatVector(ResVReg, ResType,
I);
555 case TargetOpcode::G_SHUFFLE_VECTOR: {
557 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
559 .
addUse(GR.getSPIRVTypeID(ResType))
560 .
addUse(
I.getOperand(1).getReg())
561 .
addUse(
I.getOperand(2).getReg());
562 for (
auto V :
I.getOperand(3).getShuffleMask())
566 case TargetOpcode::G_MEMMOVE:
567 case TargetOpcode::G_MEMCPY:
568 case TargetOpcode::G_MEMSET:
569 return selectMemOperation(ResVReg,
I);
571 case TargetOpcode::G_ICMP:
572 return selectICmp(ResVReg, ResType,
I);
573 case TargetOpcode::G_FCMP:
574 return selectFCmp(ResVReg, ResType,
I);
576 case TargetOpcode::G_FRAME_INDEX:
577 return selectFrameIndex(ResVReg, ResType,
I);
579 case TargetOpcode::G_LOAD:
580 return selectLoad(ResVReg, ResType,
I);
581 case TargetOpcode::G_STORE:
582 return selectStore(
I);
584 case TargetOpcode::G_BR:
585 return selectBranch(
I);
586 case TargetOpcode::G_BRCOND:
587 return selectBranchCond(
I);
589 case TargetOpcode::G_PHI:
590 return selectPhi(ResVReg, ResType,
I);
592 case TargetOpcode::G_FPTOSI:
593 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
594 case TargetOpcode::G_FPTOUI:
595 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
597 case TargetOpcode::G_SITOFP:
598 return selectIToF(ResVReg, ResType,
I,
true, SPIRV::OpConvertSToF);
599 case TargetOpcode::G_UITOFP:
600 return selectIToF(ResVReg, ResType,
I,
false, SPIRV::OpConvertUToF);
602 case TargetOpcode::G_CTPOP:
603 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitCount);
604 case TargetOpcode::G_SMIN:
605 return selectExtInst(ResVReg, ResType,
I, CL::s_min, GL::SMin);
606 case TargetOpcode::G_UMIN:
607 return selectExtInst(ResVReg, ResType,
I, CL::u_min, GL::UMin);
609 case TargetOpcode::G_SMAX:
610 return selectExtInst(ResVReg, ResType,
I, CL::s_max, GL::SMax);
611 case TargetOpcode::G_UMAX:
612 return selectExtInst(ResVReg, ResType,
I, CL::u_max, GL::UMax);
614 case TargetOpcode::G_SCMP:
615 return selectSUCmp(ResVReg, ResType,
I,
true);
616 case TargetOpcode::G_UCMP:
617 return selectSUCmp(ResVReg, ResType,
I,
false);
619 case TargetOpcode::G_STRICT_FMA:
620 case TargetOpcode::G_FMA:
621 return selectExtInst(ResVReg, ResType,
I, CL::fma, GL::Fma);
623 case TargetOpcode::G_STRICT_FLDEXP:
624 return selectExtInst(ResVReg, ResType,
I, CL::ldexp);
626 case TargetOpcode::G_FPOW:
627 return selectExtInst(ResVReg, ResType,
I, CL::pow, GL::Pow);
628 case TargetOpcode::G_FPOWI:
629 return selectExtInst(ResVReg, ResType,
I, CL::pown);
631 case TargetOpcode::G_FEXP:
632 return selectExtInst(ResVReg, ResType,
I, CL::exp, GL::Exp);
633 case TargetOpcode::G_FEXP2:
634 return selectExtInst(ResVReg, ResType,
I, CL::exp2, GL::Exp2);
636 case TargetOpcode::G_FLOG:
637 return selectExtInst(ResVReg, ResType,
I, CL::log, GL::Log);
638 case TargetOpcode::G_FLOG2:
639 return selectExtInst(ResVReg, ResType,
I, CL::log2, GL::Log2);
640 case TargetOpcode::G_FLOG10:
641 return selectLog10(ResVReg, ResType,
I);
643 case TargetOpcode::G_FABS:
644 return selectExtInst(ResVReg, ResType,
I, CL::fabs, GL::FAbs);
645 case TargetOpcode::G_ABS:
646 return selectExtInst(ResVReg, ResType,
I, CL::s_abs, GL::SAbs);
648 case TargetOpcode::G_FMINNUM:
649 case TargetOpcode::G_FMINIMUM:
650 return selectExtInst(ResVReg, ResType,
I, CL::fmin, GL::NMin);
651 case TargetOpcode::G_FMAXNUM:
652 case TargetOpcode::G_FMAXIMUM:
653 return selectExtInst(ResVReg, ResType,
I, CL::fmax, GL::NMax);
655 case TargetOpcode::G_FCOPYSIGN:
656 return selectExtInst(ResVReg, ResType,
I, CL::copysign);
658 case TargetOpcode::G_FCEIL:
659 return selectExtInst(ResVReg, ResType,
I, CL::ceil, GL::Ceil);
660 case TargetOpcode::G_FFLOOR:
661 return selectExtInst(ResVReg, ResType,
I, CL::floor, GL::Floor);
663 case TargetOpcode::G_FCOS:
664 return selectExtInst(ResVReg, ResType,
I, CL::cos, GL::Cos);
665 case TargetOpcode::G_FSIN:
666 return selectExtInst(ResVReg, ResType,
I, CL::sin, GL::Sin);
667 case TargetOpcode::G_FTAN:
668 return selectExtInst(ResVReg, ResType,
I, CL::tan, GL::Tan);
669 case TargetOpcode::G_FACOS:
670 return selectExtInst(ResVReg, ResType,
I, CL::acos, GL::Acos);
671 case TargetOpcode::G_FASIN:
672 return selectExtInst(ResVReg, ResType,
I, CL::asin, GL::Asin);
673 case TargetOpcode::G_FATAN:
674 return selectExtInst(ResVReg, ResType,
I, CL::atan, GL::Atan);
675 case TargetOpcode::G_FATAN2:
676 return selectExtInst(ResVReg, ResType,
I, CL::atan2, GL::Atan2);
677 case TargetOpcode::G_FCOSH:
678 return selectExtInst(ResVReg, ResType,
I, CL::cosh, GL::Cosh);
679 case TargetOpcode::G_FSINH:
680 return selectExtInst(ResVReg, ResType,
I, CL::sinh, GL::Sinh);
681 case TargetOpcode::G_FTANH:
682 return selectExtInst(ResVReg, ResType,
I, CL::tanh, GL::Tanh);
684 case TargetOpcode::G_STRICT_FSQRT:
685 case TargetOpcode::G_FSQRT:
686 return selectExtInst(ResVReg, ResType,
I, CL::sqrt, GL::Sqrt);
688 case TargetOpcode::G_CTTZ:
689 case TargetOpcode::G_CTTZ_ZERO_UNDEF:
690 return selectExtInst(ResVReg, ResType,
I, CL::ctz);
691 case TargetOpcode::G_CTLZ:
692 case TargetOpcode::G_CTLZ_ZERO_UNDEF:
693 return selectExtInst(ResVReg, ResType,
I, CL::clz);
695 case TargetOpcode::G_INTRINSIC_ROUND:
696 return selectExtInst(ResVReg, ResType,
I, CL::round, GL::Round);
697 case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
698 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
699 case TargetOpcode::G_INTRINSIC_TRUNC:
700 return selectExtInst(ResVReg, ResType,
I, CL::trunc, GL::Trunc);
701 case TargetOpcode::G_FRINT:
702 case TargetOpcode::G_FNEARBYINT:
703 return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
705 case TargetOpcode::G_SMULH:
706 return selectExtInst(ResVReg, ResType,
I, CL::s_mul_hi);
707 case TargetOpcode::G_UMULH:
708 return selectExtInst(ResVReg, ResType,
I, CL::u_mul_hi);
710 case TargetOpcode::G_SADDSAT:
711 return selectExtInst(ResVReg, ResType,
I, CL::s_add_sat);
712 case TargetOpcode::G_UADDSAT:
713 return selectExtInst(ResVReg, ResType,
I, CL::u_add_sat);
714 case TargetOpcode::G_SSUBSAT:
715 return selectExtInst(ResVReg, ResType,
I, CL::s_sub_sat);
716 case TargetOpcode::G_USUBSAT:
717 return selectExtInst(ResVReg, ResType,
I, CL::u_sub_sat);
719 case TargetOpcode::G_UADDO:
720 return selectOverflowArith(ResVReg, ResType,
I,
721 ResType->
getOpcode() == SPIRV::OpTypeVector
722 ? SPIRV::OpIAddCarryV
723 : SPIRV::OpIAddCarryS);
724 case TargetOpcode::G_USUBO:
725 return selectOverflowArith(ResVReg, ResType,
I,
726 ResType->
getOpcode() == SPIRV::OpTypeVector
727 ? SPIRV::OpISubBorrowV
728 : SPIRV::OpISubBorrowS);
729 case TargetOpcode::G_UMULO:
730 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpUMulExtended);
731 case TargetOpcode::G_SMULO:
732 return selectOverflowArith(ResVReg, ResType,
I, SPIRV::OpSMulExtended);
734 case TargetOpcode::G_SEXT:
735 return selectExt(ResVReg, ResType,
I,
true);
736 case TargetOpcode::G_ANYEXT:
737 case TargetOpcode::G_ZEXT:
738 return selectExt(ResVReg, ResType,
I,
false);
739 case TargetOpcode::G_TRUNC:
740 return selectTrunc(ResVReg, ResType,
I);
741 case TargetOpcode::G_FPTRUNC:
742 case TargetOpcode::G_FPEXT:
743 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpFConvert);
745 case TargetOpcode::G_PTRTOINT:
746 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertPtrToU);
747 case TargetOpcode::G_INTTOPTR:
748 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertUToPtr);
749 case TargetOpcode::G_BITCAST:
750 return selectBitcast(ResVReg, ResType,
I);
751 case TargetOpcode::G_ADDRSPACE_CAST:
752 return selectAddrSpaceCast(ResVReg, ResType,
I);
753 case TargetOpcode::G_PTR_ADD: {
755 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
759 assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
760 (*II).getOpcode() == TargetOpcode::COPY ||
761 (*II).getOpcode() == SPIRV::OpVariable) &&
764 bool IsGVInit =
false;
766 UseIt =
MRI->use_instr_begin(
I.getOperand(0).getReg()),
767 UseEnd =
MRI->use_instr_end();
768 UseIt != UseEnd; UseIt = std::next(UseIt)) {
769 if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
770 (*UseIt).getOpcode() == SPIRV::OpVariable) {
777 SPIRVType *GVType = GR.getSPIRVTypeForVReg(GV);
778 SPIRVType *GVPointeeType = GR.getPointeeType(GVType);
779 SPIRVType *ResPointeeType = GR.getPointeeType(ResType);
780 if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
783 Register NewVReg =
MRI->createGenericVirtualRegister(
MRI->getType(GV));
784 MRI->setRegClass(NewVReg,
MRI->getRegClass(GV));
791 if (!GR.isBitcastCompatible(ResType, GVType))
793 "incompatible result and operand types in a bitcast");
794 Register ResTypeReg = GR.getSPIRVTypeID(ResType);
796 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitcast))
802 TII.get(STI.isVulkanEnv()
803 ? SPIRV::OpInBoundsAccessChain
804 : SPIRV::OpInBoundsPtrAccessChain))
808 .
addUse(
I.getOperand(2).getReg())
811 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
813 .
addUse(GR.getSPIRVTypeID(ResType))
815 static_cast<uint32_t>(SPIRV::Opcode::InBoundsPtrAccessChain))
817 .
addUse(
I.getOperand(2).getReg())
824 Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32,
I,
TII),
I);
825 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
827 .
addUse(GR.getSPIRVTypeID(ResType))
829 SPIRV::Opcode::InBoundsPtrAccessChain))
832 .
addUse(
I.getOperand(2).getReg());
836 case TargetOpcode::G_ATOMICRMW_OR:
837 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicOr);
838 case TargetOpcode::G_ATOMICRMW_ADD:
839 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicIAdd);
840 case TargetOpcode::G_ATOMICRMW_AND:
841 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicAnd);
842 case TargetOpcode::G_ATOMICRMW_MAX:
843 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMax);
844 case TargetOpcode::G_ATOMICRMW_MIN:
845 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMin);
846 case TargetOpcode::G_ATOMICRMW_SUB:
847 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicISub);
848 case TargetOpcode::G_ATOMICRMW_XOR:
849 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicXor);
850 case TargetOpcode::G_ATOMICRMW_UMAX:
851 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMax);
852 case TargetOpcode::G_ATOMICRMW_UMIN:
853 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMin);
854 case TargetOpcode::G_ATOMICRMW_XCHG:
855 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicExchange);
856 case TargetOpcode::G_ATOMIC_CMPXCHG:
857 return selectAtomicCmpXchg(ResVReg, ResType,
I);
859 case TargetOpcode::G_ATOMICRMW_FADD:
860 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT);
861 case TargetOpcode::G_ATOMICRMW_FSUB:
863 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT,
865 case TargetOpcode::G_ATOMICRMW_FMIN:
866 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMinEXT);
867 case TargetOpcode::G_ATOMICRMW_FMAX:
868 return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMaxEXT);
870 case TargetOpcode::G_FENCE:
871 return selectFence(
I);
873 case TargetOpcode::G_STACKSAVE:
874 return selectStackSave(ResVReg, ResType,
I);
875 case TargetOpcode::G_STACKRESTORE:
876 return selectStackRestore(
I);
878 case TargetOpcode::G_UNMERGE_VALUES:
884 case TargetOpcode::G_TRAP:
885 case TargetOpcode::G_DEBUGTRAP:
886 case TargetOpcode::G_UBSANTRAP:
887 case TargetOpcode::DBG_LABEL:
// Convenience overload: lowers the instruction to an extended instruction
// from the GLSL.std.450 set only, by delegating to the ExtInstList-based
// overload with a single-entry candidate list.
895bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
898 GL::GLSLExtInst GLInst)
const {
899 return selectExtInst(ResVReg, ResType,
I,
900 {{SPIRV::InstructionSet::GLSL_std_450, GLInst}});
// Convenience overload: lowers the instruction to an extended instruction
// from the OpenCL.std set only, by delegating to the ExtInstList-based
// overload with a single-entry candidate list.
903bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
906 CL::OpenCLExtInst CLInst)
const {
907 return selectExtInst(ResVReg, ResType,
I,
908 {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
// Convenience overload taking both an OpenCL.std and a GLSL.std.450
// candidate: builds a two-entry list (OpenCL first, then GLSL) and
// delegates to the ExtInstList-based overload, which picks whichever
// instruction set the subtarget supports.
911bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
914 CL::OpenCLExtInst CLInst,
915 GL::GLSLExtInst GLInst)
const {
916 ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
917 {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
918 return selectExtInst(ResVReg, ResType,
I, ExtInsts);
921bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
926 for (
const auto &Ex : Insts) {
927 SPIRV::InstructionSet::InstructionSet
Set = Ex.first;
929 if (STI.canUseExtInstSet(Set)) {
931 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
933 .
addUse(GR.getSPIRVTypeID(ResType))
936 const unsigned NumOps =
I.getNumOperands();
938 if (Index < NumOps &&
939 I.getOperand(Index).getType() ==
940 MachineOperand::MachineOperandType::MO_IntrinsicID)
943 MIB.
add(
I.getOperand(Index));
950bool SPIRVInstructionSelector::selectOpWithSrcs(
Register ResVReg,
953 std::vector<Register> Srcs,
954 unsigned Opcode)
const {
955 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
957 .
addUse(GR.getSPIRVTypeID(ResType));
964bool SPIRVInstructionSelector::selectUnOp(
Register ResVReg,
967 unsigned Opcode)
const {
968 if (STI.isOpenCLEnv() &&
I.getOperand(1).isReg()) {
969 Register SrcReg =
I.getOperand(1).getReg();
972 MRI->def_instr_begin(SrcReg);
973 DefIt !=
MRI->def_instr_end(); DefIt = std::next(DefIt)) {
974 if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
982 case SPIRV::OpConvertPtrToU:
983 SpecOpcode =
static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
985 case SPIRV::OpConvertUToPtr:
986 SpecOpcode =
static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
990 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
991 TII.get(SPIRV::OpSpecConstantOp))
993 .
addUse(GR.getSPIRVTypeID(ResType))
999 return selectOpWithSrcs(ResVReg, ResType,
I, {
I.getOperand(1).
getReg()},
1003bool SPIRVInstructionSelector::selectBitcast(
Register ResVReg,
1006 Register OpReg =
I.getOperand(1).getReg();
1007 SPIRVType *OpType = OpReg.
isValid() ? GR.getSPIRVTypeForVReg(OpReg) :
nullptr;
1008 if (!GR.isBitcastCompatible(ResType, OpType))
1010 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
1016 if (
MemOp->isVolatile())
1017 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1018 if (
MemOp->isNonTemporal())
1019 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1020 if (
MemOp->getAlign().value())
1021 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
1023 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
1025 if (SpvMemOp &
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
1032 if (Flags & MachineMemOperand::Flags::MOVolatile)
1033 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
1034 if (Flags & MachineMemOperand::Flags::MONonTemporal)
1035 SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
1037 if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None))
1041bool SPIRVInstructionSelector::selectLoad(
Register ResVReg,
1044 unsigned OpOffset = isa<GIntrinsic>(
I) ? 1 : 0;
1046 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
1048 .
addUse(GR.getSPIRVTypeID(ResType))
1050 if (!
I.getNumMemOperands()) {
1051 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1053 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1061bool SPIRVInstructionSelector::selectStore(
MachineInstr &
I)
const {
1062 unsigned OpOffset = isa<GIntrinsic>(
I) ? 1 : 0;
1063 Register StoreVal =
I.getOperand(0 + OpOffset).getReg();
1066 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpStore))
1069 if (!
I.getNumMemOperands()) {
1070 assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
1072 TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
1080bool SPIRVInstructionSelector::selectStackSave(
Register ResVReg,
1083 if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1085 "llvm.stacksave intrinsic: this instruction requires the following "
1086 "SPIR-V extension: SPV_INTEL_variable_length_array",
1089 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSaveMemoryINTEL))
1091 .
addUse(GR.getSPIRVTypeID(ResType))
1095bool SPIRVInstructionSelector::selectStackRestore(
MachineInstr &
I)
const {
1096 if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
1098 "llvm.stackrestore intrinsic: this instruction requires the following "
1099 "SPIR-V extension: SPV_INTEL_variable_length_array",
1101 if (!
I.getOperand(0).isReg())
1104 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpRestoreMemoryINTEL))
1105 .
addUse(
I.getOperand(0).getReg())
1109bool SPIRVInstructionSelector::selectMemOperation(
Register ResVReg,
1112 Register SrcReg =
I.getOperand(1).getReg();
1114 if (
I.getOpcode() == TargetOpcode::G_MEMSET) {
1115 assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
1118 SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8,
I,
TII);
1119 SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num,
I,
TII);
1121 SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
1122 ArrTy,
I,
TII, SPIRV::StorageClass::UniformConstant);
1132 GR.add(GV, GR.CurMF, VarReg);
1133 GR.addGlobalObject(GV, GR.CurMF, VarReg);
1136 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
1138 .
addUse(GR.getSPIRVTypeID(VarTy))
1139 .
addImm(SPIRV::StorageClass::UniformConstant)
1143 SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
1144 ValTy,
I,
TII, SPIRV::StorageClass::UniformConstant);
1146 selectOpWithSrcs(SrcReg, SourceTy,
I, {VarReg}, SPIRV::OpBitcast);
1148 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemorySized))
1149 .
addUse(
I.getOperand(0).getReg())
1151 .
addUse(
I.getOperand(2).getReg());
1152 if (
I.getNumMemOperands())
1160bool SPIRVInstructionSelector::selectAtomicRMW(
Register ResVReg,
1164 unsigned NegateOpcode)
const {
1169 GR.CurMF->getFunction().getContext(),
MemOp->getSyncScopeID()));
1170 auto ScopeConstant = buildI32Constant(Scope,
I);
1171 Register ScopeReg = ScopeConstant.first;
1172 Result &= ScopeConstant.second;
1180 auto MemSemConstant = buildI32Constant(MemSem ,
I);
1181 Register MemSemReg = MemSemConstant.first;
1182 Result &= MemSemConstant.second;
1184 Register ValueReg =
I.getOperand(2).getReg();
1185 if (NegateOpcode != 0) {
1187 Register TmpReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1188 Result &= selectOpWithSrcs(TmpReg, ResType,
I, {ValueReg}, NegateOpcode);
1193 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(NewOpcode))
1195 .
addUse(GR.getSPIRVTypeID(ResType))
1203bool SPIRVInstructionSelector::selectUnmergeValues(
MachineInstr &
I)
const {
1204 unsigned ArgI =
I.getNumOperands() - 1;
1206 I.getOperand(ArgI).isReg() ?
I.getOperand(ArgI).getReg() :
Register(0);
1208 SrcReg.
isValid() ? GR.getSPIRVTypeForVReg(SrcReg) :
nullptr;
1209 if (!DefType || DefType->
getOpcode() != SPIRV::OpTypeVector)
1211 "cannot select G_UNMERGE_VALUES with a non-vector argument");
1217 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1218 Register ResVReg =
I.getOperand(i).getReg();
1219 SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
1222 ResType = ScalarType;
1223 MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
1224 MRI->setType(ResVReg,
LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
1225 GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
1228 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1230 .
addUse(GR.getSPIRVTypeID(ResType))
1232 .
addImm(
static_cast<int64_t
>(i));
1238bool SPIRVInstructionSelector::selectFence(
MachineInstr &
I)
const {
1241 auto MemSemConstant = buildI32Constant(MemSem,
I);
1242 Register MemSemReg = MemSemConstant.first;
1243 bool Result = MemSemConstant.second;
1246 getMemScope(GR.CurMF->getFunction().getContext(), Ord));
1247 auto ScopeConstant = buildI32Constant(Scope,
I);
1248 Register ScopeReg = ScopeConstant.first;
1249 Result &= ScopeConstant.second;
1252 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpMemoryBarrier))
1258bool SPIRVInstructionSelector::selectOverflowArith(
Register ResVReg,
1261 unsigned Opcode)
const {
1262 Type *ResTy =
nullptr;
1264 if (!GR.findValueAttrs(&
I, ResTy, ResName))
1266 "Not enough info to select the arithmetic with overflow instruction");
1269 "with overflow instruction");
1272 Type *ResElemTy = cast<StructType>(ResTy)->getElementType(0);
1277 ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
1278 assert(
I.getNumDefs() > 1 &&
"Not enought operands");
1280 unsigned N = GR.getScalarOrVectorComponentCount(ResType);
1282 BoolType = GR.getOrCreateSPIRVVectorType(BoolType,
N,
I,
TII);
1283 Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);
1284 Register ZeroReg = buildZerosVal(ResType,
I);
1287 MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
1289 if (ResName.
size() > 0)
1294 BuildMI(BB, MIRBuilder.getInsertPt(),
I.getDebugLoc(),
TII.get(Opcode))
1297 for (
unsigned i =
I.getNumDefs(); i <
I.getNumOperands(); ++i)
1298 MIB.
addUse(
I.getOperand(i).getReg());
1303 MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
1304 for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
1306 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1307 .
addDef(i == 1 ? HigherVReg :
I.getOperand(i).getReg())
1308 .
addUse(GR.getSPIRVTypeID(ResType))
1315 .
addDef(
I.getOperand(1).getReg())
1322bool SPIRVInstructionSelector::selectAtomicCmpXchg(
Register ResVReg,
1330 if (!isa<GIntrinsic>(
I)) {
1334 GR.CurMF->getFunction().getContext(),
MemOp->getSyncScopeID()));
1335 auto ScopeConstant = buildI32Constant(Scope,
I);
1336 ScopeReg = ScopeConstant.first;
1337 Result &= ScopeConstant.second;
1339 unsigned ScSem =
static_cast<uint32_t>(
1343 auto MemSemEqConstant = buildI32Constant(MemSemEq,
I);
1344 MemSemEqReg = MemSemEqConstant.first;
1345 Result &= MemSemEqConstant.second;
1348 if (MemSemEq == MemSemNeq)
1349 MemSemNeqReg = MemSemEqReg;
1351 auto MemSemNeqConstant = buildI32Constant(MemSemEq,
I);
1352 MemSemNeqReg = MemSemNeqConstant.first;
1353 Result &= MemSemNeqConstant.second;
1356 ScopeReg =
I.getOperand(5).getReg();
1357 MemSemEqReg =
I.getOperand(6).getReg();
1358 MemSemNeqReg =
I.getOperand(7).getReg();
1362 Register Val =
I.getOperand(4).getReg();
1363 SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
1364 Register ACmpRes =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1367 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpAtomicCompareExchange))
1369 .
addUse(GR.getSPIRVTypeID(SpvValTy))
1377 Register CmpSuccReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1381 .
addUse(GR.getSPIRVTypeID(BoolTy))
1385 Register TmpReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1388 .
addUse(GR.getSPIRVTypeID(ResType))
1390 .
addUse(GR.getOrCreateUndef(
I, ResType,
TII))
1394 BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpCompositeInsert))
1396 .
addUse(GR.getSPIRVTypeID(ResType))
1405 case SPIRV::StorageClass::Workgroup:
1406 case SPIRV::StorageClass::CrossWorkgroup:
1407 case SPIRV::StorageClass::Function:
1416 case SPIRV::StorageClass::DeviceOnlyINTEL:
1417 case SPIRV::StorageClass::HostOnlyINTEL:
1426 bool IsGRef =
false;
1427 bool IsAllowedRefs =
1428 std::all_of(
MRI->use_instr_begin(ResVReg),
MRI->use_instr_end(),
1429 [&IsGRef](
auto const &It) {
1430 unsigned Opcode = It.getOpcode();
1431 if (Opcode == SPIRV::OpConstantComposite ||
1432 Opcode == SPIRV::OpVariable ||
1433 isSpvIntrinsic(It, Intrinsic::spv_init_global))
1434 return IsGRef = true;
1435 return Opcode == SPIRV::OpName;
1437 return IsAllowedRefs && IsGRef;
// Returns the type-ID register of a SPIR-V pointer-to-i8 ("uchar*") type
// in the requested storage class SC, creating the 8-bit integer type and
// the pointer type on demand via the global registry.
1440Register SPIRVInstructionSelector::getUcharPtrTypeReg(
1441 MachineInstr &
I, SPIRV::StorageClass::StorageClass SC)
const {
1442 return GR.getSPIRVTypeID(GR.getOrCreateSPIRVPointerType(
1443 GR.getOrCreateSPIRVIntegerType(8,
I,
TII),
I,
TII, SC));
1450 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1451 TII.get(SPIRV::OpSpecConstantOp))
1461 SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
1462 GR.getPointeeType(SrcPtrTy),
I,
TII, SPIRV::StorageClass::Generic);
1463 Register Tmp =
MRI->createVirtualRegister(&SPIRV::pIDRegClass);
1465 SPIRV::StorageClass::Generic),
1466 GR.getPointerSize()));
1468 GR.assignSPIRVTypeToVReg(GenericPtrTy, Tmp, *MF);
1470 I, Tmp, SrcPtr, GR.getSPIRVTypeID(GenericPtrTy),
1471 static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric));
1481bool SPIRVInstructionSelector::selectAddrSpaceCast(
Register ResVReg,
1487 Register SrcPtr =
I.getOperand(1).getReg();
1488 SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
1491 if (SrcPtrTy->
getOpcode() != SPIRV::OpTypePointer ||
1492 ResType->
getOpcode() != SPIRV::OpTypePointer)
1493 return BuildCOPY(ResVReg, SrcPtr,
I);
1495 SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtrTy);
1496 SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResType);
1503 unsigned SpecOpcode =
1505 ?
static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric)
1506 : (SrcSC == SPIRV::StorageClass::Generic &&
1508 ?
static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr)
1515 return buildSpecConstantOp(
I, ResVReg, SrcPtr,
1516 getUcharPtrTypeReg(
I, DstSC), SpecOpcode)
1517 .constrainAllUses(
TII,
TRI, RBI);
1521 buildSpecConstantOp(
1523 getUcharPtrTypeReg(
I, DstSC),
1524 static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr))
1525 .constrainAllUses(
TII,
TRI, RBI);
1531 return BuildCOPY(ResVReg, SrcPtr,
I);
1533 if ((SrcSC == SPIRV::StorageClass::Function &&
1534 DstSC == SPIRV::StorageClass::Private) ||
1535 (DstSC == SPIRV::StorageClass::Function &&
1536 SrcSC == SPIRV::StorageClass::Private))
1537 return BuildCOPY(ResVReg, SrcPtr,
I);
1541 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1544 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
1547 Register Tmp =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1548 SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
1549 GR.getPointeeType(SrcPtrTy),
I,
TII, SPIRV::StorageClass::Generic);
1552 .
addUse(GR.getSPIRVTypeID(GenericPtrTy))
1557 .
addUse(GR.getSPIRVTypeID(ResType))
1565 return selectUnOp(ResVReg, ResType,
I,
1566 SPIRV::OpPtrCastToCrossWorkgroupINTEL);
1568 return selectUnOp(ResVReg, ResType,
I,
1569 SPIRV::OpCrossWorkgroupCastToPtrINTEL);
1571 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1573 return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
1583 return SPIRV::OpFOrdEqual;
1585 return SPIRV::OpFOrdGreaterThanEqual;
1587 return SPIRV::OpFOrdGreaterThan;
1589 return SPIRV::OpFOrdLessThanEqual;
1591 return SPIRV::OpFOrdLessThan;
1593 return SPIRV::OpFOrdNotEqual;
1595 return SPIRV::OpOrdered;
1597 return SPIRV::OpFUnordEqual;
1599 return SPIRV::OpFUnordGreaterThanEqual;
1601 return SPIRV::OpFUnordGreaterThan;
1603 return SPIRV::OpFUnordLessThanEqual;
1605 return SPIRV::OpFUnordLessThan;
1607 return SPIRV::OpFUnordNotEqual;
1609 return SPIRV::OpUnordered;
1619 return SPIRV::OpIEqual;
1621 return SPIRV::OpINotEqual;
1623 return SPIRV::OpSGreaterThanEqual;
1625 return SPIRV::OpSGreaterThan;
1627 return SPIRV::OpSLessThanEqual;
1629 return SPIRV::OpSLessThan;
1631 return SPIRV::OpUGreaterThanEqual;
1633 return SPIRV::OpUGreaterThan;
1635 return SPIRV::OpULessThanEqual;
1637 return SPIRV::OpULessThan;
1646 return SPIRV::OpPtrEqual;
1648 return SPIRV::OpPtrNotEqual;
1659 return SPIRV::OpLogicalEqual;
1661 return SPIRV::OpLogicalNotEqual;
1695bool SPIRVInstructionSelector::selectAnyOrAll(
Register ResVReg,
1698 unsigned OpAnyOrAll)
const {
1699 assert(
I.getNumOperands() == 3);
1700 assert(
I.getOperand(2).isReg());
1702 Register InputRegister =
I.getOperand(2).getReg();
1703 SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
1708 bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
1709 bool IsVectorTy = InputType->
getOpcode() == SPIRV::OpTypeVector;
1710 if (IsBoolTy && !IsVectorTy) {
1711 assert(ResVReg ==
I.getOperand(0).getReg());
1712 return BuildCOPY(ResVReg, InputRegister,
I);
1715 bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
1716 unsigned SpirvNotEqualId =
1717 IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
1718 SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(
I,
TII);
1723 NotEqualReg = IsBoolTy ? InputRegister
1724 :
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
1726 SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts,
I,
TII);
1732 IsFloatTy ? buildZerosValF(InputType,
I) : buildZerosVal(InputType,
I);
1736 .
addUse(GR.getSPIRVTypeID(SpvBoolTy))
1747 .
addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
1752bool SPIRVInstructionSelector::selectAll(
Register ResVReg,
1755 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAll);
1758bool SPIRVInstructionSelector::selectAny(
Register ResVReg,
1761 return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAny);
1765bool SPIRVInstructionSelector::selectFloatDot(
Register ResVReg,
1768 assert(
I.getNumOperands() == 4);
1769 assert(
I.getOperand(2).isReg());
1770 assert(
I.getOperand(3).isReg());
1773 GR.getSPIRVTypeForVReg(
I.getOperand(2).getReg());
1776 GR.getScalarOrVectorComponentCount(VecType) > 1 &&
1777 "dot product requires a vector of at least 2 components");
1780 GR.getSPIRVTypeForVReg(
VecType->getOperand(1).getReg());
1785 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpDot))
1787 .
addUse(GR.getSPIRVTypeID(ResType))
1788 .
addUse(
I.getOperand(2).getReg())
1789 .
addUse(
I.getOperand(3).getReg())
1793bool SPIRVInstructionSelector::selectIntegerDot(
Register ResVReg,
1797 assert(
I.getNumOperands() == 4);
1798 assert(
I.getOperand(2).isReg());
1799 assert(
I.getOperand(3).isReg());
1802 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
1805 .
addUse(GR.getSPIRVTypeID(ResType))
1806 .
addUse(
I.getOperand(2).getReg())
1807 .
addUse(
I.getOperand(3).getReg())
1813bool SPIRVInstructionSelector::selectIntegerDotExpansion(
1815 assert(
I.getNumOperands() == 4);
1816 assert(
I.getOperand(2).isReg());
1817 assert(
I.getOperand(3).isReg());
1821 Register Vec0 =
I.getOperand(2).getReg();
1822 Register Vec1 =
I.getOperand(3).getReg();
1823 Register TmpVec =
MRI->createVirtualRegister(GR.getRegClass(ResType));
1828 .
addUse(GR.getSPIRVTypeID(VecType))
1834 GR.getScalarOrVectorComponentCount(VecType) > 1 &&
1835 "dot product requires a vector of at least 2 components");
1837 Register Res =
MRI->createVirtualRegister(GR.getRegClass(ResType));
1840 .
addUse(GR.getSPIRVTypeID(ResType))
1845 for (
unsigned i = 1; i < GR.getScalarOrVectorComponentCount(VecType); i++) {
1846 Register Elt =
MRI->createVirtualRegister(GR.getRegClass(ResType));
1849 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1851 .
addUse(GR.getSPIRVTypeID(ResType))
1856 Register Sum = i < GR.getScalarOrVectorComponentCount(VecType) - 1
1857 ?
MRI->createVirtualRegister(GR.getRegClass(ResType))
1862 .
addUse(GR.getSPIRVTypeID(ResType))
1872template <
bool Signed>
1873bool SPIRVInstructionSelector::selectDot4AddPacked(
Register ResVReg,
1876 assert(
I.getNumOperands() == 5);
1877 assert(
I.getOperand(2).isReg());
1878 assert(
I.getOperand(3).isReg());
1879 assert(
I.getOperand(4).isReg());
1882 auto DotOp =
Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
1883 Register Dot =
MRI->createVirtualRegister(GR.getRegClass(ResType));
1886 .
addUse(GR.getSPIRVTypeID(ResType))
1887 .
addUse(
I.getOperand(2).getReg())
1888 .
addUse(
I.getOperand(3).getReg())
1893 .
addUse(GR.getSPIRVTypeID(ResType))
1895 .
addUse(
I.getOperand(4).getReg())
1902template <
bool Signed>
1903bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
1905 assert(
I.getNumOperands() == 5);
1906 assert(
I.getOperand(2).isReg());
1907 assert(
I.getOperand(3).isReg());
1908 assert(
I.getOperand(4).isReg());
1914 Register Acc =
I.getOperand(4).getReg();
1915 SPIRVType *EltType = GR.getOrCreateSPIRVIntegerType(8,
I,
TII);
1917 Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;
1920 for (
unsigned i = 0; i < 4; i++) {
1922 Register AElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1925 .
addUse(GR.getSPIRVTypeID(ResType))
1926 .
addUse(
I.getOperand(2).getReg())
1927 .
addUse(GR.getOrCreateConstInt(i * 8,
I, EltType,
TII))
1928 .
addUse(GR.getOrCreateConstInt(8,
I, EltType,
TII))
1932 Register BElt =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1935 .
addUse(GR.getSPIRVTypeID(ResType))
1936 .
addUse(
I.getOperand(3).getReg())
1937 .
addUse(GR.getOrCreateConstInt(i * 8,
I, EltType,
TII))
1938 .
addUse(GR.getOrCreateConstInt(8,
I, EltType,
TII))
1945 .
addUse(GR.getSPIRVTypeID(ResType))
1951 Register MaskMul =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1954 .
addUse(GR.getSPIRVTypeID(ResType))
1956 .
addUse(GR.getOrCreateConstInt(0,
I, EltType,
TII))
1957 .
addUse(GR.getOrCreateConstInt(8,
I, EltType,
TII))
1962 i < 3 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
1965 .
addUse(GR.getSPIRVTypeID(ResType))
1978bool SPIRVInstructionSelector::selectSaturate(
Register ResVReg,
1981 assert(
I.getNumOperands() == 3);
1982 assert(
I.getOperand(2).isReg());
1984 Register VZero = buildZerosValF(ResType,
I);
1985 Register VOne = buildOnesValF(ResType,
I);
1987 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1989 .
addUse(GR.getSPIRVTypeID(ResType))
1990 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
1992 .
addUse(
I.getOperand(2).getReg())
1998bool SPIRVInstructionSelector::selectSign(
Register ResVReg,
2001 assert(
I.getNumOperands() == 3);
2002 assert(
I.getOperand(2).isReg());
2004 Register InputRegister =
I.getOperand(2).getReg();
2005 SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
2006 auto &
DL =
I.getDebugLoc();
2011 bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
2013 unsigned SignBitWidth = GR.getScalarOrVectorBitWidth(InputType);
2014 unsigned ResBitWidth = GR.getScalarOrVectorBitWidth(ResType);
2016 bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;
2018 auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
2020 ?
MRI->createVirtualRegister(&SPIRV::IDRegClass)
2026 .
addUse(GR.getSPIRVTypeID(InputType))
2027 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
2032 if (NeedsConversion) {
2033 auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
2036 .
addUse(GR.getSPIRVTypeID(ResType))
2044bool SPIRVInstructionSelector::selectWaveOpInst(
Register ResVReg,
2047 unsigned Opcode)
const {
2049 SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32,
I,
TII);
2051 auto BMI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2053 .
addUse(GR.getSPIRVTypeID(ResType))
2054 .
addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup,
I,
2057 for (
unsigned J = 2; J <
I.getNumOperands(); J++) {
2058 BMI.
addUse(
I.getOperand(J).getReg());
2064bool SPIRVInstructionSelector::selectWaveActiveCountBits(
2067 SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32,
I,
TII);
2068 SPIRVType *BallotType = GR.getOrCreateSPIRVVectorType(IntTy, 4,
I,
TII);
2069 Register BallotReg =
MRI->createVirtualRegister(GR.getRegClass(BallotType));
2070 bool Result = selectWaveOpInst(BallotReg, BallotType,
I,
2071 SPIRV::OpGroupNonUniformBallot);
2076 TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
2078 .
addUse(GR.getSPIRVTypeID(ResType))
2079 .
addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup,
I, IntTy,
TII))
2080 .
addImm(SPIRV::GroupOperation::Reduce)
2087bool SPIRVInstructionSelector::selectWaveReduceSum(
Register ResVReg,
2090 assert(
I.getNumOperands() == 3);
2091 assert(
I.getOperand(2).isReg());
2093 Register InputRegister =
I.getOperand(2).getReg();
2094 SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
2099 SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32,
I,
TII);
2101 bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
2103 IsFloatTy ? SPIRV::OpGroupNonUniformFAdd : SPIRV::OpGroupNonUniformIAdd;
2104 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2106 .
addUse(GR.getSPIRVTypeID(ResType))
2107 .
addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup,
I, IntTy,
TII))
2108 .
addImm(SPIRV::GroupOperation::Reduce)
2109 .
addUse(
I.getOperand(2).getReg());
2112bool SPIRVInstructionSelector::selectBitreverse(
Register ResVReg,
2116 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitReverse))
2118 .
addUse(GR.getSPIRVTypeID(ResType))
2119 .
addUse(
I.getOperand(1).getReg())
2123bool SPIRVInstructionSelector::selectFreeze(
Register ResVReg,
2131 if (!
I.getOperand(0).isReg() || !
I.getOperand(1).isReg())
2133 Register OpReg =
I.getOperand(1).getReg();
2136 switch (
Def->getOpcode()) {
2137 case SPIRV::ASSIGN_TYPE:
2139 MRI->getVRegDef(
Def->getOperand(1).getReg())) {
2140 if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
2141 Reg =
Def->getOperand(2).getReg();
2144 case SPIRV::OpUndef:
2145 Reg =
Def->getOperand(1).getReg();
2148 unsigned DestOpCode;
2149 if (
Reg.isValid()) {
2150 DestOpCode = SPIRV::OpConstantNull;
2152 DestOpCode = TargetOpcode::COPY;
2155 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DestOpCode))
2156 .
addDef(
I.getOperand(0).getReg())
2169 if (OpDef->
getOpcode() == SPIRV::ASSIGN_TYPE &&
2174 unsigned N = OpDef->
getOpcode() == TargetOpcode::G_CONSTANT
2183 if (OpDef->
getOpcode() == SPIRV::ASSIGN_TYPE &&
2195 case TargetOpcode::G_CONSTANT:
2196 case TargetOpcode::G_FCONSTANT:
2198 case TargetOpcode::G_INTRINSIC:
2199 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
2200 case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
2201 return cast<GIntrinsic>(*OpDef).getIntrinsicID() ==
2202 Intrinsic::spv_const_composite;
2203 case TargetOpcode::G_BUILD_VECTOR:
2204 case TargetOpcode::G_SPLAT_VECTOR: {
2228bool SPIRVInstructionSelector::selectBuildVector(
Register ResVReg,
2232 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2233 N = GR.getScalarOrVectorComponentCount(ResType);
2234 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2238 if (
I.getNumExplicitOperands() -
I.getNumExplicitDefs() !=
N)
2243 for (
unsigned i =
I.getNumExplicitDefs();
2244 i <
I.getNumExplicitOperands() && IsConst; ++i)
2248 if (!IsConst &&
N < 2)
2250 "There must be at least two constituent operands in a vector");
2252 MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
2253 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2254 TII.get(IsConst ? SPIRV::OpConstantComposite
2255 : SPIRV::OpCompositeConstruct))
2257 .
addUse(GR.getSPIRVTypeID(ResType));
2258 for (
unsigned i =
I.getNumExplicitDefs(); i <
I.getNumExplicitOperands(); ++i)
2259 MIB.
addUse(
I.getOperand(i).getReg());
2263bool SPIRVInstructionSelector::selectSplatVector(
Register ResVReg,
2267 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2268 N = GR.getScalarOrVectorComponentCount(ResType);
2269 else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
2274 unsigned OpIdx =
I.getNumExplicitDefs();
2275 if (!
I.getOperand(OpIdx).isReg())
2279 Register OpReg =
I.getOperand(OpIdx).getReg();
2282 if (!IsConst &&
N < 2)
2284 "There must be at least two constituent operands in a vector");
2286 MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
2287 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
2288 TII.get(IsConst ? SPIRV::OpConstantComposite
2289 : SPIRV::OpCompositeConstruct))
2291 .
addUse(GR.getSPIRVTypeID(ResType));
2292 for (
unsigned i = 0; i <
N; ++i)
2297bool SPIRVInstructionSelector::selectDiscard(
Register ResVReg,
2303 if (STI.canUseExtension(
2304 SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
2306 Opcode = SPIRV::OpDemoteToHelperInvocation;
2308 Opcode = SPIRV::OpKill;
2311 GR.invalidateMachineInstr(NextI);
2312 NextI->removeFromParent();
2317 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2321bool SPIRVInstructionSelector::selectCmp(
Register ResVReg,
2325 Register Cmp0 =
I.getOperand(2).getReg();
2326 Register Cmp1 =
I.getOperand(3).getReg();
2327 assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
2328 GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
2329 "CMP operands should have the same type");
2330 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(CmpOpc))
2332 .
addUse(GR.getSPIRVTypeID(ResType))
2338bool SPIRVInstructionSelector::selectICmp(
Register ResVReg,
2341 auto Pred =
I.getOperand(1).getPredicate();
2344 Register CmpOperand =
I.getOperand(2).getReg();
2345 if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
2347 else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
2351 return selectCmp(ResVReg, ResType, CmpOpc,
I);
2357 assert(
I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
2358 "Expected G_FCONSTANT");
2359 const ConstantFP *FPImm =
I.getOperand(1).getFPImm();
2366 assert(
I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
2367 "Expected G_CONSTANT");
2368 addNumImm(
I.getOperand(1).getCImm()->getValue(), MIB);
2371std::pair<Register, bool>
2376 ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32,
I,
TII);
2378 auto ConstInt = ConstantInt::get(LLVMTy, Val);
2379 Register NewReg = GR.find(ConstInt, GR.CurMF);
2383 GR.add(ConstInt, GR.CurMF, NewReg);
2387 MI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
2389 .
addUse(GR.getSPIRVTypeID(SpvI32Ty));
2391 MI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
2393 .
addUse(GR.getSPIRVTypeID(SpvI32Ty))
2401bool SPIRVInstructionSelector::selectFCmp(
Register ResVReg,
2405 return selectCmp(ResVReg, ResType, CmpOp,
I);
2411 bool ZeroAsNull = STI.isOpenCLEnv();
2412 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2413 return GR.getOrCreateConstVector(0UL,
I, ResType,
TII, ZeroAsNull);
2414 return GR.getOrCreateConstInt(0,
I, ResType,
TII, ZeroAsNull);
2420 bool ZeroAsNull = STI.isOpenCLEnv();
2422 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2423 return GR.getOrCreateConstVector(VZero,
I, ResType,
TII, ZeroAsNull);
2424 return GR.getOrCreateConstFP(VZero,
I, ResType,
TII, ZeroAsNull);
2430 bool ZeroAsNull = STI.isOpenCLEnv();
2432 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2433 return GR.getOrCreateConstVector(VOne,
I, ResType,
TII, ZeroAsNull);
2434 return GR.getOrCreateConstFP(VOne,
I, ResType,
TII, ZeroAsNull);
2440 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
2443 if (ResType->
getOpcode() == SPIRV::OpTypeVector)
2448bool SPIRVInstructionSelector::selectSelect(
Register ResVReg,
2451 bool IsSigned)
const {
2453 Register ZeroReg = buildZerosVal(ResType,
I);
2454 Register OneReg = buildOnesVal(IsSigned, ResType,
I);
2456 GR.isScalarOfType(
I.getOperand(1).getReg(), SPIRV::OpTypeBool);
2458 IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
2459 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2461 .
addUse(GR.getSPIRVTypeID(ResType))
2462 .
addUse(
I.getOperand(1).getReg())
2468bool SPIRVInstructionSelector::selectIToF(
Register ResVReg,
2471 unsigned Opcode)
const {
2472 Register SrcReg =
I.getOperand(1).getReg();
2475 if (GR.isScalarOrVectorOfType(
I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
2476 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
2478 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
2480 TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts,
I,
TII);
2482 SrcReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2483 selectSelect(SrcReg, TmpType,
I,
false);
2485 return selectOpWithSrcs(ResVReg, ResType,
I, {SrcReg}, Opcode);
2488bool SPIRVInstructionSelector::selectExt(
Register ResVReg,
2491 Register SrcReg =
I.getOperand(1).getReg();
2492 if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
2493 return selectSelect(ResVReg, ResType,
I, IsSigned);
2495 SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
2496 if (SrcType == ResType)
2497 return BuildCOPY(ResVReg, SrcReg,
I);
2499 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2500 return selectUnOp(ResVReg, ResType,
I, Opcode);
2503bool SPIRVInstructionSelector::selectSUCmp(
Register ResVReg,
2506 bool IsSigned)
const {
2512 unsigned N = GR.getScalarOrVectorComponentCount(ResType);
2514 BoolType = GR.getOrCreateSPIRVVectorType(BoolType,
N,
I,
TII);
2515 Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);
2519 Register IsLessEqReg =
MRI->createVirtualRegister(GR.getRegClass(ResType));
2521 GR.assignSPIRVTypeToVReg(ResType, IsLessEqReg, MIRBuilder.getMF());
2523 TII.get(IsSigned ? SPIRV::OpSLessThanEqual
2524 : SPIRV::OpULessThanEqual))
2527 .
addUse(
I.getOperand(1).getReg())
2528 .
addUse(
I.getOperand(2).getReg())
2530 Register IsLessReg =
MRI->createVirtualRegister(GR.getRegClass(ResType));
2532 GR.assignSPIRVTypeToVReg(ResType, IsLessReg, MIRBuilder.getMF());
2534 TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
2537 .
addUse(
I.getOperand(1).getReg())
2538 .
addUse(
I.getOperand(2).getReg())
2541 Register ResTypeReg = GR.getSPIRVTypeID(ResType);
2543 MRI->createVirtualRegister(GR.getRegClass(ResType));
2545 GR.assignSPIRVTypeToVReg(ResType, NegOneOrZeroReg, MIRBuilder.getMF());
2546 unsigned SelectOpcode =
2547 N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
2552 .
addUse(buildOnesVal(
true, ResType,
I))
2553 .
addUse(buildZerosVal(ResType,
I))
2560 .
addUse(buildOnesVal(
false, ResType,
I))
2564bool SPIRVInstructionSelector::selectIntToBool(
Register IntReg,
2570 Register BitIntReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
2571 bool IsVectorTy = IntTy->
getOpcode() == SPIRV::OpTypeVector;
2572 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
2574 Register One = buildOnesVal(
false, IntTy,
I);
2578 .
addUse(GR.getSPIRVTypeID(IntTy))
2584 .
addUse(GR.getSPIRVTypeID(BoolTy))
2590bool SPIRVInstructionSelector::selectTrunc(
Register ResVReg,
2593 Register IntReg =
I.getOperand(1).getReg();
2594 const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
2595 if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
2596 return selectIntToBool(IntReg, ResVReg,
I, ArgType, ResType);
2597 if (ArgType == ResType)
2598 return BuildCOPY(ResVReg, IntReg,
I);
2599 bool IsSigned = GR.isScalarOrVectorSigned(ResType);
2600 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
2601 return selectUnOp(ResVReg, ResType,
I, Opcode);
2604bool SPIRVInstructionSelector::selectConst(
Register ResVReg,
2608 unsigned TyOpcode = ResType->
getOpcode();
2609 assert(TyOpcode != SPIRV::OpTypePointer ||
Imm.isZero());
2611 if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
2613 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
2615 .
addUse(GR.getSPIRVTypeID(ResType))
2617 if (TyOpcode == SPIRV::OpTypeInt) {
2618 assert(
Imm.getBitWidth() <= 64 &&
"Unsupported integer width!");
2620 return Reg == ResVReg ?
true : BuildCOPY(ResVReg, Reg,
I);
2622 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
2624 .
addUse(GR.getSPIRVTypeID(ResType));
2631bool SPIRVInstructionSelector::selectOpUndef(
Register ResVReg,
2634 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
2636 .
addUse(GR.getSPIRVTypeID(ResType))
2643 if (TypeInst->
getOpcode() == SPIRV::ASSIGN_TYPE) {
2646 return ImmInst->
getOpcode() == TargetOpcode::G_CONSTANT;
2648 return TypeInst->
getOpcode() == SPIRV::OpConstantI;
2653 if (TypeInst->
getOpcode() == SPIRV::OpConstantI)
2660bool SPIRVInstructionSelector::selectInsertVal(
Register ResVReg,
2664 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeInsert))
2666 .
addUse(GR.getSPIRVTypeID(ResType))
2668 .
addUse(
I.getOperand(3).getReg())
2670 .
addUse(
I.getOperand(2).getReg());
2671 for (
unsigned i = 4; i <
I.getNumOperands(); i++)
2676bool SPIRVInstructionSelector::selectExtractVal(
Register ResVReg,
2680 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2682 .
addUse(GR.getSPIRVTypeID(ResType))
2683 .
addUse(
I.getOperand(2).getReg());
2684 for (
unsigned i = 3; i <
I.getNumOperands(); i++)
2689bool SPIRVInstructionSelector::selectInsertElt(
Register ResVReg,
2693 return selectInsertVal(ResVReg, ResType,
I);
2695 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorInsertDynamic))
2697 .
addUse(GR.getSPIRVTypeID(ResType))
2698 .
addUse(
I.getOperand(2).getReg())
2699 .
addUse(
I.getOperand(3).getReg())
2700 .
addUse(
I.getOperand(4).getReg())
2704bool SPIRVInstructionSelector::selectExtractElt(
Register ResVReg,
2708 return selectExtractVal(ResVReg, ResType,
I);
2710 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorExtractDynamic))
2712 .
addUse(GR.getSPIRVTypeID(ResType))
2713 .
addUse(
I.getOperand(2).getReg())
2714 .
addUse(
I.getOperand(3).getReg())
2718bool SPIRVInstructionSelector::selectGEP(
Register ResVReg,
2721 const bool IsGEPInBounds =
I.getOperand(2).getImm();
2726 const unsigned Opcode = STI.isVulkanEnv()
2727 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
2728 : SPIRV::OpAccessChain)
2729 : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
2730 : SPIRV::OpPtrAccessChain);
2732 auto Res =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
2734 .
addUse(GR.getSPIRVTypeID(ResType))
2736 .
addUse(
I.getOperand(3).getReg());
2738 const unsigned StartingIndex =
2739 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
2742 for (
unsigned i = StartingIndex; i <
I.getNumExplicitOperands(); ++i)
2743 Res.addUse(
I.getOperand(i).getReg());
2744 return Res.constrainAllUses(
TII,
TRI, RBI);
2748bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
2751 unsigned Lim =
I.getNumExplicitOperands();
2752 for (
unsigned i =
I.getNumExplicitDefs() + 1; i < Lim; ++i) {
2753 Register OpReg =
I.getOperand(i).getReg();
2755 SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
2757 if (!OpDefine || !OpType ||
isConstReg(
MRI, OpDefine, Visited) ||
2758 OpDefine->
getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
2759 GR.isAggregateType(OpType)) {
2766 Register WrapReg = GR.find(OpDefine, MF);
2772 WrapReg =
MRI->createVirtualRegister(GR.getRegClass(OpType));
2773 GR.add(OpDefine, MF, WrapReg);
2777 GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
2781 .
addUse(GR.getSPIRVTypeID(OpType))
2791bool SPIRVInstructionSelector::selectIntrinsic(
Register ResVReg,
2797 case Intrinsic::spv_load:
2798 return selectLoad(ResVReg, ResType,
I);
2799 case Intrinsic::spv_store:
2800 return selectStore(
I);
2801 case Intrinsic::spv_extractv:
2802 return selectExtractVal(ResVReg, ResType,
I);
2803 case Intrinsic::spv_insertv:
2804 return selectInsertVal(ResVReg, ResType,
I);
2805 case Intrinsic::spv_extractelt:
2806 return selectExtractElt(ResVReg, ResType,
I);
2807 case Intrinsic::spv_insertelt:
2808 return selectInsertElt(ResVReg, ResType,
I);
2809 case Intrinsic::spv_gep:
2810 return selectGEP(ResVReg, ResType,
I);
2811 case Intrinsic::spv_unref_global:
2812 case Intrinsic::spv_init_global: {
2815 ?
MRI->getVRegDef(
I.getOperand(2).getReg())
2818 return selectGlobalValue(
MI->getOperand(0).getReg(), *
MI,
Init);
2820 case Intrinsic::spv_undef: {
2821 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
2823 .
addUse(GR.getSPIRVTypeID(ResType));
2826 case Intrinsic::spv_const_composite: {
2828 bool IsNull =
I.getNumExplicitDefs() + 1 ==
I.getNumExplicitOperands();
2830 unsigned Opcode = SPIRV::OpConstantNull;
2833 Opcode = SPIRV::OpConstantComposite;
2834 if (!wrapIntoSpecConstantOp(
I, CompositeArgs))
2837 MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
2838 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2840 .
addUse(GR.getSPIRVTypeID(ResType));
2843 for (
Register OpReg : CompositeArgs)
2848 case Intrinsic::spv_assign_name: {
2849 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpName));
2850 MIB.
addUse(
I.getOperand(
I.getNumExplicitDefs() + 1).getReg());
2851 for (
unsigned i =
I.getNumExplicitDefs() + 2;
2852 i <
I.getNumExplicitOperands(); ++i) {
2853 MIB.
addImm(
I.getOperand(i).getImm());
2857 case Intrinsic::spv_switch: {
2858 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSwitch));
2859 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
2860 if (
I.getOperand(i).isReg())
2861 MIB.
addReg(
I.getOperand(i).getReg());
2862 else if (
I.getOperand(i).isCImm())
2863 addNumImm(
I.getOperand(i).getCImm()->getValue(), MIB);
2864 else if (
I.getOperand(i).isMBB())
2865 MIB.
addMBB(
I.getOperand(i).getMBB());
2871 case Intrinsic::spv_loop_merge: {
2872 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoopMerge));
2873 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
2874 assert(
I.getOperand(i).isMBB());
2875 MIB.
addMBB(
I.getOperand(i).getMBB());
2877 MIB.
addImm(SPIRV::SelectionControl::None);
2880 case Intrinsic::spv_selection_merge: {
2882 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSelectionMerge));
2883 assert(
I.getOperand(1).isMBB() &&
2884 "operand 1 to spv_selection_merge must be a basic block");
2885 MIB.
addMBB(
I.getOperand(1).getMBB());
2886 MIB.
addImm(getSelectionOperandForImm(
I.getOperand(2).getImm()));
2889 case Intrinsic::spv_cmpxchg:
2890 return selectAtomicCmpXchg(ResVReg, ResType,
I);
2891 case Intrinsic::spv_unreachable:
2892 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUnreachable))
2894 case Intrinsic::spv_alloca:
2895 return selectFrameIndex(ResVReg, ResType,
I);
2896 case Intrinsic::spv_alloca_array:
2897 return selectAllocaArray(ResVReg, ResType,
I);
2898 case Intrinsic::spv_assume:
2899 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
2900 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAssumeTrueKHR))
2901 .
addUse(
I.getOperand(1).getReg())
2904 case Intrinsic::spv_expect:
2905 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
2906 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExpectKHR))
2908 .
addUse(GR.getSPIRVTypeID(ResType))
2909 .
addUse(
I.getOperand(2).getReg())
2910 .
addUse(
I.getOperand(3).getReg())
2913 case Intrinsic::arithmetic_fence:
2914 if (STI.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
2916 TII.get(SPIRV::OpArithmeticFenceEXT))
2918 .
addUse(GR.getSPIRVTypeID(ResType))
2919 .
addUse(
I.getOperand(2).getReg())
2922 return BuildCOPY(ResVReg,
I.getOperand(2).getReg(),
I);
2924 case Intrinsic::spv_thread_id:
2930 return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
2932 case Intrinsic::spv_thread_id_in_group:
2938 return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
2940 case Intrinsic::spv_group_id:
2946 return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg, ResType,
2948 case Intrinsic::spv_fdot:
2949 return selectFloatDot(ResVReg, ResType,
I);
2950 case Intrinsic::spv_udot:
2951 case Intrinsic::spv_sdot:
2952 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
2954 return selectIntegerDot(ResVReg, ResType,
I,
2955 IID == Intrinsic::spv_sdot);
2956 return selectIntegerDotExpansion(ResVReg, ResType,
I);
2957 case Intrinsic::spv_dot4add_i8packed:
2958 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
2960 return selectDot4AddPacked<true>(ResVReg, ResType,
I);
2961 return selectDot4AddPackedExpansion<true>(ResVReg, ResType,
I);
2962 case Intrinsic::spv_dot4add_u8packed:
2963 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
2965 return selectDot4AddPacked<false>(ResVReg, ResType,
I);
2966 return selectDot4AddPackedExpansion<false>(ResVReg, ResType,
I);
2967 case Intrinsic::spv_all:
2968 return selectAll(ResVReg, ResType,
I);
2969 case Intrinsic::spv_any:
2970 return selectAny(ResVReg, ResType,
I);
2971 case Intrinsic::spv_cross:
2972 return selectExtInst(ResVReg, ResType,
I, CL::cross, GL::Cross);
2973 case Intrinsic::spv_distance:
2974 return selectExtInst(ResVReg, ResType,
I, CL::distance, GL::Distance);
2975 case Intrinsic::spv_lerp:
2976 return selectExtInst(ResVReg, ResType,
I, CL::mix, GL::FMix);
2977 case Intrinsic::spv_length:
2978 return selectExtInst(ResVReg, ResType,
I, CL::length, GL::Length);
2979 case Intrinsic::spv_degrees:
2980 return selectExtInst(ResVReg, ResType,
I, CL::degrees, GL::Degrees);
2981 case Intrinsic::spv_frac:
2982 return selectExtInst(ResVReg, ResType,
I, CL::fract, GL::Fract);
2983 case Intrinsic::spv_normalize:
2984 return selectExtInst(ResVReg, ResType,
I, CL::normalize, GL::Normalize);
2985 case Intrinsic::spv_rsqrt:
2986 return selectExtInst(ResVReg, ResType,
I, CL::rsqrt, GL::InverseSqrt);
2987 case Intrinsic::spv_sign:
2988 return selectSign(ResVReg, ResType,
I);
2989 case Intrinsic::spv_firstbituhigh:
2990 return selectFirstBitHigh(ResVReg, ResType,
I,
false);
2991 case Intrinsic::spv_firstbitshigh:
2992 return selectFirstBitHigh(ResVReg, ResType,
I,
true);
2993 case Intrinsic::spv_firstbitlow:
2994 return selectFirstBitLow(ResVReg, ResType,
I);
2995 case Intrinsic::spv_group_memory_barrier_with_group_sync: {
2997 auto MemSemConstant =
2998 buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent,
I);
2999 Register MemSemReg = MemSemConstant.first;
3000 Result &= MemSemConstant.second;
3001 auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup,
I);
3002 Register ScopeReg = ScopeConstant.first;
3003 Result &= ScopeConstant.second;
3006 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpControlBarrier))
3012 case Intrinsic::spv_lifetime_start:
3013 case Intrinsic::spv_lifetime_end: {
3014 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
3015 : SPIRV::OpLifetimeStop;
3016 int64_t
Size =
I.getOperand(
I.getNumExplicitDefs() + 1).getImm();
3017 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 2).getReg();
3025 case Intrinsic::spv_saturate:
3026 return selectSaturate(ResVReg, ResType,
I);
3027 case Intrinsic::spv_nclamp:
3028 return selectExtInst(ResVReg, ResType,
I, CL::fclamp, GL::NClamp);
3029 case Intrinsic::spv_uclamp:
3030 return selectExtInst(ResVReg, ResType,
I, CL::u_clamp, GL::UClamp);
3031 case Intrinsic::spv_sclamp:
3032 return selectExtInst(ResVReg, ResType,
I, CL::s_clamp, GL::SClamp);
3033 case Intrinsic::spv_wave_active_countbits:
3034 return selectWaveActiveCountBits(ResVReg, ResType,
I);
3035 case Intrinsic::spv_wave_all:
3036 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAll);
3037 case Intrinsic::spv_wave_any:
3038 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformAny);
3039 case Intrinsic::spv_wave_is_first_lane:
3040 return selectWaveOpInst(ResVReg, ResType,
I, SPIRV::OpGroupNonUniformElect);
3041 case Intrinsic::spv_wave_reduce_sum:
3042 return selectWaveReduceSum(ResVReg, ResType,
I);
3043 case Intrinsic::spv_wave_readlane:
3044 return selectWaveOpInst(ResVReg, ResType,
I,
3045 SPIRV::OpGroupNonUniformShuffle);
3046 case Intrinsic::spv_step:
3047 return selectExtInst(ResVReg, ResType,
I, CL::step, GL::Step);
3048 case Intrinsic::spv_radians:
3049 return selectExtInst(ResVReg, ResType,
I, CL::radians, GL::Radians);
3053 case Intrinsic::instrprof_increment:
3054 case Intrinsic::instrprof_increment_step:
3055 case Intrinsic::instrprof_value_profile:
3058 case Intrinsic::spv_value_md:
3060 case Intrinsic::spv_resource_handlefrombinding: {
3061 return selectHandleFromBinding(ResVReg, ResType,
I);
3063 case Intrinsic::spv_resource_store_typedbuffer: {
3064 return selectImageWriteIntrinsic(
I);
3066 case Intrinsic::spv_resource_load_typedbuffer: {
3067 return selectReadImageIntrinsic(ResVReg, ResType,
I);
3069 case Intrinsic::spv_discard: {
3070 return selectDiscard(ResVReg, ResType,
I);
3073 std::string DiagMsg;
3076 DiagMsg =
"Intrinsic selection not implemented: " + DiagMsg;
3083bool SPIRVInstructionSelector::selectHandleFromBinding(
Register &ResVReg,
3090 Register IndexReg =
I.getOperand(5).getReg();
3091 bool IsNonUniform = ArraySize > 1 &&
foldImm(
I.getOperand(6),
MRI);
3094 Register VarReg = buildPointerToResource(ResType, Set, Binding, ArraySize,
3095 IndexReg, IsNonUniform, MIRBuilder);
3102 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
3104 .
addUse(GR.getSPIRVTypeID(ResType))
3109bool SPIRVInstructionSelector::selectReadImageIntrinsic(
3118 Register ImageReg =
I.getOperand(2).getReg();
3119 assert(
MRI->getVRegDef(ImageReg)->getParent() ==
I.getParent() &&
3120 "The image must be loaded in the same basic block as its use.");
3122 uint64_t ResultSize = GR.getScalarOrVectorComponentCount(ResType);
3123 if (ResultSize == 4) {
3124 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3125 TII.get(SPIRV::OpImageRead))
3127 .
addUse(GR.getSPIRVTypeID(ResType))
3129 .
addUse(
I.getOperand(3).getReg())
3133 SPIRVType *ReadType = widenTypeToVec4(ResType,
I);
3134 Register ReadReg =
MRI->createVirtualRegister(GR.getRegClass(ReadType));
3136 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpImageRead))
3138 .
addUse(GR.getSPIRVTypeID(ReadType))
3140 .
addUse(
I.getOperand(3).getReg())
3145 if (ResultSize == 1) {
3146 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3147 TII.get(SPIRV::OpCompositeExtract))
3149 .
addUse(GR.getSPIRVTypeID(ResType))
3154 return extractSubvector(ResVReg, ResType, ReadReg,
I);
3157bool SPIRVInstructionSelector::extractSubvector(
3160 SPIRVType *InputType = GR.getResultType(ReadReg);
3161 [[maybe_unused]]
uint64_t InputSize =
3162 GR.getScalarOrVectorComponentCount(InputType);
3163 uint64_t ResultSize = GR.getScalarOrVectorComponentCount(ResType);
3164 assert(InputSize > 1 &&
"The input must be a vector.");
3165 assert(ResultSize > 1 &&
"The result must be a vector.");
3166 assert(ResultSize < InputSize &&
3167 "Cannot extract more element than there are in the input.");
3169 SPIRVType *ScalarType = GR.getScalarOrVectorComponentType(ResType);
3172 Register ComponentReg =
MRI->createVirtualRegister(ScalarRegClass);
3175 TII.get(SPIRV::OpCompositeExtract))
3188 TII.get(SPIRV::OpCompositeConstruct))
3190 .
addUse(GR.getSPIRVTypeID(ResType));
3192 for (
Register ComponentReg : ComponentRegisters)
3193 MIB.
addUse(ComponentReg);
3197bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
3205 Register ImageReg =
I.getOperand(1).getReg();
3206 assert(
MRI->getVRegDef(ImageReg)->getParent() ==
I.getParent() &&
3207 "The image must be loaded in the same basic block as its use.");
3208 Register CoordinateReg =
I.getOperand(2).getReg();
3209 Register DataReg =
I.getOperand(3).getReg();
3210 assert(GR.getResultType(DataReg)->getOpcode() == SPIRV::OpTypeVector);
3211 assert(GR.getScalarOrVectorComponentCount(GR.getResultType(DataReg)) == 4);
3212 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3213 TII.get(SPIRV::OpImageWrite))
3220Register SPIRVInstructionSelector::buildPointerToResource(
3225 return GR.getOrCreateGlobalVariableWithBinding(ResType, Set, Binding,
3228 const SPIRVType *VarType = GR.getOrCreateSPIRVArrayType(
3230 Register VarReg = GR.getOrCreateGlobalVariableWithBinding(
3231 VarType, Set, Binding, MIRBuilder);
3233 SPIRVType *ResPointerType = GR.getOrCreateSPIRVPointerType(
3234 ResType, MIRBuilder, SPIRV::StorageClass::UniformConstant);
3236 Register AcReg =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
3240 buildOpDecorate(IndexReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
3241 buildOpDecorate(AcReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
3246 .
addUse(GR.getSPIRVTypeID(ResPointerType))
3253bool SPIRVInstructionSelector::selectFirstBitSet16(
3255 unsigned ExtendOpcode,
unsigned BitSetOpcode)
const {
3256 Register ExtReg =
MRI->createVirtualRegister(GR.getRegClass(ResType));
3257 bool Result = selectOpWithSrcs(ExtReg, ResType,
I, {
I.getOperand(2).
getReg()},
3261 selectFirstBitSet32(ResVReg, ResType,
I, ExtReg, BitSetOpcode);
3264bool SPIRVInstructionSelector::selectFirstBitSet32(
3266 Register SrcReg,
unsigned BitSetOpcode)
const {
3267 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
3269 .
addUse(GR.getSPIRVTypeID(ResType))
3270 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
3276bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
3278 Register SrcReg,
unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
3284 unsigned ComponentCount = GR.getScalarOrVectorComponentCount(ResType);
3285 assert(ComponentCount < 5 &&
"Vec 5+ will generate invalid SPIR-V ops");
3289 SPIRVType *I64Type = GR.getOrCreateSPIRVIntegerType(64, MIRBuilder);
3290 SPIRVType *I64x2Type = GR.getOrCreateSPIRVVectorType(I64Type, 2, MIRBuilder);
3292 GR.getOrCreateSPIRVVectorType(
BaseType, 2, MIRBuilder);
3294 std::vector<Register> PartialRegs;
3297 unsigned CurrentComponent = 0;
3298 for (; CurrentComponent + 1 < ComponentCount; CurrentComponent += 2) {
3302 MRI->createVirtualRegister(GR.getRegClass(I64x2Type));
3304 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3305 TII.get(SPIRV::OpVectorShuffle))
3307 .
addUse(GR.getSPIRVTypeID(I64x2Type))
3310 .
addImm(CurrentComponent)
3311 .
addImm(CurrentComponent + 1);
3317 MRI->createVirtualRegister(GR.getRegClass(Vec2ResType));
3319 if (!selectFirstBitSet64(SubVecBitSetReg, Vec2ResType,
I, BitSetResult,
3320 BitSetOpcode, SwapPrimarySide))
3323 PartialRegs.push_back(SubVecBitSetReg);
3327 if (CurrentComponent != ComponentCount) {
3328 bool ZeroAsNull = STI.isOpenCLEnv();
3329 Register FinalElemReg =
MRI->createVirtualRegister(GR.getRegClass(I64Type));
3330 Register ConstIntLastIdx = GR.getOrCreateConstInt(
3333 if (!selectOpWithSrcs(FinalElemReg, I64Type,
I, {SrcReg, ConstIntLastIdx},
3334 SPIRV::OpVectorExtractDynamic))
3338 MRI->createVirtualRegister(GR.getRegClass(
BaseType));
3340 if (!selectFirstBitSet64(FinalElemBitSetReg,
BaseType,
I, FinalElemReg,
3341 BitSetOpcode, SwapPrimarySide))
3344 PartialRegs.push_back(FinalElemBitSetReg);
3349 return selectOpWithSrcs(ResVReg, ResType,
I, PartialRegs,
3350 SPIRV::OpCompositeConstruct);
3353bool SPIRVInstructionSelector::selectFirstBitSet64(
3355 Register SrcReg,
unsigned BitSetOpcode,
bool SwapPrimarySide)
const {
3356 unsigned ComponentCount = GR.getScalarOrVectorComponentCount(ResType);
3358 bool ZeroAsNull = STI.isOpenCLEnv();
3360 GR.getOrCreateConstInt(0,
I,
BaseType,
TII, ZeroAsNull);
3362 GR.getOrCreateConstInt(1,
I,
BaseType,
TII, ZeroAsNull);
3368 if (ComponentCount > 2) {
3369 return selectFirstBitSet64Overflow(ResVReg, ResType,
I, SrcReg,
3370 BitSetOpcode, SwapPrimarySide);
3376 GR.getOrCreateSPIRVVectorType(
BaseType, 2 * ComponentCount, MIRBuilder);
3378 MRI->createVirtualRegister(GR.getRegClass(PostCastType));
3380 if (!selectOpWithSrcs(BitcastReg, PostCastType,
I, {SrcReg},
3385 Register FBSReg =
MRI->createVirtualRegister(GR.getRegClass(PostCastType));
3386 if (!selectFirstBitSet32(FBSReg, PostCastType,
I, BitcastReg, BitSetOpcode))
3390 Register HighReg =
MRI->createVirtualRegister(GR.getRegClass(ResType));
3391 Register LowReg =
MRI->createVirtualRegister(GR.getRegClass(ResType));
3393 bool IsScalarRes = ResType->
getOpcode() != SPIRV::OpTypeVector;
3396 if (!selectOpWithSrcs(HighReg, ResType,
I, {FBSReg, ConstIntZero},
3397 SPIRV::OpVectorExtractDynamic))
3399 if (!selectOpWithSrcs(LowReg, ResType,
I, {FBSReg, ConstIntOne},
3400 SPIRV::OpVectorExtractDynamic))
3404 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3405 TII.get(SPIRV::OpVectorShuffle))
3407 .
addUse(GR.getSPIRVTypeID(ResType))
3413 for (
unsigned J = 0; J < ComponentCount * 2; J += 2) {
3420 MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
3421 TII.get(SPIRV::OpVectorShuffle))
3423 .
addUse(GR.getSPIRVTypeID(ResType))
3429 for (
unsigned J = 1; J < ComponentCount * 2; J += 2) {
3447 GR.getOrCreateConstInt((
unsigned)-1,
I, ResType,
TII, ZeroAsNull);
3448 Reg0 = GR.getOrCreateConstInt(0,
I, ResType,
TII, ZeroAsNull);
3449 Reg32 = GR.getOrCreateConstInt(32,
I, ResType,
TII, ZeroAsNull);
3450 SelectOp = SPIRV::OpSelectSISCond;
3451 AddOp = SPIRV::OpIAddS;
3454 GR.getOrCreateSPIRVVectorType(BoolType, ComponentCount, MIRBuilder);
3456 GR.getOrCreateConstVector((
unsigned)-1,
I, ResType,
TII, ZeroAsNull);
3457 Reg0 = GR.getOrCreateConstVector(0,
I, ResType,
TII, ZeroAsNull);
3458 Reg32 = GR.getOrCreateConstVector(32,
I, ResType,
TII, ZeroAsNull);
3459 SelectOp = SPIRV::OpSelectVIVCond;
3460 AddOp = SPIRV::OpIAddV;
3470 if (SwapPrimarySide) {
3471 PrimaryReg = LowReg;
3472 SecondaryReg = HighReg;
3473 PrimaryShiftReg = Reg0;
3474 SecondaryShiftReg = Reg32;
3478 Register BReg =
MRI->createVirtualRegister(GR.getRegClass(BoolType));
3479 if (!selectOpWithSrcs(BReg, BoolType,
I, {PrimaryReg, NegOneReg},
3484 Register TmpReg =
MRI->createVirtualRegister(GR.getRegClass(ResType));
3485 if (!selectOpWithSrcs(TmpReg, ResType,
I, {BReg, SecondaryReg, PrimaryReg},
3490 Register ValReg =
MRI->createVirtualRegister(GR.getRegClass(ResType));
3491 if (!selectOpWithSrcs(ValReg, ResType,
I,
3492 {BReg, SecondaryShiftReg, PrimaryShiftReg}, SelectOp))
3495 return selectOpWithSrcs(ResVReg, ResType,
I, {ValReg, TmpReg}, AddOp);
3498bool SPIRVInstructionSelector::selectFirstBitHigh(
Register ResVReg,
3501 bool IsSigned)
const {
3503 Register OpReg =
I.getOperand(2).getReg();
3504 SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
3506 unsigned ExtendOpcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
3507 unsigned BitSetOpcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;
3509 switch (GR.getScalarOrVectorBitWidth(OpType)) {
3511 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
3513 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
3515 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
3519 "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
3523bool SPIRVInstructionSelector::selectFirstBitLow(
Register ResVReg,
3527 Register OpReg =
I.getOperand(2).getReg();
3528 SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
3532 unsigned ExtendOpcode = SPIRV::OpUConvert;
3533 unsigned BitSetOpcode = GL::FindILsb;
3535 switch (GR.getScalarOrVectorBitWidth(OpType)) {
3537 return selectFirstBitSet16(ResVReg, ResType,
I, ExtendOpcode, BitSetOpcode);
3539 return selectFirstBitSet32(ResVReg, ResType,
I, OpReg, BitSetOpcode);
3541 return selectFirstBitSet64(ResVReg, ResType,
I, OpReg, BitSetOpcode,
3548bool SPIRVInstructionSelector::selectAllocaArray(
Register ResVReg,
3554 bool Res =
BuildMI(BB,
I,
I.getDebugLoc(),
3555 TII.get(SPIRV::OpVariableLengthArrayINTEL))
3557 .
addUse(GR.getSPIRVTypeID(ResType))
3558 .
addUse(
I.getOperand(2).getReg())
3560 if (!STI.isVulkanEnv()) {
3561 unsigned Alignment =
I.getOperand(3).getImm();
3567bool SPIRVInstructionSelector::selectFrameIndex(
Register ResVReg,
3573 bool Res =
BuildMI(*It->getParent(), It, It->getDebugLoc(),
3574 TII.get(SPIRV::OpVariable))
3576 .
addUse(GR.getSPIRVTypeID(ResType))
3579 if (!STI.isVulkanEnv()) {
3580 unsigned Alignment =
I.getOperand(2).getImm();
3587bool SPIRVInstructionSelector::selectBranch(
MachineInstr &
I)
const {
3594 if (PrevI !=
nullptr && PrevI->
getOpcode() == TargetOpcode::G_BRCOND) {
3595 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
3598 .
addMBB(
I.getOperand(0).getMBB())
3602 .
addMBB(
I.getOperand(0).getMBB())
3606bool SPIRVInstructionSelector::selectBranchCond(
MachineInstr &
I)
const {
3619 if (NextI !=
nullptr && NextI->
getOpcode() == SPIRV::OpBranchConditional)
3626 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
3627 .
addUse(
I.getOperand(0).getReg())
3628 .
addMBB(
I.getOperand(1).getMBB())
3633bool SPIRVInstructionSelector::selectPhi(
Register ResVReg,
3636 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpPhi))
3638 .
addUse(GR.getSPIRVTypeID(ResType));
3639 const unsigned NumOps =
I.getNumOperands();
3640 for (
unsigned i = 1; i < NumOps; i += 2) {
3641 MIB.
addUse(
I.getOperand(i + 0).getReg());
3642 MIB.
addMBB(
I.getOperand(i + 1).getMBB());
3650bool SPIRVInstructionSelector::selectGlobalValue(
3660 SPIRV::AccessQualifier::ReadWrite,
false);
3661 PointerBaseType = GR.getOrCreateSPIRVArrayType(
3664 PointerBaseType = GR.getOrCreateSPIRVType(
3665 GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
3668 std::string GlobalIdent;
3670 unsigned &
ID = UnnamedGlobalIDs[GV];
3672 ID = UnnamedGlobalIDs.size();
3673 GlobalIdent =
"__unnamed_" +
Twine(
ID).
str();
3688 if (isa<Function>(GV)) {
3691 Register NewReg = GR.find(ConstVal, GR.CurMF);
3694 GR.add(ConstVal, GR.CurMF, NewReg);
3696 STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
3697 ? dyn_cast<Function>(GV)
3699 SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
3700 PointerBaseType,
I,
TII,
3701 GVFun ? SPIRV::StorageClass::CodeSectionINTEL
3707 Register ResTypeReg = GR.getSPIRVTypeID(ResType);
3710 MRI->createGenericVirtualRegister(GR.getRegType(ResType));
3711 MRI->setRegClass(FuncVReg, &SPIRV::pIDRegClass);
3713 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
3718 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
3727 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
3729 .
addUse(GR.getSPIRVTypeID(ResType))
3732 assert(NewReg != ResVReg);
3733 return BuildCOPY(ResVReg, NewReg,
I);
3735 auto GlobalVar = cast<GlobalVariable>(GV);
3744 SPIRV::LinkageType::LinkageType LnkType =
3746 ? SPIRV::LinkageType::Import
3748 STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
3749 ? SPIRV::LinkageType::LinkOnceODR
3750 : SPIRV::LinkageType::Export);
3759 GlobalVar->isConstant(), HasLnkTy, LnkType, MIRBuilder,
true);
3760 return Reg.isValid();
3763bool SPIRVInstructionSelector::selectLog10(
Register ResVReg,
3766 if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
3767 return selectExtInst(ResVReg, ResType,
I, CL::log10);
3779 Register VarReg =
MRI->createVirtualRegister(GR.getRegClass(ResType));
3781 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
3783 .
addUse(GR.getSPIRVTypeID(ResType))
3784 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
3786 .
add(
I.getOperand(1))
3791 ResType->
getOpcode() == SPIRV::OpTypeFloat);
3794 ResType->
getOpcode() == SPIRV::OpTypeVector
3798 GR.buildConstantFP(
APFloat(0.30103f), MIRBuilder, SpirvScalarType);
3801 auto Opcode = ResType->
getOpcode() == SPIRV::OpTypeVector
3802 ? SPIRV::OpVectorTimesScalar
3806 .
addUse(GR.getSPIRVTypeID(ResType))
3815bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
3816 SPIRV::BuiltIn::BuiltIn BuiltInValue,
Register ResVReg,
3819 const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
3821 GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
3822 const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
3823 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
3829 GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.
getMF());
3833 Register Variable = GR.buildGlobalVariable(
3835 SPIRV::StorageClass::Input,
nullptr,
true,
true,
3836 SPIRV::LinkageType::Import, MIRBuilder,
false);
3840 Register LoadedRegister =
MRI->createVirtualRegister(&SPIRV::iIDRegClass);
3842 GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.
getMF());
3846 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
3848 .
addUse(GR.getSPIRVTypeID(Vec3Ty))
3853 assert(
I.getOperand(2).isReg());
3858 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
3860 .
addUse(GR.getSPIRVTypeID(ResType))
3869 if (
Type->getOpcode() != SPIRV::OpTypeVector)
3870 return GR.getOrCreateSPIRVVectorType(
Type, 4, MIRBuilder);
3873 if (VectorSize == 4)
3877 const SPIRVType *ScalarType = GR.getSPIRVTypeForVReg(ScalarTypeReg);
3878 return GR.getOrCreateSPIRVVectorType(ScalarType, 4, MIRBuilder);
3886 return new SPIRVInstructionSelector(TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
uint64_t IntrinsicInst * II
static StringRef getName(Value *V)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
std::vector< std::pair< SPIRV::InstructionSet::InstructionSet, uint32_t > > ExtInstList
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static APFloat getOneFP(const Type *LLVMFloatTy)
static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC)
static bool isASCastInGVar(MachineRegisterInfo *MRI, Register ResVReg)
static bool mayApplyGenericSelection(unsigned Opcode)
static APFloat getZeroFP(const Type *LLVMFloatTy)
static void addMemoryOperands(MachineMemOperand *MemOp, MachineInstrBuilder &MIB)
static unsigned getFCmpOpcode(unsigned PredNum)
bool isTypeFoldingSupported(unsigned Opcode)
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
static unsigned getBoolCmpOpcode(unsigned PredNum)
static unsigned getICmpOpcode(unsigned PredNum)
static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef, SmallPtrSet< SPIRVType *, 4 > &Visited)
static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
static unsigned getPtrCmpOpcode(unsigned Pred)
bool isDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
static unsigned getArrayComponentCount(MachineRegisterInfo *MRI, const SPIRVType *ResType)
static APFloat getOne(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative One.
APInt bitcastToAPInt() const
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
This is an important base class in LLVM.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This class represents an Operation in the Expression.
const Function & getFunction() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool hasPrivateLinkage() const
bool isDeclarationForLinker() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
bool hasInternalLinkage() const
bool hasLinkOnceODRLinkage() const
@ InternalLinkage
Rename collisions when linking (static functions).
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
constexpr bool isScalar() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isVector() const
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
constexpr bool isPointer() const
static constexpr LLT fixed_vector(unsigned NumElements, unsigned ScalarSizeInBits)
Get a low-level fixed-width vector of some number of elements and element width.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Helper class to build MachineInstr.
MachineBasicBlock::iterator getInsertPt()
Current insertion point for new instructions.
MachineInstrBuilder buildInstr(unsigned Opcode)
Build and insert <empty> = Opcode <empty>.
MachineFunction & getMF()
Getter for the function we currently build.
MachineRegisterInfo * getMRI()
Getter for MRI.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
void removeOperand(unsigned OpNo)
Erase an operand from an instruction, leaving it with one fewer operand than it started with.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
Register getReg() const
getReg - Returns the register number.
defusechain_iterator - This class provides iterator support for machine operands in the function that...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
Analysis providing profile information.
Holds all the information related to register banks.
Wrapper class representing virtual and physical registers.
static Register index2VirtReg(unsigned Index)
Convert a 0-based index to a virtual register number.
constexpr bool isValid() const
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
reference emplace_back(ArgTypes &&... Args)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
constexpr size_t size() const
size - Get the string size.
Class to represent struct types.
static StructType * get(LLVMContext &Context, ArrayRef< Type * > Elements, bool isPacked=false)
This static method is the primary way to create a literal StructType.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
std::string str() const
Return the twine contents as a std::string.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isArrayTy() const
True if this is an instance of ArrayType.
Type * getArrayElementType() const
uint64_t getArrayNumElements() const
@ HalfTyID
16-bit floating point type
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
bool isStructTy() const
True if this is an instance of StructType.
TypeID getTypeID() const
Return the type id for the type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
StringRef getName() const
Return a constant reference to the value's name.
Represents a version number in the form major[.minor[.subminor[.build]]].
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
Reg
All possible values of the reg field in the ModR/M byte.
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface o...
NodeAddr< DefNode * > Def
This is an optimization pass for GlobalISel generic memory operations.
void buildOpName(Register Target, const StringRef &Name, MachineIRBuilder &MIRBuilder)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
void salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI)
Assuming the instruction MI is going to be deleted, attempt to salvage debug users of MI by writing t...
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
constexpr unsigned storageClassToAddressSpace(SPIRV::StorageClass::StorageClass SC)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
MachineBasicBlock::iterator getOpVariableMBBIt(MachineInstr &I)
Type * toTypedPointer(Type *Ty)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
SPIRV::StorageClass::StorageClass addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI)
AtomicOrdering
Atomic ordering for LLVM's memory model.
SPIRV::Scope::Scope getMemScope(LLVMContext &Ctx, SyncScope::ID Id)
InstructionSelector * createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget, const RegisterBankInfo &RBI)
constexpr unsigned BitWidth
bool hasInitializer(const GlobalVariable *GV)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)
bool isTriviallyDead(const MachineInstr &MI, const MachineRegisterInfo &MRI)
Check whether an instruction MI is dead: it only defines dead virtual registers, and doesn't have oth...