#include "llvm/IR/IntrinsicsSPIRV.h"

#define DEBUG_TYPE "spirv-isel"

namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
llvm::SPIRV::SelectionControl::SelectionControl
getSelectionOperandForImm(int Imm) {
  if (Imm == 2)
    return SPIRV::SelectionControl::Flatten;
  if (Imm == 1)
    return SPIRV::SelectionControl::DontFlatten;
  if (Imm == 0)
    return SPIRV::SelectionControl::None;
  llvm_unreachable("Invalid immediate");
}
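
// TableGen'erated GlobalISel matcher boilerplate: the predicate bitset,
// predicate declarations, and matcher temporaries below are all pulled in
// from SPIRVGenGlobalISel.inc.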
#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
                      unsigned BitSetOpcode) const;
                      unsigned BitSetOpcode) const;
                      unsigned BitSetOpcode, bool SwapPrimarySide) const;
                      unsigned BitSetOpcode, bool SwapPrimarySide) const;
                      unsigned Opcode) const;
                      unsigned Opcode) const;
                      unsigned NegateOpcode = 0) const;

  template <bool Signed>
  template <bool Signed>

                      bool IsSigned) const;
                      bool IsSigned, unsigned Opcode) const;
                      bool IsSigned) const;
                      bool IsSigned) const;

  [[maybe_unused]] bool selectExtInst(Register ResVReg,
                                      GL::GLSLExtInst GLInst) const;
                                      GL::GLSLExtInst GLInst) const;

  std::pair<Register, bool>
                      const SPIRVType *ResType = nullptr) const;

                      SPIRV::StorageClass::StorageClass SC) const;

                      Register IndexReg, bool IsNonUniform,
  bool loadVec3BuiltinInputID(SPIRV::BuiltIn::BuiltIn BuiltInValue,
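
// Pull in the tablegen-generated implementation of selectImpl() and wire the
// generated predicate/temporary initializers into the selector's constructor.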
#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
  if (HasVRegsReset == &MF)
    return;

  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    LLT RegType = MRI.getType(Reg);
  }

  for (const auto &MBB : MF) {
    for (const auto &MI : MBB) {
      if (MI.getOpcode() != SPIRV::ASSIGN_TYPE)
        continue;
      Register DstReg = MI.getOperand(0).getReg();
      LLT DstType = MRI.getType(DstReg);
      Register SrcReg = MI.getOperand(1).getReg();
      LLT SrcType = MRI.getType(SrcReg);
      if (DstType != SrcType)
        MRI.setType(DstReg, MRI.getType(SrcReg));

      const TargetRegisterClass *DstRC = MRI.getRegClassOrNull(DstReg);
      const TargetRegisterClass *SrcRC = MRI.getRegClassOrNull(SrcReg);
      if (DstRC != SrcRC && SrcRC)
        MRI.setRegClass(DstReg, SrcRC);
    }
  }
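
// Conservative deadness check used below: an instruction is removable only if
// every def is an unused virtual register and the instruction has no side
// effects (stores, calls, ordered loads, positions, terminators, and so on).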
  for (const auto &MO : MI.all_defs()) {
    Register Reg = MO.getReg();
    if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE || MI.isFakeUse() ||
      MI.isLifetimeMarker())
    return false;
  if (MI.mayStore() || MI.isCall() ||
      (MI.mayLoad() && MI.hasOrderedMemoryRef()) || MI.isPosition() ||
      MI.isDebugInstr() || MI.isTerminator() || MI.isJumpTableDebugInfo())
    return false;
  return true;
  resetVRegsType(*I.getParent()->getParent());

  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  if (Opcode == SPIRV::ASSIGN_TYPE) {
    Register DstReg = I.getOperand(0).getReg();
    Register SrcReg = I.getOperand(1).getReg();
    auto *Def = MRI->getVRegDef(SrcReg);
    bool Res = selectImpl(I, *CoverageInfo);
    if (!Res && Def->getOpcode() != TargetOpcode::G_CONSTANT) {
      dbgs() << "Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
    }
    assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
    MRI->setRegClass(SrcReg, MRI->getRegClass(DstReg));
    MRI->replaceRegWith(SrcReg, DstReg);
    GR.invalidateMachineInstr(&I);
    I.removeFromParent();
  } else if (I.getNumDefs() == 1) {
  }

  if (DeadMIs.contains(&I)) {
    GR.invalidateMachineInstr(&I);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
  }

  bool HasDefs = I.getNumDefs() > 0;
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    for (unsigned i = 0; i < I.getNumDefs(); ++i)
    GR.invalidateMachineInstr(&I);
    I.removeFromParent();
  }
static bool mayApplyGenericSelection(unsigned Opcode) {
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return false;
  case TargetOpcode::G_SADDO:
  case TargetOpcode::G_SSUBO:
    return true;
  }

bool SPIRVInstructionSelector::BuildCOPY(Register DestReg, Register SrcReg,
                                         MachineInstr &I) const {
  if (DstRC != SrcRC && SrcRC)
    MRI->setRegClass(DestReg, SrcRC);
  return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(TargetOpcode::COPY))
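
// spvSelect() is the main dispatch: it maps each generic opcode either to a
// dedicated select* helper or directly to a SPIR-V instruction / extended
// instruction (OpenCL and/or GLSL instruction sets).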
bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (mayApplyGenericSelection(Opcode))
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectBuildVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_SCMP:
    return selectSUCmp(ResVReg, ResType, I, true);
  case TargetOpcode::G_UCMP:
    return selectSUCmp(ResVReg, ResType, I, false);

  case TargetOpcode::G_STRICT_FMA:
  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_STRICT_FLDEXP:
    return selectExtInst(ResVReg, ResType, I, CL::ldexp);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);
  case TargetOpcode::G_FTAN:
    return selectExtInst(ResVReg, ResType, I, CL::tan, GL::Tan);
  case TargetOpcode::G_FACOS:
    return selectExtInst(ResVReg, ResType, I, CL::acos, GL::Acos);
  case TargetOpcode::G_FASIN:
    return selectExtInst(ResVReg, ResType, I, CL::asin, GL::Asin);
  case TargetOpcode::G_FATAN:
    return selectExtInst(ResVReg, ResType, I, CL::atan, GL::Atan);
  case TargetOpcode::G_FATAN2:
    return selectExtInst(ResVReg, ResType, I, CL::atan2, GL::Atan2);
  case TargetOpcode::G_FCOSH:
    return selectExtInst(ResVReg, ResType, I, CL::cosh, GL::Cosh);
  case TargetOpcode::G_FSINH:
    return selectExtInst(ResVReg, ResType, I, CL::sinh, GL::Sinh);
  case TargetOpcode::G_FTANH:
    return selectExtInst(ResVReg, ResType, I, CL::tanh, GL::Tanh);

  case TargetOpcode::G_STRICT_FSQRT:
  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_add_sat);
  case TargetOpcode::G_UADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_add_sat);
  case TargetOpcode::G_SSUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_sub_sat);
  case TargetOpcode::G_USUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);

  case TargetOpcode::G_UADDO:
    return selectOverflowArith(ResVReg, ResType, I,
                               ResType->getOpcode() == SPIRV::OpTypeVector
                                   ? SPIRV::OpIAddCarryV
                                   : SPIRV::OpIAddCarryS);
  case TargetOpcode::G_USUBO:
    return selectOverflowArith(ResVReg, ResType, I,
                               ResType->getOpcode() == SPIRV::OpTypeVector
                                   ? SPIRV::OpISubBorrowV
                                   : SPIRV::OpISubBorrowS);
  case TargetOpcode::G_UMULO:
    return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpUMulExtended);
  case TargetOpcode::G_SMULO:
    return selectOverflowArith(ResVReg, ResType, I, SPIRV::OpSMulExtended);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&

    bool IsGVInit = false;
    for (MachineRegisterInfo::use_instr_iterator
             UseIt = MRI->use_instr_begin(I.getOperand(0).getReg()),
             UseEnd = MRI->use_instr_end();
         UseIt != UseEnd; UseIt = std::next(UseIt)) {
      if ((*UseIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          (*UseIt).getOpcode() == SPIRV::OpVariable) {

    SPIRVType *GVType = GR.getSPIRVTypeForVReg(GV);
    SPIRVType *GVPointeeType = GR.getPointeeType(GVType);
    SPIRVType *ResPointeeType = GR.getPointeeType(ResType);
    if (GVPointeeType && ResPointeeType && GVPointeeType != ResPointeeType) {
      Register NewVReg = MRI->createGenericVirtualRegister(MRI->getType(GV));
      MRI->setRegClass(NewVReg, MRI->getRegClass(GV));

      if (!GR.isBitcastCompatible(ResType, GVType))
        report_fatal_error(
            "incompatible result and operand types in a bitcast");
      Register ResTypeReg = GR.getSPIRVTypeID(ResType);
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitcast))

                  TII.get(STI.isVulkanEnv()
                              ? SPIRV::OpInBoundsAccessChain
                              : SPIRV::OpInBoundsPtrAccessChain))
              .addUse(I.getOperand(2).getReg())

      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
          .addUse(GR.getSPIRVTypeID(ResType))
          .addImm(
              static_cast<uint32_t>(SPIRV::Opcode::InBoundsPtrAccessChain))
          .addUse(I.getOperand(2).getReg())

    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(I.getOperand(2).getReg());
  }
  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // FSub is lowered as an FAdd of the negated value operand.
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:

  case TargetOpcode::G_TRAP:
  case TargetOpcode::G_DEBUGTRAP:
  case TargetOpcode::G_UBSANTRAP:
  case TargetOpcode::DBG_LABEL:
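
// The selectExtInst overloads below wrap a single OpenCL or GLSL extended
// instruction into an ExtInstList and defer to the list-based form, which
// emits OpExtInst for the first instruction set the subtarget can use.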
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             GL::GLSLExtInst GLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::GLSL_std_450, GLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    if (STI.canUseExtInstSet(Set)) {
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addUse(GR.getSPIRVTypeID(ResType))
      const unsigned NumOps = I.getNumOperands();
      if (Index < NumOps &&
          I.getOperand(Index).getType() ==
              MachineOperand::MachineOperandType::MO_IntrinsicID)
        MIB.add(I.getOperand(Index));
bool SPIRVInstructionSelector::selectOpWithSrcs(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I,
                                                std::vector<Register> Srcs,
                                                unsigned Opcode) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addUse(GR.getSPIRVTypeID(ResType));
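
// selectUnOp: most unary conversions map 1:1 onto a SPIR-V opcode, but on
// OpenCL targets a conversion fed directly by a G_GLOBAL_VALUE is emitted as
// an OpSpecConstantOp so it remains a constant expression.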
bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  if (STI.isOpenCLEnv() && I.getOperand(1).isReg()) {
    Register SrcReg = I.getOperand(1).getReg();
    for (MachineRegisterInfo::def_instr_iterator
             DefIt = MRI->def_instr_begin(SrcReg);
         DefIt != MRI->def_instr_end(); DefIt = std::next(DefIt)) {
      if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
      switch (Opcode) {
      case SPIRV::OpConvertPtrToU:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
        break;
      case SPIRV::OpConvertUToPtr:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
        break;
      }
      return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpSpecConstantOp))
                 .addUse(GR.getSPIRVTypeID(ResType))
  }
  return selectOpWithSrcs(ResVReg, ResType, I, {I.getOperand(1).getReg()},
                          Opcode);
}
bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType =
      OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}
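
// Helpers that translate LLVM memory-operand state (volatile, nontemporal,
// alignment) into the SPIR-V MemoryOperand bitmask attached to OpLoad/OpStore.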
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
  }

  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))

bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  auto MIB =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
          .addUse(GR.getSPIRVTypeID(ResType))
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
  }
}

bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
  }
}

bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addUse(GR.getSPIRVTypeID(ResType))
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
}

bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    GR.add(GV, GR.CurMF, VarReg);
    GR.addGlobalObject(GV, GR.CurMF, VarReg);
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    selectOpWithSrcs(SrcReg, SourceTy, I, {VarReg}, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
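
// Atomic RMW lowering: scope and memory-semantics operands are materialized
// as i32 constants from the MachineMemOperand's sync scope and ordering; a
// nonzero NegateOpcode first negates the value (used to map FSub onto FAdd).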
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  SPIRV::Scope::Scope Scope = static_cast<SPIRV::Scope::Scope>(getMemScope(
      GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
  auto ScopeConstant = buildI32Constant(Scope, I);
  Register ScopeReg = ScopeConstant.first;
  Result &= ScopeConstant.second;

  auto MemSemConstant = buildI32Constant(MemSem, I);
  Register MemSemReg = MemSemConstant.first;
  Result &= MemSemConstant.second;

  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    Result &= selectOpWithSrcs(TmpReg, ResType, I, {ValueReg}, NegateOpcode);
  }

  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
      .addUse(GR.getSPIRVTypeID(ResType))
bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
        .addUse(GR.getSPIRVTypeID(ResType))
        .addImm(static_cast<int64_t>(i));
  }
}

bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  auto MemSemConstant = buildI32Constant(MemSem, I);
  Register MemSemReg = MemSemConstant.first;
  bool Result = MemSemConstant.second;
  SPIRV::Scope::Scope Scope = static_cast<SPIRV::Scope::Scope>(
      getMemScope(GR.CurMF->getFunction().getContext(), Ord));
  auto ScopeConstant = buildI32Constant(Scope, I);
  Register ScopeReg = ScopeConstant.first;
  Result &= ScopeConstant.second;

  BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
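
// Overflow arithmetic (uaddo/usubo/umulo/smulo) produces a two-element
// struct; the carry/overflow member is extracted and compared against zero to
// synthesize the boolean result expected by the generic opcode.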
bool SPIRVInstructionSelector::selectOverflowArith(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I,
                                                   unsigned Opcode) const {
  Type *ResTy = nullptr;
  StringRef ResName;
  if (!GR.findValueAttrs(&I, ResTy, ResName))
    report_fatal_error(
        "Not enough info to select the arithmetic with overflow instruction");
                     "with overflow instruction");
  Type *ResElemTy = cast<StructType>(ResTy)->getElementType(0);
      ResTy, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  assert(I.getNumDefs() > 1 && "Not enough operands");
  unsigned N = GR.getScalarOrVectorComponentCount(ResType);
    BoolType = GR.getOrCreateSPIRVVectorType(BoolType, N, I, TII);
  Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);
  Register ZeroReg = buildZerosVal(ResType, I);
  MRI->setRegClass(StructVReg, &SPIRV::IDRegClass);
  if (ResName.size() > 0)
  BuildMI(BB, MIRBuilder.getInsertPt(), I.getDebugLoc(), TII.get(Opcode))
  for (unsigned i = I.getNumDefs(); i < I.getNumOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  MRI->setRegClass(HigherVReg, &SPIRV::iIDRegClass);
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
        .addDef(i == 1 ? HigherVReg : I.getOperand(i).getReg())
        .addUse(GR.getSPIRVTypeID(ResType))
  }
      .addDef(I.getOperand(1).getReg())
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  if (!isa<GIntrinsic>(I)) {
    SPIRV::Scope::Scope Scope = static_cast<SPIRV::Scope::Scope>(getMemScope(
        GR.CurMF->getFunction().getContext(), MemOp->getSyncScopeID()));
    auto ScopeConstant = buildI32Constant(Scope, I);
    ScopeReg = ScopeConstant.first;
    Result &= ScopeConstant.second;

    unsigned ScSem = static_cast<uint32_t>(
    auto MemSemEqConstant = buildI32Constant(MemSemEq, I);
    MemSemEqReg = MemSemEqConstant.first;
    Result &= MemSemEqConstant.second;
    if (MemSemEq == MemSemNeq)
      MemSemNeqReg = MemSemEqReg;
    else {
      auto MemSemNeqConstant = buildI32Constant(MemSemNeq, I);
      MemSemNeqReg = MemSemNeqConstant.first;
      Result &= MemSemNeqConstant.second;
    }
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
      .addUse(GR.getSPIRVTypeID(SpvValTy))
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
      .addUse(GR.getSPIRVTypeID(BoolTy))
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(GR.getOrCreateUndef(I, ResType, TII))
  BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
      .addUse(GR.getSPIRVTypeID(ResType))
static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;

  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;

  bool IsGRef = false;
  bool IsAllowedRefs =
      std::all_of(MRI->use_instr_begin(ResVReg), MRI->use_instr_end(),
                  [&IsGRef](auto const &It) {
                    unsigned Opcode = It.getOpcode();
                    if (Opcode == SPIRV::OpConstantComposite ||
                        Opcode == SPIRV::OpVariable ||
                        isSpvIntrinsic(It, Intrinsic::spv_init_global))
                      return IsGRef = true;
                    return Opcode == SPIRV::OpName;
                  });
  return IsAllowedRefs && IsGRef;
}

Register SPIRVInstructionSelector::getUcharPtrTypeReg(
    MachineInstr &I, SPIRV::StorageClass::StorageClass SC) const {
  return GR.getSPIRVTypeID(GR.getOrCreateSPIRVPointerType(
      GR.getOrCreateSPIRVIntegerType(8, I, TII), I, TII, SC));
}

  return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(SPIRV::OpSpecConstantOp))

  SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
      GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
  Register Tmp = MRI->createVirtualRegister(&SPIRV::pIDRegClass);
                                   SPIRV::StorageClass::Generic),
                    GR.getPointerSize()));
  GR.assignSPIRVTypeToVReg(GenericPtrTy, Tmp, *MF);
      I, Tmp, SrcPtr, GR.getSPIRVTypeID(GenericPtrTy),
      static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric));

bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);

  if (SrcPtrTy->getOpcode() != SPIRV::OpTypePointer ||
      ResType->getOpcode() != SPIRV::OpTypePointer)
    return BuildCOPY(ResVReg, SrcPtr, I);

  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtrTy);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResType);

  unsigned SpecOpcode =
      DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC)
          ? static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric)
          : (SrcSC == SPIRV::StorageClass::Generic &&
                     isGenericCastablePtr(DstSC)
                 ? static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr)
                 : 0);
    return buildSpecConstantOp(I, ResVReg, SrcPtr,
                               getUcharPtrTypeReg(I, DstSC), SpecOpcode)
        .constrainAllUses(TII, TRI, RBI);
    buildSpecConstantOp(
        getUcharPtrTypeReg(I, DstSC),
        static_cast<uint32_t>(SPIRV::Opcode::GenericCastToPtr))
        .constrainAllUses(TII, TRI, RBI);

    return BuildCOPY(ResVReg, SrcPtr, I);
  if ((SrcSC == SPIRV::StorageClass::Function &&
       DstSC == SPIRV::StorageClass::Private) ||
      (DstSC == SPIRV::StorageClass::Function &&
       SrcSC == SPIRV::StorageClass::Private))
    return BuildCOPY(ResVReg, SrcPtr, I);

    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  Register Tmp = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
      GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
      .addUse(GR.getSPIRVTypeID(GenericPtrTy))
      .addUse(GR.getSPIRVTypeID(ResType))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
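
// Predicate-to-opcode mapping: IR FCmp/ICmp predicates map onto the SPIR-V
// ordered/unordered float comparisons, signed/unsigned integer comparisons,
// pointer comparisons, and logical (boolean) comparisons below.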
static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ: return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE: return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT: return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE: return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT: return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE: return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD: return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ: return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE: return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT: return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE: return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT: return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE: return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO: return SPIRV::OpUnordered;

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:  return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:  return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE: return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT: return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE: return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT: return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE: return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT: return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE: return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT: return SPIRV::OpULessThan;

static unsigned getPtrCmpOpcode(unsigned Pred) {
  case CmpInst::ICMP_EQ: return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE: return SPIRV::OpPtrNotEqual;

static unsigned getBoolCmpOpcode(unsigned PredNum) {
  case CmpInst::ICMP_EQ: return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE: return SPIRV::OpLogicalNotEqual;
bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I,
                                              unsigned OpAnyOrAll) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());

  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);

  bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
  bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
  if (IsBoolTy && !IsVectorTy) {
    assert(ResVReg == I.getOperand(0).getReg());
    return BuildCOPY(ResVReg, InputRegister, I);
  }

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  unsigned SpirvNotEqualId =
      IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
  SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
  NotEqualReg = IsBoolTy ? InputRegister
                         : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
      IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);
      .addUse(GR.getSPIRVTypeID(SpvBoolTy))
      .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
}

bool SPIRVInstructionSelector::selectAll(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
}

bool SPIRVInstructionSelector::selectAny(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
}

bool SPIRVInstructionSelector::selectFloatDot(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I) const {
  assert(I.getNumOperands() == 4);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());

  SPIRVType *VecType = GR.getSPIRVTypeForVReg(I.getOperand(2).getReg());
  assert(VecType->getOpcode() == SPIRV::OpTypeVector &&
         GR.getScalarOrVectorComponentCount(VecType) > 1 &&
         "dot product requires a vector of at least 2 components");
      GR.getSPIRVTypeForVReg(VecType->getOperand(1).getReg());

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpDot))
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
}

bool SPIRVInstructionSelector::selectIntegerDot(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I,
                                                bool Signed) const {
  assert(I.getNumOperands() == 4);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());

  auto DotOp = Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
}

bool SPIRVInstructionSelector::selectIntegerDotExpansion(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  assert(I.getNumOperands() == 4);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());

  Register Vec0 = I.getOperand(2).getReg();
  Register Vec1 = I.getOperand(3).getReg();
  Register TmpVec = MRI->createVirtualRegister(GR.getRegClass(ResType));
      .addUse(GR.getSPIRVTypeID(VecType))
  assert(VecType->getOpcode() == SPIRV::OpTypeVector &&
         GR.getScalarOrVectorComponentCount(VecType) > 1 &&
         "dot product requires a vector of at least 2 components");

  Register Res = MRI->createVirtualRegister(GR.getRegClass(ResType));
      .addUse(GR.getSPIRVTypeID(ResType))

  for (unsigned i = 1; i < GR.getScalarOrVectorComponentCount(VecType); i++) {
    Register Elt = MRI->createVirtualRegister(GR.getRegClass(ResType));

    BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
        .addUse(GR.getSPIRVTypeID(ResType))

    Register Sum = i < GR.getScalarOrVectorComponentCount(VecType) - 1
                       ? MRI->createVirtualRegister(GR.getRegClass(ResType))
                       : ResVReg;

        .addUse(GR.getSPIRVTypeID(ResType))
  }

template <bool Signed>
bool SPIRVInstructionSelector::selectDot4AddPacked(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  assert(I.getNumOperands() == 5);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  assert(I.getOperand(4).isReg());

  auto DotOp = Signed ? SPIRV::OpSDot : SPIRV::OpUDot;
  Register Dot = MRI->createVirtualRegister(GR.getRegClass(ResType));
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())

      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(4).getReg())
}

template <bool Signed>
bool SPIRVInstructionSelector::selectDot4AddPackedExpansion(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  assert(I.getNumOperands() == 5);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  assert(I.getOperand(4).isReg());

  Register Acc = I.getOperand(4).getReg();
  SPIRVType *EltType = GR.getOrCreateSPIRVIntegerType(8, I, TII);
  auto ExtractOp =
      Signed ? SPIRV::OpBitFieldSExtract : SPIRV::OpBitFieldUExtract;

  for (unsigned i = 0; i < 4; i++) {
    Register AElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
        .addUse(GR.getSPIRVTypeID(ResType))
        .addUse(I.getOperand(2).getReg())
        .addUse(GR.getOrCreateConstInt(i * 8, I, EltType, TII))
        .addUse(GR.getOrCreateConstInt(8, I, EltType, TII))

    Register BElt = MRI->createVirtualRegister(&SPIRV::IDRegClass);
        .addUse(GR.getSPIRVTypeID(ResType))
        .addUse(I.getOperand(3).getReg())
        .addUse(GR.getOrCreateConstInt(i * 8, I, EltType, TII))
        .addUse(GR.getOrCreateConstInt(8, I, EltType, TII))

        .addUse(GR.getSPIRVTypeID(ResType))

    Register MaskMul = MRI->createVirtualRegister(&SPIRV::IDRegClass);
        .addUse(GR.getSPIRVTypeID(ResType))
        .addUse(GR.getOrCreateConstInt(0, I, EltType, TII))
        .addUse(GR.getOrCreateConstInt(8, I, EltType, TII))

        i < 3 ? MRI->createVirtualRegister(&SPIRV::IDRegClass) : ResVReg;
        .addUse(GR.getSPIRVTypeID(ResType))
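
// HLSL saturate(): clamp the operand to [0.0, 1.0] via the GLSL FClamp
// extended instruction, using zero/one splat constants as the bounds.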
bool SPIRVInstructionSelector::selectSaturate(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());

  Register VZero = buildZerosValF(ResType, I);
  Register VOne = buildOnesValF(ResType, I);

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addUse(I.getOperand(2).getReg())
}

bool SPIRVInstructionSelector::selectSign(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());

  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
  auto &DL = I.getDebugLoc();

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);

  unsigned SignBitWidth = GR.getScalarOrVectorBitWidth(InputType);
  unsigned ResBitWidth = GR.getScalarOrVectorBitWidth(ResType);

  bool NeedsConversion = IsFloatTy || SignBitWidth != ResBitWidth;

  auto SignOpcode = IsFloatTy ? GL::FSign : GL::SSign;
                         ? MRI->createVirtualRegister(&SPIRV::IDRegClass)
      .addUse(GR.getSPIRVTypeID(InputType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))

  if (NeedsConversion) {
    auto ConvertOpcode = IsFloatTy ? SPIRV::OpConvertFToS : SPIRV::OpSConvert;
        .addUse(GR.getSPIRVTypeID(ResType))
  }
}

bool SPIRVInstructionSelector::selectWaveOpInst(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I,
                                                unsigned Opcode) const {
  SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
  auto BMI = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I,
                                                IntTy, TII));
  for (unsigned J = 2; J < I.getNumOperands(); J++) {
    BMI.addUse(I.getOperand(J).getReg());
  }
}

bool SPIRVInstructionSelector::selectWaveActiveCountBits(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  SPIRVType *IntTy = GR.getOrCreateSPIRVIntegerType(32, I, TII);
  SPIRVType *BallotType = GR.getOrCreateSPIRVVectorType(IntTy, 4, I, TII);
  Register BallotReg = MRI->createVirtualRegister(GR.getRegClass(BallotType));
  bool Result = selectWaveOpInst(BallotReg, BallotType, I,
                                 SPIRV::OpGroupNonUniformBallot);

  BuildMI(BB, I, I.getDebugLoc(),
          TII.get(SPIRV::OpGroupNonUniformBallotBitCount))
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(GR.getOrCreateConstInt(SPIRV::Scope::Subgroup, I, IntTy, TII))
      .addImm(SPIRV::GroupOperation::Reduce)
}

bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
}

bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  switch (Def->getOpcode()) {
  case SPIRV::ASSIGN_TYPE:
    if (auto *AssignToDef =
            MRI->getVRegDef(Def->getOperand(1).getReg())) {
      if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
        Reg = Def->getOperand(2).getReg();
    }
    break;
  case SPIRV::OpUndef:
    Reg = Def->getOperand(1).getReg();
    break;
  }
  unsigned DestOpCode;
  if (Reg.isValid()) {
    DestOpCode = SPIRV::OpConstantNull;
  } else {
    DestOpCode = TargetOpcode::COPY;
  }
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
      .addDef(I.getOperand(0).getReg())
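
// Constant-detection helpers: walk through ASSIGN_TYPE copies to decide
// whether a register is ultimately defined by a constant (scalar, FP, or a
// spv_const_composite / build-vector of constants).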
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&

  unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT

  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&

  switch (OpDef->getOpcode()) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return true;
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return cast<GIntrinsic>(*OpDef).getIntrinsicID() ==
           Intrinsic::spv_const_composite;
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_SPLAT_VECTOR: {
bool SPIRVInstructionSelector::selectBuildVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0;
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray)

  if (I.getNumExplicitOperands() - I.getNumExplicitDefs() != N)

  bool IsConst = true;
  for (unsigned i = I.getNumExplicitDefs();
       i < I.getNumExplicitOperands() && IsConst; ++i)

  if (!IsConst && N < 2)
    report_fatal_error(
        "There must be at least two constituent operands in a vector");

  MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0;
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray)

  unsigned OpIdx = I.getNumExplicitDefs();
  if (!I.getOperand(OpIdx).isReg())

  Register OpReg = I.getOperand(OpIdx).getReg();

  if (!IsConst && N < 2)
    report_fatal_error(
        "There must be at least two constituent operands in a vector");

  MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = 0; i < N; ++i)

bool SPIRVInstructionSelector::selectDiscard(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  if (STI.canUseExtension(
          SPIRV::Extension::SPV_EXT_demote_to_helper_invocation) ||
    Opcode = SPIRV::OpDemoteToHelperInvocation;
  else {
    Opcode = SPIRV::OpKill;
    GR.invalidateMachineInstr(NextI);
    NextI->removeFromParent();
  }

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addUse(GR.getSPIRVTypeID(ResType))
}

bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}

void SPIRVInstructionSelector::renderFImm64(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
}

void SPIRVInstructionSelector::renderImm(MachineInstrBuilder &MIB,
                                         const MachineInstr &I,
                                         int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}
std::pair<Register, bool>
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
    GR.add(ConstInt, GR.CurMF, NewReg);
    if (Val == 0) {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
               .addUse(GR.getSPIRVTypeID(SpvI32Ty));
    } else {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
               .addUse(GR.getSPIRVTypeID(SpvI32Ty))
    }

bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}

Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  bool ZeroAsNull = STI.isOpenCLEnv();
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
}

Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
                                                  MachineInstr &I) const {
  bool ZeroAsNull = STI.isOpenCLEnv();
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
}

Register SPIRVInstructionSelector::buildOnesValF(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  bool ZeroAsNull = STI.isOpenCLEnv();
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(VOne, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstFP(VOne, I, ResType, TII, ZeroAsNull);
}

Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectVIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
}

bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectOpWithSrcs(ResVReg, ResType, I, {SrcReg}, Opcode);
}

bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  Register SrcReg = I.getOperand(1).getReg();
  if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);

  SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
  if (SrcType == ResType)
    return BuildCOPY(ResVReg, SrcReg, I);

  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectSUCmp(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I,
                                           bool IsSigned) const {
  unsigned N = GR.getScalarOrVectorComponentCount(ResType);
    BoolType = GR.getOrCreateSPIRVVectorType(BoolType, N, I, TII);
  Register BoolTypeReg = GR.getSPIRVTypeID(BoolType);

  Register IsLessEqReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  GR.assignSPIRVTypeToVReg(ResType, IsLessEqReg, MIRBuilder.getMF());
          TII.get(IsSigned ? SPIRV::OpSLessThanEqual
                           : SPIRV::OpULessThanEqual))
      .addUse(I.getOperand(1).getReg())
      .addUse(I.getOperand(2).getReg())

  Register IsLessReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  GR.assignSPIRVTypeToVReg(ResType, IsLessReg, MIRBuilder.getMF());
          TII.get(IsSigned ? SPIRV::OpSLessThan : SPIRV::OpULessThan))
      .addUse(I.getOperand(1).getReg())
      .addUse(I.getOperand(2).getReg())

  Register ResTypeReg = GR.getSPIRVTypeID(ResType);
  Register NegOneOrZeroReg =
      MRI->createVirtualRegister(GR.getRegClass(ResType));
  GR.assignSPIRVTypeToVReg(ResType, NegOneOrZeroReg, MIRBuilder.getMF());
  unsigned SelectOpcode =
      N > 1 ? SPIRV::OpSelectVIVCond : SPIRV::OpSelectSISCond;
      .addUse(buildOnesVal(true, ResType, I))
      .addUse(buildZerosVal(ResType, I))
      .addUse(buildOnesVal(false, ResType, I))
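
// selectIntToBool: integers become booleans by masking the low bit and
// comparing against zero; selectTrunc below reuses it when truncating to i1.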
bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               MachineInstr &I,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy) const {
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register One = buildOnesVal(false, IntTy, I);
      .addUse(GR.getSPIRVTypeID(IntTy))
      .addUse(GR.getSPIRVTypeID(BoolTy))
}

bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  Register IntReg = I.getOperand(1).getReg();
  const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
    return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
  if (ArgType == ResType)
    return BuildCOPY(ResVReg, IntReg, I);
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}

bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           const APInt &Imm,
                                           MachineInstr &I) const {
  unsigned TyOpcode = ResType->getOpcode();
  assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
  if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
        .addUse(GR.getSPIRVTypeID(ResType))
  if (TyOpcode == SPIRV::OpTypeInt) {
    assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
    return Reg == ResVReg ? true : BuildCOPY(ResVReg, Reg, I);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                 .addUse(GR.getSPIRVTypeID(ResType));
bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addUse(GR.getSPIRVTypeID(ResType))
}

  if (TypeInst->getOpcode() == SPIRV::ASSIGN_TYPE) {
    return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
  }
  return TypeInst->getOpcode() == SPIRV::OpConstantI;

  if (TypeInst->getOpcode() == SPIRV::OpConstantI)

bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(3).getReg())
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 4; i < I.getNumOperands(); i++)
}

bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  auto MIB =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg());
  for (unsigned i = 3; i < I.getNumOperands(); i++)
}

bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
    return selectInsertVal(ResVReg, ResType, I);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
}

bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
    return selectExtractVal(ResVReg, ResType, I);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
}
bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const bool IsGEPInBounds = I.getOperand(2).getImm();

  const unsigned Opcode = STI.isVulkanEnv()
                              ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
                                               : SPIRV::OpAccessChain)
                              : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
                                               : SPIRV::OpPtrAccessChain);

  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(3).getReg());

  const unsigned StartingIndex =
      (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
          ? 5
          : 4;
  for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
    MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
  unsigned Lim = I.getNumExplicitOperands();
  for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
    Register OpReg = I.getOperand(i).getReg();
    SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
    if (!OpDefine || !OpType || isConstReg(MRI, OpDefine, Visited) ||
        OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
        GR.isAggregateType(OpType)) {
    }
    Register WrapReg = GR.find(OpDefine, MF);
    WrapReg = MRI->createVirtualRegister(GR.getRegClass(OpType));
    GR.add(OpDefine, MF, WrapReg);
    GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
        .addUse(GR.getSPIRVTypeID(OpType))
  }
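
// selectIntrinsic: dispatch table for the target's spv_* intrinsics. Note the
// fallbacks: integer dot products expand to mul/extract/add sequences when
// the SPV_KHR_integer_dot_product extension is unavailable.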
2763bool SPIRVInstructionSelector::selectIntrinsic(
Register ResVReg,
2769 case Intrinsic::spv_load:
2770 return selectLoad(ResVReg, ResType,
I);
2771 case Intrinsic::spv_store:
2772 return selectStore(
I);
2773 case Intrinsic::spv_extractv:
2774 return selectExtractVal(ResVReg, ResType,
I);
2775 case Intrinsic::spv_insertv:
2776 return selectInsertVal(ResVReg, ResType,
I);
2777 case Intrinsic::spv_extractelt:
2778 return selectExtractElt(ResVReg, ResType,
I);
2779 case Intrinsic::spv_insertelt:
2780 return selectInsertElt(ResVReg, ResType,
I);
2781 case Intrinsic::spv_gep:
2782 return selectGEP(ResVReg, ResType,
I);
2783 case Intrinsic::spv_unref_global:
2784 case Intrinsic::spv_init_global: {
2787 ?
MRI->getVRegDef(
I.getOperand(2).getReg())
2790 return selectGlobalValue(
MI->getOperand(0).getReg(), *
MI,
Init);
2792 case Intrinsic::spv_undef: {
2793 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
2795 .
addUse(GR.getSPIRVTypeID(ResType));
2798 case Intrinsic::spv_const_composite: {
2800 bool IsNull =
I.getNumExplicitDefs() + 1 ==
I.getNumExplicitOperands();
2802 unsigned Opcode = SPIRV::OpConstantNull;
2805 Opcode = SPIRV::OpConstantComposite;
2806 if (!wrapIntoSpecConstantOp(
I, CompositeArgs))
2809 MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
2810 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2812 .
addUse(GR.getSPIRVTypeID(ResType));
2815 for (
Register OpReg : CompositeArgs)
2820 case Intrinsic::spv_assign_name: {
2821 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpName));
2822 MIB.
addUse(
I.getOperand(
I.getNumExplicitDefs() + 1).getReg());
2823 for (
unsigned i =
I.getNumExplicitDefs() + 2;
2824 i <
I.getNumExplicitOperands(); ++i) {
2825 MIB.
addImm(
I.getOperand(i).getImm());
2829 case Intrinsic::spv_switch: {
2830 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSwitch));
2831 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
2832 if (
I.getOperand(i).isReg())
2833 MIB.
addReg(
I.getOperand(i).getReg());
2834 else if (
I.getOperand(i).isCImm())
2835 addNumImm(
I.getOperand(i).getCImm()->getValue(), MIB);
2836 else if (
I.getOperand(i).isMBB())
2837 MIB.
addMBB(
I.getOperand(i).getMBB());
  case Intrinsic::spv_loop_merge: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpLoopMerge));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      assert(I.getOperand(i).isMBB());
      MIB.addMBB(I.getOperand(i).getMBB());
    }
    MIB.addImm(SPIRV::SelectionControl::None);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_selection_merge: {
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSelectionMerge));
    assert(I.getOperand(1).isMBB() &&
           "operand 1 to spv_selection_merge must be a basic block");
    MIB.addMBB(I.getOperand(1).getMBB());
    MIB.addImm(getSelectionOperandForImm(I.getOperand(2).getImm()));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_cmpxchg:
    return selectAtomicCmpXchg(ResVReg, ResType, I);
  case Intrinsic::spv_unreachable:
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable))
        .constrainAllUses(TII, TRI, RBI);
  case Intrinsic::spv_alloca:
    return selectFrameIndex(ResVReg, ResType, I);
  case Intrinsic::spv_alloca_array:
    return selectAllocaArray(ResVReg, ResType, I);
  case Intrinsic::spv_assume:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
          .addUse(I.getOperand(1).getReg())
          .constrainAllUses(TII, TRI, RBI);
    break;
  case Intrinsic::spv_expect:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg())
          .addUse(I.getOperand(3).getReg())
          .constrainAllUses(TII, TRI, RBI);
    break;
  case Intrinsic::arithmetic_fence:
    if (STI.canUseExtension(SPIRV::Extension::SPV_EXT_arithmetic_fence))
      return BuildMI(BB, I, I.getDebugLoc(),
                     TII.get(SPIRV::OpArithmeticFenceEXT))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg())
          .constrainAllUses(TII, TRI, RBI);
    // Without the extension, the fence degrades to a plain copy.
    return BuildCOPY(ResVReg, I.getOperand(2).getReg(), I);
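  // The following cases lower HLSL-style thread/group ID queries to loads of
  // the corresponding SPIR-V input builtin variables (GlobalInvocationId,
  // LocalInvocationId, WorkgroupId), each a v3uint indexed by the operand.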
  case Intrinsic::spv_thread_id:
    return loadVec3BuiltinInputID(SPIRV::BuiltIn::GlobalInvocationId, ResVReg,
                                  ResType, I);
  case Intrinsic::spv_thread_id_in_group:
    return loadVec3BuiltinInputID(SPIRV::BuiltIn::LocalInvocationId, ResVReg,
                                  ResType, I);
  case Intrinsic::spv_group_id:
    return loadVec3BuiltinInputID(SPIRV::BuiltIn::WorkgroupId, ResVReg,
                                  ResType, I);
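  // Dot-product intrinsics use the native integer dot instructions when
  // SPV_KHR_integer_dot_product (or a recent enough SPIR-V version) is
  // available, and otherwise fall back to an expansion built from multiplies
  // and adds.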
  case Intrinsic::spv_fdot:
    return selectFloatDot(ResVReg, ResType, I);
  case Intrinsic::spv_udot:
  case Intrinsic::spv_sdot:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
        STI.isAtLeastSPIRVVer(VersionTuple(1, 6)))
      return selectIntegerDot(ResVReg, ResType, I,
                              IID == Intrinsic::spv_sdot);
    return selectIntegerDotExpansion(ResVReg, ResType, I);
  case Intrinsic::spv_dot4add_i8packed:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
        STI.isAtLeastSPIRVVer(VersionTuple(1, 6)))
      return selectDot4AddPacked<true>(ResVReg, ResType, I);
    return selectDot4AddPackedExpansion<true>(ResVReg, ResType, I);
  case Intrinsic::spv_dot4add_u8packed:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_integer_dot_product) ||
        STI.isAtLeastSPIRVVer(VersionTuple(1, 6)))
      return selectDot4AddPacked<false>(ResVReg, ResType, I);
    return selectDot4AddPackedExpansion<false>(ResVReg, ResType, I);
  case Intrinsic::spv_all:
    return selectAll(ResVReg, ResType, I);
  case Intrinsic::spv_any:
    return selectAny(ResVReg, ResType, I);
  case Intrinsic::spv_cross:
    return selectExtInst(ResVReg, ResType, I, CL::cross, GL::Cross);
  case Intrinsic::spv_distance:
    return selectExtInst(ResVReg, ResType, I, CL::distance, GL::Distance);
  case Intrinsic::spv_lerp:
    return selectExtInst(ResVReg, ResType, I, CL::mix, GL::FMix);
  case Intrinsic::spv_length:
    return selectExtInst(ResVReg, ResType, I, CL::length, GL::Length);
  case Intrinsic::spv_degrees:
    return selectExtInst(ResVReg, ResType, I, CL::degrees, GL::Degrees);
  case Intrinsic::spv_frac:
    return selectExtInst(ResVReg, ResType, I, CL::fract, GL::Fract);
  case Intrinsic::spv_normalize:
    return selectExtInst(ResVReg, ResType, I, CL::normalize, GL::Normalize);
  case Intrinsic::spv_rsqrt:
    return selectExtInst(ResVReg, ResType, I, CL::rsqrt, GL::InverseSqrt);
  case Intrinsic::spv_sign:
    return selectSign(ResVReg, ResType, I);
  case Intrinsic::spv_firstbituhigh: // There is no CL equivalent of FindUMsb
    return selectFirstBitHigh(ResVReg, ResType, I, /*IsSigned=*/false);
  case Intrinsic::spv_firstbitshigh: // There is no CL equivalent of FindSMsb
    return selectFirstBitHigh(ResVReg, ResType, I, /*IsSigned=*/true);
  case Intrinsic::spv_firstbitlow: // There is no CL equivalent of FindILsb
    return selectFirstBitLow(ResVReg, ResType, I);
  case Intrinsic::spv_group_memory_barrier_with_group_sync: {
    bool Result = true;
    auto MemSemConstant =
        buildI32Constant(SPIRV::MemorySemantics::SequentiallyConsistent, I);
    Register MemSemReg = MemSemConstant.first;
    Result &= MemSemConstant.second;
    auto ScopeConstant = buildI32Constant(SPIRV::Scope::Workgroup, I);
    Register ScopeReg = ScopeConstant.first;
    Result &= ScopeConstant.second;
    return Result &&
           BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpControlBarrier))
               .addUse(ScopeReg)
               .addUse(ScopeReg)
               .addUse(MemSemReg)
               .constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_lifetime_start:
  case Intrinsic::spv_lifetime_end: {
    unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
                                                       : SPIRV::OpLifetimeStop;
    int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
    Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
    // A size of 0 means "the whole object"; normalize the -1 that the IR
    // uses for an unknown size.
    if (Size == -1)
      Size = 0;
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(Op))
        .addUse(PtrReg)
        .addImm(Size)
        .constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_saturate:
    return selectSaturate(ResVReg, ResType, I);
  case Intrinsic::spv_nclamp:
    return selectExtInst(ResVReg, ResType, I, CL::fclamp, GL::NClamp);
  case Intrinsic::spv_uclamp:
    return selectExtInst(ResVReg, ResType, I, CL::u_clamp, GL::UClamp);
  case Intrinsic::spv_sclamp:
    return selectExtInst(ResVReg, ResType, I, CL::s_clamp, GL::SClamp);
  case Intrinsic::spv_wave_active_countbits:
    return selectWaveActiveCountBits(ResVReg, ResType, I);
  case Intrinsic::spv_wave_all:
    return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformAll);
  case Intrinsic::spv_wave_any:
    return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformAny);
  case Intrinsic::spv_wave_is_first_lane:
    return selectWaveOpInst(ResVReg, ResType, I, SPIRV::OpGroupNonUniformElect);
  case Intrinsic::spv_wave_readlane:
    return selectWaveOpInst(ResVReg, ResType, I,
                            SPIRV::OpGroupNonUniformShuffle);
  case Intrinsic::spv_step:
    return selectExtInst(ResVReg, ResType, I, CL::step, GL::Step);
  case Intrinsic::spv_radians:
    return selectExtInst(ResVReg, ResType, I, CL::radians, GL::Radians);
  // Discard intrinsics which we do not expect to actually represent code
  // after lowering or which are not implemented but should not crash when
  // found in a customer's LLVM IR input.
  case Intrinsic::instrprof_increment:
  case Intrinsic::instrprof_increment_step:
  case Intrinsic::instrprof_value_profile:
    break;
  // Discard internal intrinsics.
  case Intrinsic::spv_value_md:
    break;
  case Intrinsic::spv_resource_handlefrombinding: {
    return selectHandleFromBinding(ResVReg, ResType, I);
  }
  case Intrinsic::spv_resource_store_typedbuffer: {
    return selectImageWriteIntrinsic(I);
  }
  case Intrinsic::spv_resource_load_typedbuffer: {
    return selectReadImageIntrinsic(ResVReg, ResType, I);
  }
  case Intrinsic::spv_discard: {
    return selectDiscard(ResVReg, ResType, I);
  }
  default: {
    std::string DiagMsg;
    raw_string_ostream OS(DiagMsg);
    I.print(OS);
    DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
    report_fatal_error(DiagMsg.c_str(), false);
  }
  }
  return true;
}
bool SPIRVInstructionSelector::selectHandleFromBinding(Register &ResVReg,
                                                       const SPIRVType *ResType,
                                                       MachineInstr &I) const {
  uint32_t Set = foldImm(I.getOperand(2), MRI);
  uint32_t Binding = foldImm(I.getOperand(3), MRI);
  uint32_t ArraySize = foldImm(I.getOperand(4), MRI);
  Register IndexReg = I.getOperand(5).getReg();
  bool IsNonUniform = ArraySize > 1 && foldImm(I.getOperand(6), MRI);

  MachineIRBuilder MIRBuilder(I);
  Register VarReg = buildPointerToResource(ResType, Set, Binding, ArraySize,
                                           IndexReg, IsNonUniform, MIRBuilder);

  if (IsNonUniform)
    buildOpDecorate(ResVReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});

  // TODO: For now we assume the resource is an image, which needs to be
  // loaded to be used. The handle (OpVariable) cannot be used directly.
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(VarReg)
      .constrainAllUses(TII, TRI, RBI);
}
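// Typed-buffer loads always read a 4-component texel with OpImageRead. When
// the IR result type is narrower, the read is widened to vec4 and the
// requested components are extracted afterwards. Schematic shape for a
// 2-component result (IDs are illustrative):
//
//   %texel = OpImageRead %v4uint %img %coord
//   %x     = OpCompositeExtract %uint %texel 0
//   %y     = OpCompositeExtract %uint %texel 1
//   %res   = OpCompositeConstruct %v2uint %x %y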
bool SPIRVInstructionSelector::selectReadImageIntrinsic(
    Register &ResVReg, const SPIRVType *ResType, MachineInstr &I) const {
  // If the load of the image is in a different basic block, then this will
  // generate invalid code. A proper solution is to move the OpLoad from
  // selectHandleFromBinding here, but that requires changing the return type
  // of the intrinsic. For now we require the same basic block.
  Register ImageReg = I.getOperand(2).getReg();
  assert(MRI->getVRegDef(ImageReg)->getParent() == I.getParent() &&
         "The image must be loaded in the same basic block as its use.");

  uint64_t ResultSize = GR.getScalarOrVectorComponentCount(ResType);
  if (ResultSize == 4) {
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(SPIRV::OpImageRead))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .addUse(ImageReg)
        .addUse(I.getOperand(3).getReg())
        .constrainAllUses(TII, TRI, RBI);
  }

  SPIRVType *ReadType = widenTypeToVec4(ResType, I);
  Register ReadReg = MRI->createVirtualRegister(GR.getRegClass(ReadType));
  bool Succeed =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpImageRead))
          .addDef(ReadReg)
          .addUse(GR.getSPIRVTypeID(ReadType))
          .addUse(ImageReg)
          .addUse(I.getOperand(3).getReg())
          .constrainAllUses(TII, TRI, RBI);
  if (!Succeed)
    return false;

  if (ResultSize == 1) {
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(SPIRV::OpCompositeExtract))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .addUse(ReadReg)
        .addImm(0)
        .constrainAllUses(TII, TRI, RBI);
  }
  return extractSubvector(ResVReg, ResType, ReadReg, I);
}
bool SPIRVInstructionSelector::extractSubvector(
    Register &ResVReg, const SPIRVType *ResType, Register &ReadReg,
    MachineInstr &InsertionPoint) const {
  SPIRVType *InputType = GR.getResultType(ReadReg);
  [[maybe_unused]] uint64_t InputSize =
      GR.getScalarOrVectorComponentCount(InputType);
  uint64_t ResultSize = GR.getScalarOrVectorComponentCount(ResType);
  assert(InputSize > 1 && "The input must be a vector.");
  assert(ResultSize > 1 && "The result must be a vector.");
  assert(ResultSize < InputSize &&
         "Cannot extract more elements than there are in the input.");
  SmallVector<Register> ComponentRegisters;
  SPIRVType *ScalarType = GR.getScalarOrVectorComponentType(ResType);
  const TargetRegisterClass *ScalarRegClass = GR.getRegClass(ScalarType);
  for (uint64_t I = 0; I < ResultSize; I++) {
    Register ComponentReg = MRI->createVirtualRegister(ScalarRegClass);
    bool Succeed = BuildMI(*InsertionPoint.getParent(), InsertionPoint,
                           InsertionPoint.getDebugLoc(),
                           TII.get(SPIRV::OpCompositeExtract))
                       .addDef(ComponentReg)
                       .addUse(GR.getSPIRVTypeID(ScalarType))
                       .addUse(ReadReg)
                       .addImm(I)
                       .constrainAllUses(TII, TRI, RBI);
    if (!Succeed)
      return false;
    ComponentRegisters.emplace_back(ComponentReg);
  }

  MachineInstrBuilder MIB = BuildMI(*InsertionPoint.getParent(),
                                    InsertionPoint,
                                    InsertionPoint.getDebugLoc(),
                                    TII.get(SPIRV::OpCompositeConstruct))
                                .addDef(ResVReg)
                                .addUse(GR.getSPIRVTypeID(ResType));

  for (Register ComponentReg : ComponentRegisters)
    MIB.addUse(ComponentReg);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
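// Typed-buffer stores are emitted as a plain OpImageWrite of a 4-component
// value; the asserts below encode that expectation from the frontend.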
bool SPIRVInstructionSelector::selectImageWriteIntrinsic(
    MachineInstr &I) const {
  // If the load of the image is in a different basic block, then this will
  // generate invalid code; see selectReadImageIntrinsic above.
  Register ImageReg = I.getOperand(1).getReg();
  assert(MRI->getVRegDef(ImageReg)->getParent() == I.getParent() &&
         "The image must be loaded in the same basic block as its use.");
  Register CoordinateReg = I.getOperand(2).getReg();
  Register DataReg = I.getOperand(3).getReg();
  assert(GR.getResultType(DataReg)->getOpcode() == SPIRV::OpTypeVector);
  assert(GR.getScalarOrVectorComponentCount(GR.getResultType(DataReg)) == 4);
  return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                 TII.get(SPIRV::OpImageWrite))
      .addUse(ImageReg)
      .addUse(CoordinateReg)
      .addUse(DataReg)
      .constrainAllUses(TII, TRI, RBI);
}
Register SPIRVInstructionSelector::buildPointerToResource(
    const SPIRVType *ResType, uint32_t Set, uint32_t Binding,
    uint32_t ArraySize, Register IndexReg, bool IsNonUniform,
    MachineIRBuilder MIRBuilder) const {
  if (ArraySize == 1)
    return GR.getOrCreateGlobalVariableWithBinding(ResType, Set, Binding,
                                                   MIRBuilder);

  const SPIRVType *VarType = GR.getOrCreateSPIRVArrayType(
      ResType, ArraySize, *MIRBuilder.getInsertPt(), TII);
  Register VarReg = GR.getOrCreateGlobalVariableWithBinding(
      VarType, Set, Binding, MIRBuilder);

  SPIRVType *ResPointerType = GR.getOrCreateSPIRVPointerType(
      ResType, MIRBuilder, SPIRV::StorageClass::UniformConstant);

  Register AcReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  if (IsNonUniform) {
    // It is unclear which value needs to be marked non-uniform, so both the
    // index and the access chain are decorated as non-uniform.
    buildOpDecorate(IndexReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
    buildOpDecorate(AcReg, MIRBuilder, SPIRV::Decoration::NonUniformEXT, {});
  }

  MIRBuilder.buildInstr(SPIRV::OpAccessChain)
      .addDef(AcReg)
      .addUse(GR.getSPIRVTypeID(ResPointerType))
      .addUse(VarReg)
      .addUse(IndexReg);

  return AcReg;
}
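// The selectFirstBitSet* family lowers spv_firstbit{u,s}high and
// spv_firstbitlow. GLSL.std.450 FindUMsb/FindSMsb/FindILsb only operate on
// 32-bit values, so 16-bit sources are extended first and 64-bit sources are
// bitcast to pairs of 32-bit halves; the per-half results are then combined
// with selects and an add of 32 for whichever half holds the upper word.
// Rough sketch for the 64-bit scalar case (schematic, FindUMsb shown):
//
//   %v2  = OpBitcast %v2uint %src64
//   %msb = OpExtInst %v2uint GLSL.std.450 FindUMsb %v2
//   ; pick the primary half unless FindUMsb returned -1 (no bit found),
//   ; otherwise fall back to the other half, adjusting the result by 32.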
bool SPIRVInstructionSelector::selectFirstBitSet16(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
    unsigned ExtendOpcode, unsigned BitSetOpcode) const {
  Register ExtReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  bool Result =
      selectOpWithSrcs(ExtReg, ResType, I, {I.getOperand(2).getReg()},
                       ExtendOpcode);

  return Result &&
         selectFirstBitSet32(ResVReg, ResType, I, ExtReg, BitSetOpcode);
}
bool SPIRVInstructionSelector::selectFirstBitSet32(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
    Register SrcReg, unsigned BitSetOpcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(BitSetOpcode)
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectFirstBitSet64Overflow(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
    Register SrcReg, unsigned BitSetOpcode, bool SwapPrimarySide) const {
  // SPIR-V only allows vectors of 2, 3 or 4 components. Larger i64 vectors
  // would need registers with an invalid vector size, so they are split into
  // i64x2 sub-vectors (plus a possible left-over scalar), processed, and
  // recombined.
  unsigned ComponentCount = GR.getScalarOrVectorComponentCount(ResType);
  assert(ComponentCount < 5 && "Vec 5+ will generate invalid SPIR-V ops");

  MachineIRBuilder MIRBuilder(I);
  SPIRVType *BaseType = GR.retrieveScalarOrVectorIntType(ResType);
  SPIRVType *I64Type = GR.getOrCreateSPIRVIntegerType(64, MIRBuilder);
  SPIRVType *I64x2Type = GR.getOrCreateSPIRVVectorType(I64Type, 2, MIRBuilder);
  SPIRVType *Vec2ResType =
      GR.getOrCreateSPIRVVectorType(BaseType, 2, MIRBuilder);

  std::vector<Register> PartialRegs;

  // Loop over pairs of components, stopping one early when ComponentCount
  // is odd.
  unsigned CurrentComponent = 0;
  for (; CurrentComponent + 1 < ComponentCount; CurrentComponent += 2) {
    // Extract the next i64x2 sub-vector from SrcReg and run the 64-bit
    // lowering on it.
    Register BitSetResult =
        MRI->createVirtualRegister(GR.getRegClass(I64x2Type));
    auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpVectorShuffle))
                   .addDef(BitSetResult)
                   .addUse(GR.getSPIRVTypeID(I64x2Type))
                   .addUse(SrcReg)
                   .addUse(SrcReg)
                   .addImm(CurrentComponent)
                   .addImm(CurrentComponent + 1);

    if (!MIB.constrainAllUses(TII, TRI, RBI))
      return false;

    Register SubVecBitSetReg =
        MRI->createVirtualRegister(GR.getRegClass(Vec2ResType));

    if (!selectFirstBitSet64(SubVecBitSetReg, Vec2ResType, I, BitSetResult,
                             BitSetOpcode, SwapPrimarySide))
      return false;

    PartialRegs.push_back(SubVecBitSetReg);
  }

  // On an odd component count, process the last remaining scalar component.
  if (CurrentComponent != ComponentCount) {
    bool ZeroAsNull = STI.isOpenCLEnv();
    Register FinalElemReg = MRI->createVirtualRegister(GR.getRegClass(I64Type));
    Register ConstIntLastIdx = GR.getOrCreateConstInt(
        ComponentCount - 1, I, BaseType, TII, ZeroAsNull);

    if (!selectOpWithSrcs(FinalElemReg, I64Type, I, {SrcReg, ConstIntLastIdx},
                          SPIRV::OpVectorExtractDynamic))
      return false;

    Register FinalElemBitSetReg =
        MRI->createVirtualRegister(GR.getRegClass(BaseType));

    if (!selectFirstBitSet64(FinalElemBitSetReg, BaseType, I, FinalElemReg,
                             BitSetOpcode, SwapPrimarySide))
      return false;

    PartialRegs.push_back(FinalElemBitSetReg);
  }

  // Join the partial results back into the return type, in order.
  return selectOpWithSrcs(ResVReg, ResType, I, PartialRegs,
                          SPIRV::OpCompositeConstruct);
}
bool SPIRVInstructionSelector::selectFirstBitSet64(
    Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
    Register SrcReg, unsigned BitSetOpcode, bool SwapPrimarySide) const {
  unsigned ComponentCount = GR.getScalarOrVectorComponentCount(ResType);
  SPIRVType *BaseType = GR.retrieveScalarOrVectorIntType(ResType);
  bool ZeroAsNull = STI.isOpenCLEnv();
  Register ConstIntZero =
      GR.getOrCreateConstInt(0, I, BaseType, TII, ZeroAsNull);
  Register ConstIntOne =
      GR.getOrCreateConstInt(1, I, BaseType, TII, ZeroAsNull);

  // SPIR-V doesn't support vectors with more than 4 components. Since the
  // algorithm below converts i64 to i32x2 (and i64x2 to i32x4), it can only
  // operate on vectors with 2 or fewer components. When a larger vector is
  // seen, split it, recurse, and recombine the pieces.
  if (ComponentCount > 2)
    return selectFirstBitSet64Overflow(ResVReg, ResType, I, SrcReg,
                                       BitSetOpcode, SwapPrimarySide);

  // 1. Split the int64 value into two 32-bit pieces using a bitcast.
  MachineIRBuilder MIRBuilder(I);
  SPIRVType *PostCastType =
      GR.getOrCreateSPIRVVectorType(BaseType, 2 * ComponentCount, MIRBuilder);
  Register BitcastReg =
      MRI->createVirtualRegister(GR.getRegClass(PostCastType));

  if (!selectOpWithSrcs(BitcastReg, PostCastType, I, {SrcReg},
                        SPIRV::OpBitcast))
    return false;

  // 2. Find the first set bit in each of the pieces produced in step 1.
  Register FBSReg = MRI->createVirtualRegister(GR.getRegClass(PostCastType));
  if (!selectFirstBitSet32(FBSReg, PostCastType, I, BitcastReg, BitSetOpcode))
    return false;

  // 3. Split the result vector into high-word and low-word results.
  Register HighReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  Register LowReg = MRI->createVirtualRegister(GR.getRegClass(ResType));

  bool IsScalarRes = ResType->getOpcode() != SPIRV::OpTypeVector;
  if (IsScalarRes) {
    // For a scalar result, extract the two components directly.
    if (!selectOpWithSrcs(HighReg, ResType, I, {FBSReg, ConstIntZero},
                          SPIRV::OpVectorExtractDynamic))
      return false;
    if (!selectOpWithSrcs(LowReg, ResType, I, {FBSReg, ConstIntOne},
                          SPIRV::OpVectorExtractDynamic))
      return false;
  } else {
    // For a vector result, gather the components with vector shuffles.
    auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpVectorShuffle))
                   .addDef(HighReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(FBSReg)
                   // Per the spec, repeat the vector if only one is needed.
                   .addUse(FBSReg);

    // The high-word results live in the even indexes of FBSReg.
    for (unsigned J = 0; J < ComponentCount * 2; J += 2)
      MIB.addImm(J);

    if (!MIB.constrainAllUses(TII, TRI, RBI))
      return false;

    MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                  TII.get(SPIRV::OpVectorShuffle))
              .addDef(LowReg)
              .addUse(GR.getSPIRVTypeID(ResType))
              .addUse(FBSReg)
              // Per the spec, repeat the vector if only one is needed.
              .addUse(FBSReg);

    // The low-word results live in the odd indexes of FBSReg.
    for (unsigned J = 1; J < ComponentCount * 2; J += 2)
      MIB.addImm(J);

    if (!MIB.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  // 4. Pick the primary result unless it is -1 (no set bit found), in which
  // case fall back to the secondary result.
  SPIRVType *BoolType = GR.getOrCreateSPIRVBoolType(I, TII);
  Register NegOneReg;
  Register Reg0;
  Register Reg32;
  unsigned SelectOp;
  unsigned AddOp;

  if (IsScalarRes) {
    NegOneReg =
        GR.getOrCreateConstInt((unsigned)-1, I, ResType, TII, ZeroAsNull);
    Reg0 = GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
    Reg32 = GR.getOrCreateConstInt(32, I, ResType, TII, ZeroAsNull);
    SelectOp = SPIRV::OpSelectSISCond;
    AddOp = SPIRV::OpIAddS;
  } else {
    BoolType =
        GR.getOrCreateSPIRVVectorType(BoolType, ComponentCount, MIRBuilder);
    NegOneReg =
        GR.getOrCreateConstVector((unsigned)-1, I, ResType, TII, ZeroAsNull);
    Reg0 = GR.getOrCreateConstVector(0, I, ResType, TII, ZeroAsNull);
    Reg32 = GR.getOrCreateConstVector(32, I, ResType, TII, ZeroAsNull);
    SelectOp = SPIRV::OpSelectVIVCond;
    AddOp = SPIRV::OpIAddV;
  }

  // By default the emitted opcodes check for the set bit from the MSB side
  // (high word first); SwapPrimarySide checks from the LSB side instead.
  Register PrimaryReg = HighReg;
  Register SecondaryReg = LowReg;
  Register PrimaryShiftReg = Reg32;
  Register SecondaryShiftReg = Reg0;
  if (SwapPrimarySide) {
    PrimaryReg = LowReg;
    SecondaryReg = HighReg;
    PrimaryShiftReg = Reg0;
    SecondaryShiftReg = Reg32;
  }

  // Check if the primary result is -1.
  Register BReg = MRI->createVirtualRegister(GR.getRegClass(BoolType));
  if (!selectOpWithSrcs(BReg, BoolType, I, {PrimaryReg, NegOneReg},
                        SPIRV::OpIEqual))
    return false;

  // Select the secondary result when the primary one is -1.
  Register TmpReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  if (!selectOpWithSrcs(TmpReg, ResType, I, {BReg, SecondaryReg, PrimaryReg},
                        SelectOp))
    return false;

  // 5. Add 32 when the high word was used, and 0 otherwise.
  Register ValReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  if (!selectOpWithSrcs(ValReg, ResType, I,
                        {BReg, SecondaryShiftReg, PrimaryShiftReg}, SelectOp))
    return false;

  return selectOpWithSrcs(ResVReg, ResType, I, {ValReg, TmpReg}, AddOp);
}
bool SPIRVInstructionSelector::selectFirstBitHigh(Register ResVReg,
                                                  const SPIRVType *ResType,
                                                  MachineInstr &I,
                                                  bool IsSigned) const {
  // FindUMsb and FindSMsb only support 32-bit integers, so narrower and
  // wider sources are extended or split first.
  Register OpReg = I.getOperand(2).getReg();
  SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
  // Zero- or sign-extend 16-bit sources.
  unsigned ExtendOpcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  unsigned BitSetOpcode = IsSigned ? GL::FindSMsb : GL::FindUMsb;

  switch (GR.getScalarOrVectorBitWidth(OpType)) {
  case 16:
    return selectFirstBitSet16(ResVReg, ResType, I, ExtendOpcode,
                               BitSetOpcode);
  case 32:
    return selectFirstBitSet32(ResVReg, ResType, I, OpReg, BitSetOpcode);
  case 64:
    return selectFirstBitSet64(ResVReg, ResType, I, OpReg, BitSetOpcode,
                               /*SwapPrimarySide=*/false);
  default:
    report_fatal_error(
        "spv_firstbituhigh and spv_firstbitshigh only support 16,32,64 bits.");
  }
}
bool SPIRVInstructionSelector::selectFirstBitLow(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // FindILsb only supports 32-bit integers.
  Register OpReg = I.getOperand(2).getReg();
  SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);

  // OpUConvert treats the operand bits as an unsigned i16 and zero-extends
  // it to an unsigned i32. This leaves the least-significant bits unchanged,
  // so the position of the first set bit from the LSB side is unaffected.
  unsigned ExtendOpcode = SPIRV::OpUConvert;
  unsigned BitSetOpcode = GL::FindILsb;

  switch (GR.getScalarOrVectorBitWidth(OpType)) {
  case 16:
    return selectFirstBitSet16(ResVReg, ResType, I, ExtendOpcode,
                               BitSetOpcode);
  case 32:
    return selectFirstBitSet32(ResVReg, ResType, I, OpReg, BitSetOpcode);
  case 64:
    return selectFirstBitSet64(ResVReg, ResType, I, OpReg, BitSetOpcode,
                               /*SwapPrimarySide=*/true);
  default:
    report_fatal_error("spv_firstbitlow only supports 16,32,64 bits.");
  }
}
bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // There was an allocation-size parameter to the alloca that is not 1, so
  // a variable-length array is required.
  MachineBasicBlock &BB = *I.getParent();
  bool Res = BuildMI(BB, I, I.getDebugLoc(),
                     TII.get(SPIRV::OpVariableLengthArrayINTEL))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(2).getReg())
                 .constrainAllUses(TII, TRI, RBI);
  if (!STI.isVulkanEnv()) {
    unsigned Alignment = I.getOperand(3).getImm();
    buildOpDecorate(ResVReg, I, TII, SPIRV::Decoration::Alignment,
                    {Alignment});
  }
  return Res;
}
bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  // Change the order of instructions if needed: all OpVariable instructions
  // in a function must be the first instructions in the first block.
  auto It = getOpVariableMBBIt(I);
  bool Res = BuildMI(*It->getParent(), It, It->getDebugLoc(),
                     TII.get(SPIRV::OpVariable))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
                 .constrainAllUses(TII, TRI, RBI);
  if (!STI.isVulkanEnv()) {
    unsigned Alignment = I.getOperand(2).getImm();
    buildOpDecorate(ResVReg, *It, TII, SPIRV::Decoration::Alignment,
                    {Alignment});
  }
  return Res;
}
bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so we generate an OpBranchConditional here if the previous
  // instruction is a G_BRCOND; otherwise a plain OpBranch suffices.
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(),
                   TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // If an OpBranchConditional has already been generated for this G_BRCOND
  // by selectBranch (which sees the paired G_BR first), there is nothing
  // left to do.
  const MachineInstr *NextI = I.getNextNode();
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;

  // Otherwise the G_BRCOND relies on implicit fallthrough to the next block,
  // so emit an OpBranchConditional whose "false" target is the block that
  // LLVM would fall through to.
  MachineBasicBlock &MBB = *I.getParent();
  MachineBasicBlock *NextMBB = I.getParent()->getNextNode();
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  const unsigned NumOps = I.getNumOperands();
  for (unsigned i = 1; i < NumOps; i += 2) {
    MIB.addUse(I.getOperand(i + 0).getReg());
    MIB.addMBB(I.getOperand(i + 1).getMBB());
  }
  bool Res = MIB.constrainAllUses(TII, TRI, RBI);
  MIB->setDesc(TII.get(TargetOpcode::PHI));
  MIB->removeOperand(1);
  return Res;
}
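// Lower a global value reference. Functions used as operands become either
// OpConstantFunctionPointerINTEL (with SPV_INTEL_function_pointers) or a
// null-pointer placeholder; other globals become OpVariable definitions with
// a storage class derived from their address space and, when visible outside
// the module, linkage information.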
bool SPIRVInstructionSelector::selectGlobalValue(
    Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
  MachineIRBuilder MIRBuilder(I);
  const GlobalValue *GV = I.getOperand(1).getGlobal();
  Type *GVType = GR.getDeducedGlobalValueType(GV);
  SPIRVType *PointerBaseType;
  if (GVType->isArrayTy()) {
    SPIRVType *ArrayElementType =
        GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
                                SPIRV::AccessQualifier::ReadWrite, false);
    PointerBaseType = GR.getOrCreateSPIRVArrayType(
        ArrayElementType, GVType->getArrayNumElements(), I, TII);
  } else {
    PointerBaseType = GR.getOrCreateSPIRVType(
        GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  }

  std::string GlobalIdent;
  if (!GV->hasName()) {
    unsigned &ID = UnnamedGlobalIDs[GV];
    if (ID == 0)
      ID = UnnamedGlobalIDs.size();
    GlobalIdent = "__unnamed_" + Twine(ID).str();
  } else {
    GlobalIdent = GV->getGlobalIdentifier();
  }

  // With SPV_INTEL_function_pointers, a function operand gets a proper
  // constant and a correct function-pointer type; without the extension such
  // operands are unused and are substituted by null constants.
  if (isa<Function>(GV)) {
    const Constant *ConstVal = GV;
    MachineBasicBlock &BB = *I.getParent();
    Register NewReg = GR.find(ConstVal, GR.CurMF);
    if (!NewReg.isValid()) {
      Register NewReg = ResVReg;
      GR.add(ConstVal, GR.CurMF, NewReg);
      const Function *GVFun =
          STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
              ? dyn_cast<Function>(GV)
              : nullptr;
      SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
          PointerBaseType, I, TII,
          GVFun ? SPIRV::StorageClass::CodeSectionINTEL
                : addressSpaceToStorageClass(GV->getAddressSpace(), STI));
      if (GVFun) {
        // Emit an OpUndef placeholder of the pointer type and bind it to the
        // function via OpConstantFunctionPointerINTEL; the operand is
        // recorded so the function address can be resolved later.
        Register ResTypeReg = GR.getSPIRVTypeID(ResType);
        Register FuncVReg =
            MRI->createGenericVirtualRegister(GR.getRegType(ResType));
        MRI->setRegClass(FuncVReg, &SPIRV::pIDRegClass);
        MachineInstrBuilder MIB1 =
            BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
                .addDef(FuncVReg)
                .addUse(ResTypeReg);
        MachineInstrBuilder MIB2 =
            BuildMI(BB, I, I.getDebugLoc(),
                    TII.get(SPIRV::OpConstantFunctionPointerINTEL))
                .addDef(NewReg)
                .addUse(ResTypeReg)
                .addUse(FuncVReg);
        GR.recordFunctionPointer(&MIB2.getInstr()->getOperand(2), GVFun);
        return MIB1.constrainAllUses(TII, TRI, RBI) &&
               MIB2.constrainAllUses(TII, TRI, RBI);
      }
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
          .addDef(NewReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .constrainAllUses(TII, TRI, RBI);
    }
    assert(NewReg != ResVReg);
    return BuildCOPY(ResVReg, NewReg, I);
  }
  auto GlobalVar = cast<GlobalVariable>(GV);
  assert(GlobalVar->getName() != "llvm.global.annotations");

  // Skip the empty declaration of a GV with an initializer until we get the
  // declaration with the passed initializer.
  bool HasInit = GlobalVar->hasInitializer() &&
                 !isa<UndefValue>(GlobalVar->getInitializer());
  if (HasInit && !Init)
    return true;

  unsigned AddrSpace = GV->getAddressSpace();
  SPIRV::StorageClass::StorageClass Storage =
      addressSpaceToStorageClass(AddrSpace, STI);
  bool HasLnkTy = !GV->hasInternalLinkage() && !GV->hasPrivateLinkage();
  SPIRV::LinkageType::LinkageType LnkType =
      GV->isDeclarationForLinker()
          ? SPIRV::LinkageType::Import
          : (GV->hasLinkOnceODRLinkage() &&
                     STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
                 ? SPIRV::LinkageType::LinkOnceODR
                 : SPIRV::LinkageType::Export);

  SPIRVType *ResType =
      GR.getOrCreateSPIRVPointerType(PointerBaseType, I, TII, Storage);
  Register Reg = GR.buildGlobalVariable(
      ResVReg, ResType, GlobalIdent, GV, Storage, Init,
      GlobalVar->isConstant(), HasLnkTy, LnkType, MIRBuilder, true);
  return Reg.isValid();
}
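// GLSL.std.450 has no Log10, so log10(x) is emitted as log2(x) * 0.30103,
// using 1/log2(10) ~= 0.30103. Sanity check: log10(100) = 2, and
// log2(100) * 0.30103 ~= 6.6439 * 0.30103 ~= 2.0000.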
bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
    return selectExtInst(ResVReg, ResType, I, CL::log10);
  }

  MachineIRBuilder MIRBuilder(I);
  MachineBasicBlock &BB = *I.getParent();

  // Build log2(x).
  Register VarReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  bool Result =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
          .addDef(VarReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
          .addImm(GL::Log2)
          .add(I.getOperand(1))
          .constrainAllUses(TII, TRI, RBI);

  // Build 0.30103.
  assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
         ResType->getOpcode() == SPIRV::OpTypeFloat);
  const SPIRVType *SpirvScalarType =
      ResType->getOpcode() == SPIRV::OpTypeVector
          ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
          : ResType;
  Register ScaleReg =
      GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);

  // Multiply log2(x) by 0.30103 to get the log10(x) result.
  auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
                    ? SPIRV::OpVectorTimesScalar
                    : SPIRV::OpFMulS;
  return Result && BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                       .addDef(ResVReg)
                       .addUse(GR.getSPIRVTypeID(ResType))
                       .addUse(VarReg)
                       .addUse(ScaleReg)
                       .constrainAllUses(TII, TRI, RBI);
}
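// Load one component of a v3uint input builtin (e.g. GlobalInvocationId).
// Schematic shape of the emitted code (IDs are illustrative):
//
//   %var = OpVariable %_ptr_Input_v3uint Input  ; linked to the builtin
//   %vec = OpLoad %v3uint %var
//   %res = OpCompositeExtract %uint %vec <idx>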
bool SPIRVInstructionSelector::loadVec3BuiltinInputID(
    SPIRV::BuiltIn::BuiltIn BuiltInValue, Register ResVReg,
    const SPIRVType *ResType, MachineInstr &I) const {
  MachineIRBuilder MIRBuilder(I);
  const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
  const SPIRVType *Vec3Ty =
      GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
  const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
      Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);

  // Create a new register for the input ID builtin variable.
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::iIDRegClass);
  MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 64));
  GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Build the global variable with the necessary decorations for the input
  // ID builtin variable.
  Register Variable = GR.buildGlobalVariable(
      NewRegister, PtrType, getLinkStringForBuiltIn(BuiltInValue), nullptr,
      SPIRV::StorageClass::Input, nullptr, true, true,
      SPIRV::LinkageType::Import, MIRBuilder, false);

  // Create a new register for the loaded value.
  Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());

  // Load the v3uint value from the global variable.
  bool Result =
      BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
          .addDef(LoadedRegister)
          .addUse(GR.getSPIRVTypeID(Vec3Ty))
          .addUse(Variable)
          .constrainAllUses(TII, TRI, RBI);

  // Get the input ID index. The operand is expected to be a constant
  // immediate value, wrapped in a type assignment.
  assert(I.getOperand(2).isReg());
  const uint32_t ThreadId = foldImm(I.getOperand(2), MRI);

  // Extract the requested input ID from the loaded vector value.
  MachineBasicBlock &BB = *I.getParent();
  auto MIB =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(LoadedRegister)
          .addImm(ThreadId);
  return Result && MIB.constrainAllUses(TII, TRI, RBI);
}
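// Return a 4-component vector type with the same scalar type as the input,
// or the input itself when it is already a 4-component vector. Used by the
// typed-buffer read path above to widen narrow results for OpImageRead.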
SPIRVType *SPIRVInstructionSelector::widenTypeToVec4(const SPIRVType *Type,
                                                     MachineInstr &I) const {
  MachineIRBuilder MIRBuilder(I);
  if (Type->getOpcode() != SPIRV::OpTypeVector)
    return GR.getOrCreateSPIRVVectorType(Type, 4, MIRBuilder);

  uint64_t VectorSize = Type->getOperand(2).getImm();
  if (VectorSize == 4)
    return const_cast<SPIRVType *>(Type);

  Register ScalarTypeReg = Type->getOperand(1).getReg();
  const SPIRVType *ScalarType = GR.getSPIRVTypeForVReg(ScalarTypeReg);
  return GR.getOrCreateSPIRVVectorType(ScalarType, 4, MIRBuilder);
}
InstructionSelector *
llvm::createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                     const SPIRVSubtarget &Subtarget,
                                     const RegisterBankInfo &RBI) {
  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
}