#include "llvm/IR/IntrinsicsSPIRV.h"

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
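// Note: SPIRVGenGlobalISel.inc is emitted by TableGen from the target's
// GlobalISel patterns. Each GET_GLOBALISEL_* guard pulls in a different slice
// of that generated file: the predicate bitset, member declarations,
// temporaries, and (further below) the generated selectImpl() that acts as a
// first-chance pattern matcher before the hand-written selection code.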
  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;

  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;

  bool selectSelect(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                    bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;

  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;

  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;
#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
SPIRVInstructionSelector::SPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                                                   const SPIRVSubtarget &ST,
                                                   const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  MRI = &MF.getRegInfo();
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}
void SPIRVInstructionSelector::resetVRegsType(MachineFunction &MF) {
  if (HasVRegsReset == &MF)
    return;
  HasVRegsReset = &MF;

  MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    LLT RegType = MRI.getType(Reg);
    if (RegType.isScalar())
      MRI.setType(Reg, LLT::scalar(64));
    else if (RegType.isPointer())
      MRI.setType(Reg, LLT::pointer(0, 64));
    else if (RegType.isVector())
      MRI.setType(Reg, LLT::fixed_vector(2, 64));
  }
  for (const auto &MBB : MF) {
    for (const auto &MI : MBB) {
      if (MI.getOpcode() != SPIRV::ASSIGN_TYPE)
        continue;
      Register DstReg = MI.getOperand(0).getReg();
      LLT DstType = MRI.getType(DstReg);
      Register SrcReg = MI.getOperand(1).getReg();
      LLT SrcType = MRI.getType(SrcReg);
      if (DstType != SrcType)
        MRI.setType(DstReg, MRI.getType(SrcReg));

      const TargetRegisterClass *DstRC = MRI.getRegClassOrNull(DstReg);
      const TargetRegisterClass *SrcRC = MRI.getRegClassOrNull(SrcReg);
      if (DstRC != SrcRC && SrcRC)
        MRI.setRegClass(DstReg, SrcRC);
    }
  }
}
bool SPIRVInstructionSelector::select(MachineInstr &I) {
  resetVRegsType(*I.getParent()->getParent());

  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  const unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      Register DstReg = I.getOperand(0).getReg();
      Register SrcReg = I.getOperand(1).getReg();
      auto *Def = MRI->getVRegDef(SrcReg);
      if (isTypeFoldingSupported(Def->getOpcode())) {
        bool Res = selectImpl(I, *CoverageInfo);
        LLVM_DEBUG({
          if (!Res && Def->getOpcode() != TargetOpcode::G_CONSTANT) {
            dbgs() << "Unexpected pattern in ASSIGN_TYPE.\nInstruction: ";
            I.print(dbgs());
          }
        });
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->setRegClass(SrcReg, MRI->getRegClass(DstReg));
      MRI->replaceRegWith(SrcReg, DstReg);
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}
bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (isTypeFoldingSupported(Opcode) && Opcode != TargetOpcode::G_CONSTANT)
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectBuildVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);
  case TargetOpcode::G_FTAN:
    return selectExtInst(ResVReg, ResType, I, CL::tan, GL::Tan);
  case TargetOpcode::G_FACOS:
    return selectExtInst(ResVReg, ResType, I, CL::acos, GL::Acos);
  case TargetOpcode::G_FASIN:
    return selectExtInst(ResVReg, ResType, I, CL::asin, GL::Asin);
  case TargetOpcode::G_FATAN:
    return selectExtInst(ResVReg, ResType, I, CL::atan, GL::Atan);
  case TargetOpcode::G_FCOSH:
    return selectExtInst(ResVReg, ResType, I, CL::cosh, GL::Cosh);
  case TargetOpcode::G_FSINH:
    return selectExtInst(ResVReg, ResType, I, CL::sinh, GL::Sinh);
  case TargetOpcode::G_FTANH:
    return selectExtInst(ResVReg, ResType, I, CL::tanh, GL::Tanh);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_add_sat);
  case TargetOpcode::G_UADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_add_sat);
  case TargetOpcode::G_SSUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_sub_sat);
  case TargetOpcode::G_USUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constexpr expressions.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    (void)II;
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with negative value operand
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  default:
    return false;
  }
}
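// Dispatch summary: opcodes with type-folding support are routed to the
// TableGen-generated selectImpl() first; the switch above hand-selects the
// rest, either directly (loads, branches, atomics) or through the extended
// instruction sets (math builtins such as G_FSQRT -> CL::sqrt / GL::Sqrt).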
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}
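// Example: for G_FMA the ExtInstList is {{OpenCL_std, CL::fma},
// {GLSL_std_450, GL::Fma}}. The first set the subtarget can use wins, so the
// same generic instruction lowers to "OpExtInst ... fma" in an OpenCL
// environment and "OpExtInst ... Fma" under Vulkan/GLSL.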
bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  if (STI.isOpenCLEnv() && I.getOperand(1).isReg()) {
    Register SrcReg = I.getOperand(1).getReg();
    bool IsGV = false;
    for (MachineRegisterInfo::def_instr_iterator DefIt =
             MRI->def_instr_begin(SrcReg);
         DefIt != MRI->def_instr_end(); DefIt = std::next(DefIt)) {
      if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
        IsGV = true;
        break;
      }
    }
    if (IsGV) {
      uint32_t SpecOpcode = 0;
      switch (Opcode) {
      case SPIRV::OpConvertPtrToU:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
        break;
      case SPIRV::OpConvertUToPtr:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
        break;
      }
      if (SpecOpcode)
        return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(SpecOpcode)
            .addUse(SrcReg)
            .constrainAllUses(TII, TRI, RBI);
    }
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}
bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}
static SPIRV::Scope::Scope getScope(SyncScope::ID Ord,
                                    const SyncScopeIDs &SSIDs) {
  if (Ord == SyncScope::SingleThread || Ord == SSIDs.Work_ItemSSID)
    return SPIRV::Scope::Invocation;
  else if (Ord == SyncScope::System || Ord == SSIDs.DeviceSSID)
    return SPIRV::Scope::Device;
  else if (Ord == SSIDs.WorkGroupSSID)
    return SPIRV::Scope::Workgroup;
  else if (Ord == SSIDs.AllSVMDevicesSSID)
    return SPIRV::Scope::CrossDevice;
  else if (Ord == SSIDs.SubGroupSSID)
    return SPIRV::Scope::Subgroup;
  // If the scope is unknown, assume the OpenCL rule and default to
  // memory_scope_device.
  return SPIRV::Scope::Device;
}
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}
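// Sketch of the resulting encoding, following the SPIR-V "Memory Operands"
// grammar: a volatile 4-byte-aligned access appends the mask
// (Volatile | Aligned) followed by the alignment literal, e.g.
//   OpLoad %ty %res %ptr Volatile|Aligned 4
// Only the Aligned flag carries an extra literal operand.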
bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConstIntArray(Val, Num, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // Create a new global variable holding the initializer pattern; the
    // module takes ownership of the global var.
    Function &CurFunction = GR.CurMF->getFunction();
    Type *LLVMArrTy =
        ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num);
    GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
                                            true, GlobalValue::InternalLinkage,
                                            Constant::getNullValue(LLVMArrTy));
    Register VarReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
    GR.add(GV, GR.CurMF, VarReg);

    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope =
      static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), SSIDs));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = false;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with negative value operand is requested
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
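// Note: NegateOpcode covers atomics with no direct SPIR-V opcode; e.g.
// spvSelect above requests G_ATOMICRMW_FSUB as OpAtomicFAddEXT of the
// negated operand (NegateOpcode = SPIRV::OpFNegate).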
bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There was no "assign type" actions, let's fix this now
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, GR.getRegClass(ResType));
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}
bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord, SSIDs));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope =
        static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), SSIDs));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast user is single and in OpConstantComposite or
  // OpVariable, we should select OpSpecConstantOp.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    bool Result =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
            .addUse(NewReg)
            .constrainAllUses(TII, TRI, RBI);
    return Result;
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Don't generate a cast between identical storage classes.
  if (SrcSC == DstSC)
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(SrcPtr)
        .constrainAllUses(TII, TRI, RBI);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }

  // Check if instructions from the SPV_INTEL_usm_storage_classes extension
  // may be used.
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::Generic)
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  if (SrcSC == SPIRV::StorageClass::Generic && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);

  // We're casting 2 other arbitrary address spaces, so have to bitcast.
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}
static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}
static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}
static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}
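// Taken together, these helpers pick the SPIR-V comparison for one LLVM
// predicate based on the operand kind: ICMP_EQ becomes OpIEqual for
// integers, OpPtrEqual for pointers, and OpLogicalEqual for booleans
// (see the dispatch in selectICmp below).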
bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I,
                                              unsigned OpAnyOrAll) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
  bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
  if (IsBoolTy && !IsVectorTy) {
    assert(ResVReg == I.getOperand(0).getReg());
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(InputRegister)
        .constrainAllUses(TII, TRI, RBI);
  }

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  unsigned SpirvNotEqualId =
      IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
  SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
  SPIRVType *SpvBoolTy = SpvBoolScalarTy;
  Register NotEqualReg = ResVReg;

  if (IsVectorTy) {
    NotEqualReg = IsBoolTy ? InputRegister
                           : MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    const unsigned NumElts = InputType->getOperand(2).getImm();
    SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
  }

  if (!IsBoolTy) {
    Register ConstZeroReg =
        IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);

    BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
        .addDef(NotEqualReg)
        .addUse(GR.getSPIRVTypeID(SpvBoolTy))
        .addUse(InputRegister)
        .addUse(ConstZeroReg)
        .constrainAllUses(TII, TRI, RBI);
  }

  if (!IsVectorTy)
    return true;

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
      .addUse(NotEqualReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectAll(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
}

bool SPIRVInstructionSelector::selectAny(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
}
bool SPIRVInstructionSelector::selectFmix(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  assert(I.getNumOperands() == 5);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  assert(I.getOperand(4).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::FMix)
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectLength(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::Length)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectFrac(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::Fract)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectNormalize(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::Normalize)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectRsqrt(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::InverseSqrt)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectSaturate(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register VZero = buildZerosValF(ResType, I);
  Register VOne = buildOnesValF(ResType, I);

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::FClamp)
      .addUse(I.getOperand(2).getReg())
      .addUse(VZero)
      .addUse(VOne)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  // There is no way to implement `freeze` correctly without support on the
  // SPIR-V standard side, but we may at least address a simple (static) case
  // when undef/poison value presence is obvious.
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
    Register Reg;
    switch (Def->getOpcode()) {
    case SPIRV::ASSIGN_TYPE:
      if (MachineInstr *AssignToDef =
              MRI->getVRegDef(Def->getOperand(1).getReg())) {
        if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
          Reg = Def->getOperand(2).getReg();
      }
      break;
    case SPIRV::OpUndef:
      Reg = Def->getOperand(1).getReg();
      break;
    }
    unsigned DestOpCode;
    if (Reg.isValid()) {
      DestOpCode = SPIRV::OpConstantNull;
    } else {
      DestOpCode = TargetOpcode::COPY;
      Reg = OpReg;
    }
    return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
        .addDef(I.getOperand(0).getReg())
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  return false;
}
static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
                                       const SPIRVType *ResType) {
  MachineInstr *OpDef = MRI->getVRegDef(ResType->getOperand(2).getReg());
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (MachineInstr *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }
  unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
                   ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
                   : 0;
  return N;
}

// Return true if the virtual register represents a constant.
static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef,
                       SmallPtrSet<SPIRVType *, 4> &Visited) {
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }

  if (Visited.contains(OpDef))
    return true;
  Visited.insert(OpDef);

  switch (OpDef->getOpcode()) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return true;
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return cast<GIntrinsic>(*OpDef).getIntrinsicID() ==
           Intrinsic::spv_const_composite;
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_SPLAT_VECTOR: {
    for (unsigned i = OpDef->getNumExplicitDefs(); i < OpDef->getNumOperands();
         i++) {
      SPIRVType *OpNestedDef =
          OpDef->getOperand(i).isReg()
              ? MRI->getVRegDef(OpDef->getOperand(i).getReg())
              : nullptr;
      if (OpNestedDef && !isConstReg(MRI, OpNestedDef, Visited))
        return false;
    }
    return true;
  }
  }
  return false;
}

static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) {
  SPIRVType *OpDef = MRI->getVRegDef(OpReg);
  SmallPtrSet<SPIRVType *, 4> Visited;
  return isConstReg(MRI, OpDef, Visited);
}
bool SPIRVInstructionSelector::selectBuildVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0;
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray)
    N = getArrayComponentCount(MRI, ResType);
  else
    report_fatal_error("Cannot select G_BUILD_VECTOR with a non-vector result");
  if (I.getNumExplicitOperands() - I.getNumExplicitDefs() != N)
    report_fatal_error("G_BUILD_VECTOR and the result type are inconsistent");

  // Check if we may construct a constant vector.
  bool IsConst = true;
  for (unsigned i = I.getNumExplicitDefs();
       i < I.getNumExplicitOperands() && IsConst; ++i)
    if (!isConstReg(MRI, I.getOperand(i).getReg()))
      IsConst = false;

  if (!IsConst && N < 2)
    report_fatal_error(
        "There must be at least two constituent operands in a vector");

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0;
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray)
    N = getArrayComponentCount(MRI, ResType);
  else
    report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");

  unsigned OpIdx = I.getNumExplicitDefs();
  if (!I.getOperand(OpIdx).isReg())
    report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");

  // Check if we may construct a constant vector.
  Register OpReg = I.getOperand(OpIdx).getReg();
  bool IsConst = isConstReg(MRI, OpReg);

  if (!IsConst && N < 2)
    report_fatal_error(
        "There must be at least two constituent operands in a vector");

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = 0; i < N; ++i)
    MIB.addUse(OpReg);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}
void SPIRVInstructionSelector::renderFImm64(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm(MachineInstrBuilder &MIB,
                                         const MachineInstr &I,
                                         int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}
Register
SPIRVInstructionSelector::buildI32Constant(uint32_t Val, MachineInstr &I,
                                           const SPIRVType *ResType) const {
  Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Find a constant in DT or build a new one.
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
  if (!NewReg.isValid()) {
    NewReg = MRI->createGenericVirtualRegister(LLT::scalar(64));
    GR.add(ConstInt, GR.CurMF, NewReg);
    MachineInstr *MI;
    MachineBasicBlock &BB = *I.getParent();
    if (Val == 0) {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty));
    } else {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty))
               .addImm(APInt(32, Val).getZExtValue());
    }
    constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  }
  return NewReg;
}
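// Usage note: callers such as selectAtomicRMW and selectFence rely on the
// GR.find()/GR.add() pair above to deduplicate scope and memory-semantics
// constants, so repeated requests for the same i32 value reuse one register.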
bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}
Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
  bool ZeroAsNull = STI.isOpenCLEnv();
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
}
Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
                                                  MachineInstr &I) const {
  // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
  bool ZeroAsNull = STI.isOpenCLEnv();
  APFloat VZero = getZeroFP(GR.getTypeForSPIRVType(ResType));
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
}
Register SPIRVInstructionSelector::buildOnesValF(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
  bool ZeroAsNull = STI.isOpenCLEnv();
  APFloat VOne = getOneFP(GR.getTypeForSPIRVType(ResType));
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(VOne, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstFP(VOne, I, ResType, TII, ZeroAsNull);
}
Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One =
      AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(One.getZExtValue(), I, ResType, TII);
  return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
}
bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // We can convert bool value directly to float type without OpConvert*ToF,
  // however the translator generates OpSelect+OpConvert*ToF, so we do the same.
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = ResType->getOperand(2).getImm();
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
}
bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  Register SrcReg = I.getOperand(1).getReg();
  if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);

  SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
  if (SrcType == ResType) {
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(ResVReg);
    const TargetRegisterClass *SrcRC = MRI->getRegClassOrNull(SrcReg);
    if (DstRC != SrcRC && SrcRC)
      MRI->setRegClass(ResVReg, SrcRC);
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(SrcReg)
        .constrainAllUses(TII, TRI, RBI);
  }

  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}
bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               MachineInstr &I,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy) const {
  // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register Zero = buildZerosVal(IntTy, I);
  Register One = buildOnesVal(false, IntTy, I);
  MachineBasicBlock &BB = *I.getParent();
  BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(BitIntReg)
      .addUse(GR.getSPIRVTypeID(IntTy))
      .addUse(IntReg)
      .addUse(One)
      .constrainAllUses(TII, TRI, RBI);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(BoolTy))
      .addUse(BitIntReg)
      .addUse(Zero)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  Register IntReg = I.getOperand(1).getReg();
  const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
    return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
  if (ArgType == ResType) {
    const TargetRegisterClass *DstRC = MRI->getRegClassOrNull(ResVReg);
    const TargetRegisterClass *SrcRC = MRI->getRegClassOrNull(IntReg);
    if (DstRC != SrcRC && SrcRC)
      MRI->setRegClass(ResVReg, SrcRC);
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(IntReg)
        .constrainAllUses(TII, TRI, RBI);
  }
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}
bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           const APInt &Imm,
                                           MachineInstr &I) const {
  unsigned TyOpcode = ResType->getOpcode();
  assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
  MachineBasicBlock &BB = *I.getParent();
  if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
      Imm.isZero())
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .constrainAllUses(TII, TRI, RBI);
  if (TyOpcode == SPIRV::OpTypeInt) {
    assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
    Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
    if (Reg == ResVReg)
      return true;
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  addNumImm(Imm, MIB);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  assert(MO.isReg());
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() == SPIRV::ASSIGN_TYPE) {
    assert(TypeInst->getOperand(1).isReg());
    MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
    return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
  }
  return TypeInst->getOpcode() == SPIRV::OpConstantI;
}

static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() == SPIRV::OpConstantI)
    return TypeInst->getOperand(2).getImm();
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
  return ImmInst->getOperand(1).getCImm()->getZExtValue();
}
bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // object to insert
                 .addUse(I.getOperand(3).getReg())
                 // composite to insert into
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 4; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 3; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (isImm(I.getOperand(4), MRI))
    return selectInsertVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  if (isImm(I.getOperand(3), MRI))
    return selectExtractVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const bool IsGEPInBounds = I.getOperand(2).getImm();

  // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
  // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan however,
  // we have to use Op[InBounds]AccessChain.
  const unsigned Opcode = STI.isVulkanEnv()
                              ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
                                               : SPIRV::OpAccessChain)
                              : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
                                               : SPIRV::OpPtrAccessChain);

  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to get a pointer to.
                 .addUse(I.getOperand(3).getReg());
  // Adding indices.
  const unsigned StartingIndex =
      (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
          ? 5
          : 4;
  for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}
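// Example: a "getelementptr inbounds" (IsGEPInBounds = 1) selects
// OpInBoundsAccessChain on Vulkan and OpInBoundsPtrAccessChain on OpenCL.
// The PtrAccessChain forms consume the initial element index as an extra
// operand, which is why StartingIndex is 4 for them and 5 otherwise.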
// Maybe wrap a value into OpSpecConstantOp.
bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
    MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
  bool Result = true;
  unsigned Lim = I.getNumExplicitOperands();
  for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
    Register OpReg = I.getOperand(i).getReg();
    SPIRVType *OpDefine = MRI->getVRegDef(OpReg);
    SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
    SmallPtrSet<SPIRVType *, 4> Visited;
    if (!OpDefine || !OpType || isConstReg(MRI, OpDefine, Visited) ||
        OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
        GR.isAggregateType(OpType)) {
      // The case of G_ADDRSPACE_CAST inside spv_const_composite() is processed
      // by selectAddrSpaceCast().
      CompositeArgs.push_back(OpReg);
      continue;
    }
    MachineFunction *MF = I.getMF();
    Register WrapReg = GR.find(OpDefine, MF);
    if (WrapReg.isValid()) {
      CompositeArgs.push_back(WrapReg);
      continue;
    }
    // Create a new register for the wrapper.
    WrapReg = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
    GR.add(OpDefine, MF, WrapReg);
    CompositeArgs.push_back(WrapReg);
    // Decorate the wrapper register and generate a new instruction.
    MRI->setType(WrapReg, LLT::pointer(0, 64));
    GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
    MachineBasicBlock &BB = *I.getParent();
    Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                 .addDef(WrapReg)
                 .addUse(GR.getSPIRVTypeID(OpType))
                 .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
                 .addUse(OpReg)
                 .constrainAllUses(TII, TRI, RBI);
    if (!Result)
      break;
  }
  return Result;
}
bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
  switch (IID) {
  case Intrinsic::spv_load:
    return selectLoad(ResVReg, ResType, I);
  case Intrinsic::spv_store:
    return selectStore(I);
  case Intrinsic::spv_extractv:
    return selectExtractVal(ResVReg, ResType, I);
  case Intrinsic::spv_insertv:
    return selectInsertVal(ResVReg, ResType, I);
  case Intrinsic::spv_extractelt:
    return selectExtractElt(ResVReg, ResType, I);
  case Intrinsic::spv_insertelt:
    return selectInsertElt(ResVReg, ResType, I);
  case Intrinsic::spv_gep:
    return selectGEP(ResVReg, ResType, I);
  case Intrinsic::spv_unref_global:
  case Intrinsic::spv_init_global: {
    MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
    MachineInstr *Init = I.getNumExplicitOperands() > 2
                             ? MRI->getVRegDef(I.getOperand(2).getReg())
                             : nullptr;
    assert(MI);
    return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
  }
  case Intrinsic::spv_undef: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_const_composite: {
    // If no values are attached, the composite is null constant.
    bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
    // Select a proper instruction.
    unsigned Opcode = SPIRV::OpConstantNull;
    SmallVector<Register> CompositeArgs;
    if (!IsNull) {
      Opcode = SPIRV::OpConstantComposite;
      if (!wrapIntoSpecConstantOp(I, CompositeArgs))
        return false;
    }
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    // skip type MD node we already used when generated assign.type for this
    if (!IsNull) {
      for (Register OpReg : CompositeArgs)
        MIB.addUse(OpReg);
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_assign_name: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
    MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
    for (unsigned i = I.getNumExplicitDefs() + 2;
         i < I.getNumExplicitOperands(); ++i) {
      MIB.addImm(I.getOperand(i).getImm());
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_switch: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isReg())
        MIB.addReg(I.getOperand(i).getReg());
      else if (I.getOperand(i).isCImm())
        addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
      else if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        llvm_unreachable("Unexpected OpSwitch operand");
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_cmpxchg:
    return selectAtomicCmpXchg(ResVReg, ResType, I);
  case Intrinsic::spv_unreachable:
    BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
    break;
  case Intrinsic::spv_alloca:
    return selectFrameIndex(ResVReg, ResType, I);
  case Intrinsic::spv_alloca_array:
    return selectAllocaArray(ResVReg, ResType, I);
  case Intrinsic::spv_assume:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
          .addUse(I.getOperand(1).getReg());
    break;
  case Intrinsic::spv_expect:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg())
          .addUse(I.getOperand(3).getReg());
    break;
  case Intrinsic::spv_thread_id:
    return selectSpvThreadId(ResVReg, ResType, I);
  case Intrinsic::spv_all:
    return selectAll(ResVReg, ResType, I);
  case Intrinsic::spv_any:
    return selectAny(ResVReg, ResType, I);
  case Intrinsic::spv_lerp:
    return selectFmix(ResVReg, ResType, I);
  case Intrinsic::spv_length:
    return selectLength(ResVReg, ResType, I);
  case Intrinsic::spv_frac:
    return selectFrac(ResVReg, ResType, I);
  case Intrinsic::spv_normalize:
    return selectNormalize(ResVReg, ResType, I);
  case Intrinsic::spv_rsqrt:
    return selectRsqrt(ResVReg, ResType, I);
  case Intrinsic::spv_lifetime_start:
  case Intrinsic::spv_lifetime_end: {
    unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
                                                       : SPIRV::OpLifetimeStop;
    int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
    Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
    if (Size == -1)
      Size = 0;
    BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
  } break;
  case Intrinsic::spv_saturate:
    return selectSaturate(ResVReg, ResType, I);
  default: {
    std::string DiagMsg;
    raw_string_ostream OS(DiagMsg);
    I.print(OS);
    DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
    report_fatal_error(DiagMsg.c_str(), false);
  }
  }
  return true;
}
bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // There was an allocation size parameter to the allocation instruction
  // that is not 1.
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(),
                 TII.get(SPIRV::OpVariableLengthArrayINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  // Change the order of instructions if needed: all OpVariable instructions
  // in a function must be the first instructions in the first block.
  MachineFunction *MF = I.getParent()->getParent();
  MachineBasicBlock *MBB = &MF->front();
  auto It = MBB->SkipPHIsAndLabels(MBB->begin()), E = MBB->end();
  bool IsHeader = false;
  unsigned Opcode;
  for (; It != E && It != I; ++It) {
    Opcode = It->getOpcode();
    if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
      IsHeader = true;
    } else if (IsHeader &&
               !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
      ++It;
      break;
    }
  }
  return BuildMI(*MBB, It, It->getDebugLoc(), TII.get(SPIRV::OpVariable))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so can generate an OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // If an OpBranchConditional has already been generated while selecting the
  // paired G_BR, the work is done. Otherwise LLVM must be relying on the
  // fallthrough to the next basic block, so we need to create an
  // OpBranchConditional with an explicit "false" argument pointing to the
  // next basic block that LLVM would fall through to.
  const MachineInstr *NextI = I.getNextNode();
  // Check if this has already been successfully selected.
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;
  // Must be relying on implicit block fallthrough, so generate an
  // OpBranchConditional with the "next" basic block as the "false" target.
  MachineBasicBlock &MBB = *I.getParent();
  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  const unsigned NumOps = I.getNumOperands();
  for (unsigned i = 1; i < NumOps; i += 2) {
    MIB.addUse(I.getOperand(i + 0).getReg());
    MIB.addMBB(I.getOperand(i + 1).getMBB());
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectGlobalValue(
    Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
  // FIXME: don't use MachineIRBuilder here, replace it with BuildMI.
  MachineIRBuilder MIRBuilder(I);
  const GlobalValue *GV = I.getOperand(1).getGlobal();
  Type *GVType = toTypedPointer(GR.getDeducedGlobalValueType(GV));
  SPIRVType *PointerBaseType;
  if (GVType->isArrayTy()) {
    SPIRVType *ArrayElementType =
        GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
                                SPIRV::AccessQualifier::ReadWrite, false);
    PointerBaseType = GR.getOrCreateSPIRVArrayType(
        ArrayElementType, GVType->getArrayNumElements(), I, TII);
  } else {
    PointerBaseType = GR.getOrCreateSPIRVType(
        GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  }
  SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
      PointerBaseType, I, TII,
      addressSpaceToStorageClass(GV->getAddressSpace(), STI));

  std::string GlobalIdent;
  if (!GV->hasName()) {
    unsigned &ID = UnnamedGlobalIDs[GV];
    if (ID == 0)
      ID = UnnamedGlobalIDs.size();
    GlobalIdent = "__unnamed_" + Twine(ID).str();
  } else {
    GlobalIdent = GV->getGlobalIdentifier();
  }

  if (isa<Function>(GV)) {
    Constant *ConstVal = const_cast<Constant *>(cast<Constant>(GV));
    MachineBasicBlock &BB = *I.getParent();
    Register NewReg = GR.find(ConstVal, GR.CurMF);
    if (!NewReg.isValid()) {
      Register NewReg = ResVReg;
      GR.add(ConstVal, GR.CurMF, NewReg);
      const Function *GVFun =
          STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
              ? dyn_cast<Function>(GV)
              : nullptr;
      if (GVFun) {
        // References to a function via function pointers generate virtual
        // registers without a definition. We are able to resolve this
        // reference using Globar Register info into an OpFunction instruction
        // or a global variable value during module analysis.
        Register FuncVReg =
            MIRBuilder.getMRI()->createGenericVirtualRegister(LLT::scalar(64));
        MRI->setRegClass(FuncVReg, &SPIRV::iIDRegClass);
        MachineInstrBuilder MB =
            BuildMI(BB, I, I.getDebugLoc(),
                    TII.get(SPIRV::OpConstantFunctionPointerINTEL))
                .addDef(NewReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(FuncVReg);
        return MB.constrainAllUses(TII, TRI, RBI);
      }
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
          .addDef(NewReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .constrainAllUses(TII, TRI, RBI);
    }
    assert(NewReg != ResVReg);
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(NewReg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto GlobalVar = cast<GlobalVariable>(GV);

  bool HasInit = GlobalVar->hasInitializer() &&
                 !isa<UndefValue>(GlobalVar->getInitializer());
  // Skip empty declaration for GVs with initializers till we get the decl with
  // passed initializer.
  if (HasInit && !Init)
    return true;

  SPIRV::StorageClass::StorageClass Storage =
      addressSpaceToStorageClass(GV->getAddressSpace(), STI);
  bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
                  Storage != SPIRV::StorageClass::Function;
  SPIRV::LinkageType::LinkageType LnkType =
      (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
          ? SPIRV::LinkageType::Import
          : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
                     STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
                 ? SPIRV::LinkageType::LinkOnceODR
                 : SPIRV::LinkageType::Export);

  Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
                                        Storage, Init, GlobalVar->isConstant(),
                                        HasLnkTy, LnkType, MIRBuilder, true);
  return Reg.isValid();
}
bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
    return selectExtInst(ResVReg, ResType, I, CL::log10);
  }

  // There is no log10 instruction in the GLSL Extended Instruction set, so it
  // is implemented as:
  // log10(x) = log2(x) * (1 / log2(10))
  //          = log2(x) * 0.30103

  MachineIRBuilder MIRBuilder(I);
  MachineBasicBlock &BB = *I.getParent();

  // Build log2(x).
  Register VarReg = MRI->createVirtualRegister(GR.getRegClass(ResType));
  bool Result =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
          .addDef(VarReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
          .addImm(GL::Log2)
          .add(I.getOperand(1))
          .constrainAllUses(TII, TRI, RBI);

  // Build 0.30103.
  assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
         ResType->getOpcode() == SPIRV::OpTypeFloat);
  const SPIRVType *SpirvScalarType =
      ResType->getOpcode() == SPIRV::OpTypeVector
          ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
          : ResType;
  Register ScaleReg =
      GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);

  // Multiply log2(x) by 0.30103 to get log10(x) result.
  auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
                    ? SPIRV::OpVectorTimesScalar
                    : SPIRV::OpFMulS;
  Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(VarReg)
                .addUse(ScaleReg)
                .constrainAllUses(TII, TRI, RBI);

  return Result;
}
bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // The SPIR-V counterpart of the DX thread-id intrinsic is a load from the
  // GlobalInvocationId builtin variable, followed by an extract of the
  // requested component.
  MachineIRBuilder MIRBuilder(I);
  const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
  const SPIRVType *Vec3Ty =
      GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
  const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
      Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);

  // Create new register for GlobalInvocationID builtin variable.
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::iIDRegClass);
  MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 64));
  GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Build GlobalInvocationID global variable with the necessary decorations.
  Register Variable = GR.buildGlobalVariable(
      NewRegister, PtrType,
      getLinkStringForBuiltIn(SPIRV::BuiltIn::GlobalInvocationId), nullptr,
      SPIRV::StorageClass::Input, nullptr, true, true,
      SPIRV::LinkageType::Import, MIRBuilder, false);

  // Create new register for loading value.
  Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::iIDRegClass);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLT::pointer(0, 64));
  GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());

  // Load v3uint value from the global variable.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
      .addDef(LoadedRegister)
      .addUse(GR.getSPIRVTypeID(Vec3Ty))
      .addUse(Variable);

  // Get the Thread ID index. It is expected to be a constant immediate value,
  // wrapped in a type assignment.
  assert(I.getOperand(2).isReg());
  Register ThreadIdReg = I.getOperand(2).getReg();
  SPIRVType *ConstTy = this->MRI->getVRegDef(ThreadIdReg);
  assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
         ConstTy->getOperand(1).isReg());
  Register ConstReg = ConstTy->getOperand(1).getReg();
  const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
  assert(Const && Const->getOpcode() == TargetOpcode::G_CONSTANT);
  const uint32_t ThreadId =
      Const->getOperand(1).getCImm()->getValue().getZExtValue();

  // Extract the thread ID from the loaded vector value.
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(LoadedRegister)
                 .addImm(ThreadId);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
namespace llvm {
InstructionSelector *
createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                               const SPIRVSubtarget &Subtarget,
                               const RegisterBankInfo &RBI) {
  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
}
} // namespace llvm