32#include "llvm/IR/IntrinsicsSPIRV.h"
57#define DEBUG_TYPE "spirv-isel"
60namespace CL = SPIRV::OpenCLExtInst;
61namespace GL = SPIRV::GLSLExtInst;
64 std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;
68#define GET_GLOBALISEL_PREDICATE_BITSET
69#include "SPIRVGenGlobalISel.inc"
70#undef GET_GLOBALISEL_PREDICATE_BITSET
96#define GET_GLOBALISEL_PREDICATES_DECL
97#include "SPIRVGenGlobalISel.inc"
98#undef GET_GLOBALISEL_PREDICATES_DECL
100#define GET_GLOBALISEL_TEMPORARIES_DECL
101#include "SPIRVGenGlobalISel.inc"
102#undef GET_GLOBALISEL_TEMPORARIES_DECL
119 unsigned Opcode)
const;
121 unsigned Opcode)
const;
138 unsigned NegateOpcode = 0)
const;
191 bool IsSigned)
const;
193 bool IsSigned,
unsigned Opcode)
const;
195 bool IsSigned)
const;
235 GL::GLSLExtInst GLInst)
const;
248 const SPIRVType *ResType =
nullptr)
const;
261#define GET_GLOBALISEL_IMPL
262#include "SPIRVGenGlobalISel.inc"
263#undef GET_GLOBALISEL_IMPL
269 TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
271#include
"SPIRVGenGlobalISel.inc"
274#include
"SPIRVGenGlobalISel.inc"
285 GR.setCurrentFunc(MF);
286 InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
// Fragment of SPIRVInstructionSelector::select(): validates the instruction's
// context, folds away the pre-ISel ASSIGN_TYPE pseudo, and otherwise routes
// generic opcodes to spvSelect. NOTE(review): interior lines are elided in
// this chunk; control flow between the numbered statements is incomplete here.
295  assert(
I.getParent() &&
"Instruction should be in a basic block!");
296  assert(
I.getParent()->getParent() &&
"Instruction should be in a function!");
// ASSIGN_TYPE pseudo: merge the annotated source vreg into the destination
// and delete the pseudo so no real instruction is emitted for it.
301  if (Opcode == SPIRV::ASSIGN_TYPE) {
302    Register DstReg =
I.getOperand(0).getReg();
303    Register SrcReg =
I.getOperand(1).getReg();
304    auto *
Def =
MRI->getVRegDef(SrcReg);
306    if (
MRI->getType(DstReg).isPointer())
308      bool Res = selectImpl(
I, *CoverageInfo);
// Only G_CONSTANT is expected to be left unselected here — TODO confirm
// against the elided surrounding code.
309      assert(Res ||
Def->getOpcode() == TargetOpcode::G_CONSTANT);
313    MRI->replaceRegWith(SrcReg, DstReg);
314    I.removeFromParent();
316  }
else if (
I.getNumDefs() == 1) {
323  if (
I.getNumOperands() !=
I.getNumExplicitOperands()) {
324    LLVM_DEBUG(
errs() <<
"Generic instr has unexpected implicit operands\n");
// Look up the SPIR-V type previously assigned to the def vreg; only
// G_GLOBAL_VALUE may legitimately have no type recorded yet.
330  bool HasDefs =
I.getNumDefs() > 0;
332  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) :
nullptr;
333  assert(!HasDefs || ResType ||
I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
334  if (spvSelect(ResVReg, ResType,
I)) {
336    for (
unsigned i = 0; i <
I.getNumDefs(); ++i)
338    I.removeFromParent();
// Central dispatch: maps a generic (GlobalISel) opcode to the routine that
// emits the corresponding SPIR-V instruction(s). NOTE(review): the switch
// header and several case bodies are elided in this chunk; only the visible
// case labels and calls are documented.
344bool SPIRVInstructionSelector::spvSelect(
Register ResVReg,
347  const unsigned Opcode =
I.getOpcode();
349    return selectImpl(
I, *CoverageInfo);
351  case TargetOpcode::G_CONSTANT:
352    return selectConst(ResVReg, ResType,
I.getOperand(1).getCImm()->getValue(),
354  case TargetOpcode::G_GLOBAL_VALUE:
355    return selectGlobalValue(ResVReg,
I);
356  case TargetOpcode::G_IMPLICIT_DEF:
357    return selectOpUndef(ResVReg, ResType,
I);
358  case TargetOpcode::G_FREEZE:
359    return selectFreeze(ResVReg, ResType,
I);
361  case TargetOpcode::G_INTRINSIC:
362  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
363  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
364    return selectIntrinsic(ResVReg, ResType,
I);
365  case TargetOpcode::G_BITREVERSE:
366    return selectBitreverse(ResVReg, ResType,
I);
368  case TargetOpcode::G_BUILD_VECTOR:
369    return selectConstVector(ResVReg, ResType,
I);
370  case TargetOpcode::G_SPLAT_VECTOR:
371    return selectSplatVector(ResVReg, ResType,
I);
// Shuffle is emitted inline as OpVectorShuffle with the mask appended as
// literal indices (loop body elided here).
373  case TargetOpcode::G_SHUFFLE_VECTOR: {
375    auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorShuffle))
377                   .
addUse(GR.getSPIRVTypeID(ResType))
378                   .
addUse(
I.getOperand(1).getReg())
379                   .
addUse(
I.getOperand(2).getReg());
380    for (
auto V :
I.getOperand(3).getShuffleMask())
384  case TargetOpcode::G_MEMMOVE:
385  case TargetOpcode::G_MEMCPY:
386  case TargetOpcode::G_MEMSET:
387    return selectMemOperation(ResVReg,
I);
389  case TargetOpcode::G_ICMP:
390    return selectICmp(ResVReg, ResType,
I);
391  case TargetOpcode::G_FCMP:
392    return selectFCmp(ResVReg, ResType,
I);
394  case TargetOpcode::G_FRAME_INDEX:
395    return selectFrameIndex(ResVReg, ResType,
I);
397  case TargetOpcode::G_LOAD:
398    return selectLoad(ResVReg, ResType,
I);
399  case TargetOpcode::G_STORE:
400    return selectStore(
I);
402  case TargetOpcode::G_BR:
403    return selectBranch(
I);
404  case TargetOpcode::G_BRCOND:
405    return selectBranchCond(
I);
407  case TargetOpcode::G_PHI:
408    return selectPhi(ResVReg, ResType,
I);
410  case TargetOpcode::G_FPTOSI:
411    return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToS);
412  case TargetOpcode::G_FPTOUI:
413    return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertFToU);
415  case TargetOpcode::G_SITOFP:
416    return selectIToF(ResVReg, ResType,
I,
true, SPIRV::OpConvertSToF);
417  case TargetOpcode::G_UITOFP:
418    return selectIToF(ResVReg, ResType,
I,
false, SPIRV::OpConvertUToF);
420  case TargetOpcode::G_CTPOP:
421    return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitCount);
// Math/intrinsic-like opcodes below lower via extended instruction sets:
// an OpenCL.std opcode, optionally paired with a GLSL.std.450 equivalent.
422  case TargetOpcode::G_SMIN:
423    return selectExtInst(ResVReg, ResType,
I, CL::s_min, GL::SMin);
424  case TargetOpcode::G_UMIN:
425    return selectExtInst(ResVReg, ResType,
I, CL::u_min, GL::UMin);
427  case TargetOpcode::G_SMAX:
428    return selectExtInst(ResVReg, ResType,
I, CL::s_max, GL::SMax);
429  case TargetOpcode::G_UMAX:
430    return selectExtInst(ResVReg, ResType,
I, CL::u_max, GL::UMax);
432  case TargetOpcode::G_FMA:
433    return selectExtInst(ResVReg, ResType,
I, CL::fma, GL::Fma);
435  case TargetOpcode::G_FPOW:
436    return selectExtInst(ResVReg, ResType,
I, CL::pow, GL::Pow);
437  case TargetOpcode::G_FPOWI:
438    return selectExtInst(ResVReg, ResType,
I, CL::pown);
440  case TargetOpcode::G_FEXP:
441    return selectExtInst(ResVReg, ResType,
I, CL::exp, GL::Exp);
442  case TargetOpcode::G_FEXP2:
443    return selectExtInst(ResVReg, ResType,
I, CL::exp2, GL::Exp2);
445  case TargetOpcode::G_FLOG:
446    return selectExtInst(ResVReg, ResType,
I, CL::log, GL::Log);
447  case TargetOpcode::G_FLOG2:
448    return selectExtInst(ResVReg, ResType,
I, CL::log2, GL::Log2);
449  case TargetOpcode::G_FLOG10:
450    return selectLog10(ResVReg, ResType,
I);
452  case TargetOpcode::G_FABS:
453    return selectExtInst(ResVReg, ResType,
I, CL::fabs, GL::FAbs);
454  case TargetOpcode::G_ABS:
455    return selectExtInst(ResVReg, ResType,
I, CL::s_abs, GL::SAbs);
457  case TargetOpcode::G_FMINNUM:
458  case TargetOpcode::G_FMINIMUM:
459    return selectExtInst(ResVReg, ResType,
I, CL::fmin, GL::NMin);
460  case TargetOpcode::G_FMAXNUM:
461  case TargetOpcode::G_FMAXIMUM:
462    return selectExtInst(ResVReg, ResType,
I, CL::fmax, GL::NMax);
464  case TargetOpcode::G_FCOPYSIGN:
465    return selectExtInst(ResVReg, ResType,
I, CL::copysign);
467  case TargetOpcode::G_FCEIL:
468    return selectExtInst(ResVReg, ResType,
I, CL::ceil, GL::Ceil);
469  case TargetOpcode::G_FFLOOR:
470    return selectExtInst(ResVReg, ResType,
I, CL::floor, GL::Floor);
472  case TargetOpcode::G_FCOS:
473    return selectExtInst(ResVReg, ResType,
I, CL::cos, GL::Cos);
474  case TargetOpcode::G_FSIN:
475    return selectExtInst(ResVReg, ResType,
I, CL::sin, GL::Sin);
476  case TargetOpcode::G_FTAN:
477    return selectExtInst(ResVReg, ResType,
I, CL::tan, GL::Tan);
478  case TargetOpcode::G_FACOS:
479    return selectExtInst(ResVReg, ResType,
I, CL::acos, GL::Acos);
480  case TargetOpcode::G_FASIN:
481    return selectExtInst(ResVReg, ResType,
I, CL::asin, GL::Asin);
482  case TargetOpcode::G_FATAN:
483    return selectExtInst(ResVReg, ResType,
I, CL::atan, GL::Atan);
484  case TargetOpcode::G_FCOSH:
485    return selectExtInst(ResVReg, ResType,
I, CL::cosh, GL::Cosh);
486  case TargetOpcode::G_FSINH:
487    return selectExtInst(ResVReg, ResType,
I, CL::sinh, GL::Sinh);
488  case TargetOpcode::G_FTANH:
489    return selectExtInst(ResVReg, ResType,
I, CL::tanh, GL::Tanh);
491  case TargetOpcode::G_FSQRT:
492    return selectExtInst(ResVReg, ResType,
I, CL::sqrt, GL::Sqrt);
// cttz/ctlz have OpenCL.std opcodes only — no GLSL equivalent is passed.
494  case TargetOpcode::G_CTTZ:
495  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
496    return selectExtInst(ResVReg, ResType,
I, CL::ctz);
497  case TargetOpcode::G_CTLZ:
498  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
499    return selectExtInst(ResVReg, ResType,
I, CL::clz);
501  case TargetOpcode::G_INTRINSIC_ROUND:
502    return selectExtInst(ResVReg, ResType,
I, CL::round, GL::Round);
503  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
504    return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
505  case TargetOpcode::G_INTRINSIC_TRUNC:
506    return selectExtInst(ResVReg, ResType,
I, CL::trunc, GL::Trunc);
// frint/fnearbyint reuse round-to-even — TODO confirm this matches the
// upstream comment for these opcodes.
507  case TargetOpcode::G_FRINT:
508  case TargetOpcode::G_FNEARBYINT:
509    return selectExtInst(ResVReg, ResType,
I, CL::rint, GL::RoundEven);
511  case TargetOpcode::G_SMULH:
512    return selectExtInst(ResVReg, ResType,
I, CL::s_mul_hi);
513  case TargetOpcode::G_UMULH:
514    return selectExtInst(ResVReg, ResType,
I, CL::u_mul_hi);
516  case TargetOpcode::G_SADDSAT:
517    return selectExtInst(ResVReg, ResType,
I, CL::s_add_sat);
518  case TargetOpcode::G_UADDSAT:
519    return selectExtInst(ResVReg, ResType,
I, CL::u_add_sat);
520  case TargetOpcode::G_SSUBSAT:
521    return selectExtInst(ResVReg, ResType,
I, CL::s_sub_sat);
522  case TargetOpcode::G_USUBSAT:
523    return selectExtInst(ResVReg, ResType,
I, CL::u_sub_sat);
525  case TargetOpcode::G_SEXT:
526    return selectExt(ResVReg, ResType,
I,
true);
527  case TargetOpcode::G_ANYEXT:
528  case TargetOpcode::G_ZEXT:
529    return selectExt(ResVReg, ResType,
I,
false);
530  case TargetOpcode::G_TRUNC:
531    return selectTrunc(ResVReg, ResType,
I);
532  case TargetOpcode::G_FPTRUNC:
533  case TargetOpcode::G_FPEXT:
534    return selectUnOp(ResVReg, ResType,
I, SPIRV::OpFConvert);
536  case TargetOpcode::G_PTRTOINT:
537    return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertPtrToU);
538  case TargetOpcode::G_INTTOPTR:
539    return selectUnOp(ResVReg, ResType,
I, SPIRV::OpConvertUToPtr);
540  case TargetOpcode::G_BITCAST:
541    return selectBitcast(ResVReg, ResType,
I);
542  case TargetOpcode::G_ADDRSPACE_CAST:
543    return selectAddrSpaceCast(ResVReg, ResType,
I);
// G_PTR_ADD on a global/variable base is folded into an OpSpecConstantOp
// InBoundsPtrAccessChain with a zero leading index (body partially elided).
544  case TargetOpcode::G_PTR_ADD: {
549    assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
553    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
554            (*II).getOpcode() == TargetOpcode::COPY ||
555            (*II).getOpcode() == SPIRV::OpVariable) &&
557    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32,
I,
TII),
I);
559    auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
561                   .
addUse(GR.getSPIRVTypeID(ResType))
563                       SPIRV::Opcode::InBoundsPtrAccessChain))
566                   .
addUse(
I.getOperand(2).getReg());
570  case TargetOpcode::G_ATOMICRMW_OR:
571    return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicOr);
572  case TargetOpcode::G_ATOMICRMW_ADD:
573    return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicIAdd);
574  case TargetOpcode::G_ATOMICRMW_AND:
575    return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicAnd);
576  case TargetOpcode::G_ATOMICRMW_MAX:
577    return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMax);
578  case TargetOpcode::G_ATOMICRMW_MIN:
579    return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicSMin);
580  case TargetOpcode::G_ATOMICRMW_SUB:
581    return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicISub);
582  case TargetOpcode::G_ATOMICRMW_XOR:
583    return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicXor);
584  case TargetOpcode::G_ATOMICRMW_UMAX:
585    return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMax);
586  case TargetOpcode::G_ATOMICRMW_UMIN:
587    return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicUMin);
588  case TargetOpcode::G_ATOMICRMW_XCHG:
589    return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicExchange);
590  case TargetOpcode::G_ATOMIC_CMPXCHG:
591    return selectAtomicCmpXchg(ResVReg, ResType,
I);
593  case TargetOpcode::G_ATOMICRMW_FADD:
594    return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT);
// FSUB is emitted as FAddEXT; presumably the value operand is negated via
// the elided extra argument — TODO confirm against the full source.
595  case TargetOpcode::G_ATOMICRMW_FSUB:
597    return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFAddEXT,
599  case TargetOpcode::G_ATOMICRMW_FMIN:
600    return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMinEXT);
601  case TargetOpcode::G_ATOMICRMW_FMAX:
602    return selectAtomicRMW(ResVReg, ResType,
I, SPIRV::OpAtomicFMaxEXT);
604  case TargetOpcode::G_FENCE:
605    return selectFence(
I);
607  case TargetOpcode::G_STACKSAVE:
608    return selectStackSave(ResVReg, ResType,
I);
609  case TargetOpcode::G_STACKRESTORE:
610    return selectStackRestore(
I);
612  case TargetOpcode::G_UNMERGE_VALUES:
// Convenience overload: lower via the OpenCL.std extended set only (no GLSL
// equivalent exists for this instruction).
620bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
623                                             CL::OpenCLExtInst CLInst)
const {
624  return selectExtInst(ResVReg, ResType,
I,
625                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
// Convenience overload: provide both an OpenCL.std and a GLSL.std.450
// candidate; the list overload picks whichever set the subtarget supports.
628bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
631                                             CL::OpenCLExtInst CLInst,
632                                             GL::GLSLExtInst GLInst)
const {
633  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
634                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
635  return selectExtInst(ResVReg, ResType,
I, ExtInsts);
// Core ext-inst lowering: walk the candidate (instruction set, opcode) pairs
// and emit OpExtInst with the first set the subtarget can use, forwarding the
// original instruction's operands (starting at index 1).
638bool SPIRVInstructionSelector::selectExtInst(
Register ResVReg,
643  for (
const auto &Ex : Insts) {
644    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
646    if (STI.canUseExtInstSet(Set)) {
648      auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
650                     .
addUse(GR.getSPIRVTypeID(ResType))
// Forward all source operands of I as OpExtInst operands.
653      const unsigned NumOps =
I.getNumOperands();
654      for (
unsigned i = 1; i < NumOps; ++i)
655        MIB.add(
I.getOperand(i));
656      return MIB.constrainAllUses(
TII,
TRI, RBI);
// Emit a single unary SPIR-V instruction `Opcode` producing ResVReg of
// ResType from an explicitly supplied source register (remaining operand
// lines are elided in this chunk).
662bool SPIRVInstructionSelector::selectUnOpWithSrc(
Register ResVReg,
666                                                 unsigned Opcode)
const {
667  return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
669      .
addUse(GR.getSPIRVTypeID(ResType))
// Select a unary op. On the OpenCL path, if the source is (transitively) a
// global value, the pointer<->integer conversions are emitted as
// OpSpecConstantOp so they remain constant expressions.
674bool SPIRVInstructionSelector::selectUnOp(
Register ResVReg,
677                                          unsigned Opcode)
const {
678  if (STI.isOpenCLEnv() &&
I.getOperand(1).isReg()) {
679    Register SrcReg =
I.getOperand(1).getReg();
// Scan the defs of the source vreg for a G_GLOBAL_VALUE definition.
682             MRI->def_instr_begin(SrcReg);
683         DefIt !=
MRI->def_instr_end(); DefIt = std::next(DefIt)) {
684      if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
// Map the runtime conversion opcode to its spec-constant-op equivalent.
692    case SPIRV::OpConvertPtrToU:
693      SpecOpcode =
static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
695    case SPIRV::OpConvertUToPtr:
696      SpecOpcode =
static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
700      return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
701                     TII.get(SPIRV::OpSpecConstantOp))
703          .
addUse(GR.getSPIRVTypeID(ResType))
// Default path: plain unary instruction on the first source operand.
709  return selectUnOpWithSrc(ResVReg, ResType,
I,
I.getOperand(1).getReg(),
// G_BITCAST -> OpBitcast, but only when the registry deems the two SPIR-V
// types bitcast-compatible (the failure branch is elided in this chunk).
713bool SPIRVInstructionSelector::selectBitcast(
Register ResVReg,
716  Register OpReg =
I.getOperand(1).getReg();
717  SPIRVType *OpType = OpReg.
isValid() ? GR.getSPIRVTypeForVReg(OpReg) :
nullptr;
718  if (!GR.isBitcastCompatible(ResType, OpType))
720  return selectUnOp(ResVReg, ResType,
I, SPIRV::OpBitcast);
// Helper fragments: map LLVM sync scopes to SPIR-V scopes, and translate
// MachineMemOperand flags into SPIR-V MemoryOperand bitmasks. The switch
// headers and function signatures are elided in this chunk; only the return
// values / flag mappings are visible.
726    return SPIRV::Scope::Invocation;
728    return SPIRV::Scope::Device;
730    return SPIRV::Scope::Workgroup;
732    return SPIRV::Scope::CrossDevice;
734    return SPIRV::Scope::Subgroup;
// Fallback scope — presumably for unrecognized sync-scope names; confirm
// against the elided surrounding code.
743  return SPIRV::Scope::Device;
// Build a MemoryOperand mask from a MachineMemOperand.
749  if (
MemOp->isVolatile())
750    SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
751  if (
MemOp->isNonTemporal())
752    SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
753  if (
MemOp->getAlign().value())
754    SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);
// Only append the operand mask (and alignment literal) when non-empty.
756  if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
758    if (SpvMemOp &
static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
// Overload taking raw MachineMemOperand::Flags (no alignment available).
765  if (Flags & MachineMemOperand::Flags::MOVolatile)
766    SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
767  if (Flags & MachineMemOperand::Flags::MONonTemporal)
768    SpvMemOp |=
static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
770  if (SpvMemOp !=
static_cast<uint32_t>(SPIRV::MemoryOperand::None))
// G_LOAD (or spv load intrinsic) -> OpLoad. For the intrinsic form the
// pointer operand is shifted by one (the intrinsic ID occupies operand 1).
774bool SPIRVInstructionSelector::selectLoad(
Register ResVReg,
777  unsigned OpOffset = isa<GIntrinsic>(
I) ? 1 : 0;
779  auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
781                 .
addUse(GR.getSPIRVTypeID(ResType))
// Only intrinsic-originated loads may legitimately lack a memoperand.
783  if (!
I.getNumMemOperands()) {
784    assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
786               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
791  return MIB.constrainAllUses(
TII,
TRI, RBI);
// G_STORE (or spv store intrinsic) -> OpStore; mirrors selectLoad's operand
// offsetting and memoperand handling.
794bool SPIRVInstructionSelector::selectStore(
MachineInstr &
I)
const {
795  unsigned OpOffset = isa<GIntrinsic>(
I) ? 1 : 0;
796  Register StoreVal =
I.getOperand(0 + OpOffset).getReg();
799  auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpStore))
802  if (!
I.getNumMemOperands()) {
803    assert(
I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
805               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
810  return MIB.constrainAllUses(
TII,
TRI, RBI);
// llvm.stacksave -> OpSaveMemoryINTEL; requires the
// SPV_INTEL_variable_length_array extension (error path partially elided).
813bool SPIRVInstructionSelector::selectStackSave(
Register ResVReg,
816  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
818        "llvm.stacksave intrinsic: this instruction requires the following "
819        "SPIR-V extension: SPV_INTEL_variable_length_array",
822  return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSaveMemoryINTEL))
824      .
addUse(GR.getSPIRVTypeID(ResType))
// llvm.stackrestore -> OpRestoreMemoryINTEL; same extension requirement as
// selectStackSave.
828bool SPIRVInstructionSelector::selectStackRestore(
MachineInstr &
I)
const {
829  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
831        "llvm.stackrestore intrinsic: this instruction requires the following "
832        "SPIR-V extension: SPV_INTEL_variable_length_array",
834  if (!
I.getOperand(0).isReg())
837  return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpRestoreMemoryINTEL))
838      .
addUse(
I.getOperand(0).getReg())
// G_MEMCPY/G_MEMMOVE/G_MEMSET -> OpCopyMemorySized. For G_MEMSET the fill
// value is materialized as a UniformConstant global array of i8 which is then
// bitcast to an i8* source pointer (several construction lines are elided).
842bool SPIRVInstructionSelector::selectMemOperation(
Register ResVReg,
845  Register SrcReg =
I.getOperand(1).getReg();
846  if (
I.getOpcode() == TargetOpcode::G_MEMSET) {
847    assert(
I.getOperand(1).isReg() &&
I.getOperand(2).isReg());
// Types for the constant fill buffer: [Num x i8] in UniformConstant space.
850    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8,
I,
TII);
851    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num,
I,
TII);
853    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
854        ArrTy,
I,
TII, SPIRV::StorageClass::UniformConstant);
// Register the synthesized global with the registry before emitting it.
864    GR.add(GV, GR.CurMF, VarReg);
867    BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVariable))
869        .
addUse(GR.getSPIRVTypeID(VarTy))
870        .
addImm(SPIRV::StorageClass::UniformConstant)
873    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
874        ValTy,
I,
TII, SPIRV::StorageClass::UniformConstant);
// Reinterpret the array variable as an i8* for the sized copy below.
876    selectUnOpWithSrc(SrcReg, SourceTy,
I, VarReg, SPIRV::OpBitcast);
878  auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCopyMemorySized))
879                 .
addUse(
I.getOperand(0).getReg())
881                 .
addUse(
I.getOperand(2).getReg());
882  if (
I.getNumMemOperands())
// If the caller expects a result register, forward the destination pointer.
885  if (ResVReg.
isValid() && ResVReg != MIB->getOperand(0).getReg())
886    BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(TargetOpcode::COPY), ResVReg)
887        .
addUse(MIB->getOperand(0).getReg());
// Emit a SPIR-V atomic RMW op with scope and memory-semantics constants.
// If NegateOpcode is nonzero the value operand is first negated (used to
// express e.g. fsub via fadd).
891bool SPIRVInstructionSelector::selectAtomicRMW(
Register ResVReg,
895                                               unsigned NegateOpcode)
const {
900  Register ScopeReg = buildI32Constant(Scope,
I);
908  Register MemSemReg = buildI32Constant(MemSem ,
I);
911  Register ValueReg =
I.getOperand(2).getReg();
912  if (NegateOpcode != 0) {
// Negate into a temp vreg and use that as the atomic's value operand.
914    Register TmpReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
915    Result |= selectUnOpWithSrc(TmpReg, ResType,
I, ValueReg, NegateOpcode);
921                .
addUse(GR.getSPIRVTypeID(ResType))
// G_UNMERGE_VALUES on a vector source: extract each scalar element with
// OpCompositeExtract into the corresponding def vreg, assigning the scalar
// SPIR-V type where one was not already recorded.
930bool SPIRVInstructionSelector::selectUnmergeValues(
MachineInstr &
I)
const {
// The source vector is the last operand of the instruction.
931  unsigned ArgI =
I.getNumOperands() - 1;
933      I.getOperand(ArgI).isReg() ?
I.getOperand(ArgI).getReg() :
Register(0);
935      SrcReg.
isValid() ? GR.getSPIRVTypeForVReg(SrcReg) :
nullptr;
936  if (!DefType || DefType->
getOpcode() != SPIRV::OpTypeVector)
938        "cannot select G_UNMERGE_VALUES with a non-vector argument");
944  for (
unsigned i = 0; i <
I.getNumDefs(); ++i) {
945    Register ResVReg =
I.getOperand(i).getReg();
946    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
// No type recorded yet for this def: use the vector's scalar type and
// register it with the global registry.
949      ResType = ScalarType;
950      MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
951      MRI->setType(ResVReg,
LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
952      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
955        BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
957            .
addUse(GR.getSPIRVTypeID(ResType))
959            .
addImm(
static_cast<int64_t
>(i));
960    Res |= MIB.constrainAllUses(
TII,
TRI, RBI);
// G_FENCE -> OpMemoryBarrier with scope and memory-semantics constants
// derived from the fence's ordering/scope (derivation lines elided).
965bool SPIRVInstructionSelector::selectFence(
MachineInstr &
I)
const {
968  Register MemSemReg = buildI32Constant(MemSem,
I);
971  Register ScopeReg = buildI32Constant(Scope,
I);
973  return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpMemoryBarrier))
// G_ATOMIC_CMPXCHG -> OpAtomicCompareExchange plus instructions rebuilding
// LLVM's {value, success} result pair. For the generic opcode the scope and
// semantics are synthesized as constants; the spv intrinsic form passes them
// as explicit operands 5-7.
979bool SPIRVInstructionSelector::selectAtomicCmpXchg(
Register ResVReg,
986  if (!isa<GIntrinsic>(
I)) {
991    ScopeReg = buildI32Constant(Scope,
I);
993    unsigned ScSem =
static_cast<uint32_t>(
997    MemSemEqReg = buildI32Constant(MemSemEq,
I);
// Reuse the "equal" semantics constant when both orderings agree.
1001        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq,
I);
1003    ScopeReg =
I.getOperand(5).getReg();
1004    MemSemEqReg =
I.getOperand(6).getReg();
1005    MemSemNeqReg =
I.getOperand(7).getReg();
1009  Register Val =
I.getOperand(4).getReg();
1010  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
1011  Register ACmpRes =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1014      BuildMI(*
I.getParent(),
I,
DL,
TII.get(SPIRV::OpAtomicCompareExchange))
1016          .
addUse(GR.getSPIRVTypeID(SpvValTy))
// Compare the returned value against the expected one to produce the
// boolean "success" component of the result pair.
1024  Register CmpSuccReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1028                 .
addUse(GR.getSPIRVTypeID(BoolTy))
// Insert value and success flag into an undef struct to form the pair.
1032  Register TmpReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1035                 .
addUse(GR.getSPIRVTypeID(ResType))
1037                 .
addUse(GR.getOrCreateUndef(
I, ResType,
TII))
1042                 .
addUse(GR.getSPIRVTypeID(ResType))
// Fragment of a storage-class predicate switch — presumably classifying
// which storage classes can cast to/from Generic; signature elided, so the
// exact predicate cannot be confirmed from this chunk.
1052  case SPIRV::StorageClass::Workgroup:
1053  case SPIRV::StorageClass::CrossWorkgroup:
1054  case SPIRV::StorageClass::Function:
1063  case SPIRV::StorageClass::DeviceOnlyINTEL:
1064  case SPIRV::StorageClass::HostOnlyINTEL:
// G_ADDRSPACE_CAST lowering. Special cases, in order: a cast consumed only
// by a constant-composite/variable becomes an OpSpecConstantOp; casts between
// identical storage classes become a COPY; to/from Generic use
// OpPtrCastToGeneric/OpGenericCastToPtr (possibly chained through a Generic
// temporary); Intel cross-workgroup extensions handle the remaining pairs.
1076bool SPIRVInstructionSelector::selectAddrSpaceCast(
Register ResVReg,
// Case 1: result has exactly one user and it is a constant-composite or
// variable — fold the cast into a spec constant op.
1081  auto UIs =
MRI->use_instructions(ResVReg);
1082  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
1083      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
1084       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
1086    Register NewReg =
I.getOperand(1).getReg();
1088    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8,
I,
TII);
1089    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy,
I,
TII,
1090                                             SPIRV::StorageClass::Generic);
1092        BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSpecConstantOp))
1094            .
addUse(GR.getSPIRVTypeID(ResType))
// General path: decide by source/destination storage classes.
1100  Register SrcPtr =
I.getOperand(1).getReg();
1101  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
1102  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
1103  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);
// Same storage class: the cast is a no-op COPY.
1107    return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1108                   TII.get(TargetOpcode::COPY))
1115    return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1118    return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
// Neither side is Generic: go through a Generic-typed temporary.
1121    Register Tmp =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1122    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
1123        GR.getPointeeType(SrcPtrTy),
I,
TII, SPIRV::StorageClass::Generic);
1128                   .
addUse(GR.getSPIRVTypeID(GenericPtrTy))
1133        .
addUse(GR.getSPIRVTypeID(ResType))
// Intel SPV_INTEL_usm_storage_classes cross-workgroup cast opcodes.
1141    return selectUnOp(ResVReg, ResType,
I,
1142                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
1144    return selectUnOp(ResVReg, ResType,
I,
1145                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);
1147    return selectUnOp(ResVReg, ResType,
I, SPIRV::OpPtrCastToGeneric);
1149    return selectUnOp(ResVReg, ResType,
I, SPIRV::OpGenericCastToPtr);
// Predicate-to-opcode mapping helpers (switch headers elided): float
// ordered/unordered compares, integer signed/unsigned compares, pointer
// equality compares, and boolean (logical) compares.
1159    return SPIRV::OpFOrdEqual;
1161    return SPIRV::OpFOrdGreaterThanEqual;
1163    return SPIRV::OpFOrdGreaterThan;
1165    return SPIRV::OpFOrdLessThanEqual;
1167    return SPIRV::OpFOrdLessThan;
1169    return SPIRV::OpFOrdNotEqual;
1171    return SPIRV::OpOrdered;
1173    return SPIRV::OpFUnordEqual;
1175    return SPIRV::OpFUnordGreaterThanEqual;
1177    return SPIRV::OpFUnordGreaterThan;
1179    return SPIRV::OpFUnordLessThanEqual;
1181    return SPIRV::OpFUnordLessThan;
1183    return SPIRV::OpFUnordNotEqual;
1185    return SPIRV::OpUnordered;
// Integer predicates.
1195    return SPIRV::OpIEqual;
1197    return SPIRV::OpINotEqual;
1199    return SPIRV::OpSGreaterThanEqual;
1201    return SPIRV::OpSGreaterThan;
1203    return SPIRV::OpSLessThanEqual;
1205    return SPIRV::OpSLessThan;
1207    return SPIRV::OpUGreaterThanEqual;
1209    return SPIRV::OpUGreaterThan;
1211    return SPIRV::OpULessThanEqual;
1213    return SPIRV::OpULessThan;
// Pointer predicates (eq/ne only).
1222    return SPIRV::OpPtrEqual;
1224    return SPIRV::OpPtrNotEqual;
// Boolean predicates (eq/ne only).
1235    return SPIRV::OpLogicalEqual;
1237    return SPIRV::OpLogicalNotEqual;
// Shared lowering for spv any()/all(): a scalar bool input is just copied;
// otherwise the input is first compared != zero (FOrdNotEqual for floats,
// INotEqual for ints) to get a bool vector, then OpAny/OpAll reduces it.
1243bool SPIRVInstructionSelector::selectAnyOrAll(
Register ResVReg,
1246                                              unsigned OpAnyOrAll)
const {
1247  assert(
I.getNumOperands() == 3);
1248  assert(
I.getOperand(2).isReg());
1250  Register InputRegister =
I.getOperand(2).getReg();
1251  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);
1256  bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
1257  bool IsVectorTy = InputType->
getOpcode() == SPIRV::OpTypeVector;
// Scalar bool: any/all of a single bool is the bool itself.
1258  if (IsBoolTy && !IsVectorTy) {
1259    assert(ResVReg ==
I.getOperand(0).getReg());
1260    return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1261                   TII.get(TargetOpcode::COPY))
1267  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
1268  unsigned SpirvNotEqualId =
1269      IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
1270  SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(
I,
TII);
// Bool inputs skip the != 0 comparison; others get a fresh vreg for it.
1275  NotEqualReg = IsBoolTy ? InputRegister
1276                         :
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1278    SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts,
I,
TII);
1283        IsFloatTy ? buildZerosValF(InputType,
I) : buildZerosVal(InputType,
I);
1285        BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SpirvNotEqualId))
1287            .
addUse(GR.getSPIRVTypeID(SpvBoolTy))
// Reduce the bool vector with OpAny or OpAll.
1296  return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(OpAnyOrAll))
1298      .
addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
// spv all() -> OpAll via the shared any/all helper.
1303bool SPIRVInstructionSelector::selectAll(
Register ResVReg,
1306  return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAll);
// spv any() -> OpAny via the shared any/all helper.
1309bool SPIRVInstructionSelector::selectAny(
Register ResVReg,
1312  return selectAnyOrAll(ResVReg, ResType,
I, SPIRV::OpAny);
// HLSL lerp-style intrinsic -> GLSL.std.450 ext-inst (FMix; the opcode
// literal line is elided here) with the three source operands forwarded.
1315bool SPIRVInstructionSelector::selectFmix(
Register ResVReg,
1319  assert(
I.getNumOperands() == 5);
1320  assert(
I.getOperand(2).isReg());
1321  assert(
I.getOperand(3).isReg());
1322  assert(
I.getOperand(4).isReg());
1325  return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1327      .
addUse(GR.getSPIRVTypeID(ResType))
1328      .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
1330      .
addUse(
I.getOperand(2).getReg())
1331      .
addUse(
I.getOperand(3).getReg())
1332      .
addUse(
I.getOperand(4).getReg())
// frac intrinsic -> single-operand GLSL.std.450 ext-inst (opcode literal
// line elided in this chunk).
1336bool SPIRVInstructionSelector::selectFrac(
Register ResVReg,
1340  assert(
I.getNumOperands() == 3);
1341  assert(
I.getOperand(2).isReg());
1344  return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1346      .
addUse(GR.getSPIRVTypeID(ResType))
1347      .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
1349      .
addUse(
I.getOperand(2).getReg())
// rsqrt intrinsic -> single-operand GLSL.std.450 ext-inst (opcode literal
// line elided in this chunk).
1353bool SPIRVInstructionSelector::selectRsqrt(
Register ResVReg,
1357  assert(
I.getNumOperands() == 3);
1358  assert(
I.getOperand(2).isReg());
1361  return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
1363      .
addUse(GR.getSPIRVTypeID(ResType))
1364      .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
1366      .
addUse(
I.getOperand(2).getReg())
// G_BITREVERSE -> OpBitReverse on the single source operand.
1370bool SPIRVInstructionSelector::selectBitreverse(
Register ResVReg,
1374  return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBitReverse))
1376      .
addUse(GR.getSPIRVTypeID(ResType))
1377      .
addUse(
I.getOperand(1).getReg())
// G_FREEZE: if the operand is (via ASSIGN_TYPE) an implicit-def or OpUndef,
// materialize a defined value with OpConstantNull; otherwise freeze is a
// plain COPY of the already-defined operand.
1381bool SPIRVInstructionSelector::selectFreeze(
Register ResVReg,
1389  if (!
I.getOperand(0).isReg() || !
I.getOperand(1).isReg())
1391  Register OpReg =
I.getOperand(1).getReg();
1394  switch (
Def->getOpcode()) {
// ASSIGN_TYPE wrapping a G_IMPLICIT_DEF: take the type operand's register.
1395  case SPIRV::ASSIGN_TYPE:
1397            MRI->getVRegDef(
Def->getOperand(1).getReg())) {
1398      if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
1399        Reg =
Def->getOperand(2).getReg();
1402  case SPIRV::OpUndef:
1403    Reg =
Def->getOperand(1).getReg();
// Reg valid => operand was undef-like: emit OpConstantNull; else COPY.
1406  unsigned DestOpCode;
1407  if (
Reg.isValid()) {
1408    DestOpCode = SPIRV::OpConstantNull;
1410    DestOpCode = TargetOpcode::COPY;
1413  return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(DestOpCode))
1414      .
addDef(
I.getOperand(0).getReg())
// G_BUILD_VECTOR with all-constant operands -> OpConstantComposite. The
// lambda fragment below verifies each operand resolves (through ASSIGN_TYPE)
// to a G_CONSTANT/G_FCONSTANT. Trailing fragments belong to the const-reg
// detection helpers used by splat selection (signatures elided).
1421bool SPIRVInstructionSelector::selectConstVector(
Register ResVReg,
// Each operand must be an ASSIGN_TYPE wrapping a (f)constant.
1431    SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
1432    assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
1433           ConstTy->getOperand(1).isReg());
1434    Register ConstReg = ConstTy->getOperand(1).getReg();
1435    const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
1437    return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
1438            Const->getOpcode() == TargetOpcode::G_FCONSTANT);
1441  auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1442                     TII.get(SPIRV::OpConstantComposite))
1444                 .
addUse(GR.getSPIRVTypeID(ResType));
1445  for (
unsigned i =
I.getNumExplicitDefs(); i <
I.getNumExplicitOperands(); ++i)
1446    MIB.
addUse(
I.getOperand(i).getReg());
// --- Fragments of constant-detection helpers below (headers elided) ---
1456  if (OpDef->
getOpcode() == SPIRV::ASSIGN_TYPE &&
1461  unsigned N = OpDef->
getOpcode() == TargetOpcode::G_CONSTANT
1470  if (OpDef->
getOpcode() == SPIRV::ASSIGN_TYPE &&
// Opcodes treated as constant-producing, including the spv const-composite
// intrinsic and nested build/splat vectors.
1482  case TargetOpcode::G_CONSTANT:
1483  case TargetOpcode::G_FCONSTANT:
1485  case TargetOpcode::G_INTRINSIC:
1486  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1487  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
1488    return cast<GIntrinsic>(*OpDef).getIntrinsicID() ==
1489           Intrinsic::spv_const_composite;
1490  case TargetOpcode::G_BUILD_VECTOR:
1491  case TargetOpcode::G_SPLAT_VECTOR: {
// G_SPLAT_VECTOR: replicate the single scalar operand N times into an
// OpConstantComposite (constant splat) or OpCompositeConstruct (otherwise),
// where N is the vector/array element count of the result type.
1515bool SPIRVInstructionSelector::selectSplatVector(
Register ResVReg,
1519  if (ResType->
getOpcode() == SPIRV::OpTypeVector)
1520    N = GR.getScalarOrVectorComponentCount(ResType);
1521  else if (ResType->
getOpcode() == SPIRV::OpTypeArray)
1526  unsigned OpIdx =
I.getNumExplicitDefs();
1527  if (!
I.getOperand(OpIdx).isReg())
1531  Register OpReg =
I.getOperand(OpIdx).getReg();
// A composite needs at least two constituents per the SPIR-V spec.
1534  if (!IsConst &&
N < 2)
1536        "There must be at least two constituent operands in a vector");
1538  auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1539                     TII.get(IsConst ? SPIRV::OpConstantComposite
1540                                     : SPIRV::OpCompositeConstruct))
1542                 .
addUse(GR.getSPIRVTypeID(ResType));
1543  for (
unsigned i = 0; i <
N; ++i)
// Emit the compare instruction CmpOpc over the two compare operands; both
// operands must already carry the same SPIR-V type.
1548bool SPIRVInstructionSelector::selectCmp(
Register ResVReg,
1552  Register Cmp0 =
I.getOperand(2).getReg();
1553  Register Cmp1 =
I.getOperand(3).getReg();
1554  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
1555             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
1556         "CMP operands should have the same type");
1557  return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(CmpOpc))
1559      .
addUse(GR.getSPIRVTypeID(ResType))
// G_ICMP: pick pointer, boolean, or integer compare opcodes based on the
// operand's SPIR-V type, then defer to selectCmp.
1565bool SPIRVInstructionSelector::selectICmp(
Register ResVReg,
1568  auto Pred =
I.getOperand(1).getPredicate();
1571  Register CmpOperand =
I.getOperand(2).getReg();
1572  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
1574  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
1578  return selectCmp(ResVReg, ResType, CmpOpc,
I);
// TableGen "render" callbacks (headers elided): append a G_FCONSTANT's FP
// immediate, or a G_CONSTANT's APInt immediate, to the instruction being
// built.
1584  assert(
I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
1585         "Expected G_FCONSTANT");
1586  const ConstantFP *FPImm =
I.getOperand(1).getFPImm();
1593  assert(
I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
1594         "Expected G_CONSTANT");
1595  addNumImm(
I.getOperand(1).getCImm()->getValue(), MIB);
// Materialize a 32-bit integer constant vreg, deduplicating through the
// global registry; zero becomes OpConstantNull, other values OpConstantI.
1603      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32,
I,
TII);
1605  auto ConstInt = ConstantInt::get(LLVMTy, Val);
// Reuse a previously-emitted constant for this value if one exists.
1606  Register NewReg = GR.find(ConstInt, GR.CurMF);
1609    GR.add(ConstInt, GR.CurMF, NewReg);
1613      MI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
1615               .
addUse(GR.getSPIRVTypeID(SpvI32Ty));
1617      MI =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
1619               .
addUse(GR.getSPIRVTypeID(SpvI32Ty))
// G_FCMP: map the FP predicate to its SPIR-V opcode (mapping elided) and
// defer to selectCmp.
1627bool SPIRVInstructionSelector::selectFCmp(
Register ResVReg,
1631  return selectCmp(ResVReg, ResType, CmpOp,
I);
// Zero/one constant builders (headers elided). In OpenCL environments zero
// is emitted as OpConstantNull ("ZeroAsNull"); vector result types get a
// splat constant vector, scalars a plain constant.
1637  bool ZeroAsNull = STI.isOpenCLEnv();
1638  if (ResType->
getOpcode() == SPIRV::OpTypeVector)
1639    return GR.getOrCreateConstVector(0UL,
I, ResType,
TII, ZeroAsNull);
1640  return GR.getOrCreateConstInt(0,
I, ResType,
TII, ZeroAsNull);
// Floating-point zero variant (VZero construction elided).
1660  bool ZeroAsNull = STI.isOpenCLEnv();
1662  if (ResType->
getOpcode() == SPIRV::OpTypeVector)
1663    return GR.getOrCreateConstVector(VZero,
I, ResType,
TII, ZeroAsNull);
1664  return GR.getOrCreateConstFP(VZero,
I, ResType,
TII, ZeroAsNull);
// All-ones/one builder fragment: width-dependent value (body elided).
1670  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1673  if (ResType->
getOpcode() == SPIRV::OpTypeVector)
// Build an OpSelect choosing between all-ones (true) and zero (false) values
// of ResType based on the bool condition operand; the scalar-condition and
// vector-condition pseudo variants differ only in opcode.
1678bool SPIRVInstructionSelector::selectSelect(
Register ResVReg,
1681                                            bool IsSigned)
const {
1683  Register ZeroReg = buildZerosVal(ResType,
I);
1684  Register OneReg = buildOnesVal(IsSigned, ResType,
I);
1686      GR.isScalarOfType(
I.getOperand(1).getReg(), SPIRV::OpTypeBool);
1688      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
1689  return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
1691      .
addUse(GR.getSPIRVTypeID(ResType))
1692      .
addUse(
I.getOperand(1).getReg())
1698bool SPIRVInstructionSelector::selectIToF(
Register ResVReg,
1701 unsigned Opcode)
const {
1702 Register SrcReg =
I.getOperand(1).getReg();
1705 if (GR.isScalarOrVectorOfType(
I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
1706 unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
1708 if (ResType->
getOpcode() == SPIRV::OpTypeVector) {
1710 TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts,
I,
TII);
1712 SrcReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1713 selectSelect(SrcReg, TmpType,
I,
false);
1715 return selectUnOpWithSrc(ResVReg, ResType,
I, SrcReg, Opcode);
1718bool SPIRVInstructionSelector::selectExt(
Register ResVReg,
1721 Register SrcReg =
I.getOperand(1).getReg();
1722 if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
1723 return selectSelect(ResVReg, ResType,
I, IsSigned);
1725 SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
1726 if (SrcType == ResType)
1727 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1728 TII.get(TargetOpcode::COPY))
1733 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1734 return selectUnOp(ResVReg, ResType,
I, Opcode);
1737bool SPIRVInstructionSelector::selectIntToBool(
Register IntReg,
1743 Register BitIntReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1744 bool IsVectorTy = IntTy->
getOpcode() == SPIRV::OpTypeVector;
1745 unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
1747 Register One = buildOnesVal(
false, IntTy,
I);
1751 .
addUse(GR.getSPIRVTypeID(IntTy))
1755 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpINotEqual))
1757 .
addUse(GR.getSPIRVTypeID(BoolTy))
1763bool SPIRVInstructionSelector::selectTrunc(
Register ResVReg,
1766 Register IntReg =
I.getOperand(1).getReg();
1767 const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
1768 if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
1769 return selectIntToBool(IntReg, ResVReg,
I, ArgType, ResType);
1770 if (ArgType == ResType)
1771 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
1772 TII.get(TargetOpcode::COPY))
1776 bool IsSigned = GR.isScalarOrVectorSigned(ResType);
1777 unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
1778 return selectUnOp(ResVReg, ResType,
I, Opcode);
1781bool SPIRVInstructionSelector::selectConst(
Register ResVReg,
1785 unsigned TyOpcode = ResType->
getOpcode();
1786 assert(TyOpcode != SPIRV::OpTypePointer ||
Imm.isZero());
1788 if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
1790 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
1792 .
addUse(GR.getSPIRVTypeID(ResType))
1794 if (TyOpcode == SPIRV::OpTypeInt) {
1795 assert(
Imm.getBitWidth() <= 64 &&
"Unsupported integer width!");
1799 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(TargetOpcode::COPY))
1804 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantI))
1806 .
addUse(GR.getSPIRVTypeID(ResType));
1813bool SPIRVInstructionSelector::selectOpUndef(
Register ResVReg,
1816 return BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
1818 .
addUse(GR.getSPIRVTypeID(ResType))
1825 if (TypeInst->
getOpcode() == SPIRV::ASSIGN_TYPE) {
1828 return ImmInst->
getOpcode() == TargetOpcode::G_CONSTANT;
1830 return TypeInst->
getOpcode() == SPIRV::OpConstantI;
1835 if (TypeInst->
getOpcode() == SPIRV::OpConstantI)
1842bool SPIRVInstructionSelector::selectInsertVal(
Register ResVReg,
1846 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeInsert))
1848 .
addUse(GR.getSPIRVTypeID(ResType))
1850 .
addUse(
I.getOperand(3).getReg())
1852 .
addUse(
I.getOperand(2).getReg());
1853 for (
unsigned i = 4; i <
I.getNumOperands(); i++)
1858bool SPIRVInstructionSelector::selectExtractVal(
Register ResVReg,
1862 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
1864 .
addUse(GR.getSPIRVTypeID(ResType))
1865 .
addUse(
I.getOperand(2).getReg());
1866 for (
unsigned i = 3; i <
I.getNumOperands(); i++)
1871bool SPIRVInstructionSelector::selectInsertElt(
Register ResVReg,
1875 return selectInsertVal(ResVReg, ResType,
I);
1877 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorInsertDynamic))
1879 .
addUse(GR.getSPIRVTypeID(ResType))
1880 .
addUse(
I.getOperand(2).getReg())
1881 .
addUse(
I.getOperand(3).getReg())
1882 .
addUse(
I.getOperand(4).getReg())
1886bool SPIRVInstructionSelector::selectExtractElt(
Register ResVReg,
1890 return selectExtractVal(ResVReg, ResType,
I);
1892 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpVectorExtractDynamic))
1894 .
addUse(GR.getSPIRVTypeID(ResType))
1895 .
addUse(
I.getOperand(2).getReg())
1896 .
addUse(
I.getOperand(3).getReg())
1900bool SPIRVInstructionSelector::selectGEP(
Register ResVReg,
1903 const bool IsGEPInBounds =
I.getOperand(2).getImm();
1908 const unsigned Opcode = STI.isVulkanEnv()
1909 ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
1910 : SPIRV::OpAccessChain)
1911 : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
1912 : SPIRV::OpPtrAccessChain);
1914 auto Res =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode))
1916 .
addUse(GR.getSPIRVTypeID(ResType))
1918 .
addUse(
I.getOperand(3).getReg());
1920 const unsigned StartingIndex =
1921 (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
1924 for (
unsigned i = StartingIndex; i <
I.getNumExplicitOperands(); ++i)
1925 Res.addUse(
I.getOperand(i).getReg());
1926 return Res.constrainAllUses(
TII,
TRI, RBI);
1930bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
1933 unsigned Lim =
I.getNumExplicitOperands();
1934 for (
unsigned i =
I.getNumExplicitDefs() + 1; i < Lim; ++i) {
1935 Register OpReg =
I.getOperand(i).getReg();
1937 SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
1939 if (!OpDefine || !OpType ||
isConstReg(
MRI, OpDefine, Visited) ||
1940 OpDefine->
getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
1941 GR.isAggregateType(OpType)) {
1948 Register WrapReg = GR.find(OpDefine, MF);
1954 WrapReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
1955 GR.add(OpDefine, MF, WrapReg);
1959 GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
1963 .
addUse(GR.getSPIRVTypeID(OpType))
1973bool SPIRVInstructionSelector::selectIntrinsic(
Register ResVReg,
1979 case Intrinsic::spv_load:
1980 return selectLoad(ResVReg, ResType,
I);
1981 case Intrinsic::spv_store:
1982 return selectStore(
I);
1983 case Intrinsic::spv_extractv:
1984 return selectExtractVal(ResVReg, ResType,
I);
1985 case Intrinsic::spv_insertv:
1986 return selectInsertVal(ResVReg, ResType,
I);
1987 case Intrinsic::spv_extractelt:
1988 return selectExtractElt(ResVReg, ResType,
I);
1989 case Intrinsic::spv_insertelt:
1990 return selectInsertElt(ResVReg, ResType,
I);
1991 case Intrinsic::spv_gep:
1992 return selectGEP(ResVReg, ResType,
I);
1993 case Intrinsic::spv_unref_global:
1994 case Intrinsic::spv_init_global: {
1997 ?
MRI->getVRegDef(
I.getOperand(2).getReg())
2000 return selectGlobalValue(
MI->getOperand(0).getReg(), *
MI,
Init);
2002 case Intrinsic::spv_undef: {
2003 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUndef))
2005 .
addUse(GR.getSPIRVTypeID(ResType));
2008 case Intrinsic::spv_const_composite: {
2010 bool IsNull =
I.getNumExplicitDefs() + 1 ==
I.getNumExplicitOperands();
2012 unsigned Opcode = SPIRV::OpConstantNull;
2015 Opcode = SPIRV::OpConstantComposite;
2016 if (!wrapIntoSpecConstantOp(
I, CompositeArgs))
2019 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(Opcode))
2021 .
addUse(GR.getSPIRVTypeID(ResType));
2024 for (
Register OpReg : CompositeArgs)
2029 case Intrinsic::spv_assign_name: {
2030 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpName));
2031 MIB.
addUse(
I.getOperand(
I.getNumExplicitDefs() + 1).getReg());
2032 for (
unsigned i =
I.getNumExplicitDefs() + 2;
2033 i <
I.getNumExplicitOperands(); ++i) {
2034 MIB.
addImm(
I.getOperand(i).getImm());
2038 case Intrinsic::spv_switch: {
2039 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpSwitch));
2040 for (
unsigned i = 1; i <
I.getNumExplicitOperands(); ++i) {
2041 if (
I.getOperand(i).isReg())
2042 MIB.
addReg(
I.getOperand(i).getReg());
2043 else if (
I.getOperand(i).isCImm())
2044 addNumImm(
I.getOperand(i).getCImm()->getValue(), MIB);
2045 else if (
I.getOperand(i).isMBB())
2046 MIB.
addMBB(
I.getOperand(i).getMBB());
2052 case Intrinsic::spv_cmpxchg:
2053 return selectAtomicCmpXchg(ResVReg, ResType,
I);
2054 case Intrinsic::spv_unreachable:
2055 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpUnreachable));
2057 case Intrinsic::spv_alloca:
2058 return selectFrameIndex(ResVReg, ResType,
I);
2059 case Intrinsic::spv_alloca_array:
2060 return selectAllocaArray(ResVReg, ResType,
I);
2061 case Intrinsic::spv_assume:
2062 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
2063 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpAssumeTrueKHR))
2064 .
addUse(
I.getOperand(1).getReg());
2066 case Intrinsic::spv_expect:
2067 if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
2068 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExpectKHR))
2070 .
addUse(GR.getSPIRVTypeID(ResType))
2071 .
addUse(
I.getOperand(2).getReg())
2072 .
addUse(
I.getOperand(3).getReg());
2074 case Intrinsic::spv_thread_id:
2075 return selectSpvThreadId(ResVReg, ResType,
I);
2076 case Intrinsic::spv_all:
2077 return selectAll(ResVReg, ResType,
I);
2078 case Intrinsic::spv_any:
2079 return selectAny(ResVReg, ResType,
I);
2080 case Intrinsic::spv_lerp:
2081 return selectFmix(ResVReg, ResType,
I);
2082 case Intrinsic::spv_frac:
2083 return selectFrac(ResVReg, ResType,
I);
2084 case Intrinsic::spv_rsqrt:
2085 return selectRsqrt(ResVReg, ResType,
I);
2086 case Intrinsic::spv_lifetime_start:
2087 case Intrinsic::spv_lifetime_end: {
2088 unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
2089 : SPIRV::OpLifetimeStop;
2090 int64_t
Size =
I.getOperand(
I.getNumExplicitDefs() + 1).getImm();
2091 Register PtrReg =
I.getOperand(
I.getNumExplicitDefs() + 2).getReg();
2092 unsigned PonteeOpType = GR.getPointeeTypeOp(PtrReg);
2093 bool IsNonvoidPtr = PonteeOpType != 0 && PonteeOpType != SPIRV::OpTypeVoid;
2094 if (
Size == -1 || IsNonvoidPtr)
2099 std::string DiagMsg;
2102 DiagMsg =
"Intrinsic selection not implemented: " + DiagMsg;
2109bool SPIRVInstructionSelector::selectAllocaArray(
Register ResVReg,
2116 TII.get(SPIRV::OpVariableLengthArrayINTEL))
2118 .
addUse(GR.getSPIRVTypeID(ResType))
2119 .
addUse(
I.getOperand(2).getReg())
2123bool SPIRVInstructionSelector::selectFrameIndex(
Register ResVReg,
2131 bool IsHeader =
false;
2133 for (; It != E && It !=
I; ++It) {
2134 Opcode = It->getOpcode();
2135 if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
2137 }
else if (IsHeader &&
2138 !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
2143 return BuildMI(*
MBB, It, It->getDebugLoc(),
TII.get(SPIRV::OpVariable))
2145 .
addUse(GR.getSPIRVTypeID(ResType))
2150bool SPIRVInstructionSelector::selectBranch(
MachineInstr &
I)
const {
2157 if (PrevI !=
nullptr && PrevI->
getOpcode() == TargetOpcode::G_BRCOND) {
2158 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
2161 .
addMBB(
I.getOperand(0).getMBB())
2165 .
addMBB(
I.getOperand(0).getMBB())
2169bool SPIRVInstructionSelector::selectBranchCond(
MachineInstr &
I)
const {
2182 if (NextI !=
nullptr && NextI->
getOpcode() == SPIRV::OpBranchConditional)
2189 return BuildMI(
MBB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpBranchConditional))
2190 .
addUse(
I.getOperand(0).getReg())
2191 .
addMBB(
I.getOperand(1).getMBB())
2196bool SPIRVInstructionSelector::selectPhi(
Register ResVReg,
2199 auto MIB =
BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpPhi))
2201 .
addUse(GR.getSPIRVTypeID(ResType));
2202 const unsigned NumOps =
I.getNumOperands();
2203 for (
unsigned i = 1; i < NumOps; i += 2) {
2204 MIB.
addUse(
I.getOperand(i + 0).getReg());
2205 MIB.
addMBB(
I.getOperand(i + 1).getMBB());
2210bool SPIRVInstructionSelector::selectGlobalValue(
2220 SPIRV::AccessQualifier::ReadWrite,
false);
2221 PointerBaseType = GR.getOrCreateSPIRVArrayType(
2224 PointerBaseType = GR.getOrCreateSPIRVType(
2225 GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite,
false);
2227 SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
2228 PointerBaseType,
I,
TII,
2231 std::string GlobalIdent;
2233 unsigned &
ID = UnnamedGlobalIDs[GV];
2235 ID = UnnamedGlobalIDs.size();
2236 GlobalIdent =
"__unnamed_" +
Twine(
ID).
str();
2251 if (isa<Function>(GV)) {
2254 Register NewReg = GR.find(ConstVal, GR.CurMF);
2257 GR.add(ConstVal, GR.CurMF, NewReg);
2259 STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
2260 ? dyn_cast<Function>(GV)
2268 MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
2271 TII.get(SPIRV::OpConstantFunctionPointerINTEL))
2273 .
addUse(GR.getSPIRVTypeID(ResType))
2279 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpConstantNull))
2281 .
addUse(GR.getSPIRVTypeID(ResType))
2284 assert(NewReg != ResVReg);
2285 return BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(TargetOpcode::COPY))
2290 auto GlobalVar = cast<GlobalVariable>(GV);
2293 bool HasInit =
GlobalVar->hasInitializer() &&
2294 !isa<UndefValue>(
GlobalVar->getInitializer());
2297 if (HasInit && !
Init)
2301 SPIRV::StorageClass::StorageClass Storage =
2304 Storage != SPIRV::StorageClass::Function;
2305 SPIRV::LinkageType::LinkageType LnkType =
2307 ? SPIRV::LinkageType::Import
2309 STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
2310 ? SPIRV::LinkageType::LinkOnceODR
2311 : SPIRV::LinkageType::Export);
2313 Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
2315 HasLnkTy, LnkType, MIRBuilder,
true);
2316 return Reg.isValid();
2319bool SPIRVInstructionSelector::selectLog10(
Register ResVReg,
2322 if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
2323 return selectExtInst(ResVReg, ResType,
I, CL::log10);
2335 Register VarReg =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2337 BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpExtInst))
2339 .
addUse(GR.getSPIRVTypeID(ResType))
2340 .
addImm(
static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
2342 .
add(
I.getOperand(1))
2347 ResType->
getOpcode() == SPIRV::OpTypeFloat);
2350 ResType->
getOpcode() == SPIRV::OpTypeVector
2354 GR.buildConstantFP(
APFloat(0.30103f), MIRBuilder, SpirvScalarType);
2357 auto Opcode = ResType->
getOpcode() == SPIRV::OpTypeVector
2358 ? SPIRV::OpVectorTimesScalar
2362 .
addUse(GR.getSPIRVTypeID(ResType))
2370bool SPIRVInstructionSelector::selectSpvThreadId(
Register ResVReg,
2378 const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
2380 GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
2381 const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
2382 Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);
2386 MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
2387 MIRBuilder.getMRI()->setType(NewRegister,
LLT::pointer(0, 32));
2388 GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());
2391 Register Variable = GR.buildGlobalVariable(
2392 NewRegister, PtrType,
2394 SPIRV::StorageClass::Input,
nullptr,
true,
true,
2395 SPIRV::LinkageType::Import, MIRBuilder,
false);
2399 Register LoadedRegister =
MRI->createVirtualRegister(&SPIRV::IDRegClass);
2400 MIRBuilder.getMRI()->setType(LoadedRegister,
LLT::pointer(0, 32));
2401 GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());
2404 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(SPIRV::OpLoad))
2406 .
addUse(GR.getSPIRVTypeID(Vec3Ty))
2411 assert(
I.getOperand(2).isReg());
2412 Register ThreadIdReg =
I.getOperand(2).getReg();
2418 assert(Const &&
Const->getOpcode() == TargetOpcode::G_CONSTANT);
2424 auto MIB =
BuildMI(BB,
I,
I.getDebugLoc(),
TII.get(SPIRV::OpCompositeExtract))
2426 .
addUse(GR.getSPIRVTypeID(ResType))
2437 return new SPIRVInstructionSelector(
TM, Subtarget, RBI);
unsigned const MachineRegisterInfo * MRI
This file declares a class to represent arbitrary precision floating point values and provide a varie...
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
unsigned const TargetRegisterInfo * TRI
uint64_t IntrinsicInst * II
const char LLVMTargetMachineRef TM
static StringRef getName(Value *V)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
std::vector< std::pair< SPIRV::InstructionSet::InstructionSet, uint32_t > > ExtInstList
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC)
static APFloat getZeroFP(const Type *LLVMFloatTy)
static void addMemoryOperands(MachineMemOperand *MemOp, MachineInstrBuilder &MIB)
static unsigned getFCmpOpcode(unsigned PredNum)
bool isTypeFoldingSupported(unsigned Opcode)
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
static SPIRV::Scope::Scope getScope(SyncScope::ID Ord, SPIRVMachineModuleInfo *MMI)
static unsigned getBoolCmpOpcode(unsigned PredNum)
static unsigned getICmpOpcode(unsigned PredNum)
static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef, SmallPtrSet< SPIRVType *, 4 > &Visited)
static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC)
static unsigned getPtrCmpOpcode(unsigned Pred)
static unsigned getArrayComponentCount(MachineRegisterInfo *MRI, const SPIRVType *ResType)
APInt bitcastToAPInt() const
static APFloat getZero(const fltSemantics &Sem, bool Negative=false)
Factory for Positive and Negative Zero.
Class for arbitrary precision integers.
static APInt getAllOnes(unsigned numBits)
Return an APInt of a specified width with all bits set.
uint64_t getZExtValue() const
Get zero extended value.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ULT
1 1 0 0 True if unordered or less than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
ConstantFP - Floating Point Values [float, double].
const APFloat & getValueAPF() const
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate...
const APInt & getValue() const
Return the constant as an APInt value reference.
This is an important base class in LLVM.
static Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
This class represents an Operation in the Expression.
const Function & getFunction() const
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool isDeclaration() const
Return true if the primary definition of this global value is outside of the current translation unit...
LinkageTypes getLinkage() const
unsigned getAddressSpace() const
Module * getParent()
Get the module that this global value is contained inside of...
static std::string getGlobalIdentifier(StringRef Name, GlobalValue::LinkageTypes Linkage, StringRef FileName)
Return the modified name for a global value suitable to be used as the key for a global lookup (e....
bool hasAvailableExternallyLinkage() const
@ InternalLinkage
Rename collisions when linking (static functions).
@ LinkOnceODRLinkage
Same, but only replaced by something equivalent.
static IntegerType * get(LLVMContext &C, unsigned NumBits)
This static method is the primary way of constructing an IntegerType.
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
static constexpr LLT pointer(unsigned AddressSpace, unsigned SizeInBits)
Get a low-level pointer in the given address space.
This is an important class for using LLVM in a threaded context.
SyncScope::ID getOrInsertSyncScopeID(StringRef SSN)
getOrInsertSyncScopeID - Maps synchronization scope name to synchronization scope ID.
int getNumber() const
MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFuncti...
iterator SkipPHIsAndLabels(iterator I)
Return the first instruction in MBB after I that is not a PHI or a label.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MachineModuleInfo & getMMI() const
const MachineBasicBlock & front() const
Helper class to build MachineInstr.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
bool constrainAllUses(const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
unsigned getNumExplicitDefs() const
Returns the number of non-implicit definitions.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
This class can be derived from and used by targets to hold private target-specific information for ea...
This class contains meta information specific to a module.
const Module * getModule() const
Ty & getObjFileInfo()
Keep track of various per-module pieces of information for backends that would like to do so.
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
Register getReg() const
getReg - Returns the register number.
defusechain_iterator - This class provides iterator support for machine operands in the function that...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
LLVMContext & getContext() const
Get the global data context.
Analysis providing profile information.
Holds all the information related to register banks.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
SyncScope::ID SubGroupSSID
SPIRVMachineModuleInfo(const MachineModuleInfo &MMI)
SyncScope::ID AllSVMDevicesSSID
SyncScope::ID Work_ItemSSID
SyncScope::ID WorkGroupSSID
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Twine - A lightweight data structure for efficiently representing the concatenation of temporary valu...
std::string str() const
Return the twine contents as a std::string.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isArrayTy() const
True if this is an instance of ArrayType.
Type * getArrayElementType() const
uint64_t getArrayNumElements() const
@ HalfTyID
16-bit floating point type
@ FloatTyID
32-bit floating point type
@ DoubleTyID
64-bit floating point type
TypeID getTypeID() const
Return the type id for the type.
Type * getScalarType() const
If this is a vector type, return the element type, otherwise return 'this'.
NodeTy * getNextNode()
Get the next node, or nullptr for the list tail.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char IsConst[]
Key for Kernel::Arg::Metadata::mIsConst.
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
@ System
Synchronized with respect to all concurrently executing threads.
Reg
All possible values of the reg field in the ModR/M byte.
Scope
Defines the scope in which this symbol should be visible: Default – Visible in the public interface o...
NodeAddr< DefNode * > Def
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void addNumImm(const APInt &Imm, MachineInstrBuilder &MIB)
bool constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
bool isPreISelGenericOpcode(unsigned Opcode)
Check whether the given Opcode is a generic opcode that is not supposed to appear after ISel.
uint64_t getIConstVal(Register ConstReg, const MachineRegisterInfo *MRI)
SPIRV::MemorySemantics::MemorySemantics getMemSemanticsForStorageClass(SPIRV::StorageClass::StorageClass SC)
void buildOpDecorate(Register Reg, MachineIRBuilder &MIRBuilder, SPIRV::Decoration::Decoration Dec, const std::vector< uint32_t > &DecArgs, StringRef StrImm)
Type * toTypedPointer(Type *Ty)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
SPIRV::StorageClass::StorageClass addressSpaceToStorageClass(unsigned AddrSpace, const SPIRVSubtarget &STI)
AtomicOrdering
Atomic ordering for LLVM's memory model.
InstructionSelector * createSPIRVInstructionSelector(const SPIRVTargetMachine &TM, const SPIRVSubtarget &Subtarget, const RegisterBankInfo &RBI)
constexpr unsigned BitWidth
bool isSpvIntrinsic(const MachineInstr &MI, Intrinsic::ID IntrinsicID)
SPIRV::MemorySemantics::MemorySemantics getMemSemantics(AtomicOrdering Ord)
std::string getLinkStringForBuiltIn(SPIRV::BuiltIn::BuiltIn BuiltInValue)