#include "llvm/IR/IntrinsicsSPIRV.h"

#define DEBUG_TYPE "spirv-isel"

using namespace llvm;
namespace CL = SPIRV::OpenCLExtInst;
namespace GL = SPIRV::GLSLExtInst;

using ExtInstList =
    std::vector<std::pair<SPIRV::InstructionSet::InstructionSet, uint32_t>>;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class SPIRVInstructionSelector : public InstructionSelector {
  // ...

#define GET_GLOBALISEL_PREDICATES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
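  // The GET_GLOBALISEL_* sections pull the TableGen-generated GlobalISel
  // machinery out of SPIRVGenGlobalISel.inc in stages: first the predicate
  // bitset, then the predicate and temporary declarations that the generated
  // selectImpl() implementation (included further below) relies on.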
  bool selectUnOpWithSrc(Register ResVReg, const SPIRVType *ResType,
                         MachineInstr &I, Register SrcReg,
                         unsigned Opcode) const;
  bool selectUnOp(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  unsigned Opcode) const;
  // ...
  bool selectAtomicRMW(Register ResVReg, const SPIRVType *ResType,
                       MachineInstr &I, unsigned NewOpcode,
                       unsigned NegateOpcode = 0) const;
  // ...
  bool selectSelect(Register ResVReg, const SPIRVType *ResType,
                    MachineInstr &I, bool IsSigned) const;
  bool selectIToF(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                  bool IsSigned, unsigned Opcode) const;
  bool selectExt(Register ResVReg, const SPIRVType *ResType, MachineInstr &I,
                 bool IsSigned) const;
  // ...
  bool selectExtInst(Register ResVReg, const SPIRVType *ResType,
                     MachineInstr &I, CL::OpenCLExtInst CLInst,
                     GL::GLSLExtInst GLInst) const;
  // ...
  Register buildI32Constant(uint32_t Val, MachineInstr &I,
                            const SPIRVType *ResType = nullptr) const;
  // ...
};

} // end anonymous namespace
#define GET_GLOBALISEL_IMPL
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

SPIRVInstructionSelector::SPIRVInstructionSelector(
    const SPIRVTargetMachine &TM, const SPIRVSubtarget &ST,
    const RegisterBankInfo &RBI)
    : InstructionSelector(), STI(ST), TII(*ST.getInstrInfo()),
      TRI(*ST.getRegisterInfo()), RBI(RBI), GR(*ST.getSPIRVGlobalRegistry()),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "SPIRVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
void SPIRVInstructionSelector::setupMF(MachineFunction &MF, GISelKnownBits *KB,
                                       CodeGenCoverage *CoverageInfo,
                                       ProfileSummaryInfo *PSI,
                                       BlockFrequencyInfo *BFI) {
  GR.setCurrentFunc(MF);
  InstructionSelector::setupMF(MF, KB, CoverageInfo, PSI, BFI);
}
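// setupMF() runs once per function before any instruction is selected, so the
// global registry (GR) is pointed at the current MachineFunction before any
// vreg-to-SPIRVType queries are made below.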
bool SPIRVInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  const unsigned Opcode = I.getOpcode();
  // If it's not a GMIR instruction, we've selected it already.
  if (!isPreISelGenericOpcode(Opcode)) {
    if (Opcode == SPIRV::ASSIGN_TYPE) { // These pseudos aren't needed any more.
      Register DstReg = I.getOperand(0).getReg();
      Register SrcReg = I.getOperand(1).getReg();
      auto *Def = MRI->getVRegDef(SrcReg);
      if (isTypeFoldingSupported(Def->getOpcode())) {
        if (MRI->getType(DstReg).isPointer())
          MRI->setType(DstReg, LLT::scalar(32));
        bool Res = selectImpl(I, *CoverageInfo);
        assert(Res || Def->getOpcode() == TargetOpcode::G_CONSTANT);
        if (Res)
          return Res;
      }
      MRI->replaceRegWith(SrcReg, DstReg);
      I.removeFromParent();
      return true;
    } else if (I.getNumDefs() == 1) {
      // Make all vregs 32 bits (for SPIR-V IDs).
      MRI->setType(I.getOperand(0).getReg(), LLT::scalar(32));
    }
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  if (I.getNumOperands() != I.getNumExplicitOperands()) {
    LLVM_DEBUG(errs() << "Generic instr has unexpected implicit operands\n");
    return false;
  }

  // Common code for getting return reg+type, and removing selected instr
  // from parent occurs here. Instr-specific selection happens in spvSelect().
  bool HasDefs = I.getNumDefs() > 0;
  Register ResVReg = HasDefs ? I.getOperand(0).getReg() : Register(0);
  SPIRVType *ResType = HasDefs ? GR.getSPIRVTypeForVReg(ResVReg) : nullptr;
  assert(!HasDefs || ResType || I.getOpcode() == TargetOpcode::G_GLOBAL_VALUE);
  if (spvSelect(ResVReg, ResType, I)) {
    if (HasDefs) // Make all vregs 32 bits (for SPIR-V IDs).
      for (unsigned i = 0; i < I.getNumDefs(); ++i)
        MRI->setType(I.getOperand(i).getReg(), LLT::scalar(32));
    I.removeFromParent();
    return true;
  }
  return false;
}
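// Note how ASSIGN_TYPE pseudo-instructions disappear here: once type folding
// has been attempted, the source register is merged into the destination via
// replaceRegWith(), so no SPIR-V instruction is emitted for the pseudo itself.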
bool SPIRVInstructionSelector::spvSelect(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const unsigned Opcode = I.getOpcode();
  if (isTypeFoldingSupported(Opcode) && Opcode != TargetOpcode::G_CONSTANT)
    return selectImpl(I, *CoverageInfo);
  switch (Opcode) {
  case TargetOpcode::G_CONSTANT:
    return selectConst(ResVReg, ResType, I.getOperand(1).getCImm()->getValue(),
                       I);
  case TargetOpcode::G_GLOBAL_VALUE:
    return selectGlobalValue(ResVReg, I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectOpUndef(ResVReg, ResType, I);
  case TargetOpcode::G_FREEZE:
    return selectFreeze(ResVReg, ResType, I);

  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return selectIntrinsic(ResVReg, ResType, I);
  case TargetOpcode::G_BITREVERSE:
    return selectBitreverse(ResVReg, ResType, I);

  case TargetOpcode::G_BUILD_VECTOR:
    return selectConstVector(ResVReg, ResType, I);
  case TargetOpcode::G_SPLAT_VECTOR:
    return selectSplatVector(ResVReg, ResType, I);

  case TargetOpcode::G_SHUFFLE_VECTOR: {
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorShuffle))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addUse(I.getOperand(1).getReg())
                   .addUse(I.getOperand(2).getReg());
    for (auto V : I.getOperand(3).getShuffleMask())
      MIB.addImm(V);
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case TargetOpcode::G_MEMMOVE:
  case TargetOpcode::G_MEMCPY:
  case TargetOpcode::G_MEMSET:
    return selectMemOperation(ResVReg, I);

  case TargetOpcode::G_ICMP:
    return selectICmp(ResVReg, ResType, I);
  case TargetOpcode::G_FCMP:
    return selectFCmp(ResVReg, ResType, I);

  case TargetOpcode::G_FRAME_INDEX:
    return selectFrameIndex(ResVReg, ResType, I);

  case TargetOpcode::G_LOAD:
    return selectLoad(ResVReg, ResType, I);
  case TargetOpcode::G_STORE:
    return selectStore(I);

  case TargetOpcode::G_BR:
    return selectBranch(I);
  case TargetOpcode::G_BRCOND:
    return selectBranchCond(I);

  case TargetOpcode::G_PHI:
    return selectPhi(ResVReg, ResType, I);

  case TargetOpcode::G_FPTOSI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToS);
  case TargetOpcode::G_FPTOUI:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertFToU);

  case TargetOpcode::G_SITOFP:
    return selectIToF(ResVReg, ResType, I, true, SPIRV::OpConvertSToF);
  case TargetOpcode::G_UITOFP:
    return selectIToF(ResVReg, ResType, I, false, SPIRV::OpConvertUToF);

  case TargetOpcode::G_CTPOP:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitCount);
  case TargetOpcode::G_SMIN:
    return selectExtInst(ResVReg, ResType, I, CL::s_min, GL::SMin);
  case TargetOpcode::G_UMIN:
    return selectExtInst(ResVReg, ResType, I, CL::u_min, GL::UMin);

  case TargetOpcode::G_SMAX:
    return selectExtInst(ResVReg, ResType, I, CL::s_max, GL::SMax);
  case TargetOpcode::G_UMAX:
    return selectExtInst(ResVReg, ResType, I, CL::u_max, GL::UMax);

  case TargetOpcode::G_FMA:
    return selectExtInst(ResVReg, ResType, I, CL::fma, GL::Fma);

  case TargetOpcode::G_FPOW:
    return selectExtInst(ResVReg, ResType, I, CL::pow, GL::Pow);
  case TargetOpcode::G_FPOWI:
    return selectExtInst(ResVReg, ResType, I, CL::pown);

  case TargetOpcode::G_FEXP:
    return selectExtInst(ResVReg, ResType, I, CL::exp, GL::Exp);
  case TargetOpcode::G_FEXP2:
    return selectExtInst(ResVReg, ResType, I, CL::exp2, GL::Exp2);

  case TargetOpcode::G_FLOG:
    return selectExtInst(ResVReg, ResType, I, CL::log, GL::Log);
  case TargetOpcode::G_FLOG2:
    return selectExtInst(ResVReg, ResType, I, CL::log2, GL::Log2);
  case TargetOpcode::G_FLOG10:
    return selectLog10(ResVReg, ResType, I);

  case TargetOpcode::G_FABS:
    return selectExtInst(ResVReg, ResType, I, CL::fabs, GL::FAbs);
  case TargetOpcode::G_ABS:
    return selectExtInst(ResVReg, ResType, I, CL::s_abs, GL::SAbs);

  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMINIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmin, GL::NMin);
  case TargetOpcode::G_FMAXNUM:
  case TargetOpcode::G_FMAXIMUM:
    return selectExtInst(ResVReg, ResType, I, CL::fmax, GL::NMax);

  case TargetOpcode::G_FCOPYSIGN:
    return selectExtInst(ResVReg, ResType, I, CL::copysign);

  case TargetOpcode::G_FCEIL:
    return selectExtInst(ResVReg, ResType, I, CL::ceil, GL::Ceil);
  case TargetOpcode::G_FFLOOR:
    return selectExtInst(ResVReg, ResType, I, CL::floor, GL::Floor);

  case TargetOpcode::G_FCOS:
    return selectExtInst(ResVReg, ResType, I, CL::cos, GL::Cos);
  case TargetOpcode::G_FSIN:
    return selectExtInst(ResVReg, ResType, I, CL::sin, GL::Sin);
  case TargetOpcode::G_FTAN:
    return selectExtInst(ResVReg, ResType, I, CL::tan, GL::Tan);
  case TargetOpcode::G_FACOS:
    return selectExtInst(ResVReg, ResType, I, CL::acos, GL::Acos);
  case TargetOpcode::G_FASIN:
    return selectExtInst(ResVReg, ResType, I, CL::asin, GL::Asin);
  case TargetOpcode::G_FATAN:
    return selectExtInst(ResVReg, ResType, I, CL::atan, GL::Atan);
  case TargetOpcode::G_FCOSH:
    return selectExtInst(ResVReg, ResType, I, CL::cosh, GL::Cosh);
  case TargetOpcode::G_FSINH:
    return selectExtInst(ResVReg, ResType, I, CL::sinh, GL::Sinh);
  case TargetOpcode::G_FTANH:
    return selectExtInst(ResVReg, ResType, I, CL::tanh, GL::Tanh);

  case TargetOpcode::G_FSQRT:
    return selectExtInst(ResVReg, ResType, I, CL::sqrt, GL::Sqrt);

  case TargetOpcode::G_CTTZ:
  case TargetOpcode::G_CTTZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::ctz);
  case TargetOpcode::G_CTLZ:
  case TargetOpcode::G_CTLZ_ZERO_UNDEF:
    return selectExtInst(ResVReg, ResType, I, CL::clz);

  case TargetOpcode::G_INTRINSIC_ROUND:
    return selectExtInst(ResVReg, ResType, I, CL::round, GL::Round);
  case TargetOpcode::G_INTRINSIC_ROUNDEVEN:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);
  case TargetOpcode::G_INTRINSIC_TRUNC:
    return selectExtInst(ResVReg, ResType, I, CL::trunc, GL::Trunc);
  case TargetOpcode::G_FRINT:
  case TargetOpcode::G_FNEARBYINT:
    return selectExtInst(ResVReg, ResType, I, CL::rint, GL::RoundEven);

  case TargetOpcode::G_SMULH:
    return selectExtInst(ResVReg, ResType, I, CL::s_mul_hi);
  case TargetOpcode::G_UMULH:
    return selectExtInst(ResVReg, ResType, I, CL::u_mul_hi);

  case TargetOpcode::G_SADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_add_sat);
  case TargetOpcode::G_UADDSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_add_sat);
  case TargetOpcode::G_SSUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::s_sub_sat);
  case TargetOpcode::G_USUBSAT:
    return selectExtInst(ResVReg, ResType, I, CL::u_sub_sat);

  case TargetOpcode::G_SEXT:
    return selectExt(ResVReg, ResType, I, true);
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_ZEXT:
    return selectExt(ResVReg, ResType, I, false);
  case TargetOpcode::G_TRUNC:
    return selectTrunc(ResVReg, ResType, I);
  case TargetOpcode::G_FPTRUNC:
  case TargetOpcode::G_FPEXT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpFConvert);

  case TargetOpcode::G_PTRTOINT:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertPtrToU);
  case TargetOpcode::G_INTTOPTR:
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpConvertUToPtr);
  case TargetOpcode::G_BITCAST:
    return selectBitcast(ResVReg, ResType, I);
  case TargetOpcode::G_ADDRSPACE_CAST:
    return selectAddrSpaceCast(ResVReg, ResType, I);
  case TargetOpcode::G_PTR_ADD: {
    // Currently, we get G_PTR_ADD only as a result of translating
    // global variables, initialized with constant expressions like GV + Const.
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    Register GV = I.getOperand(1).getReg();
    MachineRegisterInfo::def_instr_iterator II = MRI->def_instr_begin(GV);
    assert(((*II).getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
            (*II).getOpcode() == TargetOpcode::COPY ||
            (*II).getOpcode() == SPIRV::OpVariable) &&
           isImm(I.getOperand(2), MRI));
    Register Idx = buildZerosVal(GR.getOrCreateSPIRVIntegerType(32, I, TII), I);
    MachineBasicBlock &BB = *I.getParent();
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType))
                   .addImm(static_cast<uint32_t>(
                       SPIRV::Opcode::InBoundsPtrAccessChain))
                   .addUse(GV)
                   .addUse(Idx)
                   .addUse(I.getOperand(2).getReg());
    return MIB.constrainAllUses(TII, TRI, RBI);
  }

  case TargetOpcode::G_ATOMICRMW_OR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicOr);
  case TargetOpcode::G_ATOMICRMW_ADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicIAdd);
  case TargetOpcode::G_ATOMICRMW_AND:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicAnd);
  case TargetOpcode::G_ATOMICRMW_MAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMax);
  case TargetOpcode::G_ATOMICRMW_MIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicSMin);
  case TargetOpcode::G_ATOMICRMW_SUB:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicISub);
  case TargetOpcode::G_ATOMICRMW_XOR:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicXor);
  case TargetOpcode::G_ATOMICRMW_UMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMax);
  case TargetOpcode::G_ATOMICRMW_UMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicUMin);
  case TargetOpcode::G_ATOMICRMW_XCHG:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicExchange);
  case TargetOpcode::G_ATOMIC_CMPXCHG:
    return selectAtomicCmpXchg(ResVReg, ResType, I);

  case TargetOpcode::G_ATOMICRMW_FADD:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT);
  case TargetOpcode::G_ATOMICRMW_FSUB:
    // Translate G_ATOMICRMW_FSUB to OpAtomicFAddEXT with negative value operand
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFAddEXT,
                           SPIRV::OpFNegate);
  case TargetOpcode::G_ATOMICRMW_FMIN:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMinEXT);
  case TargetOpcode::G_ATOMICRMW_FMAX:
    return selectAtomicRMW(ResVReg, ResType, I, SPIRV::OpAtomicFMaxEXT);

  case TargetOpcode::G_FENCE:
    return selectFence(I);

  case TargetOpcode::G_STACKSAVE:
    return selectStackSave(ResVReg, ResType, I);
  case TargetOpcode::G_STACKRESTORE:
    return selectStackRestore(I);

  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(I);

  default:
    return false;
  }
}
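// spvSelect() maps each generic opcode either to a native SPIR-V instruction
// (OpConvertFToS, OpBitcast, OpAtomic*, ...) or to an extended-instruction-set
// call. The selectExtInst() overloads below take an OpenCL.std enumerant, and
// optionally a GLSL.std.450 one, and emit whichever set the subtarget accepts.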
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst) const {
  return selectExtInst(ResVReg, ResType, I,
                       {{SPIRV::InstructionSet::OpenCL_std, CLInst}});
}

bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             CL::OpenCLExtInst CLInst,
                                             GL::GLSLExtInst GLInst) const {
  ExtInstList ExtInsts = {{SPIRV::InstructionSet::OpenCL_std, CLInst},
                          {SPIRV::InstructionSet::GLSL_std_450, GLInst}};
  return selectExtInst(ResVReg, ResType, I, ExtInsts);
}
bool SPIRVInstructionSelector::selectExtInst(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I,
                                             const ExtInstList &Insts) const {
  for (const auto &Ex : Insts) {
    SPIRV::InstructionSet::InstructionSet Set = Ex.first;
    uint32_t Opcode = Ex.second;
    if (STI.canUseExtInstSet(Set)) {
      MachineBasicBlock &BB = *I.getParent();
      auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
                     .addDef(ResVReg)
                     .addUse(GR.getSPIRVTypeID(ResType))
                     .addImm(static_cast<uint32_t>(Set))
                     .addImm(Opcode);
      const unsigned NumOps = I.getNumOperands();
      for (unsigned i = 1; i < NumOps; ++i)
        MIB.add(I.getOperand(i));
      return MIB.constrainAllUses(TII, TRI, RBI);
    }
  }
  return false;
}
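// As an illustration (register names hypothetical), G_SMIN on two i32 values
// selected through the OpenCL.std set becomes a single OpExtInst:
//   %res = OpExtInst %uint %std s_min %a %b
// where %std is the result of an earlier `OpExtInstImport "OpenCL.std"`.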
bool SPIRVInstructionSelector::selectUnOpWithSrc(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I,
                                                 Register SrcReg,
                                                 unsigned Opcode) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(SrcReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectUnOp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I,
                                          unsigned Opcode) const {
  if (STI.isOpenCLEnv() && I.getOperand(1).isReg()) {
    Register SrcReg = I.getOperand(1).getReg();
    bool IsGV = false;
    for (MachineRegisterInfo::def_instr_iterator DefIt =
             MRI->def_instr_begin(SrcReg);
         DefIt != MRI->def_instr_end(); DefIt = std::next(DefIt)) {
      if ((*DefIt).getOpcode() == TargetOpcode::G_GLOBAL_VALUE) {
        IsGV = true;
        break;
      }
    }
    if (IsGV) {
      uint32_t SpecOpcode = 0;
      switch (Opcode) {
      case SPIRV::OpConvertPtrToU:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertPtrToU);
        break;
      case SPIRV::OpConvertUToPtr:
        SpecOpcode = static_cast<uint32_t>(SPIRV::Opcode::ConvertUToPtr);
        break;
      }
      if (SpecOpcode)
        return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                       TII.get(SPIRV::OpSpecConstantOp))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addImm(SpecOpcode)
            .addUse(SrcReg)
            .constrainAllUses(TII, TRI, RBI);
    }
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, I.getOperand(1).getReg(),
                           Opcode);
}

bool SPIRVInstructionSelector::selectBitcast(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  Register OpReg = I.getOperand(1).getReg();
  SPIRVType *OpType = OpReg.isValid() ? GR.getSPIRVTypeForVReg(OpReg) : nullptr;
  if (!GR.isBitcastCompatible(ResType, OpType))
    report_fatal_error("incompatible result and operand types in a bitcast");
  return selectUnOp(ResVReg, ResType, I, SPIRV::OpBitcast);
}
static SPIRV::Scope::Scope getScope(SyncScope::ID Ord,
                                    const SyncScopeIDs &SSIDs) {
  if (Ord == SyncScope::SingleThread || Ord == SSIDs.Work_ItemSSID)
    return SPIRV::Scope::Invocation;
  else if (Ord == SyncScope::System || Ord == SSIDs.DeviceSSID)
    return SPIRV::Scope::Device;
  else if (Ord == SSIDs.WorkGroupSSID)
    return SPIRV::Scope::Workgroup;
  else if (Ord == SSIDs.AllSVMDevicesSSID)
    return SPIRV::Scope::CrossDevice;
  else if (Ord == SSIDs.SubGroupSSID)
    return SPIRV::Scope::Subgroup;
  // ...
  return SPIRV::Scope::Device;
}
static void addMemoryOperands(MachineMemOperand *MemOp,
                              MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (MemOp->isVolatile())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (MemOp->isNonTemporal())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);
  if (MemOp->getAlign().value())
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None)) {
    MIB.addImm(SpvMemOp);
    if (SpvMemOp & static_cast<uint32_t>(SPIRV::MemoryOperand::Aligned))
      MIB.addImm(MemOp->getAlign().value());
  }
}

static void addMemoryOperands(uint64_t Flags, MachineInstrBuilder &MIB) {
  uint32_t SpvMemOp = static_cast<uint32_t>(SPIRV::MemoryOperand::None);
  if (Flags & MachineMemOperand::Flags::MOVolatile)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Volatile);
  if (Flags & MachineMemOperand::Flags::MONonTemporal)
    SpvMemOp |= static_cast<uint32_t>(SPIRV::MemoryOperand::Nontemporal);

  if (SpvMemOp != static_cast<uint32_t>(SPIRV::MemoryOperand::None))
    MIB.addImm(SpvMemOp);
}
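// Both overloads translate LLVM memory attributes into the SPIR-V memory
// operand mask: MOVolatile -> Volatile, MONonTemporal -> Nontemporal, and in
// the MachineMemOperand form a nonzero alignment adds Aligned plus the literal
// alignment value that the Aligned operand requires.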
bool SPIRVInstructionSelector::selectLoad(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(Ptr);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectStore(MachineInstr &I) const {
  unsigned OpOffset = isa<GIntrinsic>(I) ? 1 : 0;
  Register StoreVal = I.getOperand(0 + OpOffset).getReg();
  Register Ptr = I.getOperand(1 + OpOffset).getReg();
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpStore))
                 .addUse(Ptr)
                 .addUse(StoreVal);
  if (!I.getNumMemOperands()) {
    assert(I.getOpcode() == TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS ||
           I.getOpcode() ==
               TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS);
    addMemoryOperands(I.getOperand(2 + OpOffset).getImm(), MIB);
  } else {
    addMemoryOperands(*I.memoperands_begin(), MIB);
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}
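// For plain G_LOAD/G_STORE the pointer sits at operand 1/0; when the access
// arrives as an spv_load/spv_store intrinsic, one extra leading operand (the
// intrinsic ID) shifts everything right, which is what OpOffset accounts for.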
bool SPIRVInstructionSelector::selectStackSave(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stacksave intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSaveMemoryINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}

bool SPIRVInstructionSelector::selectStackRestore(MachineInstr &I) const {
  if (!STI.canUseExtension(SPIRV::Extension::SPV_INTEL_variable_length_array))
    report_fatal_error(
        "llvm.stackrestore intrinsic: this instruction requires the following "
        "SPIR-V extension: SPV_INTEL_variable_length_array",
        false);
  if (!I.getOperand(0).isReg())
    return false;
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpRestoreMemoryINTEL))
      .addUse(I.getOperand(0).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectMemOperation(Register ResVReg,
                                                  MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Register SrcReg = I.getOperand(1).getReg();
  if (I.getOpcode() == TargetOpcode::G_MEMSET) {
    assert(I.getOperand(1).isReg() && I.getOperand(2).isReg());
    unsigned Val = getIConstVal(I.getOperand(1).getReg(), MRI);
    unsigned Num = getIConstVal(I.getOperand(2).getReg(), MRI);
    SPIRVType *ValTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    SPIRVType *ArrTy = GR.getOrCreateSPIRVArrayType(ValTy, Num, I, TII);
    Register Const = GR.getOrCreateConstIntArray(Val, Num, I, ArrTy, TII);
    SPIRVType *VarTy = GR.getOrCreateSPIRVPointerType(
        ArrTy, I, TII, SPIRV::StorageClass::UniformConstant);
    // Materialize the fill pattern as an internal global byte array; the
    // module takes ownership of the global variable.
    Function &CurFunction = GR.CurMF->getFunction();
    Type *LLVMArrTy =
        ArrayType::get(IntegerType::get(CurFunction.getContext(), 8), Num);
    GlobalVariable *GV = new GlobalVariable(*CurFunction.getParent(), LLVMArrTy,
                                            true, GlobalValue::InternalLinkage,
                                            Constant::getNullValue(LLVMArrTy));
    Register VarReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    GR.add(GV, GR.CurMF, VarReg);
    // ...
    BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpVariable))
        .addDef(VarReg)
        .addUse(GR.getSPIRVTypeID(VarTy))
        .addImm(SPIRV::StorageClass::UniformConstant)
        .addUse(Const)
        .constrainAllUses(TII, TRI, RBI);
    SPIRVType *SourceTy = GR.getOrCreateSPIRVPointerType(
        ValTy, I, TII, SPIRV::StorageClass::UniformConstant);
    SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    selectUnOpWithSrc(SrcReg, SourceTy, I, VarReg, SPIRV::OpBitcast);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCopyMemorySized))
                 .addUse(I.getOperand(0).getReg())
                 .addUse(SrcReg)
                 .addUse(I.getOperand(2).getReg());
  if (I.getNumMemOperands())
    addMemoryOperands(*I.memoperands_begin(), MIB);
  bool Result = MIB.constrainAllUses(TII, TRI, RBI);
  if (ResVReg.isValid() && ResVReg != MIB->getOperand(0).getReg())
    BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY), ResVReg)
        .addUse(MIB->getOperand(0).getReg());
  return Result;
}
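// G_MEMSET is thus lowered by materializing the fill pattern as a
// UniformConstant byte array (an OpVariable with a constant initializer) and
// bitcasting its address to a plain byte pointer, so memset, memcpy, and
// memmove all funnel into the same OpCopyMemorySized instruction.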
bool SPIRVInstructionSelector::selectAtomicRMW(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I,
                                               unsigned NewOpcode,
                                               unsigned NegateOpcode) const {
  assert(I.hasOneMemOperand());
  const MachineMemOperand *MemOp = *I.memoperands_begin();
  uint32_t Scope =
      static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), SSIDs));
  Register ScopeReg = buildI32Constant(Scope, I);

  Register Ptr = I.getOperand(1).getReg();
  // ...
  AtomicOrdering AO = MemOp->getSuccessOrdering();
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem /*| ScSem*/, I);

  bool Result = false;
  Register ValueReg = I.getOperand(2).getReg();
  if (NegateOpcode != 0) {
    // Translation with negative value operand is requested.
    Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    Result |= selectUnOpWithSrc(TmpReg, ResType, I, ValueReg, NegateOpcode);
    ValueReg = TmpReg;
  }

  Result |= BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(NewOpcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(Ptr)
                .addUse(ScopeReg)
                .addUse(MemSemReg)
                .addUse(ValueReg)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
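// G_ATOMICRMW_FSUB reuses this path: spvSelect() passes OpAtomicFAddEXT as
// NewOpcode and OpFNegate as NegateOpcode, so the value operand is negated
// first and the subtraction becomes an atomic float add.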
bool SPIRVInstructionSelector::selectUnmergeValues(MachineInstr &I) const {
  unsigned ArgI = I.getNumOperands() - 1;
  Register SrcReg =
      I.getOperand(ArgI).isReg() ? I.getOperand(ArgI).getReg() : Register(0);
  SPIRVType *DefType =
      SrcReg.isValid() ? GR.getSPIRVTypeForVReg(SrcReg) : nullptr;
  if (!DefType || DefType->getOpcode() != SPIRV::OpTypeVector)
    report_fatal_error(
        "cannot select G_UNMERGE_VALUES with a non-vector argument");

  SPIRVType *ScalarType =
      GR.getSPIRVTypeForVReg(DefType->getOperand(1).getReg());
  MachineBasicBlock &BB = *I.getParent();
  bool Res = false;
  for (unsigned i = 0; i < I.getNumDefs(); ++i) {
    Register ResVReg = I.getOperand(i).getReg();
    SPIRVType *ResType = GR.getSPIRVTypeForVReg(ResVReg);
    if (!ResType) {
      // There were no "assign type" actions; fix this now.
      ResType = ScalarType;
      MRI->setRegClass(ResVReg, &SPIRV::IDRegClass);
      MRI->setType(ResVReg, LLT::scalar(GR.getScalarOrVectorBitWidth(ResType)));
      GR.assignSPIRVTypeToVReg(ResType, ResVReg, *GR.CurMF);
    }
    auto MIB =
        BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
            .addDef(ResVReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(SrcReg)
            .addImm(static_cast<int64_t>(i));
    Res |= MIB.constrainAllUses(TII, TRI, RBI);
  }
  return Res;
}
bool SPIRVInstructionSelector::selectFence(MachineInstr &I) const {
  AtomicOrdering AO = AtomicOrdering(I.getOperand(0).getImm());
  uint32_t MemSem = static_cast<uint32_t>(getMemSemantics(AO));
  Register MemSemReg = buildI32Constant(MemSem, I);
  SyncScope::ID Ord = SyncScope::ID(I.getOperand(1).getImm());
  uint32_t Scope = static_cast<uint32_t>(getScope(Ord, SSIDs));
  Register ScopeReg = buildI32Constant(Scope, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpMemoryBarrier))
      .addUse(ScopeReg)
      .addUse(MemSemReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectAtomicCmpXchg(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  Register ScopeReg;
  Register MemSemEqReg;
  Register MemSemNeqReg;
  Register Ptr = I.getOperand(2).getReg();
  if (!isa<GIntrinsic>(I)) {
    assert(I.hasOneMemOperand());
    const MachineMemOperand *MemOp = *I.memoperands_begin();
    unsigned Scope =
        static_cast<uint32_t>(getScope(MemOp->getSyncScopeID(), SSIDs));
    ScopeReg = buildI32Constant(Scope, I);

    unsigned ScSem = static_cast<uint32_t>(
        getMemSemanticsForStorageClass(GR.getPointerStorageClass(Ptr)));
    AtomicOrdering AO = MemOp->getSuccessOrdering();
    unsigned MemSemEq = static_cast<uint32_t>(getMemSemantics(AO)) | ScSem;
    MemSemEqReg = buildI32Constant(MemSemEq, I);
    AtomicOrdering FO = MemOp->getFailureOrdering();
    unsigned MemSemNeq = static_cast<uint32_t>(getMemSemantics(FO)) | ScSem;
    MemSemNeqReg =
        MemSemEq == MemSemNeq ? MemSemEqReg : buildI32Constant(MemSemNeq, I);
  } else {
    ScopeReg = I.getOperand(5).getReg();
    MemSemEqReg = I.getOperand(6).getReg();
    MemSemNeqReg = I.getOperand(7).getReg();
  }

  Register Cmp = I.getOperand(3).getReg();
  Register Val = I.getOperand(4).getReg();
  SPIRVType *SpvValTy = GR.getSPIRVTypeForVReg(Val);
  Register ACmpRes = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  const DebugLoc &DL = I.getDebugLoc();
  bool Result =
      BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpAtomicCompareExchange))
          .addDef(ACmpRes)
          .addUse(GR.getSPIRVTypeID(SpvValTy))
          .addUse(Ptr)
          .addUse(ScopeReg)
          .addUse(MemSemEqReg)
          .addUse(MemSemNeqReg)
          .addUse(Val)
          .addUse(Cmp)
          .constrainAllUses(TII, TRI, RBI);
  Register CmpSuccReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  SPIRVType *BoolTy = GR.getOrCreateSPIRVBoolType(I, TII);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpIEqual))
                .addDef(CmpSuccReg)
                .addUse(GR.getSPIRVTypeID(BoolTy))
                .addUse(ACmpRes)
                .addUse(Cmp)
                .constrainAllUses(TII, TRI, RBI);
  Register TmpReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(TmpReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(ACmpRes)
                .addUse(GR.getOrCreateUndef(I, ResType, TII))
                .addImm(0)
                .constrainAllUses(TII, TRI, RBI);
  Result |= BuildMI(*I.getParent(), I, DL, TII.get(SPIRV::OpCompositeInsert))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(CmpSuccReg)
                .addUse(TmpReg)
                .addImm(1)
                .constrainAllUses(TII, TRI, RBI);
  return Result;
}
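// SPIR-V's OpAtomicCompareExchange returns only the original value, while
// G_ATOMIC_CMPXCHG produces a {value, success} pair; the two OpCompositeInsert
// instructions above rebuild that pair from the loaded value and the OpIEqual
// comparison against the expected operand.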
static bool isGenericCastablePtr(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::Workgroup:
  case SPIRV::StorageClass::CrossWorkgroup:
  case SPIRV::StorageClass::Function:
    return true;
  default:
    return false;
  }
}

static bool isUSMStorageClass(SPIRV::StorageClass::StorageClass SC) {
  switch (SC) {
  case SPIRV::StorageClass::DeviceOnlyINTEL:
  case SPIRV::StorageClass::HostOnlyINTEL:
    return true;
  default:
    return false;
  }
}
bool SPIRVInstructionSelector::selectAddrSpaceCast(Register ResVReg,
                                                   const SPIRVType *ResType,
                                                   MachineInstr &I) const {
  // If the AddrSpaceCast user is single and in OpConstantComposite or
  // OpVariable, we should select OpSpecConstantOp.
  auto UIs = MRI->use_instructions(ResVReg);
  if (!UIs.empty() && ++UIs.begin() == UIs.end() &&
      (UIs.begin()->getOpcode() == SPIRV::OpConstantComposite ||
       UIs.begin()->getOpcode() == SPIRV::OpVariable ||
       isSpvIntrinsic(*UIs.begin(), Intrinsic::spv_init_global))) {
    Register NewReg = I.getOperand(1).getReg();
    MachineBasicBlock &BB = *I.getParent();
    SPIRVType *SpvBaseTy = GR.getOrCreateSPIRVIntegerType(8, I, TII);
    ResType = GR.getOrCreateSPIRVPointerType(SpvBaseTy, I, TII,
                                             SPIRV::StorageClass::Generic);
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .addImm(static_cast<uint32_t>(SPIRV::Opcode::PtrCastToGeneric))
        .addUse(NewReg)
        .constrainAllUses(TII, TRI, RBI);
  }
  Register SrcPtr = I.getOperand(1).getReg();
  SPIRVType *SrcPtrTy = GR.getSPIRVTypeForVReg(SrcPtr);
  SPIRV::StorageClass::StorageClass SrcSC = GR.getPointerStorageClass(SrcPtr);
  SPIRV::StorageClass::StorageClass DstSC = GR.getPointerStorageClass(ResVReg);

  // Don't generate a cast between identical storage classes.
  if (SrcSC == DstSC)
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(SrcPtr)
        .constrainAllUses(TII, TRI, RBI);

  // Casting from an eligible pointer to Generic.
  if (DstSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(SrcSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  // Casting from Generic to an eligible pointer.
  if (SrcSC == SPIRV::StorageClass::Generic && isGenericCastablePtr(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);
  // Casting between 2 eligible pointers using Generic as an intermediary.
  if (isGenericCastablePtr(SrcSC) && isGenericCastablePtr(DstSC)) {
    Register Tmp = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    SPIRVType *GenericPtrTy = GR.getOrCreateSPIRVPointerType(
        GR.getPointeeType(SrcPtrTy), I, TII, SPIRV::StorageClass::Generic);
    MachineBasicBlock &BB = *I.getParent();
    const DebugLoc &DL = I.getDebugLoc();
    bool Success = BuildMI(BB, I, DL, TII.get(SPIRV::OpPtrCastToGeneric))
                       .addDef(Tmp)
                       .addUse(GR.getSPIRVTypeID(GenericPtrTy))
                       .addUse(SrcPtr)
                       .constrainAllUses(TII, TRI, RBI);
    return Success && BuildMI(BB, I, DL, TII.get(SPIRV::OpGenericCastToPtr))
                          .addDef(ResVReg)
                          .addUse(GR.getSPIRVTypeID(ResType))
                          .addUse(Tmp)
                          .constrainAllUses(TII, TRI, RBI);
  }

  // Casts to/from the SPV_INTEL_usm_storage_classes storage classes.
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::CrossWorkgroup)
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpPtrCastToCrossWorkgroupINTEL);
  if (SrcSC == SPIRV::StorageClass::CrossWorkgroup && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I,
                      SPIRV::OpCrossWorkgroupCastToPtrINTEL);
  if (isUSMStorageClass(SrcSC) && DstSC == SPIRV::StorageClass::Generic)
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpPtrCastToGeneric);
  if (SrcSC == SPIRV::StorageClass::Generic && isUSMStorageClass(DstSC))
    return selectUnOp(ResVReg, ResType, I, SPIRV::OpGenericCastToPtr);

  return false;
}
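// Legal addrspacecasts thus reduce to four shapes: a no-op COPY for identical
// storage classes, OpPtrCastToGeneric into Generic, OpGenericCastToPtr out of
// Generic, and a two-step hop through Generic when both ends are castable;
// the SPV_INTEL_usm_storage_classes opcodes cover the USM cases.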
static unsigned getFCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::FCMP_OEQ:
    return SPIRV::OpFOrdEqual;
  case CmpInst::FCMP_OGE:
    return SPIRV::OpFOrdGreaterThanEqual;
  case CmpInst::FCMP_OGT:
    return SPIRV::OpFOrdGreaterThan;
  case CmpInst::FCMP_OLE:
    return SPIRV::OpFOrdLessThanEqual;
  case CmpInst::FCMP_OLT:
    return SPIRV::OpFOrdLessThan;
  case CmpInst::FCMP_ONE:
    return SPIRV::OpFOrdNotEqual;
  case CmpInst::FCMP_ORD:
    return SPIRV::OpOrdered;
  case CmpInst::FCMP_UEQ:
    return SPIRV::OpFUnordEqual;
  case CmpInst::FCMP_UGE:
    return SPIRV::OpFUnordGreaterThanEqual;
  case CmpInst::FCMP_UGT:
    return SPIRV::OpFUnordGreaterThan;
  case CmpInst::FCMP_ULE:
    return SPIRV::OpFUnordLessThanEqual;
  case CmpInst::FCMP_ULT:
    return SPIRV::OpFUnordLessThan;
  case CmpInst::FCMP_UNE:
    return SPIRV::OpFUnordNotEqual;
  case CmpInst::FCMP_UNO:
    return SPIRV::OpUnordered;
  default:
    llvm_unreachable("Unknown predicate type for FCmp");
  }
}

static unsigned getICmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpIEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpINotEqual;
  case CmpInst::ICMP_SGE:
    return SPIRV::OpSGreaterThanEqual;
  case CmpInst::ICMP_SGT:
    return SPIRV::OpSGreaterThan;
  case CmpInst::ICMP_SLE:
    return SPIRV::OpSLessThanEqual;
  case CmpInst::ICMP_SLT:
    return SPIRV::OpSLessThan;
  case CmpInst::ICMP_UGE:
    return SPIRV::OpUGreaterThanEqual;
  case CmpInst::ICMP_UGT:
    return SPIRV::OpUGreaterThan;
  case CmpInst::ICMP_ULE:
    return SPIRV::OpULessThanEqual;
  case CmpInst::ICMP_ULT:
    return SPIRV::OpULessThan;
  default:
    llvm_unreachable("Unknown predicate type for ICmp");
  }
}

static unsigned getPtrCmpOpcode(unsigned Pred) {
  switch (static_cast<CmpInst::Predicate>(Pred)) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpPtrEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpPtrNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for pointer comparison");
  }
}

// Return the logical operation, or abort if none exists.
static unsigned getBoolCmpOpcode(unsigned PredNum) {
  auto Pred = static_cast<CmpInst::Predicate>(PredNum);
  switch (Pred) {
  case CmpInst::ICMP_EQ:
    return SPIRV::OpLogicalEqual;
  case CmpInst::ICMP_NE:
    return SPIRV::OpLogicalNotEqual;
  default:
    llvm_unreachable("Unknown predicate type for Bool comparison");
  }
}
bool SPIRVInstructionSelector::selectAnyOrAll(Register ResVReg,
                                              const SPIRVType *ResType,
                                              MachineInstr &I,
                                              unsigned OpAnyOrAll) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();
  Register InputRegister = I.getOperand(2).getReg();
  SPIRVType *InputType = GR.getSPIRVTypeForVReg(InputRegister);

  if (!InputType)
    report_fatal_error("Input Type could not be determined.");

  bool IsBoolTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeBool);
  bool IsVectorTy = InputType->getOpcode() == SPIRV::OpTypeVector;
  if (IsBoolTy && !IsVectorTy) {
    assert(ResVReg == I.getOperand(0).getReg());
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(InputRegister)
        .constrainAllUses(TII, TRI, RBI);
  }

  bool IsFloatTy = GR.isScalarOrVectorOfType(InputRegister, SPIRV::OpTypeFloat);
  unsigned SpirvNotEqualId =
      IsFloatTy ? SPIRV::OpFOrdNotEqual : SPIRV::OpINotEqual;
  SPIRVType *SpvBoolScalarTy = GR.getOrCreateSPIRVBoolType(I, TII);
  SPIRVType *SpvBoolTy = SpvBoolScalarTy;
  Register NotEqualReg = ResVReg;

  if (IsVectorTy) {
    NotEqualReg = IsBoolTy ? InputRegister
                           : MRI->createVirtualRegister(&SPIRV::IDRegClass);
    const unsigned NumElts = InputType->getOperand(2).getImm();
    SpvBoolTy = GR.getOrCreateSPIRVVectorType(SpvBoolTy, NumElts, I, TII);
  }

  if (!IsBoolTy) {
    Register ConstZeroReg =
        IsFloatTy ? buildZerosValF(InputType, I) : buildZerosVal(InputType, I);

    BuildMI(BB, I, I.getDebugLoc(), TII.get(SpirvNotEqualId))
        .addDef(NotEqualReg)
        .addUse(GR.getSPIRVTypeID(SpvBoolTy))
        .addUse(InputRegister)
        .addUse(ConstZeroReg)
        .constrainAllUses(TII, TRI, RBI);
  }

  if (!IsVectorTy)
    return true;

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(OpAnyOrAll))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(SpvBoolScalarTy))
      .addUse(NotEqualReg)
      .constrainAllUses(TII, TRI, RBI);
}
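// In other words, spv_any/spv_all on a non-bool vector first compares the
// input against zero (OpFOrdNotEqual or OpINotEqual) to get a vector of
// booleans, and only then applies OpAny/OpAll to reduce it to a scalar.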
bool SPIRVInstructionSelector::selectAll(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAll);
}

bool SPIRVInstructionSelector::selectAny(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  return selectAnyOrAll(ResVReg, ResType, I, SPIRV::OpAny);
}
bool SPIRVInstructionSelector::selectFmix(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  assert(I.getNumOperands() == 5);
  assert(I.getOperand(2).isReg());
  assert(I.getOperand(3).isReg());
  assert(I.getOperand(4).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::FMix)
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectFrac(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::Fract)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectRsqrt(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  assert(I.getNumOperands() == 3);
  assert(I.getOperand(2).isReg());
  MachineBasicBlock &BB = *I.getParent();

  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
      .addImm(GL::InverseSqrt)
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectBitreverse(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpBitReverse))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectFreeze(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I) const {
  // There is no way to implement `freeze` correctly without support on SPIR-V
  // standard side, but we may at least address a simple (static) case when
  // undef/poison value presence is obvious. Even incomplete `freeze` support
  // prevents the translation from crashing due to lack of support during
  // legalization and instruction selection.
  if (!I.getOperand(0).isReg() || !I.getOperand(1).isReg())
    return false;
  Register OpReg = I.getOperand(1).getReg();
  if (MachineInstr *Def = MRI->getVRegDef(OpReg)) {
    Register Reg;
    switch (Def->getOpcode()) {
    case SPIRV::ASSIGN_TYPE:
      if (MachineInstr *AssignToDef =
              MRI->getVRegDef(Def->getOperand(1).getReg())) {
        if (AssignToDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
          Reg = Def->getOperand(2).getReg();
      }
      break;
    case SPIRV::OpUndef:
      Reg = Def->getOperand(1).getReg();
      break;
    }
    unsigned DestOpCode;
    if (Reg.isValid()) {
      DestOpCode = SPIRV::OpConstantNull;
    } else {
      DestOpCode = TargetOpcode::COPY;
      Reg = OpReg;
    }
    return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(DestOpCode))
        .addDef(I.getOperand(0).getReg())
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  return false;
}
bool SPIRVInstructionSelector::selectConstVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // TODO: only const case is supported for now.
  assert(std::all_of(
      I.operands_begin(), I.operands_end(), [this](const MachineOperand &MO) {
        if (MO.isDef())
          return true;
        if (!MO.isReg())
          return false;
        SPIRVType *ConstTy = this->MRI->getVRegDef(MO.getReg());
        assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
               ConstTy->getOperand(1).isReg());
        Register ConstReg = ConstTy->getOperand(1).getReg();
        const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
        assert(Const);
        return (Const->getOpcode() == TargetOpcode::G_CONSTANT ||
                Const->getOpcode() == TargetOpcode::G_FCONSTANT);
      }));

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(SPIRV::OpConstantComposite))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = I.getNumExplicitDefs(); i < I.getNumExplicitOperands(); ++i)
    MIB.addUse(I.getOperand(i).getReg());
  return MIB.constrainAllUses(TII, TRI, RBI);
}
static unsigned getArrayComponentCount(MachineRegisterInfo *MRI,
                                       const SPIRVType *ResType) {
  Register OpReg = ResType->getOperand(2).getReg();
  SPIRVType *OpDef = MRI->getVRegDef(OpReg);
  if (!OpDef)
    return 0;
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }
  unsigned N = OpDef->getOpcode() == TargetOpcode::G_CONSTANT
                   ? OpDef->getOperand(1).getCImm()->getValue().getZExtValue()
                   : 0;
  return N;
}

// Return true if the virtual register represents a constant.
static bool isConstReg(MachineRegisterInfo *MRI, SPIRVType *OpDef,
                       SmallPtrSet<SPIRVType *, 4> &Visited) {
  if (OpDef->getOpcode() == SPIRV::ASSIGN_TYPE &&
      OpDef->getOperand(1).isReg()) {
    if (SPIRVType *RefDef = MRI->getVRegDef(OpDef->getOperand(1).getReg()))
      OpDef = RefDef;
  }

  if (Visited.contains(OpDef))
    return true;
  Visited.insert(OpDef);

  switch (OpDef->getOpcode()) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return true;
  case TargetOpcode::G_INTRINSIC:
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
  case TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS:
    return cast<GIntrinsic>(*OpDef).getIntrinsicID() ==
           Intrinsic::spv_const_composite;
  case TargetOpcode::G_BUILD_VECTOR:
  case TargetOpcode::G_SPLAT_VECTOR: {
    for (unsigned i = OpDef->getNumExplicitDefs(); i < OpDef->getNumOperands();
         i++) {
      SPIRVType *OpNestedDef =
          OpDef->getOperand(i).isReg()
              ? MRI->getVRegDef(OpDef->getOperand(i).getReg())
              : nullptr;
      if (OpNestedDef && !isConstReg(MRI, OpNestedDef, Visited))
        return false;
    }
    return true;
  }
  }
  return false;
}

static bool isConstReg(MachineRegisterInfo *MRI, Register OpReg) {
  if (SPIRVType *OpDef = MRI->getVRegDef(OpReg)) {
    SmallPtrSet<SPIRVType *, 4> Visited;
    return isConstReg(MRI, OpDef, Visited);
  }
  return false;
}
bool SPIRVInstructionSelector::selectSplatVector(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  unsigned N = 0;
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    N = GR.getScalarOrVectorComponentCount(ResType);
  else if (ResType->getOpcode() == SPIRV::OpTypeArray)
    N = getArrayComponentCount(MRI, ResType);
  else
    report_fatal_error("Cannot select G_SPLAT_VECTOR with a non-vector result");

  unsigned OpIdx = I.getNumExplicitDefs();
  if (!I.getOperand(OpIdx).isReg())
    report_fatal_error("Unexpected argument in G_SPLAT_VECTOR");

  // Check if we may construct a constant vector.
  Register OpReg = I.getOperand(OpIdx).getReg();
  bool IsConst = isConstReg(MRI, OpReg);

  if (!IsConst && N < 2)
    report_fatal_error(
        "There must be at least two constituent operands in a vector");

  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(),
                     TII.get(IsConst ? SPIRV::OpConstantComposite
                                     : SPIRV::OpCompositeConstruct))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  for (unsigned i = 0; i < N; ++i)
    MIB.addUse(OpReg);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
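// A splat of a constant scalar can be emitted directly as an
// OpConstantComposite with N identical members; a splat of a dynamic value
// must use OpCompositeConstruct instead, which is why the constness check
// above picks the opcode.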
bool SPIRVInstructionSelector::selectCmp(Register ResVReg,
                                         const SPIRVType *ResType,
                                         unsigned CmpOpc,
                                         MachineInstr &I) const {
  Register Cmp0 = I.getOperand(2).getReg();
  Register Cmp1 = I.getOperand(3).getReg();
  assert(GR.getSPIRVTypeForVReg(Cmp0)->getOpcode() ==
             GR.getSPIRVTypeForVReg(Cmp1)->getOpcode() &&
         "CMP operands should have the same type");
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(CmpOpc))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(Cmp0)
      .addUse(Cmp1)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectICmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  auto Pred = I.getOperand(1).getPredicate();
  unsigned CmpOpc;

  Register CmpOperand = I.getOperand(2).getReg();
  if (GR.isScalarOfType(CmpOperand, SPIRV::OpTypePointer))
    CmpOpc = getPtrCmpOpcode(Pred);
  else if (GR.isScalarOrVectorOfType(CmpOperand, SPIRV::OpTypeBool))
    CmpOpc = getBoolCmpOpcode(Pred);
  else
    CmpOpc = getICmpOpcode(Pred);
  return selectCmp(ResVReg, ResType, CmpOpc, I);
}
void SPIRVInstructionSelector::renderFImm64(MachineInstrBuilder &MIB,
                                            const MachineInstr &I,
                                            int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_FCONSTANT && OpIdx == -1 &&
         "Expected G_FCONSTANT");
  const ConstantFP *FPImm = I.getOperand(1).getFPImm();
  addNumImm(FPImm->getValueAPF().bitcastToAPInt(), MIB);
}

void SPIRVInstructionSelector::renderImm32(MachineInstrBuilder &MIB,
                                           const MachineInstr &I,
                                           int OpIdx) const {
  assert(I.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  addNumImm(I.getOperand(1).getCImm()->getValue(), MIB);
}
Register SPIRVInstructionSelector::buildI32Constant(
    uint32_t Val, MachineInstr &I, const SPIRVType *ResType) const {
  Type *LLVMTy = IntegerType::get(GR.CurMF->getFunction().getContext(), 32);
  const SPIRVType *SpvI32Ty =
      ResType ? ResType : GR.getOrCreateSPIRVIntegerType(32, I, TII);
  // Find a constant in the deduplication table or build a new one.
  auto ConstInt = ConstantInt::get(LLVMTy, Val);
  Register NewReg = GR.find(ConstInt, GR.CurMF);
  if (!NewReg.isValid()) {
    NewReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
    GR.add(ConstInt, GR.CurMF, NewReg);
    MachineInstr *MI;
    MachineBasicBlock &BB = *I.getParent();
    if (Val == 0) {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty));
    } else {
      MI = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
               .addDef(NewReg)
               .addUse(GR.getSPIRVTypeID(SpvI32Ty))
               .addImm(APInt(32, Val).getZExtValue());
    }
    constrainSelectedInstRegOperands(*MI, TII, TRI, RBI);
  }
  return NewReg;
}
bool SPIRVInstructionSelector::selectFCmp(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I) const {
  unsigned CmpOp = getFCmpOpcode(I.getOperand(1).getPredicate());
  return selectCmp(ResVReg, ResType, CmpOp, I);
}
Register SPIRVInstructionSelector::buildZerosVal(const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
  bool ZeroAsNull = STI.isOpenCLEnv();
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(0UL, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstInt(0, I, ResType, TII, ZeroAsNull);
}

static APFloat getZeroFP(const Type *LLVMFloatTy) {
  if (!LLVMFloatTy)
    return APFloat::getZero(APFloat::IEEEsingle());
  switch (LLVMFloatTy->getScalarType()->getTypeID()) {
  case Type::HalfTyID:
    return APFloat::getZero(APFloat::IEEEhalf());
  default:
  case Type::FloatTyID:
    return APFloat::getZero(APFloat::IEEEsingle());
  case Type::DoubleTyID:
    return APFloat::getZero(APFloat::IEEEdouble());
  }
}

Register SPIRVInstructionSelector::buildZerosValF(const SPIRVType *ResType,
                                                  MachineInstr &I) const {
  // OpenCL uses nulls for Zero. In HLSL we don't use null constants.
  bool ZeroAsNull = STI.isOpenCLEnv();
  APFloat VZero = getZeroFP(GR.getTypeForSPIRVType(ResType));
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(VZero, I, ResType, TII, ZeroAsNull);
  return GR.getOrCreateConstFP(VZero, I, ResType, TII, ZeroAsNull);
}

Register SPIRVInstructionSelector::buildOnesVal(bool AllOnes,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
  APInt One =
      AllOnes ? APInt::getAllOnes(BitWidth) : APInt::getOneBitSet(BitWidth, 0);
  if (ResType->getOpcode() == SPIRV::OpTypeVector)
    return GR.getOrCreateConstVector(One.getZExtValue(), I, ResType, TII);
  return GR.getOrCreateConstInt(One.getZExtValue(), I, ResType, TII);
}
bool SPIRVInstructionSelector::selectSelect(Register ResVReg,
                                            const SPIRVType *ResType,
                                            MachineInstr &I,
                                            bool IsSigned) const {
  // To extend a bool, we need to use OpSelect between constants.
  Register ZeroReg = buildZerosVal(ResType, I);
  Register OneReg = buildOnesVal(IsSigned, ResType, I);
  bool IsScalarBool =
      GR.isScalarOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool);
  unsigned Opcode =
      IsScalarBool ? SPIRV::OpSelectSISCond : SPIRV::OpSelectSIVCond;
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(1).getReg())
      .addUse(OneReg)
      .addUse(ZeroReg)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectIToF(Register ResVReg,
                                          const SPIRVType *ResType,
                                          MachineInstr &I, bool IsSigned,
                                          unsigned Opcode) const {
  Register SrcReg = I.getOperand(1).getReg();
  // We can convert bool value directly to float type without OpConvert*ToF,
  // however the translator generates OpSelect+OpConvert*ToF, so we do the same.
  if (GR.isScalarOrVectorOfType(I.getOperand(1).getReg(), SPIRV::OpTypeBool)) {
    unsigned BitWidth = GR.getScalarOrVectorBitWidth(ResType);
    SPIRVType *TmpType = GR.getOrCreateSPIRVIntegerType(BitWidth, I, TII);
    if (ResType->getOpcode() == SPIRV::OpTypeVector) {
      const unsigned NumElts = ResType->getOperand(2).getImm();
      TmpType = GR.getOrCreateSPIRVVectorType(TmpType, NumElts, I, TII);
    }
    SrcReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    selectSelect(SrcReg, TmpType, I, false);
  }
  return selectUnOpWithSrc(ResVReg, ResType, I, SrcReg, Opcode);
}
bool SPIRVInstructionSelector::selectExt(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I, bool IsSigned) const {
  Register SrcReg = I.getOperand(1).getReg();
  if (GR.isScalarOrVectorOfType(SrcReg, SPIRV::OpTypeBool))
    return selectSelect(ResVReg, ResType, I, IsSigned);

  SPIRVType *SrcType = GR.getSPIRVTypeForVReg(SrcReg);
  if (SrcType == ResType)
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(SrcReg)
        .constrainAllUses(TII, TRI, RBI);

  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}
bool SPIRVInstructionSelector::selectIntToBool(Register IntReg,
                                               Register ResVReg,
                                               MachineInstr &I,
                                               const SPIRVType *IntTy,
                                               const SPIRVType *BoolTy) const {
  // To truncate to a bool, we use OpBitwiseAnd 1 and OpINotEqual to zero.
  Register BitIntReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  bool IsVectorTy = IntTy->getOpcode() == SPIRV::OpTypeVector;
  unsigned Opcode = IsVectorTy ? SPIRV::OpBitwiseAndV : SPIRV::OpBitwiseAndS;
  Register Zero = buildZerosVal(IntTy, I);
  Register One = buildOnesVal(false, IntTy, I);
  MachineBasicBlock &BB = *I.getParent();
  BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
      .addDef(BitIntReg)
      .addUse(GR.getSPIRVTypeID(IntTy))
      .addUse(IntReg)
      .addUse(One)
      .constrainAllUses(TII, TRI, RBI);
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpINotEqual))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(BoolTy))
      .addUse(BitIntReg)
      .addUse(Zero)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectTrunc(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  Register IntReg = I.getOperand(1).getReg();
  const SPIRVType *ArgType = GR.getSPIRVTypeForVReg(IntReg);
  if (GR.isScalarOrVectorOfType(ResVReg, SPIRV::OpTypeBool))
    return selectIntToBool(IntReg, ResVReg, I, ArgType, ResType);
  if (ArgType == ResType)
    return BuildMI(*I.getParent(), I, I.getDebugLoc(),
                   TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(IntReg)
        .constrainAllUses(TII, TRI, RBI);
  bool IsSigned = GR.isScalarOrVectorSigned(ResType);
  unsigned Opcode = IsSigned ? SPIRV::OpSConvert : SPIRV::OpUConvert;
  return selectUnOp(ResVReg, ResType, I, Opcode);
}
bool SPIRVInstructionSelector::selectConst(Register ResVReg,
                                           const SPIRVType *ResType,
                                           const APInt &Imm,
                                           MachineInstr &I) const {
  unsigned TyOpcode = ResType->getOpcode();
  assert(TyOpcode != SPIRV::OpTypePointer || Imm.isZero());
  MachineBasicBlock &BB = *I.getParent();
  if ((TyOpcode == SPIRV::OpTypePointer || TyOpcode == SPIRV::OpTypeEvent) &&
      Imm.isZero())
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
        .addDef(ResVReg)
        .addUse(GR.getSPIRVTypeID(ResType))
        .constrainAllUses(TII, TRI, RBI);
  if (TyOpcode == SPIRV::OpTypeInt) {
    assert(Imm.getBitWidth() <= 64 && "Unsupported integer width!");
    Register Reg = GR.getOrCreateConstInt(Imm.getZExtValue(), I, ResType, TII);
    if (Reg == ResVReg)
      return true;
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(Reg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantI))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  addNumImm(Imm, MIB);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectOpUndef(Register ResVReg,
                                             const SPIRVType *ResType,
                                             MachineInstr &I) const {
  return BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .constrainAllUses(TII, TRI, RBI);
}
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  assert(MO.isReg());
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() == SPIRV::ASSIGN_TYPE) {
    assert(TypeInst->getOperand(1).isReg());
    MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
    return ImmInst->getOpcode() == TargetOpcode::G_CONSTANT;
  }
  return TypeInst->getOpcode() == SPIRV::OpConstantI;
}

static int64_t foldImm(const MachineOperand &MO, MachineRegisterInfo *MRI) {
  const SPIRVType *TypeInst = MRI->getVRegDef(MO.getReg());
  if (TypeInst->getOpcode() == SPIRV::OpConstantI)
    return TypeInst->getOperand(2).getImm();
  MachineInstr *ImmInst = MRI->getVRegDef(TypeInst->getOperand(1).getReg());
  assert(ImmInst->getOpcode() == TargetOpcode::G_CONSTANT);
  return ImmInst->getOperand(1).getCImm()->getZExtValue();
}
bool SPIRVInstructionSelector::selectInsertVal(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeInsert))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // object to insert
                 .addUse(I.getOperand(3).getReg())
                 // composite to insert into
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 4; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectExtractVal(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(I.getOperand(2).getReg());
  for (unsigned i = 3; i < I.getNumOperands(); i++)
    MIB.addImm(foldImm(I.getOperand(i), MRI));
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectInsertElt(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  if (isImm(I.getOperand(4), MRI))
    return selectInsertVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorInsertDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .addUse(I.getOperand(4).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectExtractElt(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  if (isImm(I.getOperand(3), MRI))
    return selectExtractVal(ResVReg, ResType, I);
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpVectorExtractDynamic))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .addUse(I.getOperand(3).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectGEP(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  const bool IsGEPInBounds = I.getOperand(2).getImm();

  // OpAccessChain could be used for OpenCL, but the SPIRV-LLVM Translator only
  // relies on PtrAccessChain, so we'll try not to deviate. For Vulkan however,
  // we have to use Op[InBounds]AccessChain.
  const unsigned Opcode = STI.isVulkanEnv()
                              ? (IsGEPInBounds ? SPIRV::OpInBoundsAccessChain
                                               : SPIRV::OpAccessChain)
                              : (IsGEPInBounds ? SPIRV::OpInBoundsPtrAccessChain
                                               : SPIRV::OpPtrAccessChain);

  auto Res = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcode))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 // Object to get a pointer to.
                 .addUse(I.getOperand(3).getReg());
  // Adding indices.
  const unsigned StartingIndex =
      (Opcode == SPIRV::OpAccessChain || Opcode == SPIRV::OpInBoundsAccessChain)
          ? 5
          : 4;
  for (unsigned i = StartingIndex; i < I.getNumExplicitOperands(); ++i)
    Res.addUse(I.getOperand(i).getReg());
  return Res.constrainAllUses(TII, TRI, RBI);
}
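// For example (hypothetical value names), an inbounds GEP on an OpenCL target
// becomes:
//   %r = OpInBoundsPtrAccessChain %ptrTy %base %element %idx...
// while a Vulkan target emits OpInBoundsAccessChain, which has no leading
// Element operand; the StartingIndex computation above accounts for that.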
// Maybe wrap a value into OpSpecConstantOp.
bool SPIRVInstructionSelector::wrapIntoSpecConstantOp(
    MachineInstr &I, SmallVector<Register> &CompositeArgs) const {
  bool Result = true;
  unsigned Lim = I.getNumExplicitOperands();
  for (unsigned i = I.getNumExplicitDefs() + 1; i < Lim; ++i) {
    Register OpReg = I.getOperand(i).getReg();
    SPIRVType *OpDefine = MRI->getVRegDef(OpReg);
    SPIRVType *OpType = GR.getSPIRVTypeForVReg(OpReg);
    SmallPtrSet<SPIRVType *, 4> Visited;
    if (!OpDefine || !OpType || isConstReg(MRI, OpDefine, Visited) ||
        OpDefine->getOpcode() == TargetOpcode::G_ADDRSPACE_CAST ||
        GR.isAggregateType(OpType)) {
      // The case of G_ADDRSPACE_CAST inside spv_const_composite() is processed
      // by selectAddrSpaceCast().
      CompositeArgs.push_back(OpReg);
      continue;
    }
    MachineFunction *MF = I.getMF();
    Register WrapReg = GR.find(OpDefine, MF);
    if (WrapReg.isValid()) {
      CompositeArgs.push_back(WrapReg);
      continue;
    }
    // Create a new register for the wrapper.
    WrapReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
    GR.add(OpDefine, MF, WrapReg);
    CompositeArgs.push_back(WrapReg);
    // Decorate the wrapper register and generate a new instruction.
    MRI->setType(WrapReg, LLT::pointer(0, 32));
    GR.assignSPIRVTypeToVReg(OpType, WrapReg, *MF);
    MachineBasicBlock &BB = *I.getParent();
    Result = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSpecConstantOp))
                 .addDef(WrapReg)
                 .addUse(GR.getSPIRVTypeID(OpType))
                 .addImm(static_cast<uint32_t>(SPIRV::Opcode::Bitcast))
                 .addUse(OpReg)
                 .constrainAllUses(TII, TRI, RBI);
    if (!Result)
      break;
  }
  return Result;
}
bool SPIRVInstructionSelector::selectIntrinsic(Register ResVReg,
                                               const SPIRVType *ResType,
                                               MachineInstr &I) const {
  MachineBasicBlock &BB = *I.getParent();
  Intrinsic::ID IID = cast<GIntrinsic>(I).getIntrinsicID();
  switch (IID) {
  case Intrinsic::spv_load:
    return selectLoad(ResVReg, ResType, I);
  case Intrinsic::spv_store:
    return selectStore(I);
  case Intrinsic::spv_extractv:
    return selectExtractVal(ResVReg, ResType, I);
  case Intrinsic::spv_insertv:
    return selectInsertVal(ResVReg, ResType, I);
  case Intrinsic::spv_extractelt:
    return selectExtractElt(ResVReg, ResType, I);
  case Intrinsic::spv_insertelt:
    return selectInsertElt(ResVReg, ResType, I);
  case Intrinsic::spv_gep:
    return selectGEP(ResVReg, ResType, I);
  case Intrinsic::spv_unref_global:
  case Intrinsic::spv_init_global: {
    MachineInstr *MI = MRI->getVRegDef(I.getOperand(1).getReg());
    MachineInstr *Init = I.getNumExplicitOperands() > 2
                             ? MRI->getVRegDef(I.getOperand(2).getReg())
                             : nullptr;
    assert(MI);
    return selectGlobalValue(MI->getOperand(0).getReg(), *MI, Init);
  }
  case Intrinsic::spv_undef: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUndef))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_const_composite: {
    // If no values are attached, the composite is a null constant.
    bool IsNull = I.getNumExplicitDefs() + 1 == I.getNumExplicitOperands();
    // Select a proper instruction.
    unsigned Opcode = SPIRV::OpConstantNull;
    SmallVector<Register> CompositeArgs;
    if (!IsNull) {
      Opcode = SPIRV::OpConstantComposite;
      if (!wrapIntoSpecConstantOp(I, CompositeArgs))
        return false;
    }
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                   .addDef(ResVReg)
                   .addUse(GR.getSPIRVTypeID(ResType));
    // skip type MD node we already used when generated assign.type for this
    if (!IsNull) {
      for (Register OpReg : CompositeArgs)
        MIB.addUse(OpReg);
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_assign_name: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpName));
    MIB.addUse(I.getOperand(I.getNumExplicitDefs() + 1).getReg());
    for (unsigned i = I.getNumExplicitDefs() + 2;
         i < I.getNumExplicitOperands(); ++i) {
      MIB.addImm(I.getOperand(i).getImm());
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_switch: {
    auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpSwitch));
    for (unsigned i = 1; i < I.getNumExplicitOperands(); ++i) {
      if (I.getOperand(i).isReg())
        MIB.addReg(I.getOperand(i).getReg());
      else if (I.getOperand(i).isCImm())
        addNumImm(I.getOperand(i).getCImm()->getValue(), MIB);
      else if (I.getOperand(i).isMBB())
        MIB.addMBB(I.getOperand(i).getMBB());
      else
        llvm_unreachable("Unexpected OpSwitch operand");
    }
    return MIB.constrainAllUses(TII, TRI, RBI);
  }
  case Intrinsic::spv_cmpxchg:
    return selectAtomicCmpXchg(ResVReg, ResType, I);
  case Intrinsic::spv_unreachable:
    BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpUnreachable));
    break;
  case Intrinsic::spv_alloca:
    return selectFrameIndex(ResVReg, ResType, I);
  case Intrinsic::spv_alloca_array:
    return selectAllocaArray(ResVReg, ResType, I);
  case Intrinsic::spv_assume:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpAssumeTrueKHR))
          .addUse(I.getOperand(1).getReg());
    break;
  case Intrinsic::spv_expect:
    if (STI.canUseExtension(SPIRV::Extension::SPV_KHR_expect_assume))
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExpectKHR))
          .addDef(ResVReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addUse(I.getOperand(2).getReg())
          .addUse(I.getOperand(3).getReg());
    break;
  case Intrinsic::spv_thread_id:
    return selectSpvThreadId(ResVReg, ResType, I);
  case Intrinsic::spv_all:
    return selectAll(ResVReg, ResType, I);
  case Intrinsic::spv_any:
    return selectAny(ResVReg, ResType, I);
  case Intrinsic::spv_lerp:
    return selectFmix(ResVReg, ResType, I);
  case Intrinsic::spv_frac:
    return selectFrac(ResVReg, ResType, I);
  case Intrinsic::spv_rsqrt:
    return selectRsqrt(ResVReg, ResType, I);
  case Intrinsic::spv_lifetime_start:
  case Intrinsic::spv_lifetime_end: {
    unsigned Op = IID == Intrinsic::spv_lifetime_start ? SPIRV::OpLifetimeStart
                                                       : SPIRV::OpLifetimeStop;
    int64_t Size = I.getOperand(I.getNumExplicitDefs() + 1).getImm();
    Register PtrReg = I.getOperand(I.getNumExplicitDefs() + 2).getReg();
    unsigned PonteeOpType = GR.getPointeeTypeOp(PtrReg);
    bool IsNonvoidPtr = PonteeOpType != 0 && PonteeOpType != SPIRV::OpTypeVoid;
    if (Size == -1 || IsNonvoidPtr)
      Size = 0;
    BuildMI(BB, I, I.getDebugLoc(), TII.get(Op)).addUse(PtrReg).addImm(Size);
  } break;
  default: {
    std::string DiagMsg;
    raw_string_ostream OS(DiagMsg);
    I.print(OS);
    DiagMsg = "Intrinsic selection not implemented: " + DiagMsg;
    report_fatal_error(DiagMsg.c_str(), false);
  }
  }
  return true;
}
bool SPIRVInstructionSelector::selectAllocaArray(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // There was an allocation size parameter to the allocation instruction
  // that is not 1.
  MachineBasicBlock &BB = *I.getParent();
  return BuildMI(BB, I, I.getDebugLoc(),
                 TII.get(SPIRV::OpVariableLengthArrayINTEL))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addUse(I.getOperand(2).getReg())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectFrameIndex(Register ResVReg,
                                                const SPIRVType *ResType,
                                                MachineInstr &I) const {
  // Change the order of instructions if needed: all OpVariable instructions in
  // a function must be the first instructions in the first block.
  MachineFunction *MF = I.getParent()->getParent();
  MachineBasicBlock *MBB = &MF->front();
  auto It = MBB->SkipPHIsAndLabels(MBB->begin()), E = MBB->end();
  bool IsHeader = false;
  unsigned Opcode;
  for (; It != E && It != I; ++It) {
    Opcode = It->getOpcode();
    if (Opcode == SPIRV::OpFunction || Opcode == SPIRV::OpFunctionParameter) {
      IsHeader = true;
    } else if (IsHeader &&
               !(Opcode == SPIRV::ASSIGN_TYPE || Opcode == SPIRV::OpLabel)) {
      ++It;
      break;
    }
  }
  return BuildMI(*MBB, It, It->getDebugLoc(), TII.get(SPIRV::OpVariable))
      .addDef(ResVReg)
      .addUse(GR.getSPIRVTypeID(ResType))
      .addImm(static_cast<uint32_t>(SPIRV::StorageClass::Function))
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectBranch(MachineInstr &I) const {
  // InstructionSelector walks backwards through the instructions. We can use
  // both a G_BR and a G_BRCOND to create an OpBranchConditional. We hit G_BR
  // first, so can generate an OpBranchConditional here. If there is no
  // G_BRCOND, we just use OpBranch for a regular unconditional branch.
  const MachineInstr *PrevI = I.getPrevNode();
  MachineBasicBlock &MBB = *I.getParent();
  if (PrevI != nullptr && PrevI->getOpcode() == TargetOpcode::G_BRCOND) {
    return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
        .addUse(PrevI->getOperand(0).getReg())
        .addMBB(PrevI->getOperand(1).getMBB())
        .addMBB(I.getOperand(0).getMBB())
        .constrainAllUses(TII, TRI, RBI);
  }
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranch))
      .addMBB(I.getOperand(0).getMBB())
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectBranchCond(MachineInstr &I) const {
  // If an OpBranchConditional has already been generated by selectBranch(),
  // we simply return, as the work is already done. If there is no
  // OpBranchConditional, LLVM must be expecting a fallthrough, so we generate
  // an OpBranchConditional with an explicit "false" argument pointing to the
  // next block that LLVM would fall through to.
  const MachineInstr *NextI = I.getNextNode();
  // Check if this has already been successfully selected.
  if (NextI != nullptr && NextI->getOpcode() == SPIRV::OpBranchConditional)
    return true;
  // Must be relying on implicit block fallthrough, so generate an
  // OpBranchConditional with the "next" basic block as the "false" target.
  MachineBasicBlock &MBB = *I.getParent();
  unsigned NextMBBNum = MBB.getNextNode()->getNumber();
  MachineBasicBlock *NextMBB = I.getMF()->getBlockNumbered(NextMBBNum);
  return BuildMI(MBB, I, I.getDebugLoc(), TII.get(SPIRV::OpBranchConditional))
      .addUse(I.getOperand(0).getReg())
      .addMBB(I.getOperand(1).getMBB())
      .addMBB(NextMBB)
      .constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectPhi(Register ResVReg,
                                         const SPIRVType *ResType,
                                         MachineInstr &I) const {
  auto MIB = BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpPhi))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType));
  const unsigned NumOps = I.getNumOperands();
  for (unsigned i = 1; i < NumOps; i += 2) {
    MIB.addUse(I.getOperand(i + 0).getReg());
    MIB.addMBB(I.getOperand(i + 1).getMBB());
  }
  return MIB.constrainAllUses(TII, TRI, RBI);
}
bool SPIRVInstructionSelector::selectGlobalValue(
    Register ResVReg, MachineInstr &I, const MachineInstr *Init) const {
  MachineIRBuilder MIRBuilder(I);
  const GlobalValue *GV = I.getOperand(1).getGlobal();
  Type *GVType = toTypedPointer(GR.getDeducedGlobalValueType(GV));
  SPIRVType *PointerBaseType;
  if (GVType->isArrayTy()) {
    SPIRVType *ArrayElementType =
        GR.getOrCreateSPIRVType(GVType->getArrayElementType(), MIRBuilder,
                                SPIRV::AccessQualifier::ReadWrite, false);
    PointerBaseType = GR.getOrCreateSPIRVArrayType(
        ArrayElementType, GVType->getArrayNumElements(), I, TII);
  } else {
    PointerBaseType = GR.getOrCreateSPIRVType(
        GVType, MIRBuilder, SPIRV::AccessQualifier::ReadWrite, false);
  }
  SPIRVType *ResType = GR.getOrCreateSPIRVPointerType(
      PointerBaseType, I, TII,
      addressSpaceToStorageClass(GV->getAddressSpace(), STI));

  std::string GlobalIdent;
  if (!GV->hasName()) {
    unsigned &ID = UnnamedGlobalIDs[GV];
    if (ID == 0)
      ID = UnnamedGlobalIDs.size();
    GlobalIdent = "__unnamed_" + Twine(ID).str();
  } else {
    GlobalIdent = GV->getGlobalIdentifier();
  }

  // ...

  if (isa<Function>(GV)) {
    MachineBasicBlock &BB = *I.getParent();
    const Constant *ConstVal = GV;
    Register NewReg = GR.find(ConstVal, GR.CurMF);
    if (!NewReg.isValid()) {
      NewReg = ResVReg;
      GR.add(ConstVal, GR.CurMF, NewReg);
      const Function *GVFun =
          STI.canUseExtension(SPIRV::Extension::SPV_INTEL_function_pointers)
              ? dyn_cast<Function>(GV)
              : nullptr;
      if (GVFun) {
        // References to a function via function pointers generate virtual
        // registers without a definition; it is resolved later, during
        // module analysis.
        Register FuncVReg = MRI->createGenericVirtualRegister(LLT::scalar(32));
        MRI->setRegClass(FuncVReg, &SPIRV::IDRegClass);
        return BuildMI(BB, I, I.getDebugLoc(),
                       TII.get(SPIRV::OpConstantFunctionPointerINTEL))
            .addDef(NewReg)
            .addUse(GR.getSPIRVTypeID(ResType))
            .addUse(FuncVReg)
            .constrainAllUses(TII, TRI, RBI);
      }
      return BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpConstantNull))
          .addDef(NewReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .constrainAllUses(TII, TRI, RBI);
    }
    assert(NewReg != ResVReg);
    return BuildMI(BB, I, I.getDebugLoc(), TII.get(TargetOpcode::COPY))
        .addDef(ResVReg)
        .addUse(NewReg)
        .constrainAllUses(TII, TRI, RBI);
  }
  auto GlobalVar = cast<GlobalVariable>(GV);

  bool HasInit = GlobalVar->hasInitializer() &&
                 !isa<UndefValue>(GlobalVar->getInitializer());
  // Skip empty declaration for GVs with initializers until we get the decl
  // with the passed initializer.
  if (HasInit && !Init)
    return true;

  SPIRV::StorageClass::StorageClass Storage =
      addressSpaceToStorageClass(GV->getAddressSpace(), STI);
  bool HasLnkTy = GV->getLinkage() != GlobalValue::InternalLinkage &&
                  Storage != SPIRV::StorageClass::Function;
  SPIRV::LinkageType::LinkageType LnkType =
      (GV->isDeclaration() || GV->hasAvailableExternallyLinkage())
          ? SPIRV::LinkageType::Import
          : (GV->getLinkage() == GlobalValue::LinkOnceODRLinkage &&
                     STI.canUseExtension(SPIRV::Extension::SPV_KHR_linkonce_odr)
                 ? SPIRV::LinkageType::LinkOnceODR
                 : SPIRV::LinkageType::Export);

  Register Reg = GR.buildGlobalVariable(ResVReg, ResType, GlobalIdent, GV,
                                        Storage, Init, GlobalVar->isConstant(),
                                        HasLnkTy, LnkType, MIRBuilder, true);
  return Reg.isValid();
}
bool SPIRVInstructionSelector::selectLog10(Register ResVReg,
                                           const SPIRVType *ResType,
                                           MachineInstr &I) const {
  if (STI.canUseExtInstSet(SPIRV::InstructionSet::OpenCL_std)) {
    return selectExtInst(ResVReg, ResType, I, CL::log10);
  }

  // There is no log10 instruction in the GLSL Extended Instruction set, so it
  // is implemented as log2(x) * 0.30103.
  MachineIRBuilder MIRBuilder(I);
  MachineBasicBlock &BB = *I.getParent();

  // Build log2(x).
  Register VarReg = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  bool Result =
      BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpExtInst))
          .addDef(VarReg)
          .addUse(GR.getSPIRVTypeID(ResType))
          .addImm(static_cast<uint32_t>(SPIRV::InstructionSet::GLSL_std_450))
          .addImm(GL::Log2)
          .add(I.getOperand(1))
          .constrainAllUses(TII, TRI, RBI);

  // Build 0.30103.
  assert(ResType->getOpcode() == SPIRV::OpTypeVector ||
         ResType->getOpcode() == SPIRV::OpTypeFloat);
  const SPIRVType *SpirvScalarType =
      ResType->getOpcode() == SPIRV::OpTypeVector
          ? GR.getSPIRVTypeForVReg(ResType->getOperand(1).getReg())
          : ResType;
  Register ScaleReg =
      GR.buildConstantFP(APFloat(0.30103f), MIRBuilder, SpirvScalarType);

  // Multiply log2(x) by 0.30103 to get log10(x) result.
  auto Opcode = ResType->getOpcode() == SPIRV::OpTypeVector
                    ? SPIRV::OpVectorTimesScalar
                    : SPIRV::OpFMulS;
  Result &= BuildMI(BB, I, I.getDebugLoc(), TII.get(Opcode))
                .addDef(ResVReg)
                .addUse(GR.getSPIRVTypeID(ResType))
                .addUse(VarReg)
                .addUse(ScaleReg)
                .constrainAllUses(TII, TRI, RBI);

  return Result;
}
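// The GLSL path relies on the identity log10(x) = log2(x) / log2(10), with
// 1 / log2(10) folded to the single-precision constant 0.30103 used above;
// OpVectorTimesScalar handles the vector case so the constant stays scalar.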
bool SPIRVInstructionSelector::selectSpvThreadId(Register ResVReg,
                                                 const SPIRVType *ResType,
                                                 MachineInstr &I) const {
  // The thread ID is read from the GlobalInvocationId builtin: a v3uint Input
  // variable is created, loaded, and the requested component extracted.
  MachineIRBuilder MIRBuilder(I);
  const SPIRVType *U32Type = GR.getOrCreateSPIRVIntegerType(32, MIRBuilder);
  const SPIRVType *Vec3Ty =
      GR.getOrCreateSPIRVVectorType(U32Type, 3, MIRBuilder);
  const SPIRVType *PtrType = GR.getOrCreateSPIRVPointerType(
      Vec3Ty, MIRBuilder, SPIRV::StorageClass::Input);

  // Create a new register for the GlobalInvocationId builtin variable.
  Register NewRegister =
      MIRBuilder.getMRI()->createVirtualRegister(&SPIRV::IDRegClass);
  MIRBuilder.getMRI()->setType(NewRegister, LLT::pointer(0, 32));
  GR.assignSPIRVTypeToVReg(PtrType, NewRegister, MIRBuilder.getMF());

  // Build the GlobalInvocationId global variable with the necessary
  // decorations.
  Register Variable = GR.buildGlobalVariable(
      NewRegister, PtrType,
      getLinkStringForBuiltIn(SPIRV::BuiltIn::GlobalInvocationId), nullptr,
      SPIRV::StorageClass::Input, nullptr, true, true,
      SPIRV::LinkageType::Import, MIRBuilder, false);

  // Create a new register for the loaded value.
  Register LoadedRegister = MRI->createVirtualRegister(&SPIRV::IDRegClass);
  MIRBuilder.getMRI()->setType(LoadedRegister, LLT::pointer(0, 32));
  GR.assignSPIRVTypeToVReg(Vec3Ty, LoadedRegister, MIRBuilder.getMF());

  // Load the v3uint value from the global variable.
  BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(SPIRV::OpLoad))
      .addDef(LoadedRegister)
      .addUse(GR.getSPIRVTypeID(Vec3Ty))
      .addUse(Variable);

  // Get the thread ID index. The operand is expected to be a constant
  // immediate value, wrapped in a type assignment.
  assert(I.getOperand(2).isReg());
  Register ThreadIdReg = I.getOperand(2).getReg();
  SPIRVType *ConstTy = this->MRI->getVRegDef(ThreadIdReg);
  assert(ConstTy && ConstTy->getOpcode() == SPIRV::ASSIGN_TYPE &&
         ConstTy->getOperand(1).isReg());
  Register ConstReg = ConstTy->getOperand(1).getReg();
  const MachineInstr *Const = this->MRI->getVRegDef(ConstReg);
  assert(Const && Const->getOpcode() == TargetOpcode::G_CONSTANT);
  const uint32_t ThreadId =
      Const->getOperand(1).getCImm()->getValue().getZExtValue();

  // Extract the requested component from the loaded vector value.
  MachineBasicBlock &BB = *I.getParent();
  auto MIB = BuildMI(BB, I, I.getDebugLoc(), TII.get(SPIRV::OpCompositeExtract))
                 .addDef(ResVReg)
                 .addUse(GR.getSPIRVTypeID(ResType))
                 .addUse(LoadedRegister)
                 .addImm(ThreadId);
  return MIB.constrainAllUses(TII, TRI, RBI);
}
namespace llvm {
InstructionSelector *
createSPIRVInstructionSelector(const SPIRVTargetMachine &TM,
                               const SPIRVSubtarget &Subtarget,
                               const RegisterBankInfo &RBI) {
  return new SPIRVInstructionSelector(TM, Subtarget, RBI);
}
} // namespace llvm