#include "llvm/IR/IntrinsicsRISCV.h"

#define DEBUG_TYPE "riscv-isel"

using namespace MIPatternMatch;

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
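// Note: RISCVGenGlobalISel.inc is deliberately included several times, each
// time under a different GET_GLOBALISEL_* guard macro, so the TableGen-emitted
// selector is spliced into this file piece by piece: the predicate bitset
// here, member declarations further down, and finally the selectImpl()
// implementation itself.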
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB,
                  MachineRegisterInfo &MRI, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root, unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }
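  // The templated overloads above exist so that TableGen patterns can name a
  // zero-argument complex renderer such as selectSHXADDOp<2>; the template
  // parameter bakes the shift amount into the thunk, which forwards to the
  // shared two-argument implementation.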
#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      // ...
#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root) const {
  // ...
  const LLT ShiftLLT = MRI.getType(RootReg);
  // ...
    ShAmtReg = ZExtSrcReg;
  if (ShMask.isSubsetOf(AndMask)) {
    ShAmtReg = AndSrcReg;
  // ...
    KnownBits Known = KB->getKnownBits(AndSrcReg);
    if (ShMask.isSubsetOf(AndMask | Known.Zero))
      ShAmtReg = AndSrcReg;
  // ...
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
  // ...
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      // ...
      ShAmtReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
  // ...
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      // ...
      ShAmtReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // ...
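// selectSHXADDOp matches the shifted operand of Zba's SH1ADD/SH2ADD/SH3ADD,
// which compute rd = rs2 + (rs1 << N) for N in {1, 2, 3}.  The
// Leading/Trailing zero-count analysis below recognizes (x << C2) & mask and
// (x >> C2) & mask operands whose shifted-mask shape lets the AND and the
// original shift collapse into one SRLI or SLLI feeding the shNadd addend.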
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  // ...
  const unsigned XLen = STI.getXLen();
  // ...
    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // ...
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      // ...
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
        // ...
            .addImm(Leading + Trailing);
  // ...
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();
    // ...
    Register DstReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // ...
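// selectSHXADD_UWOp is the analogous matcher for Zba's *.uw forms
// (SH1ADD.UW and friends), which zero-extend the low 32 bits of rs1 before
// the shift-and-add; that is why the check below is 32-bit relative
// (Leading == 32 - ShAmt) rather than XLen relative.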
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  // ...
    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // ...
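// selectAddrRegImm folds addressing into RISC-V's single load/store mode:
// base register plus 12-bit signed immediate.  A frame index can serve as
// the base directly, and a G_PTR_ADD with an in-range constant offset
// (isInt<12>) folds into the immediate field.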
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  // ...
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
  // ...
  if (isBaseWithConstantOffset(Root, MRI)) {
    // ...
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  // ...

  // In getOperandsForBranch: adjust comparisons against suitable constants
  // to use a comparison with x0 where possible.
  case CmpInst::Predicate::ICMP_SGT:
    // ...
  case CmpInst::Predicate::ICMP_SLT:
    // ...

  // ...
  case CmpInst::Predicate::ICMP_EQ:
  case CmpInst::Predicate::ICMP_NE:
  case CmpInst::Predicate::ICMP_ULT:
  case CmpInst::Predicate::ICMP_SLT:
  case CmpInst::Predicate::ICMP_UGE:
  case CmpInst::Predicate::ICMP_SGE:
    // These map directly onto RISC-V branch condition codes.
    break;
  case CmpInst::Predicate::ICMP_SGT:
  case CmpInst::Predicate::ICMP_SLE:
  case CmpInst::Predicate::ICMP_UGT:
  case CmpInst::Predicate::ICMP_ULE:
    // No direct branch for these; swap the operands and use the swapped
    // predicate instead.
    Pred = CmpInst::getSwappedPredicate(Pred);
    std::swap(LHS, RHS);
    break;
  // ...
  CC = getRISCVCCFromICmp(Pred);
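  // Worked identity used above: a >s b is the same as b <s a, so ICMP_SGT
  // with swapped operands becomes ICMP_SLT, which the branch unit supports
  // natively; SLE/UGT/ULE are handled the same way via getSwappedPredicate().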
bool RISCVInstructionSelector::select(MachineInstr &MI) {
  // ...
  preISelLower(MI, MIB, MRI);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI.getType(DefReg);
      // ...
          MRI.getRegClassOrRegBank(DefReg);
      // ...
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
      // ...
      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, MRI);
    }
    // ...
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;
  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    // ...
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // ...
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    // ...
    unsigned Size = MRI.getType(DstReg).getSizeInBits();
    if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      Register GPRReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
        return false;

      unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    } else {
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");
      // ...
      Register GPRRegHigh = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI.createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      // ...
      MIB.buildInstr(RISCV::BuildPairF64Pseudo, {DstReg},
                     {GPRRegLow, GPRRegHigh});
    }

    MI.eraseFromParent();
    return true;
  }
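  // FP immediates take a detour through the GPR file: materialize the bit
  // pattern as an integer, then move it across with FMV_{H,W,D}_X.  The one
  // case with no single GPR wide enough (an f64 constant on RV32) is instead
  // built from two 32-bit halves via BuildPairF64Pseudo.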
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // ...
    }

    return selectAddr(MI, MIB, MRI, GV->isDSOLocal(),
                      GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB, MRI);
  case TargetOpcode::G_BRCOND: {
    // ...
        .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_BRJT: {
    // ...
    assert((EntrySize == 4 || (Subtarget->is64Bit() && EntrySize == 8)) &&
           "Unsupported jump-table entry size");
    // ...
           "Unexpected jump-table entry kind");

    // Scale the index by the entry size.
    auto SLL =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {MI.getOperand(2)})
            .addImm(Log2_32(EntrySize));
    if (!SLL.constrainAllUses(TII, TRI, RBI))
      return false;

    // Add the scaled index to the jump-table base address.
    auto ADD = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
                              {MI.getOperand(0), SLL.getReg(0)});
    if (!ADD.constrainAllUses(TII, TRI, RBI))
      return false;

    // Load the target address out of the table.
    unsigned LdOpc = EntrySize == 8 ? RISCV::LD : RISCV::LW;
    auto Dest =
        MIB.buildInstr(LdOpc, {&RISCV::GPRRegClass}, {ADD.getReg(0)})
    // ...
    if (!Dest.constrainAllUses(TII, TRI, RBI))
      return false;
    // ...
      // Label-difference entries are relative to the table base; add it back.
      Dest = MIB.buildInstr(RISCV::ADD, {&RISCV::GPRRegClass},
                            {Dest.getReg(0), MI.getOperand(0)});
      if (!Dest.constrainAllUses(TII, TRI, RBI))
        return false;
    // ...
    MIB.buildInstr(RISCV::PseudoBRIND, {}, {Dest.getReg(0)}).addImm(0);
    // ...
    MI.eraseFromParent();
    return true;
  }
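  // G_BRJT thus expands to the classic jump-table sequence: SLLI scales the
  // index to a byte offset, ADD forms the entry address, LW/LD fetches the
  // entry, label-difference tables add the base back in, and PseudoBRIND
  // performs the indirect branch.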
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    // ...
  case TargetOpcode::G_SEXT_INREG:
    return selectSExtInreg(MI, MIB);
  case TargetOpcode::G_FRAME_INDEX: {
    // ...
    MI.setDesc(TII.get(RISCV::ADDI));
    // ...
  }
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB, MRI);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB, MRI);
  case TargetOpcode::G_FENCE: {
    // ...
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB, MRI);
  case TargetOpcode::G_MERGE_VALUES:
    return selectMergeValues(MI, MIB, MRI);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB, MRI);
  default:
    return false;
  }
}
bool RISCVInstructionSelector::selectMergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_MERGE_VALUES);

  // Build an FPR64 from a pair of GPR halves.
  if (MI.getNumOperands() != 3)
    return false;
  // ...
  if (!isRegInFprb(Dst, MRI) || !isRegInGprb(Lo, MRI) || !isRegInGprb(Hi, MRI))
    return false;
  MI.setDesc(TII.get(RISCV::BuildPairF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}
bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  // Split an FPR64 into a pair of GPR halves.
  if (MI.getNumOperands() != 3)
    return false;
  // ...
  if (!isRegInFprb(Src, MRI) || !isRegInGprb(Lo, MRI) || !isRegInGprb(Hi, MRI))
    return false;
  MI.setDesc(TII.get(RISCV::SplitF64Pseudo));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}
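// On RV32 with the D extension, an s64 value lives in the FPR bank while its
// two s32 halves live in GPRs, so merge/unmerge become the BuildPairF64Pseudo
// / SplitF64Pseudo pair once the register banks check out.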
bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB,
                                                 MachineRegisterInfo &MRI) {
  Register PtrReg = Op.getReg();
  assert(MRI.getType(PtrReg).isPointer() && "Operand is not a pointer!");

  // ...
  MRI.setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}
void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB,
                                            MachineRegisterInfo &MRI) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    // ...
    replacePtrWithInt(MI.getOperand(1), MIB, MRI);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI.setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    // ...
    replacePtrWithInt(MI.getOperand(1), MIB, MRI);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI.setType(DstReg, sXLen);
    break;
  }
  }
}
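// Pointers and XLen-wide integers share the GPRB bank, so pre-selection
// rewrites pointer arithmetic into plain integer ops: G_PTR_ADD becomes
// G_ADD and G_PTRMASK becomes G_AND, with the pointer operand routed through
// a freshly selected G_PTRTOINT by replacePtrWithInt above.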
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  // ...

  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
  // ...

  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  // ...

  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  // ...

  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  // ...

  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
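// Each fragment above is the body of a custom operand renderer: TableGen
// patterns invoke it to turn a matched G_CONSTANT into a transformed
// immediate on the instruction being built (negated, XLen - C, trailing-zero
// count, and so on), which is why every renderer asserts it was handed a
// G_CONSTANT with OpIdx == -1.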
  if (RB.getID() == RISCV::GPRBRegBankID) {
    // ...
    return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return &RISCV::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  if (RB.getID() == RISCV::VRBRegBankID) {
    // ...
    return &RISCV::VRRegClass;
    // ...
    return &RISCV::VRM2RegClass;
    // ...
    return &RISCV::VRM4RegClass;
    // ...
    return &RISCV::VRM8RegClass;
  }
bool RISCVInstructionSelector::isRegInGprb(Register Reg,
                                           MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg,
                                           MachineRegisterInfo &MRI) const {
  return RBI.getRegBank(Reg, MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}
  // ...
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI.getType(DstReg), *RBI.getRegBank(DstReg, MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");
  // ...
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    // ...
  }
  MI.setDesc(TII.get(RISCV::COPY));
bool RISCVInstructionSelector::selectImplicitDef(
    MachineInstr &MI, MachineIRBuilder &MIB, MachineRegisterInfo &MRI) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI.getType(DstReg), *RBI.getRegBank(DstReg, MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, MRI)) {
    // ...
  }
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  // ...
  RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, MRI);
  // ...
  unsigned NumInsts = Seq.size();
  // ...
  for (unsigned i = 0; i < NumInsts; i++) {
    // Intermediate steps land in fresh GPR vregs; the last one in DstReg.
    // ...
            ? MRI.createVirtualRegister(&RISCV::GPRRegClass)
    // ...
    switch (I.getOpndKind()) {
    // ...
          {SrcReg, Register(RISCV::X0)});
    // ...
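  // materializeImm defers to RISCVMatInt::generateInstSeq, which plans the
  // canonical LUI/ADDI(/SLLI/...) expansion and reports, per step, whether
  // the extra operand is another register, X0, or an immediate.  For an
  // illustrative constant like 0x12345678 the sequence comes back roughly as:
  //   lui  tmp, 0x12345      ; upper 20 bits
  //   addi dst, tmp, 0x678   ; lower 12 bits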
bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB,
                                          MachineRegisterInfo &MRI,
                                          bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");
  // ...
  const LLT DefTy = MRI.getType(DefReg);
  // ...
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing for DSO-local symbols.
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      // ...
    }
    // ...
    MI.eraseFromParent();
    return true;
  }

  switch (TM.getCodeModel()) {
  // ...
              getName(), "Unsupported code model for lowering", MI);
    // ...
    Register AddrHiDest = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ...
    MI.eraseFromParent();
    return true;
  // ...
    MI.eraseFromParent();
    return true;
  // ...
  MI.setDesc(TII.get(RISCV::PseudoLLA));
  // ...
}
bool RISCVInstructionSelector::selectSExtInreg(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  // ...
  unsigned SrcSize = MI.getOperand(2).getImm();

  MachineInstrBuilder NewMI;
  if (SrcSize == 32) {
    assert(Subtarget->is64Bit() && "Unexpected extend");
    // addiw rd, rs, 0 sign-extends the low 32 bits.
    NewMI = MIB.buildInstr(RISCV::ADDIW, {DstReg}, {SrcReg}).addImm(0U);
  } else {
    assert(Subtarget->hasStdExtZbb() && "Unexpected extension");
    assert((SrcSize == 8 || SrcSize == 16) && "Unexpected size");
    unsigned Opc = SrcSize == 16 ? RISCV::SEXT_H : RISCV::SEXT_B;
    NewMI = MIB.buildInstr(Opc, {DstReg}, {SrcReg});
  }
  // ...
  MI.eraseFromParent();
  return true;
}
bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB,
                                            MachineRegisterInfo &MRI) const {
  auto &SelectMI = cast<GSelect>(MI);
  // ...
  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI.getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }
  // ...
          .addReg(SelectMI.getTrueReg())
          .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  // ...
}
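// The Select_*_Using_CC_GPR opcodes are pseudos: a later expansion pass turns
// each one into a conditional branch over a move, since base RISC-V has no
// native select instruction.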
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  // ...
  case CmpInst::FCMP_OLT:
    return Size == 16 ? RISCV::FLT_H : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 16 ? RISCV::FLE_H : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 16 ? RISCV::FEQ_H : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
  // ...
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  // Try the inverse predicate, then the swapped one, then the swapped
  // inverse, setting NeedInvert / swapping operands as appropriate.
  CmpInst::Predicate InvPred = CmpInst::getInversePredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    // ...
  }
  // ...
  if (isLegalFCmpPredicate(InvPred)) {
    // ...
  }
  // ...
  if (isLegalFCmpPredicate(InvPred)) {
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB,
                                               MachineRegisterInfo &MRI) const {
  auto &CmpMI = cast<GFCmp>(MI);
  // ...
  unsigned Size = MRI.getType(LHS).getSizeInBits();
  // ...
  bool NeedInvert = false;
  // ...
    TmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // ...
  if (!Cmp.constrainAllUses(TII, TRI, RBI))
    return false;
  // ...
    // FCMP_ONE: ordered not-equal is (LHS < RHS) | (RHS < LHS).
    // ...
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    // ...
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    TmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    auto Or =
        MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!Or.constrainAllUses(TII, TRI, RBI))
      return false;
  // ...
    // FCMP_ORD: both operands are ordered iff each compares equal to itself.
    // ...
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    // ...
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    TmpReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    auto And =
        MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!And.constrainAllUses(TII, TRI, RBI))
      return false;
  // ...
  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    if (!Xor.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}
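// Only FEQ/FLT/FLE exist in hardware, so the remaining predicates are
// synthesized: FCMP_ONE ORs the two strict orderings, FCMP_ORD ANDs the two
// self-equality checks (a NaN fails FEQ against itself), and the unordered
// predicates reuse the corresponding ordered result inverted via XORI 1.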
void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // Under Ztso only a seq_cst fence still needs a real FENCE instruction.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        /* ... */) {
      // ...
    }
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }
  // ...
  // A singlethread fence constrains only the compiler, not the hardware.
  MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
  return;
  // ...
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  // ...
  case AtomicOrdering::AcquireRelease:
    // ...
  case AtomicOrdering::Acquire:
    // ...
  case AtomicOrdering::Release:
    // ...
  case AtomicOrdering::SequentiallyConsistent:
    // ...
  }
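  // A RISC-V FENCE encodes predecessor and successor access sets as R/W
  // bits: acquire becomes "fence r, rw", release "fence rw, w", and seq_cst
  // the full "fence rw, rw"; the Pred/Succ pair picked in the switch above
  // holds exactly those bit sets.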
InstructionSelector *
llvm::createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                                     const RISCVSubtarget &Subtarget,
                                     const RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}