25#include "llvm/IR/IntrinsicsRISCV.h"
28#define DEBUG_TYPE "riscv-isel"
33#define GET_GLOBALISEL_PREDICATE_BITSET
34#include "RISCVGenGlobalISel.inc"
35#undef GET_GLOBALISEL_PREDICATE_BITSET
57 static constexpr unsigned MaxRecursionDepth = 6;
60 const unsigned Depth = 0)
const;
86 bool IsExternWeak =
false)
const;
94 unsigned &CurOp,
bool IsMasked,
95 bool IsStridedOrIndexed,
96 LLT *IndexVT =
nullptr)
const;
102 unsigned ShiftWidth)
const;
103 ComplexRendererFns selectShiftMaskXLen(
MachineOperand &Root)
const {
104 return selectShiftMask(Root, STI.
getXLen());
106 ComplexRendererFns selectShiftMask32(
MachineOperand &Root)
const {
107 return selectShiftMask(Root, 32);
111 ComplexRendererFns selectSExtBits(
MachineOperand &Root,
unsigned Bits)
const;
112 template <
unsigned Bits>
114 return selectSExtBits(Root, Bits);
117 ComplexRendererFns selectZExtBits(
MachineOperand &Root,
unsigned Bits)
const;
118 template <
unsigned Bits>
120 return selectZExtBits(Root, Bits);
123 ComplexRendererFns selectSHXADDOp(
MachineOperand &Root,
unsigned ShAmt)
const;
124 template <
unsigned ShAmt>
126 return selectSHXADDOp(Root, ShAmt);
130 unsigned ShAmt)
const;
131 template <
unsigned ShAmt>
132 ComplexRendererFns selectSHXADD_UWOp(
MachineOperand &Root)
const {
133 return selectSHXADD_UWOp(Root, ShAmt);
173#define GET_GLOBALISEL_PREDICATES_DECL
174#include "RISCVGenGlobalISel.inc"
175#undef GET_GLOBALISEL_PREDICATES_DECL
177#define GET_GLOBALISEL_TEMPORARIES_DECL
178#include "RISCVGenGlobalISel.inc"
179#undef GET_GLOBALISEL_TEMPORARIES_DECL
184#define GET_GLOBALISEL_IMPL
185#include "RISCVGenGlobalISel.inc"
186#undef GET_GLOBALISEL_IMPL
188RISCVInstructionSelector::RISCVInstructionSelector(
191 : STI(STI),
TII(*STI.getInstrInfo()),
TRI(*STI.getRegisterInfo()), RBI(RBI),
195#include
"RISCVGenGlobalISel.inc"
198#include
"RISCVGenGlobalISel.inc"
204bool RISCVInstructionSelector::hasAllNBitUsers(
const MachineInstr &
MI,
206 const unsigned Depth)
const {
208 assert((
MI.getOpcode() == TargetOpcode::G_ADD ||
209 MI.getOpcode() == TargetOpcode::G_SUB ||
210 MI.getOpcode() == TargetOpcode::G_MUL ||
211 MI.getOpcode() == TargetOpcode::G_SHL ||
212 MI.getOpcode() == TargetOpcode::G_LSHR ||
213 MI.getOpcode() == TargetOpcode::G_AND ||
214 MI.getOpcode() == TargetOpcode::G_OR ||
215 MI.getOpcode() == TargetOpcode::G_XOR ||
216 MI.getOpcode() == TargetOpcode::G_SEXT_INREG ||
Depth != 0) &&
217 "Unexpected opcode");
219 if (
Depth >= RISCVInstructionSelector::MaxRecursionDepth)
222 auto DestReg =
MI.getOperand(0).getReg();
224 assert(UserOp.getParent() &&
"UserOp must have a parent");
225 const MachineInstr &UserMI = *UserOp.getParent();
234 case RISCV::FCVT_D_W:
235 case RISCV::FCVT_S_W:
278InstructionSelector::ComplexRendererFns
279RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
280 unsigned ShiftWidth)
const {
284 using namespace llvm::MIPatternMatch;
290 ShAmtReg = ZExtSrcReg;
309 APInt ShMask(AndMask.
getBitWidth(), ShiftWidth - 1);
310 if (ShMask.isSubsetOf(AndMask)) {
311 ShAmtReg = AndSrcReg;
315 KnownBits Known = VT->getKnownBits(AndSrcReg);
316 if (ShMask.isSubsetOf(AndMask | Known.
Zero))
317 ShAmtReg = AndSrcReg;
324 if (Imm != 0 &&
Imm.urem(ShiftWidth) == 0)
329 if (Imm != 0 &&
Imm.urem(ShiftWidth) == 0) {
333 unsigned NegOpc = Subtarget->
is64Bit() ? RISCV::SUBW : RISCV::SUB;
334 return {{[=](MachineInstrBuilder &MIB) {
335 MachineIRBuilder(*MIB.getInstr())
336 .buildInstr(NegOpc, {ShAmtReg}, {
Register(RISCV::X0),
Reg});
337 MIB.addReg(ShAmtReg);
340 if (
Imm.urem(ShiftWidth) == ShiftWidth - 1) {
344 return {{[=](MachineInstrBuilder &MIB) {
345 MachineIRBuilder(*MIB.getInstr())
346 .buildInstr(RISCV::XORI, {ShAmtReg}, {
Reg})
348 MIB.addReg(ShAmtReg);
353 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
356InstructionSelector::ComplexRendererFns
357RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
358 unsigned Bits)
const {
362 MachineInstr *RootDef = MRI->
getVRegDef(RootReg);
364 if (RootDef->
getOpcode() == TargetOpcode::G_SEXT_INREG &&
367 {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->
getOperand(1)); }}};
371 if ((
Size - VT->computeNumSignBits(RootReg)) < Bits)
372 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
377InstructionSelector::ComplexRendererFns
378RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
379 unsigned Bits)
const {
387 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
392 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
396 return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
401InstructionSelector::ComplexRendererFns
402RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
403 unsigned ShAmt)
const {
404 using namespace llvm::MIPatternMatch;
410 const unsigned XLen = STI.
getXLen();
429 if (
Mask.isShiftedMask()) {
430 unsigned Leading = XLen -
Mask.getActiveBits();
431 unsigned Trailing =
Mask.countr_zero();
434 if (*LeftShift && Leading == 0 && C2.
ult(Trailing) && Trailing == ShAmt) {
436 return {{[=](MachineInstrBuilder &MIB) {
437 MachineIRBuilder(*MIB.getInstr())
438 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
446 if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
448 return {{[=](MachineInstrBuilder &MIB) {
449 MachineIRBuilder(*MIB.getInstr())
450 .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
451 .addImm(Leading + Trailing);
472 unsigned Leading = XLen -
Mask.getActiveBits();
473 unsigned Trailing =
Mask.countr_zero();
487 return {{[=](MachineInstrBuilder &MIB) {
488 MachineIRBuilder(*MIB.getInstr())
489 .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
499InstructionSelector::ComplexRendererFns
500RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
501 unsigned ShAmt)
const {
502 using namespace llvm::MIPatternMatch;
519 if (
Mask.isShiftedMask()) {
520 unsigned Leading =
Mask.countl_zero();
521 unsigned Trailing =
Mask.countr_zero();
522 if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
524 return {{[=](MachineInstrBuilder &MIB) {
525 MachineIRBuilder(*MIB.getInstr())
526 .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
537InstructionSelector::ComplexRendererFns
538RISCVInstructionSelector::renderVLOp(MachineOperand &Root)
const {
539 assert(Root.
isReg() &&
"Expected operand to be a Register");
542 if (RootDef->
getOpcode() == TargetOpcode::G_CONSTANT) {
544 if (
C->getValue().isAllOnes())
548 return {{[=](MachineInstrBuilder &MIB) {
553 uint64_t ZExtC =
C->getZExtValue();
554 return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
557 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.
getReg()); }}};
560InstructionSelector::ComplexRendererFns
561RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root)
const {
566 if (RootDef->
getOpcode() == TargetOpcode::G_FRAME_INDEX) {
568 [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->
getOperand(1)); },
569 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
573 if (isBaseWithConstantOffset(Root, *MRI)) {
581 if (LHSDef->
getOpcode() == TargetOpcode::G_FRAME_INDEX)
583 [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->
getOperand(1)); },
584 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
587 return {{[=](MachineInstrBuilder &MIB) { MIB.add(
LHS); },
588 [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
594 return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.
getReg()); },
595 [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
604 case CmpInst::Predicate::ICMP_EQ:
606 case CmpInst::Predicate::ICMP_NE:
608 case CmpInst::Predicate::ICMP_ULT:
610 case CmpInst::Predicate::ICMP_SLT:
612 case CmpInst::Predicate::ICMP_UGE:
614 case CmpInst::Predicate::ICMP_SGE:
680 CC = getRISCVCCFromICmp(Pred);
687 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
692 return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
694 return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
696 return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
698 return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
706 const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
710 return IsStore ? RISCV::SB : RISCV::LBU;
712 return IsStore ? RISCV::SH : RISCV::LH;
714 return IsStore ? RISCV::SW : RISCV::LW;
716 return IsStore ? RISCV::SD : RISCV::LD;
722void RISCVInstructionSelector::addVectorLoadStoreOperands(
723 MachineInstr &
I, SmallVectorImpl<Register> &SrcOps,
unsigned &CurOp,
724 bool IsMasked,
bool IsStridedOrIndexed, LLT *IndexVT)
const {
726 auto PtrReg =
I.getOperand(CurOp++).getReg();
730 if (IsStridedOrIndexed) {
731 auto StrideReg =
I.getOperand(CurOp++).getReg();
734 *IndexVT = MRI->
getType(StrideReg);
739 auto MaskReg =
I.getOperand(CurOp++).getReg();
744bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
745 MachineInstr &
I)
const {
752 case Intrinsic::riscv_vlm:
753 case Intrinsic::riscv_vle:
754 case Intrinsic::riscv_vle_mask:
755 case Intrinsic::riscv_vlse:
756 case Intrinsic::riscv_vlse_mask: {
757 bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
758 IntrinID == Intrinsic::riscv_vlse_mask;
759 bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
760 IntrinID == Intrinsic::riscv_vlse_mask;
761 LLT VT = MRI->
getType(
I.getOperand(0).getReg());
765 const Register DstReg =
I.getOperand(0).getReg();
768 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
773 if (HasPassthruOperand) {
774 auto PassthruReg =
I.getOperand(CurOp++).getReg();
780 addVectorLoadStoreOperands(
I, SrcOps, CurOp, IsMasked, IsStrided);
783 const RISCV::VLEPseudo *
P =
784 RISCV::getVLEPseudo(IsMasked, IsStrided,
false, Log2SEW,
785 static_cast<unsigned>(LMUL));
787 MachineInstrBuilder PseudoMI =
788 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(
P->Pseudo), DstReg);
793 auto VLOpFn = renderVLOp(
I.getOperand(CurOp++));
794 for (
auto &RenderFn : *VLOpFn)
803 Policy =
I.getOperand(CurOp++).getImm();
813 case Intrinsic::riscv_vloxei:
814 case Intrinsic::riscv_vloxei_mask:
815 case Intrinsic::riscv_vluxei:
816 case Intrinsic::riscv_vluxei_mask: {
817 bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
818 IntrinID == Intrinsic::riscv_vluxei_mask;
819 bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
820 IntrinID == Intrinsic::riscv_vloxei_mask;
821 LLT VT = MRI->
getType(
I.getOperand(0).getReg());
825 const Register DstReg =
I.getOperand(0).getReg();
828 bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
833 if (HasPassthruOperand) {
834 auto PassthruReg =
I.getOperand(CurOp++).getReg();
841 addVectorLoadStoreOperands(
I, SrcOps, CurOp, IsMasked,
true, &IndexVT);
847 if (IndexLog2EEW == 6 && !Subtarget->
is64Bit()) {
849 "values when XLEN=32");
851 const RISCV::VLX_VSXPseudo *
P = RISCV::getVLXPseudo(
852 IsMasked, IsOrdered, IndexLog2EEW,
static_cast<unsigned>(LMUL),
853 static_cast<unsigned>(IndexLMUL));
855 MachineInstrBuilder PseudoMI =
856 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(
P->Pseudo), DstReg);
861 auto VLOpFn = renderVLOp(
I.getOperand(CurOp++));
862 for (
auto &RenderFn : *VLOpFn)
871 Policy =
I.getOperand(CurOp++).getImm();
881 case Intrinsic::riscv_vsm:
882 case Intrinsic::riscv_vse:
883 case Intrinsic::riscv_vse_mask:
884 case Intrinsic::riscv_vsse:
885 case Intrinsic::riscv_vsse_mask: {
886 bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
887 IntrinID == Intrinsic::riscv_vsse_mask;
888 bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
889 IntrinID == Intrinsic::riscv_vsse_mask;
890 LLT VT = MRI->
getType(
I.getOperand(1).getReg());
898 auto PassthruReg =
I.getOperand(CurOp++).getReg();
901 addVectorLoadStoreOperands(
I, SrcOps, CurOp, IsMasked, IsStrided);
904 const RISCV::VSEPseudo *
P = RISCV::getVSEPseudo(
905 IsMasked, IsStrided, Log2SEW,
static_cast<unsigned>(LMUL));
907 MachineInstrBuilder PseudoMI =
913 auto VLOpFn = renderVLOp(
I.getOperand(CurOp++));
914 for (
auto &RenderFn : *VLOpFn)
927 case Intrinsic::riscv_vsoxei:
928 case Intrinsic::riscv_vsoxei_mask:
929 case Intrinsic::riscv_vsuxei:
930 case Intrinsic::riscv_vsuxei_mask: {
931 bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
932 IntrinID == Intrinsic::riscv_vsuxei_mask;
933 bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
934 IntrinID == Intrinsic::riscv_vsoxei_mask;
935 LLT VT = MRI->
getType(
I.getOperand(1).getReg());
943 auto PassthruReg =
I.getOperand(CurOp++).getReg();
947 addVectorLoadStoreOperands(
I, SrcOps, CurOp, IsMasked,
true, &IndexVT);
953 if (IndexLog2EEW == 6 && !Subtarget->
is64Bit()) {
955 "values when XLEN=32");
957 const RISCV::VLX_VSXPseudo *
P = RISCV::getVSXPseudo(
958 IsMasked, IsOrdered, IndexLog2EEW,
static_cast<unsigned>(LMUL),
959 static_cast<unsigned>(IndexLMUL));
961 MachineInstrBuilder PseudoMI =
967 auto VLOpFn = renderVLOp(
I.getOperand(CurOp++));
968 for (
auto &RenderFn : *VLOpFn)
984bool RISCVInstructionSelector::selectIntrinsic(MachineInstr &
I)
const {
991 case Intrinsic::riscv_vsetvli:
992 case Intrinsic::riscv_vsetvlimax: {
994 bool VLMax = IntrinID == Intrinsic::riscv_vsetvlimax;
996 unsigned Offset = VLMax ? 2 : 3;
1004 Register DstReg =
I.getOperand(0).getReg();
1007 unsigned Opcode = RISCV::PseudoVSETVLI;
1011 Register AVLReg =
I.getOperand(2).getReg();
1013 uint64_t AVL = AVLConst->Value.getZExtValue();
1020 MachineInstr *AVLDef = MRI->
getVRegDef(AVLReg);
1021 if (AVLDef && AVLDef->
getOpcode() == TargetOpcode::G_CONSTANT) {
1023 if (
C->getValue().isAllOnes())
1030 Opcode = RISCV::PseudoVSETVLIX0;
1032 Register AVLReg =
I.getOperand(2).getReg();
1037 uint64_t AVL = AVLConst->Value.getZExtValue();
1039 MachineInstr *PseudoMI =
1041 TII.get(RISCV::PseudoVSETIVLI), DstReg)
1044 I.eraseFromParent();
1051 MachineInstr *PseudoMI =
1052 BuildMI(*
I.getParent(),
I,
I.getDebugLoc(),
TII.get(Opcode), DstReg)
1055 I.eraseFromParent();
1062bool RISCVInstructionSelector::selectExtractSubvector(MachineInstr &
MI)
const {
1063 assert(
MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);
1068 LLT DstTy = MRI->
getType(DstReg);
1069 LLT SrcTy = MRI->
getType(SrcReg);
1071 unsigned Idx =
static_cast<unsigned>(
MI.getOperand(2).
getImm());
1077 std::tie(SubRegIdx, Idx) =
1079 SrcMVT, DstMVT, Idx, &
TRI);
1085 const TargetRegisterClass *DstRC =
TRI.getRegClass(DstRegClassID);
1090 const TargetRegisterClass *SrcRC =
TRI.getRegClass(SrcRegClassID);
1094 BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
TII.get(TargetOpcode::COPY),
1096 .
addReg(SrcReg, {}, SubRegIdx);
1098 MI.eraseFromParent();
1102bool RISCVInstructionSelector::selectInsertSubVector(MachineInstr &
MI)
const {
1103 assert(
MI.getOpcode() == TargetOpcode::G_INSERT_SUBVECTOR);
1107 Register SubVecReg =
MI.getOperand(2).getReg();
1109 LLT VecTy = MRI->
getType(VecReg);
1110 LLT SubVecTy = MRI->
getType(SubVecReg);
1115 unsigned Idx =
static_cast<unsigned>(
MI.getOperand(3).
getImm());
1118 std::tie(SubRegIdx, Idx) =
1120 VecMVT, SubVecMVT, Idx, &
TRI);
1130 const TargetRegisterClass *DstRC =
TRI.getRegClass(DstRegClassID);
1136 if (SubRegIdx == RISCV::NoSubRegister) {
1139 "Unexpected subvector insert");
1140 BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
TII.get(TargetOpcode::COPY),
1143 MI.eraseFromParent();
1149 MachineInstr *Ins =
BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
1150 TII.get(TargetOpcode::INSERT_SUBREG), DstReg)
1155 MI.eraseFromParent();
1160bool RISCVInstructionSelector::select(MachineInstr &
MI) {
1162 const unsigned Opc =
MI.getOpcode();
1164 if (!
MI.isPreISelOpcode() ||
Opc == TargetOpcode::G_PHI) {
1165 if (
Opc == TargetOpcode::PHI ||
Opc == TargetOpcode::G_PHI) {
1166 const Register DefReg =
MI.getOperand(0).getReg();
1167 const LLT DefTy = MRI->
getType(DefReg);
1172 const TargetRegisterClass *DefRC =
1181 DefRC =
TRI.getRegClassForTypeOnBank(DefTy, RB, STI.
is64Bit());
1188 MI.setDesc(
TII.get(TargetOpcode::PHI));
1199 if (selectImpl(
MI, *CoverageInfo))
1203 case TargetOpcode::G_ANYEXT:
1204 case TargetOpcode::G_PTRTOINT:
1205 case TargetOpcode::G_INTTOPTR:
1206 case TargetOpcode::G_TRUNC:
1207 case TargetOpcode::G_FREEZE:
1209 case TargetOpcode::G_CONSTANT: {
1211 int64_t
Imm =
MI.getOperand(1).getCImm()->getSExtValue();
1213 if (!materializeImm(DstReg, Imm,
MI))
1216 MI.eraseFromParent();
1219 case TargetOpcode::G_ZEXT:
1220 case TargetOpcode::G_SEXT: {
1221 bool IsSigned =
Opc != TargetOpcode::G_ZEXT;
1224 LLT SrcTy = MRI->
getType(SrcReg);
1231 RISCV::GPRBRegBankID &&
1232 "Unexpected ext regbank");
1235 if (IsSigned && SrcSize == 32) {
1236 MI.setDesc(
TII.get(RISCV::ADDIW));
1243 if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
1244 MI.setDesc(
TII.get(RISCV::ADD_UW));
1251 if (SrcSize == 16 &&
1252 (STI.hasStdExtZbb() || (!IsSigned && STI.hasStdExtZbkb()))) {
1253 MI.setDesc(
TII.get(IsSigned ? RISCV::SEXT_H
1254 : STI.isRV64() ? RISCV::ZEXT_H_RV64
1255 : RISCV::ZEXT_H_RV32));
1262 MachineInstr *ShiftLeft =
BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
1263 TII.get(RISCV::SLLI), ShiftLeftReg)
1267 MachineInstr *ShiftRight =
1269 TII.get(IsSigned ? RISCV::SRAI : RISCV::SRLI), DstReg)
1273 MI.eraseFromParent();
1276 case TargetOpcode::G_FCONSTANT: {
1279 const APFloat &FPimm =
MI.getOperand(1).getFPImm()->getValueAPF();
1288 if (!materializeImm(GPRReg,
Imm.getSExtValue(),
MI))
1292 unsigned Opcode =
Size == 64 ? RISCV::FMV_D_X
1293 :
Size == 32 ? RISCV::FMV_W_X
1295 MachineInstr *FMV =
BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
1296 TII.get(Opcode), DstReg)
1302 "Unexpected size or subtarget");
1306 MachineInstr *FCVT =
BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
1307 TII.get(RISCV::FCVT_D_W), DstReg)
1312 MI.eraseFromParent();
1320 if (!materializeImm(GPRRegHigh,
Imm.extractBits(32, 32).getSExtValue(),
1323 if (!materializeImm(GPRRegLow,
Imm.trunc(32).getSExtValue(),
MI))
1325 MachineInstr *PairF64 =
1327 TII.get(RISCV::BuildPairF64Pseudo), DstReg)
1333 MI.eraseFromParent();
1336 case TargetOpcode::G_GLOBAL_VALUE: {
1337 auto *GV =
MI.getOperand(1).getGlobal();
1338 if (GV->isThreadLocal()) {
1343 return selectAddr(
MI, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
1345 case TargetOpcode::G_JUMP_TABLE:
1346 case TargetOpcode::G_CONSTANT_POOL:
1347 return selectAddr(
MI);
1348 case TargetOpcode::G_BRCOND: {
1353 MachineInstr *Bcc =
BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
1357 .
addMBB(
MI.getOperand(1).getMBB());
1358 MI.eraseFromParent();
1362 case TargetOpcode::G_BRINDIRECT:
1363 MI.setDesc(
TII.get(RISCV::PseudoBRIND));
1367 case TargetOpcode::G_SELECT:
1368 return selectSelect(
MI);
1369 case TargetOpcode::G_FCMP:
1370 return selectFPCompare(
MI);
1371 case TargetOpcode::G_FENCE: {
1376 emitFence(FenceOrdering, FenceSSID,
MI);
1377 MI.eraseFromParent();
1380 case TargetOpcode::G_IMPLICIT_DEF:
1381 return selectImplicitDef(
MI);
1382 case TargetOpcode::G_UNMERGE_VALUES:
1384 case TargetOpcode::G_LOAD:
1385 case TargetOpcode::G_STORE: {
1389 LLT PtrTy = MRI->
getType(PtrReg);
1391 const RegisterBank &RB = *RBI.
getRegBank(ValReg, *MRI,
TRI);
1392 if (RB.
getID() != RISCV::GPRBRegBankID)
1396 const RegisterBank &PtrRB = *RBI.
getRegBank(PtrReg, *MRI,
TRI);
1399 "Load/Store pointer operand isn't a GPR");
1400 assert(PtrTy.
isPointer() &&
"Load/Store pointer operand isn't a pointer");
1417 if (NewOpc ==
MI.getOpcode())
1421 auto AddrModeFns = selectAddrRegImm(
MI.getOperand(1));
1426 MachineInstrBuilder NewInst =
1434 for (
auto &Fn : *AddrModeFns)
1436 MI.eraseFromParent();
1441 case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
1442 return selectIntrinsicWithSideEffects(
MI);
1443 case TargetOpcode::G_INTRINSIC:
1444 return selectIntrinsic(
MI);
1445 case TargetOpcode::G_EXTRACT_SUBVECTOR:
1446 return selectExtractSubvector(
MI);
1447 case TargetOpcode::G_INSERT_SUBVECTOR:
1448 return selectInsertSubVector(
MI);
1454bool RISCVInstructionSelector::selectUnmergeValues(MachineInstr &
MI)
const {
1455 assert(
MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);
1457 if (!Subtarget->hasStdExtZfa())
1461 if (
MI.getNumOperands() != 3)
1466 if (!isRegInFprb(Src) || !isRegInGprb(
Lo) || !isRegInGprb(
Hi))
1469 MachineInstr *ExtractLo =
BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
1470 TII.get(RISCV::FMV_X_W_FPR64),
Lo)
1474 MachineInstr *ExtractHi =
BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
1475 TII.get(RISCV::FMVH_X_D),
Hi)
1479 MI.eraseFromParent();
1483bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &
Op) {
1488 MachineInstr &ParentMI = *
Op.getParent();
1491 MachineInstr *PtrToInt =
1493 TII.get(TargetOpcode::G_PTRTOINT), IntReg)
1496 return select(*PtrToInt);
1499void RISCVInstructionSelector::preISelLower(MachineInstr &
MI) {
1500 switch (
MI.getOpcode()) {
1501 case TargetOpcode::G_PTR_ADD: {
1505 replacePtrWithInt(
MI.getOperand(1));
1506 MI.setDesc(
TII.get(TargetOpcode::G_ADD));
1510 case TargetOpcode::G_PTRMASK: {
1513 replacePtrWithInt(
MI.getOperand(1));
1514 MI.setDesc(
TII.get(TargetOpcode::G_AND));
1521void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
1522 const MachineInstr &
MI,
1524 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1525 "Expected G_CONSTANT");
1526 int64_t CstVal =
MI.getOperand(1).getCImm()->getSExtValue();
1530void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
1531 const MachineInstr &
MI,
1533 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1534 "Expected G_CONSTANT");
1535 uint64_t CstVal =
MI.getOperand(1).getCImm()->getZExtValue();
1539void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
1540 const MachineInstr &
MI,
1542 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1543 "Expected G_CONSTANT");
1544 uint64_t CstVal =
MI.getOperand(1).getCImm()->getZExtValue();
1548void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
1549 const MachineInstr &
MI,
1551 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1552 "Expected G_CONSTANT");
1553 int64_t CstVal =
MI.getOperand(1).getCImm()->getSExtValue();
1557void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
1558 const MachineInstr &
MI,
1560 assert(
MI.getOpcode() == TargetOpcode::G_FRAME_INDEX &&
OpIdx == -1 &&
1561 "Expected G_FRAME_INDEX");
1562 MIB.
add(
MI.getOperand(1));
1565void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
1566 const MachineInstr &
MI,
1568 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1569 "Expected G_CONSTANT");
1570 uint64_t
C =
MI.getOperand(1).getCImm()->getZExtValue();
1574void RISCVInstructionSelector::renderXLenSubTrailingOnes(
1575 MachineInstrBuilder &MIB,
const MachineInstr &
MI,
int OpIdx)
const {
1576 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1577 "Expected G_CONSTANT");
1578 uint64_t
C =
MI.getOperand(1).getCImm()->getZExtValue();
1582void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
1583 const MachineInstr &
MI,
1585 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1586 "Expected G_CONSTANT");
1587 int64_t
Imm =
MI.getOperand(1).getCImm()->getSExtValue();
1588 int64_t Adj =
Imm < 0 ? -2048 : 2047;
1592void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
1593 const MachineInstr &
MI,
1595 assert(
MI.getOpcode() == TargetOpcode::G_CONSTANT &&
OpIdx == -1 &&
1596 "Expected G_CONSTANT");
1597 int64_t
Imm =
MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
1601bool RISCVInstructionSelector::isRegInGprb(
Register Reg)
const {
1605bool RISCVInstructionSelector::isRegInFprb(
Register Reg)
const {
1609bool RISCVInstructionSelector::selectCopy(MachineInstr &
MI)
const {
1610 MachineOperand Dst =
MI.getOperand(0);
1616 const TargetRegisterClass *DstRC =
1617 TRI.getConstrainedRegClassForOperand(Dst, *MRI);
1620 "Register class not available for LLT, register bank combination");
1631 MI.setDesc(
TII.get(RISCV::COPY));
1635bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &
MI)
const {
1636 assert(
MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);
1638 const Register DstReg =
MI.getOperand(0).getReg();
1639 const TargetRegisterClass *DstRC =
TRI.getRegClassForTypeOnBank(
1643 "Register class not available for LLT, register bank combination");
1649 MI.setDesc(
TII.get(TargetOpcode::IMPLICIT_DEF));
1653bool RISCVInstructionSelector::materializeImm(
Register DstReg, int64_t Imm,
1654 MachineInstr &
MI)
const {
1655 MachineBasicBlock &
MBB = *
MI.getParent();
1665 unsigned NumInsts = Seq.
size();
1668 for (
unsigned i = 0; i < NumInsts; i++) {
1672 const RISCVMatInt::Inst &
I = Seq[i];
1675 switch (
I.getOpndKind()) {
1707bool RISCVInstructionSelector::selectAddr(MachineInstr &
MI,
bool IsLocal,
1708 bool IsExternWeak)
const {
1709 assert((
MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
1710 MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
1711 MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
1712 "Unexpected opcode");
1714 const MachineOperand &DispMO =
MI.getOperand(1);
1717 const LLT DefTy = MRI->
getType(DefReg);
1724 if (IsLocal && !Subtarget->allowTaggedGlobals()) {
1728 MI.setDesc(
TII.get(RISCV::PseudoLLA));
1737 MachineFunction &MF = *
MI.getParent()->getParent();
1745 TII.get(RISCV::PseudoLGA), DefReg)
1751 MI.eraseFromParent();
1758 "Unsupported code model for lowering",
MI);
1766 MachineInstr *AddrHi =
BuildMI(*
MI.getParent(),
MI,
MI.getDebugLoc(),
1767 TII.get(RISCV::LUI), AddrHiDest)
1773 TII.get(RISCV::ADDI), DefReg)
1779 MI.eraseFromParent();
1792 MachineFunction &MF = *
MI.getParent()->getParent();
1800 TII.get(RISCV::PseudoLGA), DefReg)
1806 MI.eraseFromParent();
1813 MI.setDesc(
TII.get(RISCV::PseudoLLA));
1821bool RISCVInstructionSelector::selectSelect(MachineInstr &
MI)
const {
1828 Register DstReg = SelectMI.getReg(0);
1830 unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
1833 Opc =
Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
1834 : RISCV::Select_FPR64_Using_CC_GPR;
1843 .
addReg(SelectMI.getTrueReg())
1844 .
addReg(SelectMI.getFalseReg());
1845 MI.eraseFromParent();
1857 return Size == 16 ? RISCV::FLT_H :
Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
1859 return Size == 16 ? RISCV::FLE_H :
Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
1861 return Size == 16 ? RISCV::FEQ_H :
Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
1874 assert(!isLegalFCmpPredicate(Pred) &&
"Predicate already legal?");
1877 if (isLegalFCmpPredicate(InvPred)) {
1885 if (isLegalFCmpPredicate(InvPred)) {
1890 if (isLegalFCmpPredicate(InvPred)) {
1902bool RISCVInstructionSelector::selectFPCompare(MachineInstr &
MI)
const {
1915 bool NeedInvert =
false;
1929 MachineInstr *Cmp1 =
1936 MachineInstr *Cmp2 =
1945 TII.get(RISCV::OR), TmpReg)
1964 MachineInstr *Cmp1 =
1971 MachineInstr *Cmp2 =
1978 TII.get(RISCV::AND), TmpReg)
1989 TII.get(RISCV::XORI), DstReg)
1995 MI.eraseFromParent();
1999void RISCVInstructionSelector::emitFence(
AtomicOrdering FenceOrdering,
2001 MachineInstr &
MI)
const {
2002 MachineBasicBlock &
MBB = *
MI.getParent();
2005 if (STI.hasStdExtZtso()) {
2008 if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
2032 unsigned Pred, Succ;
2033 switch (FenceOrdering) {
2036 case AtomicOrdering::AcquireRelease:
2040 case AtomicOrdering::Acquire:
2045 case AtomicOrdering::Release:
2050 case AtomicOrdering::SequentiallyConsistent:
2060InstructionSelector *
2064 return new RISCVInstructionSelector(TM, Subtarget, RBI);
#define GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool selectUnmergeValues(MachineInstrBuilder &MIB, const ARMBaseInstrInfo &TII, MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
Provides analysis for querying information about KnownBits during GISel passes.
Declares convenience wrapper classes for interpreting MachineInstr instances as specific generic oper...
const HexagonInstrInfo * TII
static bool hasAllWUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI)
static bool hasAllNBitUsers(const MachineInstr &OrigMI, const LoongArchSubtarget &ST, const MachineRegisterInfo &MRI, unsigned OrigBits)
Contains matchers for matching SSA Machine Instructions.
This file declares the MachineIRBuilder class.
Register const TargetRegisterInfo * TRI
Promote Memory to Register
MachineInstr unsigned OpIdx
static StringRef getName(Value *V)
static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize)
Select the RISC-V regimm opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...
static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize)
Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation GenericOpc, appropriate for the G...
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size)
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS, CmpInst::Predicate &Pred, bool &NeedInvert)
static void getOperandsForBranch(Register CondReg, RISCVCC::CondCode &CC, Register &LHS, Register &RHS, MachineRegisterInfo &MRI)
const SmallVectorImpl< MachineOperand > & Cond
This file declares the targeting of the RegisterBankInfo class for RISC-V.
APInt bitcastToAPInt() const
unsigned getBitWidth() const
Return the number of bits in the APInt.
bool ult(const APInt &RHS) const
Unsigned less than comparison.
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
static APInt getBitsSetFrom(unsigned numBits, unsigned loBit)
Constructs an APInt value that has a contiguous range of bits set.
BlockFrequencyInfo pass uses BlockFrequencyInfoImpl implementation to estimate IR basic block frequen...
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
@ ICMP_SLT
signed less than
@ ICMP_SLE
signed less or equal
@ FCMP_OLT
0 1 0 0 True if ordered and less than
@ ICMP_UGE
unsigned greater or equal
@ ICMP_UGT
unsigned greater than
@ ICMP_SGT
signed greater than
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
@ ICMP_ULT
unsigned less than
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
@ ICMP_SGE
signed greater or equal
@ ICMP_ULE
unsigned less or equal
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
int64_t getSExtValue() const
Return the constant as a 64-bit integer value after it has been sign extended as appropriate for the ...
This is an important base class in LLVM.
virtual void setupMF(MachineFunction &mf, GISelValueTracking *vt, CodeGenCoverage *covinfo=nullptr, ProfileSummaryInfo *psi=nullptr, BlockFrequencyInfo *bfi=nullptr)
Setup per-MF executor state.
Register getPointerReg() const
Get the source register of the pointer value.
MachineMemOperand & getMMO() const
Get the MachineMemOperand on this instruction.
LocationSize getMemSizeInBits() const
Returns the size in bits of the memory access.
Register getReg(unsigned Idx) const
Access the Idx'th operand as a register and return it.
constexpr unsigned getScalarSizeInBits() const
static constexpr LLT scalar(unsigned SizeInBits)
Get a low-level scalar or aggregate "bag of bits".
constexpr bool isValid() const
constexpr bool isVector() const
constexpr TypeSize getSizeInBits() const
Returns the total size of the type. Must only be called on sized types.
constexpr bool isPointer() const
constexpr unsigned getAddressSpace() const
TypeSize getValue() const
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addDisp(const MachineOperand &Disp, int64_t off, unsigned char TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addDef(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register definition operand.
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
unsigned getOperandNo(const_mop_iterator I) const
Returns the number of the operand iterator I points to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
AtomicOrdering getSuccessOrdering() const
Return the atomic ordering requirements for this memory operation.
MachineOperand class - Representation of each machine instruction operand.
const ConstantInt * getCImm() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is found.
iterator_range< use_nodbg_iterator > use_nodbg_operands(Register Reg) const
const RegClassOrRegBank & getRegClassOrRegBank(Register Reg) const
Return the register bank or register class of Reg.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified register class.
LLT getType(Register Reg) const
Get the low-level type of Reg or LLT{} if Reg is not a generic (target independent) virtual register.
LLVM_ABI void setRegBank(Register Reg, const RegisterBank &RegBank)
Set the register bank to RegBank for Reg.
LLVM_ABI void setType(Register VReg, LLT Ty)
Set the low-level type of VReg to Ty.
LLVM_ABI Register createGenericVirtualRegister(LLT Ty, StringRef Name="")
Create and return a new generic virtual register with low-level type Ty.
Analysis providing profile information.
This class provides the information for the target register banks.
std::optional< unsigned > getRealVLen() const
static std::pair< unsigned, unsigned > decomposeSubvectorInsertExtractToSubRegs(MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx, const RISCVRegisterInfo *TRI)
static unsigned getRegClassIDForVecVT(MVT VT)
static RISCVVType::VLMUL getLMUL(MVT VT)
static const TargetRegisterClass * constrainGenericRegister(Register Reg, const TargetRegisterClass &RC, MachineRegisterInfo &MRI)
Constrain the (possibly generic) virtual register Reg to RC.
const RegisterBank & getRegBank(unsigned ID)
Get the register bank identified by ID.
unsigned getID() const
Get the identifier of this register bank.
Wrapper class representing virtual and physical registers.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
bool isPositionIndependent() const
CodeModel::Model getCodeModel() const
Returns the code model.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
operand_type_match m_Reg()
SpecificConstantMatch m_SpecificICst(const APInt &RequestedValue)
Matches a constant equal to RequestedValue.
operand_type_match m_Pred()
UnaryOp_match< SrcTy, TargetOpcode::G_ZEXT > m_GZExt(const SrcTy &Src)
ConstantMatch< APInt > m_ICst(APInt &Cst)
BinaryOp_match< LHS, RHS, TargetOpcode::G_ADD, true > m_GAdd(const LHS &L, const RHS &R)
OneNonDBGUse_match< SubPat > m_OneNonDBGUse(const SubPat &SP)
CompareOp_match< Pred, LHS, RHS, TargetOpcode::G_ICMP > m_GICmp(const Pred &P, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SUB > m_GSub(const LHS &L, const RHS &R)
bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P)
BinaryOp_match< LHS, RHS, TargetOpcode::G_SHL, false > m_GShl(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_AND, true > m_GAnd(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, TargetOpcode::G_LSHR, false > m_GLShr(const LHS &L, const RHS &R)
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
SmallVector< Inst, 8 > InstSeq
static unsigned decodeVSEW(unsigned VSEW)
LLVM_ABI unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul)
LLVM_ABI unsigned encodeVTYPE(VLMUL VLMUL, unsigned SEW, bool TailAgnostic, bool MaskAgnostic, bool AltFmt=false)
static constexpr int64_t VLMaxSentinel
@ SingleThread
Synchronized with respect to signal handlers executing in the same thread.
@ System
Synchronized with respect to all concurrently executing threads.
This is an optimization pass for GlobalISel generic memory operations.
PointerUnion< const TargetRegisterClass *, const RegisterBank * > RegClassOrRegBank
Convenient type to represent either a register class or a register bank.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
bool isStrongerThanMonotonic(AtomicOrdering AO)
int countr_one(T Value)
Count the number of ones from the least significant bit to the first zero bit.
LLVM_ABI void constrainSelectedInstRegOperands(MachineInstr &I, const TargetInstrInfo &TII, const TargetRegisterInfo &TRI, const RegisterBankInfo &RBI)
Mutate the newly-selected instruction I to constrain its (possibly generic) virtual register operands...
int bit_width(T Value)
Returns the number of bits needed to represent Value if Value is nonzero.
LLVM_ABI MVT getMVTForLLT(LLT Ty)
Get a rough equivalent of an MVT for a given LLT.
InstructionSelector * createRISCVInstructionSelector(const RISCVTargetMachine &TM, const RISCVSubtarget &Subtarget, const RISCVRegisterBankInfo &RBI)
LLVM_ABI std::optional< int64_t > getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI)
If VReg is defined by a G_CONSTANT fits in int64_t returns it.
int countr_zero(T Val)
Count number of 0's from the least significant bit to the most stopping at the first 1.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void reportGISelFailure(MachineFunction &MF, MachineOptimizationRemarkEmitter &MORE, MachineOptimizationRemarkMissed &R)
Report an ISel error as a missed optimization remark to the LLVMContext's diagnostic stream.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference sizeof(SmallVector<T, 0>).
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
AtomicOrdering
Atomic ordering for LLVM's memory model.
constexpr T maskTrailingZeros(unsigned N)
Create a bitmask with the N right-most bits set to 0, and all other bits set to 1.
@ Or
Bitwise or logical OR of integers.
@ Xor
Bitwise or logical XOR of integers.
@ And
Bitwise or logical AND of integers.
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
LLVM_ABI std::optional< ValueAndVReg > getIConstantVRegValWithLookThrough(Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs=true)
If VReg is defined by a statically evaluable chain of instructions rooted on a G_CONSTANT returns its APInt value and def register.
constexpr T maskTrailingOnes(unsigned N)
Create a bitmask with the N right-most bits set to 1, and all other bits set to 0.
LLVM_ABI void reportFatalUsageError(Error Err)
Report a fatal error that does not indicate a bug in LLVM.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.