#include "llvm/IR/IntrinsicsRISCV.h"

#define DEBUG_TYPE "riscv-isel"

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
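// ... (class RISCVInstructionSelector declaration; selected members shown)

  /// Cap on the recursion in hasAllNBitUsers when walking user instructions.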
  static constexpr unsigned MaxRecursionDepth = 6;

  bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
                       const unsigned Depth = 0) const;
  // ...
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  // ...
  void addVectorLoadStoreOperands(MachineInstr &I,
                                  SmallVectorImpl<SrcOp> &SrcOps,
                                  unsigned &CurOp, bool IsMasked,
                                  bool IsStridedOrIndexed,
                                  LLT *IndexVT = nullptr) const;
  // ...
  ComplexRendererFns selectShiftMask(MachineOperand &Root,
                                     unsigned ShiftWidth) const;
  ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
    return selectShiftMask(Root, STI.getXLen());
  }
  ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
    return selectShiftMask(Root, 32);
  }

  ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
    return selectSExtBits(Root, Bits);
  }

  ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
    return selectZExtBits(Root, Bits);
  }

  ComplexRendererFns selectSHXADDOp(MachineOperand &Root,
                                    unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }
#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      // ...
#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
                                               unsigned Bits,
                                               const unsigned Depth) const {
  assert((MI.getOpcode() == TargetOpcode::G_ADD ||
          MI.getOpcode() == TargetOpcode::G_SUB ||
          MI.getOpcode() == TargetOpcode::G_MUL ||
          MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_AND ||
          MI.getOpcode() == TargetOpcode::G_OR ||
          MI.getOpcode() == TargetOpcode::G_XOR ||
          MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
         "Unexpected opcode");

  if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
    return false;

  auto DestReg = MI.getOperand(0).getReg();
  for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
    assert(UserOp.getParent() && "UserOp must have a parent");
    const MachineInstr &UserMI = *UserOp.getParent();

    switch (UserMI.getOpcode()) {
    // ...
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_S_W:
    // ...
    }
  }
  // ...
}
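// Match the variable amount operand of a shift of width ShiftWidth. RISC-V
// shifts only consume the low log2(ShiftWidth) bits of rs2, so zero-extends
// and AND masks that cover those bits can be looked through, and negated or
// inverted amounts can be rebuilt with a SUB/SUBW from X0 or an XORI.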
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
                                          unsigned ShiftWidth) const {
  using namespace llvm::MIPatternMatch;
  // ...
    ShAmtReg = ZExtSrcReg;
  // ...
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    // ...
      KnownBits Known = VT->getKnownBits(AndSrcReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
    // ...
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
      // ...
    if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
      // Negate the shift amount with a SUB/SUBW from X0.
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
        MIB.addReg(ShAmtReg);
      }}};
    }
    if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
      // Invert the shift amount with an XORI.
      ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
            .addImm(-1);
        MIB.addReg(ShAmtReg);
      }}};
    }
  // ...
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}
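// Match an operand that is already sign-extended from Bits bits, either via
// an explicit G_SEXT_INREG of that width or because enough sign bits are
// known, so the extension can be elided.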
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  // ...
  MachineInstr *RootDef = MRI->getVRegDef(RootReg);
  if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
      RootDef->getOperand(2).getImm() == Bits)
    return {
        {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
  // ...
}
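// Match an operand that is already zero-extended from Bits bits, e.g. a
// G_AND with the all-ones mask of that width, a G_ZEXT from a Bits-wide
// value, or a value whose known bits prove the high part is zero.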
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  // ...
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};
  // ...
  if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
      MRI->getType(RegX).getScalarSizeInBits() == Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  // ...
  return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};
}
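// Match an operand that can fill the shifted slot of a Zba SHxADD
// (sh1add/sh2add/sh3add): shift-and-mask combinations are rewritten into a
// single SRLI/SRLIW whose result is then scaled by ShAmt in the add.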
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  // ...
  const unsigned XLen = STI.getXLen();
  // ...
    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // ...
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) &&
          Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
          // ...
        }}};
      }
      // ...
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          // ...
        }}};
      }
    }
  // ...
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();
    // ...
      Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
        // ...
      }}};
  // ...
}
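// Match the shifted operand of a Zba SHxADD.UW instruction, which adds
// (zext32(RegX) << ShAmt) to the other operand; a shift+mask pair whose mask
// proves the high bits are clear is folded into a single SLLI.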
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  // ...
    if (Mask.isShiftedMask()) {
      unsigned Leading = Mask.countl_zero();
      unsigned Trailing = Mask.countr_zero();
      if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
          // ...
        }}};
      }
    }
  // ...
}
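// Render the VL operand of a vector pseudo: a constant all-ones AVL is
// emitted as the VLMAX sentinel immediate, other constants as plain
// immediates, and non-constant AVLs are passed through as a register.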
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
  assert(Root.isReg() && "Expected operand to be a Register");
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
    // ...
    if (C->getValue().isAllOnes())
      // ...
      return {{[=](MachineInstrBuilder &MIB) {
        MIB.addImm(RISCV::VLMaxSentinel);
      }}};

    uint64_t ZExtC = C->getZExtValue();
    return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
  }
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
}
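// Select a (base register, simm12 offset) pair for scalar loads and stores:
// frame indices are used directly, a base+constant-offset pointer add is
// split into base and offset, and anything else gets offset 0.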
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  // ...
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, *MRI)) {
    // ...
    MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());
    // ...
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    // ...
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate Pred) {
  switch (Pred) {
  case CmpInst::Predicate::ICMP_EQ:
    // ...
  case CmpInst::Predicate::ICMP_NE:
    // ...
  case CmpInst::Predicate::ICMP_ULT:
    // ...
  case CmpInst::Predicate::ICMP_SLT:
    // ...
  case CmpInst::Predicate::ICMP_UGE:
    // ...
  case CmpInst::Predicate::ICMP_SGE:
    // ...
  }
}

// ... (in getOperandsForBranch)
  CC = getRISCVCCFromICmp(Pred);
// ...
/// Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation
/// GenericOpc, appropriate for the given memory access size.
static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
  switch (OpSize) {
  case 8:
    return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
  case 16:
    return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
  case 32:
    return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
  case 64:
    return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
  // ...
  }
}

/// Select the RISC-V reg+imm opcode for the G_LOAD or G_STORE operation
/// GenericOpc, appropriate for the given memory access size.
static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
  switch (OpSize) {
  case 8:
    return IsStore ? RISCV::SB : RISCV::LBU;
  case 16:
    return IsStore ? RISCV::SH : RISCV::LH;
  case 32:
    return IsStore ? RISCV::SW : RISCV::LW;
  case 64:
    return IsStore ? RISCV::SD : RISCV::LD;
  // ...
  }
}
void RISCVInstructionSelector::addVectorLoadStoreOperands(
    MachineInstr &I, SmallVectorImpl<SrcOp> &SrcOps, unsigned &CurOp,
    bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {
  // Base pointer.
  auto PtrReg = I.getOperand(CurOp++).getReg();
  SrcOps.push_back(PtrReg);

  // Stride or index operand.
  if (IsStridedOrIndexed) {
    auto StrideReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(StrideReg);
    if (IndexVT)
      *IndexVT = MRI->getType(StrideReg);
  }

  // Mask operand.
  if (IsMasked) {
    auto MaskReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(MaskReg);
  }
}
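// Select RVV load/store intrinsics with side effects. Each case decodes the
// masked/strided/ordered flavor from the intrinsic ID, gathers the operands,
// and looks up the matching pseudo by SEW and LMUL via the generated tables
// (getVLEPseudo, getVLXPseudo, getVSEPseudo, getVSXPseudo).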
bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
    MachineInstr &I, MachineIRBuilder &MIB) const {
  // ...
  switch (IntrinID) {
  // ...
  case Intrinsic::riscv_vlm:
  case Intrinsic::riscv_vle:
  case Intrinsic::riscv_vle_mask:
  case Intrinsic::riscv_vlse:
  case Intrinsic::riscv_vlse_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
                    IntrinID == Intrinsic::riscv_vlse_mask;
    bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
                     IntrinID == Intrinsic::riscv_vlse_mask;
    LLT VT = MRI->getType(I.getOperand(0).getReg());
    // ...
    const Register DstReg = I.getOperand(0).getReg();

    // The passthru operand is present for all of these except vlm.
    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
    // ...
    if (HasPassthruOperand) {
      auto PassthruReg = I.getOperand(CurOp++).getReg();
      // ...
    }

    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);

    const RISCV::VLEPseudo *P =
        RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF=*/false, Log2SEW,
                            static_cast<unsigned>(LMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Policy (defaulted when the intrinsic does not supply one).
    // ...
      Policy = I.getOperand(CurOp++).getImm();
    PseudoMI.addImm(Policy);

    // Memref.
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vloxei:
  case Intrinsic::riscv_vloxei_mask:
  case Intrinsic::riscv_vluxei:
  case Intrinsic::riscv_vluxei_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
                    IntrinID == Intrinsic::riscv_vluxei_mask;
    bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
                     IntrinID == Intrinsic::riscv_vloxei_mask;
    LLT VT = MRI->getType(I.getOperand(0).getReg());
    // ...
    const Register DstReg = I.getOperand(0).getReg();

    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
    // ...
    if (HasPassthruOperand) {
      auto PassthruReg = I.getOperand(CurOp++).getReg();
      // ...
    }

    LLT IndexVT;
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked,
                               /*IsStridedOrIndexed=*/true, &IndexVT);
    // ...
    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      reportFatalUsageError("The V extension does not support EEW=64 for "
                            "index values when XLEN=32");
    }

    const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
        static_cast<unsigned>(IndexLMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Policy (defaulted when the intrinsic does not supply one).
    // ...
      Policy = I.getOperand(CurOp++).getImm();
    PseudoMI.addImm(Policy);

    // Memref.
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vsm:
  case Intrinsic::riscv_vse:
  case Intrinsic::riscv_vse_mask:
  case Intrinsic::riscv_vsse:
  case Intrinsic::riscv_vsse_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
                    IntrinID == Intrinsic::riscv_vsse_mask;
    bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
                     IntrinID == Intrinsic::riscv_vsse_mask;
    LLT VT = MRI->getType(I.getOperand(1).getReg());
    // ...
    auto PassthruReg = I.getOperand(CurOp++).getReg();
    // ...
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);

    const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
        IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Memref.
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vsoxei:
  case Intrinsic::riscv_vsoxei_mask:
  case Intrinsic::riscv_vsuxei:
  case Intrinsic::riscv_vsuxei_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
                    IntrinID == Intrinsic::riscv_vsuxei_mask;
    bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
                     IntrinID == Intrinsic::riscv_vsoxei_mask;
    LLT VT = MRI->getType(I.getOperand(1).getReg());
    // ...
    auto PassthruReg = I.getOperand(CurOp++).getReg();
    // ...
    LLT IndexVT;
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked,
                               /*IsStridedOrIndexed=*/true, &IndexVT);
    // ...
    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      reportFatalUsageError("The V extension does not support EEW=64 for "
                            "index values when XLEN=32");
    }

    const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
        static_cast<unsigned>(IndexLMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Memref.
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  // ...
}
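// Select the vsetvli/vsetvlimax intrinsics. A constant all-ones AVL selects
// the X0-source form (PseudoVSETVLIX0, keeping VL at VLMAX), a small
// constant AVL can use PseudoVSETIVLI's immediate field, and the general
// case emits PseudoVSETVLI with the AVL in a register.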
bool RISCVInstructionSelector::selectIntrinsic(MachineInstr &I,
                                               MachineIRBuilder &MIB) const {
  // ...
  switch (IntrinID) {
  // ...
  case Intrinsic::riscv_vsetvli:
  case Intrinsic::riscv_vsetvlimax: {
    bool VLMax = IntrinID == Intrinsic::riscv_vsetvlimax;
    unsigned Offset = VLMax ? 2 : 3;
    // ...
    Register DstReg = I.getOperand(0).getReg();
    // ...
    unsigned Opcode = RISCV::PseudoVSETVLI;
    // ...
      Register AVLReg = I.getOperand(2).getReg();
      // ...
        uint64_t AVL = AVLConst->Value.getZExtValue();
      // ...
      // An all-ones AVL means VLMAX; use the X0-source form.
      MachineInstr *AVLDef = MRI->getVRegDef(AVLReg);
      if (AVLDef && AVLDef->getOpcode() == TargetOpcode::G_CONSTANT) {
        // ...
        if (C->getValue().isAllOnes())
          // ...
          Opcode = RISCV::PseudoVSETVLIX0;
      }
    // ...
      Register AVLReg = I.getOperand(2).getReg();
      // ...
        uint64_t AVL = AVLConst->Value.getZExtValue();
        // A small constant AVL fits vsetivli's uimm5 field.
        auto PseudoMI = MIB.buildInstr(RISCV::PseudoVSETIVLI, {DstReg}, {})
        // ...
        I.eraseFromParent();
        // ...
    // ...
    MIB.buildInstr(Opcode, {DstReg}, {VLOperand}).addImm(VTypeI);
    I.eraseFromParent();
    // ...
  }
  // ...
}
bool RISCVInstructionSelector::selectExtractSubvector(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);
  // ...
  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);
  // ...
  unsigned Idx = static_cast<unsigned>(MI.getOperand(2).getImm());
  // ...
  std::tie(SubRegIdx, Idx) =
      RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
          SrcMVT, DstMVT, Idx, &TRI);
  // ...
  const TargetRegisterClass *DstRC = TRI.getRegClass(DstRegClassID);
  // ...
  const TargetRegisterClass *SrcRC = TRI.getRegClass(SrcRegClassID);
  // ...
  MIB.buildInstr(TargetOpcode::COPY, {DstReg}, {})
      .addReg(SrcReg, {}, SubRegIdx);
  // ...
  MI.eraseFromParent();
  return true;
}
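// Entry point for instruction selection: runs pre-ISel lowering, handles
// PHIs and other non-generic opcodes, tries the TableGen-generated
// selectImpl(), and falls back to the hand-written cases in the switch.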
bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);
      // ...
          MRI->getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
      // ...
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
      // ...
      MI.setDesc(TII.get(TargetOpcode::PHI));
      // ...
    }
    // ...
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    // ...
  case TargetOpcode::G_CONSTANT: {
    // ...
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    bool IsSigned = Opc != TargetOpcode::G_ZEXT;
    // ...
    LLT SrcTy = MRI->getType(SrcReg);
    // ...
           RISCV::GPRBRegBankID &&
           "Unexpected ext regbank");

    // sext of a 32-bit value: ADDIW sign-extends its 32-bit result.
    if (IsSigned && SrcSize == 32) {
      MI.setDesc(TII.get(RISCV::ADDIW));
      // ...
    }

    // zext of a 32-bit value: ADD_UW when Zba is available.
    if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
      MI.setDesc(TII.get(RISCV::ADD_UW));
      // ...
    }

    // 16-bit values: SEXT_H/ZEXT_H when Zbb (or Zbkb for zext) is available.
    if (SrcSize == 16 &&
        (STI.hasStdExtZbb() || (!IsSigned && STI.hasStdExtZbkb()))) {
      MI.setDesc(TII.get(IsSigned       ? RISCV::SEXT_H
                         : STI.isRV64() ? RISCV::ZEXT_H_RV64
                                        : RISCV::ZEXT_H_RV32));
      // ...
    }

    // Otherwise, extend with a shift-left/shift-right pair.
    auto ShiftLeft =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {SrcReg})
            .addImm(STI.getXLen() - SrcSize);
    // ...
    auto ShiftRight = MIB.buildInstr(IsSigned ? RISCV::SRAI : RISCV::SRLI,
                                     {DstReg}, {ShiftLeft})
                          .addImm(STI.getXLen() - SrcSize);
    // ...
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // ...
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    // ...
      GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      // ...
      if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
        return false;
      // ...
      unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      // ...
             "Unexpected size or subtarget");
      // ...
      MachineInstrBuilder FCVT =
      // ...
      MI.eraseFromParent();
      // ...
      // On RV32, build an f64 immediate from two 32-bit halves.
      Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      // ...
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      // ...
      MI.eraseFromParent();
      return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // ...
    }
    return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB);
  case TargetOpcode::G_BRCOND: {
    // ...
        .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    // ...
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB);
  case TargetOpcode::G_FENCE: {
    // ...
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    // ...
    LLT PtrTy = MRI->getType(PtrReg);
    // ...
    if (RB.getID() != RISCV::GPRBRegBankID)
      return false;
    // ...
           "Load/Store pointer operand isn't a GPR");
    assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");
    // ...
    if (NewOpc == MI.getOpcode())
      return false;
    // ...
    auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));
    // ...
    auto NewInst = MIB.buildInstr(NewOpc, {}, {}, MI.getFlags());
    // ...
    for (auto &Fn : *AddrModeFns)
      Fn(NewInst);
    MI.eraseFromParent();
    // ...
  }
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWithSideEffects(MI, MIB);
  case TargetOpcode::G_INTRINSIC:
    return selectIntrinsic(MI, MIB);
  case TargetOpcode::G_EXTRACT_SUBVECTOR:
    return selectExtractSubvector(MI, MIB);
  // ...
  }
}
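// Split an s64 value in the FPR bank into two s32 GPRs on RV32 using the Zfa
// move instructions: FMV_X_W_FPR64 reads the low half and FMVH_X_D the high
// half of the f64 source.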
bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  if (!Subtarget->hasStdExtZfa())
    return false;
  // ...
  if (MI.getNumOperands() != 3)
    return false;
  // ...
  if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;

  MachineInstr *ExtractLo = MIB.buildInstr(RISCV::FMV_X_W_FPR64, {Lo}, {Src});
  // ...
  MachineInstr *ExtractHi = MIB.buildInstr(RISCV::FMVH_X_D, {Hi}, {Src});
  // ...
  MI.eraseFromParent();
  return true;
}
bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB) {
  // ...
  assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");
  // ...
  MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}
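// Pre-selection lowering: rewrite pointer-typed operations into integer ones
// (G_PTR_ADD -> G_ADD, G_PTRMASK -> G_AND), replacing the pointer operand
// with a G_PTRTOINT of XLEN width so the integer patterns can select them.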
void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    // ...
    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI->setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    // ...
    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI->setType(DstReg, sXLen);
    break;
  }
  }
}
void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
         "Expected G_FRAME_INDEX");
  MIB.add(MI.getOperand(1));
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}

void RISCVInstructionSelector::renderXLenSubTrailingOnes(
    MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - llvm::countr_one(C));
}

void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
  int64_t Adj = Imm < 0 ? -2048 : 2047;
  MIB.addImm(Imm - Adj);
}

void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
  MIB.addImm(Imm);
}
const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    // ...
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return &RISCV::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  if (RB.getID() == RISCV::VRBRegBankID) {
    // ...
      return &RISCV::VRRegClass;
    // ...
      return &RISCV::VRM2RegClass;
    // ...
      return &RISCV::VRM4RegClass;
    // ...
      return &RISCV::VRM8RegClass;
  }

  return nullptr;
}
bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}

bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
  // ...
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
  // ...
      "Register class not available for LLT, register bank combination");
  // ...
  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}
bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");
  // ...
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}
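// Materialize an arbitrary XLEN immediate into DstReg using the
// LUI/ADDI/SLLI/... sequence computed by RISCVMatInt::generateInstSeq,
// creating a fresh virtual register between the steps.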
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  // ...
  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, STI);
  unsigned NumInsts = Seq.size();
  // ...
  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = (i < NumInsts - 1)
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    // ...
    switch (I.getOpndKind()) {
    // ...
        {SrcReg, Register(RISCV::X0)});
    // ...
    }
    // ...
  }
  return true;
}
bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);
  // ...
  const LLT DefTy = MRI->getType(DefReg);
  // ...
    // Use PC-relative addressing for DSO-local symbols.
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // ...
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Otherwise, load the address from the GOT.
    MachineFunction &MF = *MI.getParent()->getParent();
    // ...
    MI.eraseFromParent();
    // ...
      // ... "Unsupported code model for lowering", MI);

    // Medium-low code model: LUI + ADDI pair.
    Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
    // ...
    MI.eraseFromParent();
    // ...
    // Extern-weak symbols may be null, so go through the GOT.
    MachineFunction &MF = *MI.getParent()->getParent();
    // ...
    MI.eraseFromParent();
    // ...
  MI.setDesc(TII.get(RISCV::PseudoLLA));
  return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
}
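// Lower G_SELECT into a Select_*_Using_CC_GPR pseudo, which is expanded to a
// branch sequence after selection; the FPR32/FPR64 variants are picked by
// result size when the destination lives in the FP register bank.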
bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB) const {
  // ...
  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  // ...
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  // ...
          .addReg(SelectMI.getTrueReg())
          .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  // ...
}
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  switch (Pred) {
  case CmpInst::FCMP_OLT:
    return Size == 16 ? RISCV::FLT_H
           : Size == 32 ? RISCV::FLT_S : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 16 ? RISCV::FLE_H
           : Size == 32 ? RISCV::FLE_S : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 16 ? RISCV::FEQ_H
           : Size == 32 ? RISCV::FEQ_S : RISCV::FEQ_D;
  // ...
  }
}

static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  // ...
  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  // Try the swapped predicate first, then the inverse, then both.
  CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    // ...
  }

  InvPred = CmpInst::getInversePredicate(Pred);
  // ...
  if (isLegalFCmpPredicate(InvPred)) {
    // ...
  }
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    // ...
  }
  // ...
}
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  // ...
  unsigned Size = MRI->getType(LHS).getSizeInBits();
  // ...
  bool NeedInvert = false;
  // A predicate that is legal (possibly after legalization) needs one
  // compare.
    // ...
    TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    // ...
  // FCMP_ONE / FCMP_UEQ: OR of the two strict ordered compares.
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    // ...
    TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    // ...
    MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
  // FCMP_ORD / FCMP_UNO: AND of the two self-compares (NaN checks).
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    // ...
    TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    // ...
    MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
  // ...
  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    // ...
  }

  MI.eraseFromParent();
  return true;
}
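// Emit the machine-level fence for a G_FENCE. With Ztso, only a seq_cst
// system-scope fence needs a real FENCE instruction; weaker orderings become
// a MEMBARRIER (a codegen-only compiler barrier). Without Ztso, the FENCE
// pred/succ bit sets are derived from the atomic ordering below.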
void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // ...
    }
    // ...
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }
  // ...
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  // ...
  unsigned Pred, Succ;
  switch (FenceOrdering) {
  // ...
  case AtomicOrdering::AcquireRelease:
    // ...
  case AtomicOrdering::Acquire:
    // ...
  case AtomicOrdering::Release:
    // ...
  case AtomicOrdering::SequentiallyConsistent:
    // ...
  }
  // ...
}
InstructionSelector *
llvm::createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                                     const RISCVSubtarget &Subtarget,
                                     const RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}