#include "llvm/IR/IntrinsicsRISCV.h"

#define DEBUG_TYPE "riscv-isel"

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET
namespace {

class RISCVInstructionSelector : public InstructionSelector {
  // ...
  static constexpr unsigned MaxRecursionDepth = 6;

  bool hasAllNBitUsers(const MachineInstr &MI, unsigned Bits,
                       const unsigned Depth = 0) const;
  // ...
  bool selectAddr(MachineInstr &MI, MachineIRBuilder &MIB, bool IsLocal = true,
                  bool IsExternWeak = false) const;
  // ...
  void addVectorLoadStoreOperands(MachineInstr &I,
                                  SmallVectorImpl<SrcOp> &SrcOps,
                                  unsigned &CurOp, bool IsMasked,
                                  bool IsStridedOrIndexed,
                                  LLT *IndexVT = nullptr) const;
  // ...
  ComplexRendererFns selectShiftMask(MachineOperand &Root,
                                     unsigned ShiftWidth) const;
  ComplexRendererFns selectShiftMaskXLen(MachineOperand &Root) const {
    return selectShiftMask(Root, STI.getXLen());
  }
  ComplexRendererFns selectShiftMask32(MachineOperand &Root) const {
    return selectShiftMask(Root, 32);
  }

  ComplexRendererFns selectSExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectSExtBits(MachineOperand &Root) const {
    return selectSExtBits(Root, Bits);
  }

  ComplexRendererFns selectZExtBits(MachineOperand &Root, unsigned Bits) const;
  template <unsigned Bits>
  ComplexRendererFns selectZExtBits(MachineOperand &Root) const {
    return selectZExtBits(Root, Bits);
  }

  ComplexRendererFns selectSHXADDOp(MachineOperand &Root,
                                    unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADDOp(MachineOperand &Root) const {
    return selectSHXADDOp(Root, ShAmt);
  }

  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root,
                                       unsigned ShAmt) const;
  template <unsigned ShAmt>
  ComplexRendererFns selectSHXADD_UWOp(MachineOperand &Root) const {
    return selectSHXADD_UWOp(Root, ShAmt);
  }
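
  // Illustrative note (not from the source): each selectFoo<N> template
  // instantiation gives the TableGen-generated matcher a fixed-arity
  // ComplexPattern callback. On success the matcher runs every returned
  // renderer to append the rewritten operands, roughly:
  //
  //   if (auto Fns = selectShiftMaskXLen(MI.getOperand(2)))
  //     for (auto &Fn : *Fns)
  //       Fn(MIB); // appends the (possibly simplified) shift-amount operand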
  // ...
#define GET_GLOBALISEL_PREDICATES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};

} // namespace

#define GET_GLOBALISEL_IMPL
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL
RISCVInstructionSelector::RISCVInstructionSelector(
    const RISCVTargetMachine &TM, const RISCVSubtarget &STI,
    const RISCVRegisterBankInfo &RBI)
    : STI(STI), TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), RBI(RBI),
      TM(TM),

#define GET_GLOBALISEL_PREDICATES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "RISCVGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}
// Returns true if every user of MI only reads the low Bits bits of its
// result.
bool RISCVInstructionSelector::hasAllNBitUsers(const MachineInstr &MI,
                                               unsigned Bits,
                                               const unsigned Depth) const {
  assert((MI.getOpcode() == TargetOpcode::G_ADD ||
          MI.getOpcode() == TargetOpcode::G_SUB ||
          MI.getOpcode() == TargetOpcode::G_MUL ||
          MI.getOpcode() == TargetOpcode::G_SHL ||
          MI.getOpcode() == TargetOpcode::G_LSHR ||
          MI.getOpcode() == TargetOpcode::G_AND ||
          MI.getOpcode() == TargetOpcode::G_OR ||
          MI.getOpcode() == TargetOpcode::G_XOR ||
          MI.getOpcode() == TargetOpcode::G_SEXT_INREG || Depth != 0) &&
         "Unexpected opcode");

  if (Depth >= RISCVInstructionSelector::MaxRecursionDepth)
    return false;

  auto DestReg = MI.getOperand(0).getReg();
  for (auto &UserOp : MRI->use_nodbg_operands(DestReg)) {
    assert(UserOp.getParent() && "UserOp must have a parent");
    const MachineInstr &UserMI = *UserOp.getParent();

    switch (UserMI.getOpcode()) {
    default:
      return false;
    // ...
    case RISCV::FCVT_D_W:
    case RISCV::FCVT_S_W:
      // W-form conversions only read the low 32 bits of their input.
      if (Bits >= 32)
        break;
      return false;
    // ...
    }
  }

  return true;
}
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectShiftMask(MachineOperand &Root,
                                          unsigned ShiftWidth) const {
  if (!Root.isReg())
    return std::nullopt;

  using namespace llvm::MIPatternMatch;

  Register ShAmtReg = Root.getReg();
  // Peek through zext.
  Register ZExtSrcReg;
  if (mi_match(ShAmtReg, *MRI, m_GZExt(m_Reg(ZExtSrcReg))))
    ShAmtReg = ZExtSrcReg;

  // RISC-V shifts only read the low log2(ShiftWidth) bits of the shift
  // amount, so an AND covering those bits is redundant.
  APInt AndMask;
  Register AndSrcReg;
  // ... (match (and x, AndMask))
    APInt ShMask(AndMask.getBitWidth(), ShiftWidth - 1);
    if (ShMask.isSubsetOf(AndMask)) {
      ShAmtReg = AndSrcReg;
    } else {
      // SimplifyDemandedBits may have optimized the mask, so try restoring
      // any bits that are known zero.
      KnownBits Known = VT->getKnownBits(AndSrcReg);
      if (ShMask.isSubsetOf(AndMask | Known.Zero))
        ShAmtReg = AndSrcReg;
    }
  // ...
  APInt Imm;
  Register Reg;
  // ... (peel (add x, C) when C is a multiple of the shift width)
  if (Imm != 0 && Imm.urem(ShiftWidth) == 0)
    // ...
  // ... (match (sub C, x))
  if (Imm != 0 && Imm.urem(ShiftWidth) == 0) {
    // Shifting by C-X where C == 0 mod Size: shift by -X to generate a NEG
    // instead of a SUB of a constant.
    ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    unsigned NegOpc = Subtarget->is64Bit() ? RISCV::SUBW : RISCV::SUB;
    return {{[=](MachineInstrBuilder &MIB) {
      MachineIRBuilder(*MIB.getInstr())
          .buildInstr(NegOpc, {ShAmtReg}, {Register(RISCV::X0), Reg});
      MIB.addReg(ShAmtReg);
    }}};
  }
  if (Imm.urem(ShiftWidth) == ShiftWidth - 1) {
    // Shifting by C-X where C == -1 mod Size: shift by ~X to generate a NOT
    // instead of a SUB of a constant.
    ShAmtReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    return {{[=](MachineInstrBuilder &MIB) {
      MachineIRBuilder(*MIB.getInstr())
          .buildInstr(RISCV::XORI, {ShAmtReg}, {Reg})
          .addImm(-1);
      MIB.addReg(ShAmtReg);
    }}};
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(ShAmtReg); }}};
}
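
// Illustrative sketch (not from the source): with the renderer above, a
// generic shift whose amount was explicitly masked,
//   %m:gprb(s64) = G_AND %y, 63
//   %d:gprb(s64) = G_SHL %x, %m
// selects to a plain `sll %d, %x, %y` on RV64, since SLL/SRL/SRA already
// ignore all but the low log2(XLEN) bits of rs2.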
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();
  MachineInstr *RootDef = MRI->getVRegDef(RootReg);

  if (RootDef->getOpcode() == TargetOpcode::G_SEXT_INREG &&
      RootDef->getOperand(2).getImm() == Bits)
    return {
        {[=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if ((Size - VT->computeNumSignBits(RootReg)) < Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectZExtBits(MachineOperand &Root,
                                         unsigned Bits) const {
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  Register RegX;
  // ... (match an explicit AND with the low-Bits mask)
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

  if (mi_match(RootReg, *MRI, m_GZExt(m_Reg(RegX))) &&
      MRI->getType(RegX).getScalarSizeInBits() == Bits)
    return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(RegX); }}};

  unsigned Size = MRI->getType(RootReg).getScalarSizeInBits();
  if (VT->maskedValueIsZero(RootReg, APInt::getBitsSetFrom(Size, Bits)))
    return {{[=](MachineInstrBuilder &MIB) { MIB.add(Root); }}};

  return std::nullopt;
}
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADDOp(MachineOperand &Root,
                                         unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  const unsigned XLen = STI.getXLen();
  APInt Mask, C2;
  Register RegY;
  std::optional<bool> LeftShift;
  // ... (match (and (shl/lshr y, c2), mask))

  if (LeftShift.has_value()) {
    // ...
    if (Mask.isShiftedMask()) {
      unsigned Leading = XLen - Mask.getActiveBits();
      unsigned Trailing = Mask.countr_zero();
      // Given (and (shl y, c2), mask) where mask has no leading zeros and c3
      // trailing zeros, fold everything into a single SRLI when c2 < c3 and
      // c3 == ShAmt.
      if (*LeftShift && Leading == 0 && C2.ult(Trailing) && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Trailing - C2.getLimitedValue());
          MIB.addReg(DstReg);
        }}};
      }

      // Given (and (lshr y, c2), mask) where mask has c2 leading zeros and
      // c3 trailing zeros, fold into (srli y, c2 + c3) when c3 == ShAmt.
      if (!*LeftShift && Leading == C2 && Trailing == ShAmt) {
        Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
        return {{[=](MachineInstrBuilder &MIB) {
          MachineIRBuilder(*MIB.getInstr())
              .buildInstr(RISCV::SRLI, {DstReg}, {RegY})
              .addImm(Leading + Trailing);
          MIB.addReg(DstReg);
        }}};
      }
    }
  }

  // ... (similarly match (shl (and y, mask), c2) and fold with SRLIW)
    unsigned Leading = XLen - Mask.getActiveBits();
    unsigned Trailing = Mask.countr_zero();
    // ...
      Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SRLIW, {DstReg}, {RegY})
            /* ... */;
        MIB.addReg(DstReg);
      }}};
  // ...
  return std::nullopt;
}
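
// Illustrative sketch (not from the source): selectSHXADDOp feeds the Zba
// sh1add/sh2add/sh3add patterns, so
//   %t:gprb(s64) = G_SHL %x, 2
//   %a:gprb(s64) = G_ADD %t, %y
// becomes `sh2add %a, %x, %y`, and the renderers above additionally fold a
// redundant mask around the shifted operand into a single SRLI.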
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectSHXADD_UWOp(MachineOperand &Root,
                                            unsigned ShAmt) const {
  using namespace llvm::MIPatternMatch;
  if (!Root.isReg())
    return std::nullopt;
  Register RootReg = Root.getReg();

  APInt Mask, C2;
  Register RegX;
  // ... (match (and (shl x, c2), mask))
  if (Mask.isShiftedMask()) {
    unsigned Leading = Mask.countl_zero();
    unsigned Trailing = Mask.countr_zero();
    if (Leading == 32 - ShAmt && C2 == Trailing && Trailing > ShAmt) {
      Register DstReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      return {{[=](MachineInstrBuilder &MIB) {
        MachineIRBuilder(*MIB.getInstr())
            .buildInstr(RISCV::SLLI, {DstReg}, {RegX})
            /* ... */;
        MIB.addReg(DstReg);
      }}};
    }
  }

  return std::nullopt;
}
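
// Illustrative note (not from the source): the _UW variant targets Zba's
// sh1add.uw/sh2add.uw/sh3add.uw, which zero-extend rs1 from 32 bits before
// shifting, so on RV64 `(add (shl (zext32 x), 2), y)` can select to
// `sh2add.uw rd, x, y`.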
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::renderVLOp(MachineOperand &Root) const {
  assert(Root.isReg() && "Expected operand to be a Register");
  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());

  if (RootDef->getOpcode() == TargetOpcode::G_CONSTANT) {
    auto C = RootDef->getOperand(1).getCImm();
    if (C->getValue().isAllOnes())
      // An all-ones VL requests VLMAX; encode it with the sentinel immediate
      // that the vsetvli insertion pass recognizes.
      return {{[=](MachineInstrBuilder &MIB) {
        MIB.addImm(RISCV::VLMaxSentinel);
      }}};

    uint64_t ZExtC = C->getZExtValue();
    return {{[=](MachineInstrBuilder &MIB) { MIB.addImm(ZExtC); }}};
  }

  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); }}};
}
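
// Illustrative note (not from the source): a VL of all-ones in the intrinsic
// therefore selects the VLMAX form (ultimately a `vsetvli rd, x0, <vtype>`),
// while other constants and plain registers pass through unchanged.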
InstructionSelector::ComplexRendererFns
RISCVInstructionSelector::selectAddrRegImm(MachineOperand &Root) const {
  if (!Root.isReg())
    return std::nullopt;

  MachineInstr *RootDef = MRI->getVRegDef(Root.getReg());
  if (RootDef->getOpcode() == TargetOpcode::G_FRAME_INDEX) {
    return {{
        [=](MachineInstrBuilder &MIB) { MIB.add(RootDef->getOperand(1)); },
        [=](MachineInstrBuilder &MIB) { MIB.addImm(0); },
    }};
  }

  if (isBaseWithConstantOffset(Root, *MRI)) {
    MachineOperand &LHS = RootDef->getOperand(1);
    MachineOperand &RHS = RootDef->getOperand(2);
    MachineInstr *LHSDef = MRI->getVRegDef(LHS.getReg());
    MachineInstr *RHSDef = MRI->getVRegDef(RHS.getReg());

    int64_t RHSC = RHSDef->getOperand(1).getCImm()->getSExtValue();
    if (isInt<12>(RHSC)) {
      if (LHSDef->getOpcode() == TargetOpcode::G_FRAME_INDEX)
        return {{
            [=](MachineInstrBuilder &MIB) { MIB.add(LHSDef->getOperand(1)); },
            [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); },
        }};

      return {{[=](MachineInstrBuilder &MIB) { MIB.add(LHS); },
               [=](MachineInstrBuilder &MIB) { MIB.addImm(RHSC); }}};
    }
  }

  // ...
  return {{[=](MachineInstrBuilder &MIB) { MIB.addReg(Root.getReg()); },
           [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }}};
}
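
// Illustrative sketch (not from the source): this renderer implements the
// reg+simm12 addressing mode, so roughly
//   %p:gprb(p0) = G_PTR_ADD %base, 16
//   %v:gprb(s32) = G_LOAD %p
// folds into a single `lw %v, 16(%base)` instead of a separate add.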
static RISCVCC::CondCode getRISCVCCFromICmp(CmpInst::Predicate Pred) {
  switch (Pred) {
  default:
    llvm_unreachable("Expected ICMP CmpInst::Predicate.");
  case CmpInst::Predicate::ICMP_EQ:
    return RISCVCC::COND_EQ;
  case CmpInst::Predicate::ICMP_NE:
    return RISCVCC::COND_NE;
  case CmpInst::Predicate::ICMP_ULT:
    return RISCVCC::COND_LTU;
  case CmpInst::Predicate::ICMP_SLT:
    return RISCVCC::COND_LT;
  case CmpInst::Predicate::ICMP_UGE:
    return RISCVCC::COND_GEU;
  case CmpInst::Predicate::ICMP_SGE:
    return RISCVCC::COND_GE;
  }
}

// ... (within getOperandsForBranch, after canonicalizing the compare that
// feeds the branch, the predicate is mapped with:
//   CC = getRISCVCCFromICmp(Pred);
// )
/// Select the RISC-V Zalasr opcode for the G_LOAD or G_STORE operation
/// GenericOpc, appropriate for the GPR register bank and of memory access
/// size OpSize.
static unsigned selectZalasrLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
  switch (OpSize) {
  default:
    llvm_unreachable("Unexpected memory size");
  case 8:
    return IsStore ? RISCV::SB_RL : RISCV::LB_AQ;
  case 16:
    return IsStore ? RISCV::SH_RL : RISCV::LH_AQ;
  case 32:
    return IsStore ? RISCV::SW_RL : RISCV::LW_AQ;
  case 64:
    return IsStore ? RISCV::SD_RL : RISCV::LD_AQ;
  }
}

/// Select the RISC-V reg+imm opcode for the G_LOAD or G_STORE operation
/// GenericOpc, appropriate for the GPR register bank and of memory access
/// size OpSize.
static unsigned selectRegImmLoadStoreOp(unsigned GenericOpc, unsigned OpSize) {
  const bool IsStore = GenericOpc == TargetOpcode::G_STORE;
  switch (OpSize) {
  default:
    llvm_unreachable("Unexpected memory size");
  case 8:
    return IsStore ? RISCV::SB : RISCV::LBU;
  case 16:
    return IsStore ? RISCV::SH : RISCV::LH;
  case 32:
    return IsStore ? RISCV::SW : RISCV::LW;
  case 64:
    return IsStore ? RISCV::SD : RISCV::LD;
  }
}
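
// Illustrative note (not from the source): an atomic `load acquire i32` maps
// through the Zalasr table above to LW_AQ (`lw.aq`), while a plain i32 load
// maps to LW and gets its base register and immediate offset from
// selectAddrRegImm.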
void RISCVInstructionSelector::addVectorLoadStoreOperands(
    MachineInstr &I, SmallVectorImpl<SrcOp> &SrcOps, unsigned &CurOp,
    bool IsMasked, bool IsStridedOrIndexed, LLT *IndexVT) const {
  // Base pointer.
  auto PtrReg = I.getOperand(CurOp++).getReg();
  SrcOps.push_back(PtrReg);

  // Stride or index.
  if (IsStridedOrIndexed) {
    auto StrideReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(StrideReg);
    if (IndexVT)
      *IndexVT = MRI->getType(StrideReg);
  }

  // Mask.
  if (IsMasked) {
    auto MaskReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(MaskReg);
  }
}
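
// Illustrative note (not from the source): for riscv_vlse_mask the intrinsic
// operand order consumed here is (passthru, pointer, stride, mask, vl, ...),
// so after the caller has pushed the passthru this helper appends pointer,
// stride, and mask in that order, leaving CurOp pointing at the VL operand.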
bool RISCVInstructionSelector::selectIntrinsicWithSideEffects(
    MachineInstr &I, MachineIRBuilder &MIB) const {
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
  // Select the instruction.
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::riscv_vlm:
  case Intrinsic::riscv_vle:
  case Intrinsic::riscv_vle_mask:
  case Intrinsic::riscv_vlse:
  case Intrinsic::riscv_vlse_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vle_mask ||
                    IntrinID == Intrinsic::riscv_vlse_mask;
    bool IsStrided = IntrinID == Intrinsic::riscv_vlse ||
                     IntrinID == Intrinsic::riscv_vlse_mask;
    LLT VT = MRI->getType(I.getOperand(0).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Result vector.
    const Register DstReg = I.getOperand(0).getReg();

    // Sources.
    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
    unsigned CurOp = 2;
    SmallVector<SrcOp, 4> SrcOps; // Source registers.

    // Passthru.
    if (HasPassthruOperand) {
      auto PassthruReg = I.getOperand(CurOp++).getReg();
      SrcOps.push_back(PassthruReg);
    }

    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    const RISCV::VLEPseudo *P =
        RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                            static_cast<unsigned>(LMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Policy.
    uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
    if (IsMasked)
      Policy = I.getOperand(CurOp++).getImm();
    PseudoMI.addImm(Policy);

    // Memref.
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
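
  // Illustrative sketch (not from the source): for
  //   %v = G_INTRINSIC_W_SIDE_EFFECTS riscv_vle, %passthru, %ptr, %vl
  // the case above looks up the matching VLE pseudo for the SEW/LMUL pair
  // and appends (passthru, ptr, VL, log2(SEW), policy) plus the original
  // memory operand.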
  case Intrinsic::riscv_vloxei:
  case Intrinsic::riscv_vloxei_mask:
  case Intrinsic::riscv_vluxei:
  case Intrinsic::riscv_vluxei_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vloxei_mask ||
                    IntrinID == Intrinsic::riscv_vluxei_mask;
    bool IsOrdered = IntrinID == Intrinsic::riscv_vloxei ||
                     IntrinID == Intrinsic::riscv_vloxei_mask;
    LLT VT = MRI->getType(I.getOperand(0).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Result vector.
    const Register DstReg = I.getOperand(0).getReg();

    // Sources.
    bool HasPassthruOperand = IntrinID != Intrinsic::riscv_vlm;
    unsigned CurOp = 2;
    SmallVector<SrcOp, 4> SrcOps; // Source registers.

    // Passthru.
    if (HasPassthruOperand) {
      auto PassthruReg = I.getOperand(CurOp++).getReg();
      SrcOps.push_back(PassthruReg);
    }

    LLT IndexVT;
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked,
                               /*IsStridedOrIndexed=*/true, &IndexVT);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    RISCVVType::VLMUL IndexLMUL =
        RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      reportFatalUsageError("The V extension does not support EEW=64 for index "
                            "values when XLEN=32");
    }
    const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
        static_cast<unsigned>(IndexLMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {DstReg}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Policy.
    uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
    if (IsMasked)
      Policy = I.getOperand(CurOp++).getImm();
    PseudoMI.addImm(Policy);

    // Memref.
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vsm:
  case Intrinsic::riscv_vse:
  case Intrinsic::riscv_vse_mask:
  case Intrinsic::riscv_vsse:
  case Intrinsic::riscv_vsse_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vse_mask ||
                    IntrinID == Intrinsic::riscv_vsse_mask;
    bool IsStrided = IntrinID == Intrinsic::riscv_vsse ||
                     IntrinID == Intrinsic::riscv_vsse_mask;
    LLT VT = MRI->getType(I.getOperand(1).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Sources.
    unsigned CurOp = 1;
    SmallVector<SrcOp, 4> SrcOps; // Source registers.

    // Store value.
    auto PassthruReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(PassthruReg);

    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked, IsStrided);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
        IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Memref.
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  case Intrinsic::riscv_vsoxei:
  case Intrinsic::riscv_vsoxei_mask:
  case Intrinsic::riscv_vsuxei:
  case Intrinsic::riscv_vsuxei_mask: {
    bool IsMasked = IntrinID == Intrinsic::riscv_vsoxei_mask ||
                    IntrinID == Intrinsic::riscv_vsuxei_mask;
    bool IsOrdered = IntrinID == Intrinsic::riscv_vsoxei ||
                     IntrinID == Intrinsic::riscv_vsoxei_mask;
    LLT VT = MRI->getType(I.getOperand(1).getReg());
    unsigned Log2SEW = Log2_32(VT.getScalarSizeInBits());

    // Sources.
    unsigned CurOp = 1;
    SmallVector<SrcOp, 4> SrcOps; // Source registers.

    // Store value.
    auto PassthruReg = I.getOperand(CurOp++).getReg();
    SrcOps.push_back(PassthruReg);

    LLT IndexVT;
    addVectorLoadStoreOperands(I, SrcOps, CurOp, IsMasked,
                               /*IsStridedOrIndexed=*/true, &IndexVT);

    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(getMVTForLLT(VT));
    RISCVVType::VLMUL IndexLMUL =
        RISCVTargetLowering::getLMUL(getMVTForLLT(IndexVT));
    unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
    if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
      reportFatalUsageError("The V extension does not support EEW=64 for index "
                            "values when XLEN=32");
    }
    const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
        IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
        static_cast<unsigned>(IndexLMUL));

    auto PseudoMI = MIB.buildInstr(P->Pseudo, {}, SrcOps);

    // Select VL.
    auto VLOpFn = renderVLOp(I.getOperand(CurOp++));
    for (auto &RenderFn : *VLOpFn)
      RenderFn(PseudoMI);

    // SEW.
    PseudoMI.addImm(Log2SEW);

    // Memref.
    PseudoMI.cloneMemRefs(I);

    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  }
}
bool RISCVInstructionSelector::selectIntrinsic(MachineInstr &I,
                                               MachineIRBuilder &MIB) const {
  // Find the intrinsic ID.
  unsigned IntrinID = cast<GIntrinsic>(I).getIntrinsicID();
  switch (IntrinID) {
  default:
    return false;
  case Intrinsic::riscv_vsetvli:
  case Intrinsic::riscv_vsetvlimax: {
    bool VLMax = IntrinID == Intrinsic::riscv_vsetvlimax;
    // vsetvli carries (avl, sew, lmul); vsetvlimax omits the AVL.
    unsigned Offset = VLMax ? 2 : 3;
    // ... (decode SEW/LMUL from the operands at Offset and encode VTypeI)

    Register DstReg = I.getOperand(0).getReg();
    // ...
    unsigned Opcode = RISCV::PseudoVSETVLI;
    // ...
    Register AVLReg = I.getOperand(2).getReg();
    // ... (if the AVL is a known constant, read it:)
      uint64_t AVL = AVLConst->Value.getZExtValue();
    // ...
    // An all-ones AVL constant means VLMAX.
    MachineInstr *AVLDef = MRI->getVRegDef(AVLReg);
    if (AVLDef && AVLDef->getOpcode() == TargetOpcode::G_CONSTANT) {
      // ...
      if (C->getValue().isAllOnes())
        VLMax = true;
    }

    if (VLMax) {
      Opcode = RISCV::PseudoVSETVLIX0;
      // ...
    } else {
      Register AVLReg = I.getOperand(2).getReg();
      // ... (a constant AVL that fits the 5-bit uimm uses vsetivli:)
        uint64_t AVL = AVLConst->Value.getZExtValue();
        // ...
        auto PseudoMI = MIB.buildInstr(RISCV::PseudoVSETIVLI, {DstReg}, {})
                            .addImm(AVL)
                            .addImm(VTypeI);
        I.eraseFromParent();
        return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
      // ...
    }

    // ... (VLOperand is x0 for VLMAX, otherwise the AVL register)
    auto PseudoMI =
        MIB.buildInstr(Opcode, {DstReg}, {VLOperand}).addImm(VTypeI);
    I.eraseFromParent();
    return constrainSelectedInstRegOperands(*PseudoMI, TII, TRI, RBI);
  }
  }
}
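
// Illustrative sketch (not from the source): `%vl = vsetvli(16, e32, m1)`
// selects to `vsetivli %vl, 16, e32, m1, ...` because 16 fits the 5-bit
// immediate, while a non-constant AVL keeps the register form
// `vsetvli %vl, %avl, e32, m1, ...`.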
bool RISCVInstructionSelector::selectExtractSubvector(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_EXTRACT_SUBVECTOR);

  Register DstReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();

  LLT DstTy = MRI->getType(DstReg);
  LLT SrcTy = MRI->getType(SrcReg);
  // ...
  unsigned Idx = static_cast<unsigned>(MI.getOperand(2).getImm());
  // ...
  MVT DstMVT = getMVTForLLT(DstTy);
  MVT SrcMVT = getMVTForLLT(SrcTy);
  unsigned SubRegIdx;
  std::tie(SubRegIdx, Idx) =
      RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
          SrcMVT, DstMVT, Idx, &TRI);
  // ...
  unsigned DstRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(DstMVT);
  const TargetRegisterClass *DstRC = TRI.getRegClass(DstRegClassID);
  // ...
  unsigned SrcRegClassID = RISCVTargetLowering::getRegClassIDForVecVT(SrcMVT);
  const TargetRegisterClass *SrcRC = TRI.getRegClass(SrcRegClassID);
  // ... (constrain DstReg/SrcReg to those classes)
  MIB.buildInstr(TargetOpcode::COPY, {DstReg}, {}).addReg(SrcReg, 0, SubRegIdx);

  MI.eraseFromParent();
  return true;
}
bool RISCVInstructionSelector::select(MachineInstr &MI) {
  MachineIRBuilder MIB(MI);

  preISelLower(MI, MIB);
  const unsigned Opc = MI.getOpcode();

  if (!MI.isPreISelOpcode() || Opc == TargetOpcode::G_PHI) {
    if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI) {
      const Register DefReg = MI.getOperand(0).getReg();
      const LLT DefTy = MRI->getType(DefReg);

      const RegClassOrRegBank &RegClassOrBank =
          MRI->getRegClassOrRegBank(DefReg);

      const TargetRegisterClass *DefRC =
          dyn_cast<const TargetRegisterClass *>(RegClassOrBank);
      if (!DefRC) {
        if (!DefTy.isValid())
          return false;
        const RegisterBank &RB = *cast<const RegisterBank *>(RegClassOrBank);
        DefRC = getRegClassForTypeOnBank(DefTy, RB);
        if (!DefRC)
          return false;
      }

      MI.setDesc(TII.get(TargetOpcode::PHI));
      return RBI.constrainGenericRegister(DefReg, *DefRC, *MRI);
    }

    // Certain non-generic instructions also need some special handling.
    // ...
    return true;
  }

  if (selectImpl(MI, *CoverageInfo))
    return true;

  switch (Opc) {
  case TargetOpcode::G_ANYEXT:
  case TargetOpcode::G_PTRTOINT:
  case TargetOpcode::G_INTTOPTR:
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FREEZE:
    return selectCopy(MI);
  case TargetOpcode::G_CONSTANT: {
    Register DstReg = MI.getOperand(0).getReg();
    int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();

    if (!materializeImm(DstReg, Imm, MIB))
      return false;

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_SEXT: {
    bool IsSigned = Opc != TargetOpcode::G_ZEXT;
    const Register DstReg = MI.getOperand(0).getReg();
    const Register SrcReg = MI.getOperand(1).getReg();
    LLT SrcTy = MRI->getType(SrcReg);
    unsigned SrcSize = SrcTy.getSizeInBits();
    // ...
    assert(RBI.getRegBank(DstReg, *MRI, TRI)->getID() ==
               RISCV::GPRBRegBankID &&
           "Unexpected ext regbank");

    // addiw rd, rs, 0 (i.e. sext.w) for s32 -> s64 sign extension.
    if (IsSigned && SrcSize == 32) {
      MI.setDesc(TII.get(RISCV::ADDIW));
      MI.addOperand(MachineOperand::CreateImm(0));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // add.uw rd, rs, x0 (i.e. zext.w) with Zba for s32 -> s64 zero extension.
    if (!IsSigned && SrcSize == 32 && STI.hasStdExtZba()) {
      MI.setDesc(TII.get(RISCV::ADD_UW));
      MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // sext.h/zext.h with Zbb for 16-bit extensions.
    if (SrcSize == 16 && STI.hasStdExtZbb()) {
      MI.setDesc(TII.get(IsSigned         ? RISCV::SEXT_H
                         : STI.isRV64()   ? RISCV::ZEXT_H_RV64
                                          : RISCV::ZEXT_H_RV32));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // pack(w) rd, rs, x0 with Zbkb also zero-extends the low 16 bits.
    if (!IsSigned && SrcSize == 16 && STI.hasStdExtZbkb()) {
      MI.setDesc(TII.get(STI.is64Bit() ? RISCV::PACKW : RISCV::PACK));
      MI.addOperand(MachineOperand::CreateReg(RISCV::X0, /*isDef=*/false));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Otherwise fall back to a pair of shifts.
    auto ShiftLeft =
        MIB.buildInstr(RISCV::SLLI, {&RISCV::GPRRegClass}, {SrcReg})
            .addImm(STI.getXLen() - SrcSize);
    if (!constrainSelectedInstRegOperands(*ShiftLeft, TII, TRI, RBI))
      return false;
    auto ShiftRight = MIB.buildInstr(IsSigned ? RISCV::SRAI : RISCV::SRLI,
                                     {DstReg}, {ShiftLeft})
                          .addImm(STI.getXLen() - SrcSize);
    if (!constrainSelectedInstRegOperands(*ShiftRight, TII, TRI, RBI))
      return false;
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_FCONSTANT: {
    // TODO: Use constant pool for complex constants.
    const Register DstReg = MI.getOperand(0).getReg();
    const APFloat &FPimm = MI.getOperand(1).getFPImm()->getValueAPF();
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    if (Size == 16 || Size == 32 || (Size == 64 && Subtarget->is64Bit())) {
      APInt Imm = FPimm.bitcastToAPInt();
      // ...
      Register GPRReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      if (!materializeImm(GPRReg, Imm.getSExtValue(), MIB))
        return false;

      unsigned Opcode = Size == 64   ? RISCV::FMV_D_X
                        : Size == 32 ? RISCV::FMV_W_X
                                     : RISCV::FMV_H_X;
      auto FMV = MIB.buildInstr(Opcode, {DstReg}, {GPRReg});
      if (!FMV.constrainAllUses(TII, TRI, RBI))
        return false;
    } else {
      // Only f64 on RV32 gets here.
      assert(Size == 64 && !Subtarget->is64Bit() &&
             "Unexpected size or subtarget");
      // A +0.0 constant can be built with a single conversion from x0.
      if (FPimm.isPosZero()) {
        MachineInstrBuilder FCVT =
            MIB.buildInstr(RISCV::FCVT_D_W, {DstReg}, {Register(RISCV::X0)});
        // ... (constrain FCVT)
        MI.eraseFromParent();
        return true;
      }

      // Otherwise materialize both 32-bit halves and pair them.
      Register GPRRegHigh = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      Register GPRRegLow = MRI->createVirtualRegister(&RISCV::GPRRegClass);
      APInt Imm = FPimm.bitcastToAPInt();
      if (!materializeImm(GPRRegHigh, Imm.extractBits(32, 32).getSExtValue(),
                          MIB))
        return false;
      if (!materializeImm(GPRRegLow, Imm.trunc(32).getSExtValue(), MIB))
        return false;
      MachineInstrBuilder PairF64 = MIB.buildInstr(
          RISCV::BuildPairF64Pseudo, {DstReg}, {GPRRegLow, GPRRegHigh});
      if (!PairF64.constrainAllUses(TII, TRI, RBI))
        return false;
    }

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_GLOBAL_VALUE: {
    auto *GV = MI.getOperand(1).getGlobal();
    if (GV->isThreadLocal()) {
      // TODO: implement this case.
      return false;
    }

    return selectAddr(MI, MIB, GV->isDSOLocal(), GV->hasExternalWeakLinkage());
  }
  case TargetOpcode::G_JUMP_TABLE:
  case TargetOpcode::G_CONSTANT_POOL:
    return selectAddr(MI, MIB, MRI);
  case TargetOpcode::G_BRCOND: {
    Register LHS, RHS;
    RISCVCC::CondCode CC;
    getOperandsForBranch(MI.getOperand(0).getReg(), CC, LHS, RHS, *MRI);

    auto Bcc = MIB.buildInstr(RISCVCC::getBrCond(CC), {}, {LHS, RHS})
                   .addMBB(MI.getOperand(1).getMBB());
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Bcc, TII, TRI, RBI);
  }
  case TargetOpcode::G_BRINDIRECT:
    MI.setDesc(TII.get(RISCV::PseudoBRIND));
    MI.addOperand(MachineOperand::CreateImm(0));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  case TargetOpcode::G_SELECT:
    return selectSelect(MI, MIB);
  case TargetOpcode::G_FCMP:
    return selectFPCompare(MI, MIB);
  case TargetOpcode::G_FENCE: {
    AtomicOrdering FenceOrdering =
        static_cast<AtomicOrdering>(MI.getOperand(0).getImm());
    SyncScope::ID FenceSSID =
        static_cast<SyncScope::ID>(MI.getOperand(1).getImm());
    emitFence(FenceOrdering, FenceSSID, MIB);
    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectImplicitDef(MI, MIB);
  case TargetOpcode::G_UNMERGE_VALUES:
    return selectUnmergeValues(MI, MIB);
  case TargetOpcode::G_LOAD:
  case TargetOpcode::G_STORE: {
    GLoadStore &LdSt = cast<GLoadStore>(MI);
    const Register ValReg = LdSt.getReg(0);
    const Register PtrReg = LdSt.getPointerReg();
    LLT PtrTy = MRI->getType(PtrReg);

    const RegisterBank &RB = *RBI.getRegBank(ValReg, *MRI, TRI);
    if (RB.getID() != RISCV::GPRBRegBankID)
      return false;

    assert(RBI.getRegBank(PtrReg, *MRI, TRI)->getID() ==
               RISCV::GPRBRegBankID &&
           "Load/Store pointer operand isn't a GPR");
    assert(PtrTy.isPointer() && "Load/Store pointer operand isn't a pointer");

    // ... (NewOpc comes from selectZalasrLoadStoreOp for atomic accesses or
    // selectRegImmLoadStoreOp otherwise, keyed on the memory size)
    if (NewOpc == MI.getOpcode())
      return false;

    auto AddrModeFns = selectAddrRegImm(MI.getOperand(1));
    if (!AddrModeFns)
      return false;

    // Fold the addressing-mode operands into the new instruction.
    auto NewInst = MIB.buildInstr(NewOpc, {}, {}, MI.getFlags());
    if (Opc == TargetOpcode::G_STORE)
      NewInst.addUse(ValReg);
    else
      NewInst.addDef(ValReg);
    NewInst.cloneMemRefs(MI);
    for (auto &Fn : *AddrModeFns)
      Fn(NewInst);
    MI.eraseFromParent();

    return constrainSelectedInstRegOperands(*NewInst, TII, TRI, RBI);
  }
  case TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS:
    return selectIntrinsicWithSideEffects(MI, MIB);
  case TargetOpcode::G_INTRINSIC:
    return selectIntrinsic(MI, MIB);
  case TargetOpcode::G_EXTRACT_SUBVECTOR:
    return selectExtractSubvector(MI, MIB);
  default:
    return false;
  }
}
bool RISCVInstructionSelector::selectUnmergeValues(
    MachineInstr &MI, MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_UNMERGE_VALUES);

  if (!Subtarget->hasStdExtZfa())
    return false;

  // Split an FPR64 into two GPRs (RV32 + Zfa only).
  if (MI.getNumOperands() != 3)
    return false;
  Register Src = MI.getOperand(2).getReg();
  Register Lo = MI.getOperand(0).getReg();
  Register Hi = MI.getOperand(1).getReg();
  if (!isRegInFprb(Src) || !isRegInGprb(Lo) || !isRegInGprb(Hi))
    return false;

  MachineInstr *ExtractLo = MIB.buildInstr(RISCV::FMV_X_W_FPR64, {Lo}, {Src});
  if (!constrainSelectedInstRegOperands(*ExtractLo, TII, TRI, RBI))
    return false;

  MachineInstr *ExtractHi = MIB.buildInstr(RISCV::FMVH_X_D, {Hi}, {Src});
  if (!constrainSelectedInstRegOperands(*ExtractHi, TII, TRI, RBI))
    return false;

  MI.eraseFromParent();
  return true;
}
bool RISCVInstructionSelector::replacePtrWithInt(MachineOperand &Op,
                                                 MachineIRBuilder &MIB) {
  Register PtrReg = Op.getReg();
  assert(MRI->getType(PtrReg).isPointer() && "Operand is not a pointer!");

  const LLT sXLen = LLT::scalar(STI.getXLen());
  auto PtrToInt = MIB.buildPtrToInt(sXLen, PtrReg);
  MRI->setRegBank(PtrToInt.getReg(0), RBI.getRegBank(RISCV::GPRBRegBankID));
  Op.setReg(PtrToInt.getReg(0));
  return select(*PtrToInt);
}

void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
                                            MachineIRBuilder &MIB) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_PTR_ADD: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());

    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_ADD));
    MRI->setType(DstReg, sXLen);
    break;
  }
  case TargetOpcode::G_PTRMASK: {
    Register DstReg = MI.getOperand(0).getReg();
    const LLT sXLen = LLT::scalar(STI.getXLen());
    replacePtrWithInt(MI.getOperand(1), MIB);
    MI.setDesc(TII.get(TargetOpcode::G_AND));
    MRI->setType(DstReg, sXLen);
    break;
  }
  }
}
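
// Illustrative sketch (not from the source): preISelLower rewrites
//   %p:gprb(p0) = G_PTR_ADD %base, %off
// into
//   %b:gprb(sXLen) = G_PTRTOINT %base
//   %p:gprb(sXLen) = G_ADD %b, %off
// so the generic integer ADD patterns (and the reg+imm folds above) apply to
// pointer arithmetic as well.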
void RISCVInstructionSelector::renderNegImm(MachineInstrBuilder &MIB,
                                            const MachineInstr &MI,
                                            int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(-CstVal);
}

void RISCVInstructionSelector::renderImmSubFromXLen(MachineInstrBuilder &MIB,
                                                    const MachineInstr &MI,
                                                    int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - CstVal);
}

void RISCVInstructionSelector::renderImmSubFrom32(MachineInstrBuilder &MIB,
                                                  const MachineInstr &MI,
                                                  int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t CstVal = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(32 - CstVal);
}

void RISCVInstructionSelector::renderImmPlus1(MachineInstrBuilder &MIB,
                                              const MachineInstr &MI,
                                              int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t CstVal = MI.getOperand(1).getCImm()->getSExtValue();
  MIB.addImm(CstVal + 1);
}

void RISCVInstructionSelector::renderFrameIndex(MachineInstrBuilder &MIB,
                                                const MachineInstr &MI,
                                                int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_FRAME_INDEX && OpIdx == -1 &&
         "Expected G_FRAME_INDEX");
  MIB.add(MI.getOperand(1));
}

void RISCVInstructionSelector::renderTrailingZeros(MachineInstrBuilder &MIB,
                                                   const MachineInstr &MI,
                                                   int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(llvm::countr_zero(C));
}

void RISCVInstructionSelector::renderXLenSubTrailingOnes(
    MachineInstrBuilder &MIB, const MachineInstr &MI, int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  uint64_t C = MI.getOperand(1).getCImm()->getZExtValue();
  MIB.addImm(STI.getXLen() - llvm::countr_one(C));
}

void RISCVInstructionSelector::renderAddiPairImmSmall(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue();
  int64_t Adj = Imm < 0 ? -2048 : 2047;
  MIB.addImm(Imm - Adj);
}

void RISCVInstructionSelector::renderAddiPairImmLarge(MachineInstrBuilder &MIB,
                                                      const MachineInstr &MI,
                                                      int OpIdx) const {
  assert(MI.getOpcode() == TargetOpcode::G_CONSTANT && OpIdx == -1 &&
         "Expected G_CONSTANT");
  int64_t Imm = MI.getOperand(1).getCImm()->getSExtValue() < 0 ? -2048 : 2047;
  MIB.addImm(Imm);
}
const TargetRegisterClass *RISCVInstructionSelector::getRegClassForTypeOnBank(
    LLT Ty, const RegisterBank &RB) const {
  if (RB.getID() == RISCV::GPRBRegBankID) {
    // ...
      return &RISCV::GPRRegClass;
  }

  if (RB.getID() == RISCV::FPRBRegBankID) {
    if (Ty.getSizeInBits() == 16)
      return &RISCV::FPR16RegClass;
    if (Ty.getSizeInBits() == 32)
      return &RISCV::FPR32RegClass;
    if (Ty.getSizeInBits() == 64)
      return &RISCV::FPR64RegClass;
  }

  if (RB.getID() == RISCV::VRBRegBankID) {
    if (Ty.getSizeInBits().getKnownMinValue() <= 64)
      return &RISCV::VRRegClass;
    if (Ty.getSizeInBits().getKnownMinValue() == 128)
      return &RISCV::VRM2RegClass;
    if (Ty.getSizeInBits().getKnownMinValue() == 256)
      return &RISCV::VRM4RegClass;
    if (Ty.getSizeInBits().getKnownMinValue() == 512)
      return &RISCV::VRM8RegClass;
  }

  return nullptr;
}

bool RISCVInstructionSelector::isRegInGprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::GPRBRegBankID;
}

bool RISCVInstructionSelector::isRegInFprb(Register Reg) const {
  return RBI.getRegBank(Reg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID;
}
bool RISCVInstructionSelector::selectCopy(MachineInstr &MI) const {
  Register DstReg = MI.getOperand(0).getReg();

  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));
  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  // No need to constrain SrcReg; it is constrained when another of its uses
  // or defs is selected.
  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;

  MI.setDesc(TII.get(RISCV::COPY));
  return true;
}

bool RISCVInstructionSelector::selectImplicitDef(MachineInstr &MI,
                                                 MachineIRBuilder &MIB) const {
  assert(MI.getOpcode() == TargetOpcode::G_IMPLICIT_DEF);

  const Register DstReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *DstRC = getRegClassForTypeOnBank(
      MRI->getType(DstReg), *RBI.getRegBank(DstReg, *MRI, TRI));

  assert(DstRC &&
         "Register class not available for LLT, register bank combination");

  if (!RBI.constrainGenericRegister(DstReg, *DstRC, *MRI))
    return false;
  MI.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}
bool RISCVInstructionSelector::materializeImm(Register DstReg, int64_t Imm,
                                              MachineIRBuilder &MIB) const {
  if (Imm == 0) {
    MIB.buildCopy(DstReg, Register(RISCV::X0));
    RBI.constrainGenericRegister(DstReg, RISCV::GPRRegClass, *MRI);
    return true;
  }

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Imm, *Subtarget);
  unsigned NumInsts = Seq.size();
  Register SrcReg = RISCV::X0;

  for (unsigned i = 0; i < NumInsts; i++) {
    Register TmpReg = i < NumInsts - 1
                          ? MRI->createVirtualRegister(&RISCV::GPRRegClass)
                          : DstReg;
    const RISCVMatInt::Inst &I = Seq[i];
    MachineInstr *Result;

    switch (I.getOpndKind()) {
    // ...
    case RISCVMatInt::RegX0:
      Result = MIB.buildInstr(I.getOpcode(), {TmpReg},
                              {SrcReg, Register(RISCV::X0)});
      break;
    // ...
    }

    // ... (constrain Result)
    SrcReg = TmpReg;
  }

  return true;
}
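
// Illustrative sketch (not from the source): RISCVMatInt::generateInstSeq
// decomposes a constant into a short instruction chain, so on RV64
//   materializeImm(dst, 0x12345678, MIB)
// roughly yields
//   lui   t0, 0x12345        ; t0  = 0x12345000
//   addiw dst, t0, 0x678     ; dst = 0x12345678
// with t0 standing in for the intermediate virtual register.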
bool RISCVInstructionSelector::selectAddr(MachineInstr &MI,
                                          MachineIRBuilder &MIB, bool IsLocal,
                                          bool IsExternWeak) const {
  assert((MI.getOpcode() == TargetOpcode::G_GLOBAL_VALUE ||
          MI.getOpcode() == TargetOpcode::G_JUMP_TABLE ||
          MI.getOpcode() == TargetOpcode::G_CONSTANT_POOL) &&
         "Unexpected opcode");

  const MachineOperand &DispMO = MI.getOperand(1);

  Register DefReg = MI.getOperand(0).getReg();
  const LLT DefTy = MRI->getType(DefReg);

  // ...
  if (TM.isPositionIndependent() || Subtarget->allowTaggedGlobals()) {
    if (IsLocal && !Subtarget->allowTaggedGlobals()) {
      // Use PC-relative addressing: (PseudoLLA sym), which expands to
      // (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
      MI.setDesc(TII.get(RISCV::PseudoLLA));
      return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
    }

    // Otherwise go through the GOT: (PseudoLGA sym), i.e.
    // (ld (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc)).
    MachineFunction &MF = *MI.getParent()->getParent();
    MachineInstr *Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                               .addDisp(DispMO, 0);
    Result->addMemOperand(
        MF, MF.getMachineMemOperand(MachinePointerInfo::getGOT(MF),
                                    MachineMemOperand::MOLoad |
                                        MachineMemOperand::MODereferenceable |
                                        MachineMemOperand::MOInvariant,
                                    DefTy, Align(DefTy.getSizeInBits() / 8)));
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
  }

  switch (TM.getCodeModel()) {
  default: {
    reportGISelFailure(/* ... */,
                       "Unsupported code model for lowering", MI);
    return false;
  }
  case CodeModel::Small: {
    // (lui %hi(sym)) + (addi %lo(sym)).
    Register AddrHiDest = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    MachineInstr *AddrHi = MIB.buildInstr(RISCV::LUI, {AddrHiDest}, {})
                               .addDisp(DispMO, 0, RISCVII::MO_HI);
    // ... (build the ADDI with %lo and constrain both)
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
  }
  case CodeModel::Medium:
    // An extern weak symbol may be undefined, i.e. have value 0, which may
    // not be within 2GiB of PC, so use GOT-indirect addressing for it.
    if (IsExternWeak) {
      MachineFunction &MF = *MI.getParent()->getParent();
      MachineInstr *Result = MIB.buildInstr(RISCV::PseudoLGA, {DefReg}, {})
                                 .addDisp(DispMO, 0);
      // ... (attach the same GOT memory operand as above)
      MI.eraseFromParent();
      return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
    }

    // Generate (PseudoLLA sym) for anything within 2GiB of PC.
    MI.setDesc(TII.get(RISCV::PseudoLLA));
    return constrainSelectedInstRegOperands(MI, TII, TRI, RBI);
  }

  return false;
}
bool RISCVInstructionSelector::selectSelect(MachineInstr &MI,
                                            MachineIRBuilder &MIB) const {
  auto &SelectMI = cast<GSelect>(MI);

  Register LHS, RHS;
  RISCVCC::CondCode CC;
  getOperandsForBranch(SelectMI.getCondReg(), CC, LHS, RHS, *MRI);

  Register DstReg = SelectMI.getReg(0);

  unsigned Opc = RISCV::Select_GPR_Using_CC_GPR;
  if (RBI.getRegBank(DstReg, *MRI, TRI)->getID() == RISCV::FPRBRegBankID) {
    unsigned Size = MRI->getType(DstReg).getSizeInBits();
    Opc = Size == 32 ? RISCV::Select_FPR32_Using_CC_GPR
                     : RISCV::Select_FPR64_Using_CC_GPR;
  }

  MachineInstr *Result = MIB.buildInstr(Opc)
                             .addDef(DstReg)
                             .addReg(LHS)
                             .addReg(RHS)
                             .addImm(CC)
                             .addReg(SelectMI.getTrueReg())
                             .addReg(SelectMI.getFalseReg());
  MI.eraseFromParent();
  return constrainSelectedInstRegOperands(*Result, TII, TRI, RBI);
}
static unsigned getFCmpOpcode(CmpInst::Predicate Pred, unsigned Size) {
  assert((Size == 16 || Size == 32 || Size == 64) && "Unexpected size");
  switch (Pred) {
  default:
    llvm_unreachable("Unsupported predicate");
  case CmpInst::FCMP_OLT:
    return Size == 16 ? RISCV::FLT_H
           : Size == 32 ? RISCV::FLT_S
                        : RISCV::FLT_D;
  case CmpInst::FCMP_OLE:
    return Size == 16 ? RISCV::FLE_H
           : Size == 32 ? RISCV::FLE_S
                        : RISCV::FLE_D;
  case CmpInst::FCMP_OEQ:
    return Size == 16 ? RISCV::FEQ_H
           : Size == 32 ? RISCV::FEQ_S
                        : RISCV::FEQ_D;
  }
}

// Try to manipulate an FCMP's predicate into one we can select directly
// (FEQ/FLT/FLE), possibly by swapping operands or inverting the result.
static bool legalizeFCmpPredicate(Register &LHS, Register &RHS,
                                  CmpInst::Predicate &Pred, bool &NeedInvert) {
  assert(!isLegalFCmpPredicate(Pred) && "Predicate already legal?");

  CmpInst::Predicate InvPred = CmpInst::getSwappedPredicate(Pred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  InvPred = CmpInst::getInversePredicate(Pred);
  NeedInvert = true;
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    return true;
  }
  InvPred = CmpInst::getSwappedPredicate(InvPred);
  if (isLegalFCmpPredicate(InvPred)) {
    Pred = InvPred;
    std::swap(LHS, RHS);
    return true;
  }

  return false;
}
bool RISCVInstructionSelector::selectFPCompare(MachineInstr &MI,
                                               MachineIRBuilder &MIB) const {
  auto &CmpMI = cast<GFCmp>(MI);
  CmpInst::Predicate Pred = CmpMI.getCond();

  Register DstReg = CmpMI.getReg(0);
  Register LHS = CmpMI.getLHSReg();
  Register RHS = CmpMI.getRHSReg();

  unsigned Size = MRI->getType(LHS).getSizeInBits();
  // ...
  Register TmpReg = DstReg;

  bool NeedInvert = false;
  // First try handling this with a single compare, possibly after swapping
  // operands or inverting the predicate.
  if (isLegalFCmpPredicate(Pred) ||
      legalizeFCmpPredicate(LHS, RHS, Pred, NeedInvert)) {
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Cmp = MIB.buildInstr(getFCmpOpcode(Pred, Size), {TmpReg}, {LHS, RHS});
    if (!Cmp.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ONE || Pred == CmpInst::FCMP_UEQ) {
    // fcmp one LHS, RHS => (OR (FLT LHS, RHS), (FLT RHS, LHS))
    NeedInvert = Pred == CmpInst::FCMP_UEQ;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {LHS, RHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OLT, Size),
                               {&RISCV::GPRRegClass}, {RHS, LHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto Or =
        MIB.buildInstr(RISCV::OR, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!Or.constrainAllUses(TII, TRI, RBI))
      return false;
  } else if (Pred == CmpInst::FCMP_ORD || Pred == CmpInst::FCMP_UNO) {
    // fcmp ord LHS, RHS => (AND (FEQ LHS, LHS), (FEQ RHS, RHS))
    NeedInvert = Pred == CmpInst::FCMP_UNO;
    auto Cmp1 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {LHS, LHS});
    if (!Cmp1.constrainAllUses(TII, TRI, RBI))
      return false;
    auto Cmp2 = MIB.buildInstr(getFCmpOpcode(CmpInst::FCMP_OEQ, Size),
                               {&RISCV::GPRRegClass}, {RHS, RHS});
    if (!Cmp2.constrainAllUses(TII, TRI, RBI))
      return false;
    if (NeedInvert)
      TmpReg = MRI->createVirtualRegister(&RISCV::GPRRegClass);
    auto And =
        MIB.buildInstr(RISCV::AND, {TmpReg}, {Cmp1.getReg(0), Cmp2.getReg(0)});
    if (!And.constrainAllUses(TII, TRI, RBI))
      return false;
  } else
    llvm_unreachable("Unhandled predicate");

  // Emit an XORI to invert the result if needed.
  if (NeedInvert) {
    auto Xor = MIB.buildInstr(RISCV::XORI, {DstReg}, {TmpReg}).addImm(1);
    if (!Xor.constrainAllUses(TII, TRI, RBI))
      return false;
  }

  MI.eraseFromParent();
  return true;
}
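
// Illustrative sketch (not from the source): FCMP_ONE has no single RISC-V
// instruction, so for f32 it becomes
//   flt.s t0, a0, a1
//   flt.s t1, a1, a0
//   or    a0, t0, t1
// and FCMP_UEQ is the same sequence followed by `xori a0, a0, 1`.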
void RISCVInstructionSelector::emitFence(AtomicOrdering FenceOrdering,
                                         SyncScope::ID FenceSSID,
                                         MachineIRBuilder &MIB) const {
  if (STI.hasStdExtZtso()) {
    // Under Ztso the only fence that still needs an instruction is a
    // sequentially-consistent cross-thread fence.
    if (FenceOrdering == AtomicOrdering::SequentiallyConsistent &&
        FenceSSID == SyncScope::System) {
      // ... (emit fence rw, rw)
    }

    // MEMBARRIER is a compiler barrier; it codegens to no instructions.
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  // Single-thread fences only synchronize with signal handlers on the same
  // thread, so a compiler barrier suffices.
  if (FenceSSID == SyncScope::SingleThread) {
    MIB.buildInstr(TargetOpcode::MEMBARRIER, {}, {});
    return;
  }

  unsigned Pred, Succ;
  switch (FenceOrdering) {
  default:
    llvm_unreachable("Unexpected ordering");
  case AtomicOrdering::AcquireRelease:
    // fence acq_rel -> fence.tso
    // ... (emit FENCE_TSO and return)
  case AtomicOrdering::Acquire:
    // fence acquire -> fence r, rw
    // ... (Pred = R; Succ = R|W)
  case AtomicOrdering::Release:
    // fence release -> fence rw, w
    // ... (Pred = R|W; Succ = W)
  case AtomicOrdering::SequentiallyConsistent:
    // fence seq_cst -> fence rw, rw
    // ... (Pred = Succ = R|W)
  }
  // ... (emit FENCE with the chosen Pred/Succ fields)
}
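
// Illustrative note (not from the source): per the mapping above, without
// Ztso `fence acquire` becomes `fence r, rw`, `fence release` becomes
// `fence rw, w`, `fence acq_rel` becomes `fence.tso`, and `fence seq_cst`
// becomes `fence rw, rw`; single-thread fences emit no instruction at all.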
InstructionSelector *
llvm::createRISCVInstructionSelector(const RISCVTargetMachine &TM,
                                     const RISCVSubtarget &Subtarget,
                                     const RISCVRegisterBankInfo &RBI) {
  return new RISCVInstructionSelector(TM, Subtarget, RBI);
}