#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"

#define DEBUG_TYPE "riscv-instr-info"
50 "Number of registers within vector register groups spilled");
52 "Number of registers within vector register groups reloaded");
56 cl::desc(
"Prefer whole register move for vector registers."));
59 "riscv-force-machine-combiner-strategy",
cl::Hidden,
60 cl::desc(
"Force machine combiner to use a specific strategy for machine "
61 "trace metrics evaluation."),
66 "MinInstrCount strategy.")));
#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
    : RISCVGenInstrInfo(RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
      STI(STI) {}

#define GET_INSTRINFO_HELPERS
#include "RISCVGenInstrInfo.inc"
MCInst RISCVInstrInfo::getNop() const {
  if (STI.hasStdExtZca())
    return MCInstBuilder(RISCV::C_NOP);
  return MCInstBuilder(RISCV::ADDI)
      .addReg(RISCV::X0)
      .addReg(RISCV::X0)
      .addImm(0);
}

Register RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
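  // The whole-register loads matched below encode their register-group size
  // in the opcode itself: e.g. VL4RE16_V always moves a 4-register group
  // regardless of the dynamic vtype, which is what makes them recognizable
  // as stack reloads without tracking any preceding vsetvli.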
  // ...
  case RISCV::VL1RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL1RE64_V:
    // ...
  case RISCV::VL2RE8_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL2RE64_V:
    // ...
  case RISCV::VL4RE8_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL4RE64_V:
    // ...
  case RISCV::VL8RE8_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL8RE64_V:
    // ...
  switch (MI.getOpcode()) {
  // ...
  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
    if (!MI.getOperand(1).isFI())
      return Register();
    FrameIndex = MI.getOperand(1).getIndex();
    // ...
    return MI.getOperand(0).getReg();
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {
  // ...
  switch (MI.getOpcode()) {
  // ...
    if (!MI.getOperand(1).isFI())
      return Register();
    FrameIndex = MI.getOperand(1).getIndex();
    // ...
    return MI.getOperand(0).getReg();
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
  case RISCV::VFMV_V_F:
  // ...
  case RISCV::VFMV_S_F:
    // ...
    return MI.getOperand(1).isUndef();
  // ...

static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
}
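// Example: with NumRegs = 4, copying v8..v11 into v10..v13 gives
// DstReg - SrcReg == 2 < 4, so a forward copy would overwrite v10/v11
// before they are read; the caller must then emit the copy in reverse.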
  // ...
  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");
  // ...
  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;
  // ...
    if (MBBI->isMetaInstruction())
      continue;
    if (RISCVInstrInfo::isVectorConfigInstr(*MBBI)) {
      // ...
      unsigned FirstVType = MBBI->getOperand(2).getImm();
      // ...
      if (FirstLMul != LMul)
        return false;
      // ...
      if (!RISCVInstrInfo::isVLPreservingConfig(*MBBI))
        return false;
      // ...
      unsigned VType = MBBI->getOperand(2).getImm();
      // ...
    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
      return false;
    } else if (MBBI->getNumDefs()) {
      // ...
      if (MBBI->modifiesRegister(RISCV::VL, nullptr))
        return false;
      // ...
      if (!MO.isReg() || !MO.isDef())
        continue;
      if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {
        // ...
        if (MO.getReg() != SrcReg)
          return false;
        // ...
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  uint16_t DstEncoding = TRI->getEncodingValue(DstReg);
  // ...
  assert(!Fractional && "It is impossible to be fractional lmul here.");
  unsigned NumRegs = NF * LMulVal;
  // ...
  SrcEncoding += NumRegs - 1;
  DstEncoding += NumRegs - 1;

  auto GetCopyInfo = [&](uint16_t SrcEncoding, uint16_t DstEncoding)
      -> std::tuple<RISCVVType::VLMUL, const TargetRegisterClass &, unsigned,
                    unsigned, unsigned> {
    // ...
    uint16_t Diff = DstEncoding - SrcEncoding;
    if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
        DstEncoding % 8 == 7)
      return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
        DstEncoding % 4 == 3)
      return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
        DstEncoding % 2 == 1)
      return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
    return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
    // ...
    if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
      return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
      return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
      return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
    return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
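  // Illustration of the greedy selection above (a sketch, not normative):
  // if both encodings are 8-aligned and at least eight registers remain, a
  // single VMV8R_V (or PseudoVMV_V_V_M8 when the source definition allows a
  // vmv.v.v) moves the whole group; misaligned or smaller remainders fall
  // back to the M4, M2 and finally M1 variants.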
  while (I != NumRegs) {
    // ...
    auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
        GetCopyInfo(SrcEncoding, DstEncoding);
    // ...
    if (LMul == LMulCopied &&
        // ...
      Opc = VVOpc;
      if (DefMBBI->getOpcode() == VIOpc)
        Opc = VIOpc;
    }
    // ...
        RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
    // ...
        RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
    // ...
      MIB = MIB.add(DefMBBI->getOperand(2));
    // ...
      MIB.addImm(Log2SEW ? Log2SEW : 3);
    // ...
    SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
    DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
  }
void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 const DebugLoc &DL, MCRegister DstReg,
                                 MCRegister SrcReg, bool KillSrc,
                                 bool RenamableDest, bool RenamableSrc) const {
  // ...
  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    // ...
    return;
  }

  if (RISCV::GPRF16RegClass.contains(DstReg, SrcReg)) {
    // ...
    return;
  }

  if (RISCV::GPRF32RegClass.contains(DstReg, SrcReg)) {
    // ...
    return;
  }

  if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {
    if (STI.isRV32() && STI.hasStdExtZdinx()) {
      // ...
    }
    // ...
    MCRegister EvenReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
    MCRegister OddReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);
    if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)
      // ...
    assert(DstReg != RISCV::X0_Pair && "Cannot write to X0_Pair");
    // ...
            TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
        .addReg(EvenReg, KillFlag)
    // ...
            TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
    // ...
    return;
  }

  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
        .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)
        .addReg(RISCV::X0);
    return;
  }

  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    unsigned Opc;
    if (STI.hasStdExtZfh()) {
      Opc = RISCV::FSGNJ_H;
    } else {
      assert(STI.hasStdExtF() &&
             (STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
             "Unexpected extensions");
      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;
    }
    BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
        .addReg(SrcReg, KillFlag)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::FSGNJ_S), DstReg)
        .addReg(SrcReg, KillFlag)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::FSGNJ_D), DstReg)
        .addReg(SrcReg, KillFlag)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::FPR32RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::FMV_W_X), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR32RegClass.contains(SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::FMV_X_W), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::FPR64RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");
    BuildMI(MBB, MBBI, DL, get(RISCV::FMV_D_X), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR64RegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");
    BuildMI(MBB, MBBI, DL, get(RISCV::FMV_X_D), DstReg)
        .addReg(SrcReg, KillFlag);
    return;
  }
  // ...
      TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
  // ...
}

void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI,
                                         Register VReg,
                                         MachineInstr::MIFlag Flags) const {
  // ...
  unsigned Opcode;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
                                                                : RISCV::SD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    if (!STI.is64Bit() && STI.hasStdExtZilsd() &&
        Alignment >= STI.getZilsdAlign()) {
      Opcode = RISCV::SD_RV32;
    } else {
      Opcode = RISCV::PseudoRV32ZdinxSD;
    }
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSH;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSW;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSD;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS1R_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS2R_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS4R_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS8R_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
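  // The PseudoVSPILL<NF>_M<LMUL> pseudos spill segment-register tuples:
  // e.g. PseudoVSPILL3_M2 stores a VRN3M2 tuple, i.e. NF = 3 fields of
  // LMUL = 2, occupying 3 * 2 = 6 architectural vector registers. The
  // (NF, LMUL) decoding near the end of this file recovers the same pair.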
// ...
  unsigned Opcode;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
                                                                : RISCV::LD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    if (!STI.is64Bit() && STI.hasStdExtZilsd() &&
        Alignment >= STI.getZilsdAlign()) {
      Opcode = RISCV::LD_RV32;
    } else {
      Opcode = RISCV::PseudoRV32ZdinxLD;
    }
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLH;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLW;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLD;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL1RE8_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL2RE8_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL4RE8_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL8RE8_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
  if (Ops.size() != 1 || Ops[0] != 1)
    return nullptr;

  switch (MI.getOpcode()) {
  // ...
    if (RISCVInstrInfo::isSEXT_W(MI))
      // ...
    if (RISCVInstrInfo::isZEXT_W(MI))
      // ...
    if (RISCVInstrInfo::isZEXT_B(MI))
      // ...
    break;
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
    // ...
  case RISCV::VMV_X_S: {
    // ...
    if (ST.getXLen() < (1U << Log2SEW))
      return nullptr;
    // ...
  }
  case RISCV::VFMV_F_S: {
    // ...
  }
  }
  // ...
  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(*LoadOpc),
                 // ...
// ...
  case RISCV::LB:
    return RISCV::PseudoCCLB;
  case RISCV::LBU:
    return RISCV::PseudoCCLBU;
  case RISCV::LH:
    return RISCV::PseudoCCLH;
  case RISCV::LHU:
    return RISCV::PseudoCCLHU;
  case RISCV::LW:
    return RISCV::PseudoCCLW;
  case RISCV::LWU:
    return RISCV::PseudoCCLWU;
  case RISCV::LD:
    return RISCV::PseudoCCLD;
  case RISCV::QC_E_LB:
    return RISCV::PseudoCCQC_E_LB;
  case RISCV::QC_E_LBU:
    return RISCV::PseudoCCQC_E_LBU;
  case RISCV::QC_E_LH:
    return RISCV::PseudoCCQC_E_LH;
  case RISCV::QC_E_LHU:
    return RISCV::PseudoCCQC_E_LHU;
  case RISCV::QC_E_LW:
    return RISCV::PseudoCCQC_E_LW;
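// Sketch of the transformation this mapping enables (short-forward-branch
// load folding): a conditional select whose chosen value is a load, roughly
//   %res = PseudoCCMOVGPR <cond ops>, %old, (LW %addr)
// can be rewritten into a single PseudoCCLW that commits the loaded value
// only when the condition holds, avoiding a separate branch-over sequence.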
  if (MI.getOpcode() != RISCV::PseudoCCMOVGPR)
    return nullptr;
  // ...
  if (!STI.hasShortForwardBranchILoad() || !PredOpc)
    return nullptr;
  // ...
  if (Ops.size() != 1 || (Ops[0] != 4 && Ops[0] != 5))
    return nullptr;

  bool Invert = Ops[0] == 5;
  // ...
  if (!MRI.constrainRegClass(DestReg, PreviousClass))
    return nullptr;
  // ...
                MI.getDebugLoc(), get(PredOpc), DestReg)
      .add({MI.getOperand(1), MI.getOperand(2)});
// ...
                              bool DstIsDead) const {
  // ...
  bool SrcRenamable = false;
  // ...
    bool LastItem = ++Num == Seq.size();
    // ...
    switch (Inst.getOpndKind()) {
    // ...
          .addReg(SrcReg, SrcRegState)
      // ...
          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)
      // ...
          .addReg(SrcReg, SrcRegState)
      // ...
    }
    // ...
    SrcRenamable = DstRenamable;
  case RISCV::CV_BEQIMM:
  case RISCV::QC_BEQI:
  case RISCV::QC_E_BEQI:
  case RISCV::NDS_BBC:
  case RISCV::NDS_BEQC:
    // ...
  case RISCV::QC_BNEI:
  case RISCV::QC_E_BNEI:
  case RISCV::CV_BNEIMM:
  case RISCV::NDS_BBS:
  case RISCV::NDS_BNEC:
    // ...
  case RISCV::QC_BLTI:
  case RISCV::QC_E_BLTI:
    // ...
  case RISCV::QC_BGEI:
  case RISCV::QC_E_BGEI:
    // ...
  case RISCV::QC_BLTUI:
  case RISCV::QC_E_BLTUI:
    // ...
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BGEUI:
    // ...
    llvm_unreachable("Unknown conditional branch");
  case RISCV::QC_MVEQ:
    return RISCV::QC_MVNE;
  case RISCV::QC_MVNE:
    return RISCV::QC_MVEQ;
  case RISCV::QC_MVLT:
    return RISCV::QC_MVGE;
  case RISCV::QC_MVGE:
    return RISCV::QC_MVLT;
  case RISCV::QC_MVLTU:
    return RISCV::QC_MVGEU;
  case RISCV::QC_MVGEU:
    return RISCV::QC_MVLTU;
  case RISCV::QC_MVEQI:
    return RISCV::QC_MVNEI;
  case RISCV::QC_MVNEI:
    return RISCV::QC_MVEQI;
  case RISCV::QC_MVLTI:
    return RISCV::QC_MVGEI;
  case RISCV::QC_MVGEI:
    return RISCV::QC_MVLTI;
  case RISCV::QC_MVLTUI:
    return RISCV::QC_MVGEUI;
  case RISCV::QC_MVGEUI:
    return RISCV::QC_MVLTUI;
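// Every QC_MV* conditional move is inverted by negating its predicate while
// keeping the operand order: e.g. QC_MVLT (signed less-than) pairs with
// QC_MVGE, and the immediate forms (QC_MV*I) invert the same way. This lets
// a select's polarity be flipped without materializing a new comparison.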
  switch (SelectOpc) {
  // ...
  case RISCV::Select_GPR_Using_CC_Imm5_Zibi:
    // ...
  case RISCV::Select_GPR_Using_CC_SImm5_CV:
    // ...
      return RISCV::CV_BEQIMM;
    // ...
      return RISCV::CV_BNEIMM;
    // ...
  case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
    // ...
      return RISCV::QC_BEQI;
    // ...
      return RISCV::QC_BNEI;
    // ...
      return RISCV::QC_BLTI;
    // ...
      return RISCV::QC_BGEI;
    // ...
  case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
    // ...
      return RISCV::QC_BLTUI;
    // ...
      return RISCV::QC_BGEUI;
    // ...
  case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
    // ...
      return RISCV::QC_E_BEQI;
    // ...
      return RISCV::QC_E_BNEI;
    // ...
      return RISCV::QC_E_BLTI;
    // ...
      return RISCV::QC_E_BGEI;
    // ...
  case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
    // ...
      return RISCV::QC_E_BLTUI;
    // ...
      return RISCV::QC_E_BGEUI;
    // ...
  case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
    // ...
      return RISCV::NDS_BBC;
    // ...
      return RISCV::NDS_BBS;
    // ...
  case RISCV::Select_GPR_Using_CC_UImm7_NDS:
    // ...
      return RISCV::NDS_BEQC;
    // ...
      return RISCV::NDS_BNEC;
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  // ...
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;
  // ...
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       // ...
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      // ...
    }
  }

  // ...
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      // ...
    }
    I = FirstUncondOrIndirectBr;
  }

  // ...
  if (I->getDesc().isIndirectBranch())
    return true;

  // ...
  if (I->isPreISelOpcode())
    return true;

  // ...
  if (NumTerminators > 2)
    return true;

  // ...
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    // ...
  }

  // ...
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    // ...
  }

  // ...
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
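  // In summary, the analysis above accepts exactly three shapes: a single
  // unconditional branch (TBB only), a single conditional branch
  // (TBB + Cond), and a conditional branch followed by an unconditional one
  // (TBB, Cond, FBB). Indirect branches, pre-ISel opcodes, and more than two
  // terminators are reported as unanalyzable.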
                                      int *BytesRemoved) const {
  // ...
  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    return 0;
  // ...
  I->eraseFromParent();
  // ...
  if (I == MBB.begin())
    return 1;
  // ...
  if (!I->getDesc().isConditionalBranch())
    return 1;
  // ...
  I->eraseFromParent();
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "RISC-V branch conditions have two components!");
  // ...
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  // ...
  assert(RestoreBB.empty() &&
         "restore block should be inserted for restoring clobbered registers");
1457 Register ScratchReg =
MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
1458 auto II =
MBB.end();
1464 RS->enterBasicBlockEnd(
MBB);
1466 if (
STI.hasStdExtZicfilp())
1467 RC = &RISCV::GPRX7RegClass;
1469 RS->scavengeRegisterBackwards(*RC,
MI.getIterator(),
1473 RS->setRegUsed(TmpGPR);
1478 TmpGPR =
STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
1480 if (
STI.hasStdExtZicfilp())
1484 if (FrameIndex == -1)
1489 TRI->eliminateFrameIndex(std::prev(
MI.getIterator()),
1492 MI.getOperand(1).setMBB(&RestoreBB);
1496 TRI->eliminateFrameIndex(RestoreBB.
back(),
1500 MRI.replaceRegWith(ScratchReg, TmpGPR);
1501 MRI.clearVirtRegs();
  assert((Cond.size() == 3) && "Invalid branch condition!");
  switch (Cond[0].getImm()) {
  // ...
  case RISCV::BEQ:
    Cond[0].setImm(RISCV::BNE);
    break;
  case RISCV::BEQI:
    Cond[0].setImm(RISCV::BNEI);
    break;
  case RISCV::BNE:
    Cond[0].setImm(RISCV::BEQ);
    break;
  case RISCV::BNEI:
    Cond[0].setImm(RISCV::BEQI);
    break;
  case RISCV::BLT:
    Cond[0].setImm(RISCV::BGE);
    break;
  case RISCV::BGE:
    Cond[0].setImm(RISCV::BLT);
    break;
  case RISCV::BLTU:
    Cond[0].setImm(RISCV::BGEU);
    break;
  case RISCV::BGEU:
    Cond[0].setImm(RISCV::BLTU);
    break;
  case RISCV::CV_BEQIMM:
    Cond[0].setImm(RISCV::CV_BNEIMM);
    break;
  case RISCV::CV_BNEIMM:
    Cond[0].setImm(RISCV::CV_BEQIMM);
    break;
  case RISCV::QC_BEQI:
    Cond[0].setImm(RISCV::QC_BNEI);
    break;
  case RISCV::QC_BNEI:
    Cond[0].setImm(RISCV::QC_BEQI);
    break;
  case RISCV::QC_BGEI:
    Cond[0].setImm(RISCV::QC_BLTI);
    break;
  case RISCV::QC_BLTI:
    Cond[0].setImm(RISCV::QC_BGEI);
    break;
  case RISCV::QC_BGEUI:
    Cond[0].setImm(RISCV::QC_BLTUI);
    break;
  case RISCV::QC_BLTUI:
    Cond[0].setImm(RISCV::QC_BGEUI);
    break;
  case RISCV::QC_E_BEQI:
    Cond[0].setImm(RISCV::QC_E_BNEI);
    break;
  case RISCV::QC_E_BNEI:
    Cond[0].setImm(RISCV::QC_E_BEQI);
    break;
  case RISCV::QC_E_BGEI:
    Cond[0].setImm(RISCV::QC_E_BLTI);
    break;
  case RISCV::QC_E_BLTI:
    Cond[0].setImm(RISCV::QC_E_BGEI);
    break;
  case RISCV::QC_E_BGEUI:
    Cond[0].setImm(RISCV::QC_E_BLTUI);
    break;
  case RISCV::QC_E_BLTUI:
    Cond[0].setImm(RISCV::QC_E_BGEUI);
    break;
  case RISCV::NDS_BBC:
    Cond[0].setImm(RISCV::NDS_BBS);
    break;
  case RISCV::NDS_BBS:
    Cond[0].setImm(RISCV::NDS_BBC);
    break;
  case RISCV::NDS_BEQC:
    Cond[0].setImm(RISCV::NDS_BNEC);
    break;
  case RISCV::NDS_BNEC:
    Cond[0].setImm(RISCV::NDS_BEQC);
    break;
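// Only the opcode stored in Cond[0] changes here; the two condition operands
// are untouched, since each pair above (BEQ/BNE, BLT/BGE, NDS_BBC/NDS_BBS,
// ...) keeps the same operand order and merely negates the predicate.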
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0) {
    Imm = MI->getOperand(2).getImm();
    return true;
  }
  // ...
  if (Reg == RISCV::X0) {
    Imm = 0;
    return true;
  }
  return Reg.isVirtual() && isLoadImm(MRI.getVRegDef(Reg), Imm);
}

// ...
  bool IsSigned = false;
  bool IsEquality = false;
  switch (MI.getOpcode()) {
  // ...
    MI.eraseFromParent();
    // ...

  auto searchConst = [&](int64_t C1) -> Register {
    // ...
    auto DefC1 = std::find_if(++II, E, [&](const MachineInstr &I) -> bool {
      // ...
             I.getOperand(0).getReg().isVirtual();
    });
    if (DefC1 != E)
      return DefC1->getOperand(0).getReg();
    return Register();
  };

  // ...
      MRI.hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
    if (Register RegZ = searchConst(C0 + 1)) {
      // ...
      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();
      return true;
    }
  }

  // ...
      MRI.hasOneUse(RHS.getReg())) {
    if (Register RegZ = searchConst(C0 - 1)) {
      // ...
      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();
      return true;
    }
  }
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
}

bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  unsigned XLen = STI.getXLen();
  // ...
  case RISCV::NDS_BBC:
  case RISCV::NDS_BBS:
  case RISCV::NDS_BEQC:
  case RISCV::NDS_BNEC:
    // ...
  case RISCV::CV_BEQIMM:
  case RISCV::CV_BNEIMM:
  case RISCV::QC_BEQI:
  case RISCV::QC_BNEI:
  case RISCV::QC_BGEI:
  case RISCV::QC_BLTI:
  case RISCV::QC_BLTUI:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BEQI:
  case RISCV::QC_E_BNEI:
  case RISCV::QC_E_BGEI:
  case RISCV::QC_E_BLTI:
  case RISCV::QC_E_BLTUI:
  case RISCV::QC_E_BGEUI:
    // ...
  case RISCV::PseudoBR:
    return isIntN(21, BrOffset);
  case RISCV::PseudoJump:
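  // For reference, the standard RISC-V encodings bound these checks:
  // conditional branches carry a 13-bit signed offset (+/-4 KiB), JAL-based
  // PseudoBR a 21-bit offset (+/-1 MiB), and the AUIPC+JALR PseudoJump pair
  // a 32-bit signed offset, which is roughly what the isIntN checks express.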
  case RISCV::ADD:
    return RISCV::PseudoCCADD;
  case RISCV::SUB:
    return RISCV::PseudoCCSUB;
  case RISCV::SLL:
    return RISCV::PseudoCCSLL;
  case RISCV::SRL:
    return RISCV::PseudoCCSRL;
  case RISCV::SRA:
    return RISCV::PseudoCCSRA;
  case RISCV::AND:
    return RISCV::PseudoCCAND;
  case RISCV::OR:
    return RISCV::PseudoCCOR;
  case RISCV::XOR:
    return RISCV::PseudoCCXOR;
  case RISCV::MAX:
    return RISCV::PseudoCCMAX;
  case RISCV::MAXU:
    return RISCV::PseudoCCMAXU;
  case RISCV::MIN:
    return RISCV::PseudoCCMIN;
  case RISCV::MINU:
    return RISCV::PseudoCCMINU;
  case RISCV::MUL:
    return RISCV::PseudoCCMUL;
  case RISCV::LUI:
    return RISCV::PseudoCCLUI;
  case RISCV::QC_LI:
    return RISCV::PseudoCCQC_LI;
  case RISCV::QC_E_LI:
    return RISCV::PseudoCCQC_E_LI;

  case RISCV::ADDI:
    return RISCV::PseudoCCADDI;
  case RISCV::SLLI:
    return RISCV::PseudoCCSLLI;
  case RISCV::SRLI:
    return RISCV::PseudoCCSRLI;
  case RISCV::SRAI:
    return RISCV::PseudoCCSRAI;
  case RISCV::ANDI:
    return RISCV::PseudoCCANDI;
  case RISCV::ORI:
    return RISCV::PseudoCCORI;
  case RISCV::XORI:
    return RISCV::PseudoCCXORI;

  case RISCV::ADDW:
    return RISCV::PseudoCCADDW;
  case RISCV::SUBW:
    return RISCV::PseudoCCSUBW;
  case RISCV::SLLW:
    return RISCV::PseudoCCSLLW;
  case RISCV::SRLW:
    return RISCV::PseudoCCSRLW;
  case RISCV::SRAW:
    return RISCV::PseudoCCSRAW;

  case RISCV::ADDIW:
    return RISCV::PseudoCCADDIW;
  case RISCV::SLLIW:
    return RISCV::PseudoCCSLLIW;
  case RISCV::SRLIW:
    return RISCV::PseudoCCSRLIW;
  case RISCV::SRAIW:
    return RISCV::PseudoCCSRAIW;

  case RISCV::ANDN:
    return RISCV::PseudoCCANDN;
  case RISCV::ORN:
    return RISCV::PseudoCCORN;
  case RISCV::XNOR:
    return RISCV::PseudoCCXNOR;

  case RISCV::NDS_BFOS:
    return RISCV::PseudoCCNDS_BFOS;
  case RISCV::NDS_BFOZ:
    return RISCV::PseudoCCNDS_BFOZ;
  }

  return RISCV::INSTRUCTION_LIST_END;
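// The mapping above is deliberately mechanical: each foldable ALU opcode has
// a PseudoCC* twin taking the same operands plus the branch condition, e.g.
// ADD -> PseudoCCADD and ADDI -> PseudoCCADDI. Opcodes without a twin fall
// through to INSTRUCTION_LIST_END, which callers treat as "not predicable".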
  if (!Reg.isVirtual())
    return nullptr;
  if (!MRI.hasOneNonDBGUse(Reg))
    return nullptr;
  // ...
  if (!STI.hasShortForwardBranchIMinMax() &&
      (MI->getOpcode() == RISCV::MAX || MI->getOpcode() == RISCV::MIN ||
       MI->getOpcode() == RISCV::MINU || MI->getOpcode() == RISCV::MAXU))
    return nullptr;
  // ...
  if (!STI.hasShortForwardBranchIMul() && MI->getOpcode() == RISCV::MUL)
    return nullptr;
  // ...
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0)
    // ...
    if (MO.isFI() || MO.isCPI() || MO.isJTI())
      return nullptr;
    // ...
    if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))
      return nullptr;
  // ...
  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(DontMoveAcrossStores))
    return nullptr;
bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   unsigned &TrueOp, unsigned &FalseOp,
                                   bool &Optimizable) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  // ...
  Cond.push_back(MI.getOperand(1));
  Cond.push_back(MI.getOperand(2));
  Cond.push_back(MI.getOperand(3));
  // ...
  Optimizable = STI.hasShortForwardBranchIALU();
  return false;
}
// ...
                                             bool PreferFalse) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  if (!STI.hasShortForwardBranchIALU())
    return nullptr;
  // ...
  bool Invert = !DefMI;
  // ...
  Register DestReg = MI.getOperand(0).getReg();
  // ...
  if (!MRI.constrainRegClass(DestReg, PreviousClass))
    return nullptr;
  // ...
  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");
  // ...
  NewMI.add(MI.getOperand(1));
  NewMI.add(MI.getOperand(2));
  // ...
  NewMI.add(FalseReg);
  // ...
  if (DefMI->getParent() != MI.getParent())
    // ...
  DefMI->eraseFromParent();
  if (MI.isMetaInstruction())
    return 0;

  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::INLINEASM ||
      Opcode == TargetOpcode::INLINEASM_BR) {
    // ...
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                              // ...
  }

  if (!MI.memoperands_empty()) {
    // ...
    if (STI.hasStdExtZca()) {
      if (isCompressibleInst(MI, STI))
        return 2;
      // ...
    }
  }

  if (Opcode == TargetOpcode::BUNDLE)
    return getInstBundleLength(MI);

  if (MI.getParent() && MI.getParent()->getParent()) {
    if (isCompressibleInst(MI, STI))
      return 2;
  }

  switch (Opcode) {
  case RISCV::PseudoMV_FPR16INX:
  case RISCV::PseudoMV_FPR32INX:
    // ...
    return STI.hasStdExtZca() ? 2 : 4;
  case TargetOpcode::STACKMAP:
    // ...
  case TargetOpcode::PATCHPOINT:
    // ...
  case TargetOpcode::STATEPOINT: {
    // ...
    return std::max(NumBytes, 8U);
  }
  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
  case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
  case TargetOpcode::PATCHABLE_TAIL_CALL: {
    // ...
    if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
        F.hasFnAttribute("patchable-function-entry")) {
      unsigned Num;
      if (F.getFnAttribute("patchable-function-entry")
              .getValueAsString()
              .getAsInteger(10, Num))
        return get(Opcode).getSize();
      // ...
      return (STI.hasStdExtZca() ? 2 : 4) * Num;
    }
    // ...
    return STI.is64Bit() ? 68 : 44;
  }
  default:
    return get(Opcode).getSize();
  }
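// Rough size model implemented above: compressible instructions count as
// 2 bytes when Zca is available and 4 otherwise; patchable entry/exit points
// scale with the requested NOP count at the same 2-or-4-byte granularity;
// everything else falls back to the static MCInstrDesc size.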
unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}
  const unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  // ...
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    // ...
    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
  // ...
    return (MI.getOperand(1).isReg() &&
            MI.getOperand(1).getReg() == RISCV::X0) ||
           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
  }
  return MI.isAsCheapAsAMove();
std::optional<DestSourcePair>
RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  // ...
  switch (MI.getOpcode()) {
  // ...
    if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).isReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(2)};
    if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
        MI.getOperand(1).isReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  // ...
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0)
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  // ...
    if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
        MI.getOperand(1).isReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  case RISCV::SH1ADD_UW:
  // ...
  case RISCV::SH2ADD_UW:
  // ...
  case RISCV::SH3ADD_UW:
    if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).isReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(2)};
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    // ...
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  }
  return std::nullopt;
  const auto &SchedModel = STI.getSchedModel();
  return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
             ? MachineTraceStrategy::TS_MinInstrCount
             : MachineTraceStrategy::TS_Local;
  // ...
      RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);
  // ...
    return RISCV::getNamedOperandIdx(MI->getOpcode(),
                                     RISCV::OpName::frm) < 0;
  // ...
         "New instructions require FRM whereas the old one does not have it");
  // ...
  for (auto *NewMI : InsInstrs) {
    // ...
    if (static_cast<unsigned>(RISCV::getNamedOperandIdx(
            NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
      continue;
bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
                                                       bool Invert) const {
#define OPCODE_LMUL_CASE(OPC)                                                  \
  case RISCV::OPC##_M1:                                                        \
  case RISCV::OPC##_M2:                                                        \
  case RISCV::OPC##_M4:                                                        \
  case RISCV::OPC##_M8:                                                        \
  case RISCV::OPC##_MF2:                                                       \
  case RISCV::OPC##_MF4:                                                       \
  case RISCV::OPC##_MF8

#define OPCODE_LMUL_MASK_CASE(OPC)                                             \
  case RISCV::OPC##_M1_MASK:                                                   \
  case RISCV::OPC##_M2_MASK:                                                   \
  case RISCV::OPC##_M4_MASK:                                                   \
  case RISCV::OPC##_M8_MASK:                                                   \
  case RISCV::OPC##_MF2_MASK:                                                  \
  case RISCV::OPC##_MF4_MASK:                                                  \
  case RISCV::OPC##_MF8_MASK

  // ...
    Opcode = *InvOpcode;
  // ...

#undef OPCODE_LMUL_MASK_CASE
#undef OPCODE_LMUL_CASE
bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,
                                             const MachineInstr &Prev) const {
  // ...
  const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
  // ...
  const uint64_t TSFlags = Desc.TSFlags;
  // ...
  auto checkImmOperand = [&](unsigned OpIdx) {
    // ...
  };
  auto checkRegOperand = [&](unsigned OpIdx) {
    // ...
  };
  // ...
  if (!checkRegOperand(1))
    return false;
  // ...
  bool SeenMI2 = false;
  for (auto End = MBB->rend(), It = It1; It != End; ++It) {
    // ...
    if (It->modifiesRegister(RISCV::V0, TRI)) {
      Register SrcReg = It->getOperand(1).getReg();
      // ...
      if (MI1VReg != SrcReg)
        return false;
      // ...
    }
  }
  assert(SeenMI2 && "Prev is expected to appear before Root");
bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
                                                  bool &Commuted) const {
  // ...
         "Expect the present of passthrough operand.");
  // ...
  Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
             areRVVInstsReassociable(Inst, *MI2);
  // ...
  return areRVVInstsReassociable(Inst, *MI1) &&
         (isVectorAssociativeAndCommutative(*MI1) ||
          isVectorAssociativeAndCommutative(*MI1, true)) &&
         // ...
}

// ...
  if (!isVectorAssociativeAndCommutative(Inst) &&
      !isVectorAssociativeAndCommutative(Inst, true))
    return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
  // ...
  MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  // ...
  MI2 = MRI.getUniqueVRegDef(Op2.getReg());
  // ...
  for (unsigned I = 0; I < 5; ++I)
    // ...
bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                            bool &Commuted) const {
  if (isVectorAssociativeAndCommutative(Inst) ||
      isVectorAssociativeAndCommutative(Inst, true))
    return hasReassociableVectorSibling(Inst, Commuted);
  // ...
  unsigned OperandIdx = Commuted ? 2 : 1;
  // ...
  int16_t InstFrmOpIdx =
      RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
  int16_t SiblingFrmOpIdx =
      RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);
  return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
         // ...
}

// ...
                                                 bool Invert) const {
  if (isVectorAssociativeAndCommutative(Inst, Invert))
    return true;
  // ...
    Opc = *InverseOpcode;
std::optional<unsigned>
RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
#define RVV_OPC_LMUL_CASE(OPC, INV)                                            \
  case RISCV::OPC##_M1:                                                        \
    return RISCV::INV##_M1;                                                    \
  case RISCV::OPC##_M2:                                                        \
    return RISCV::INV##_M2;                                                    \
  case RISCV::OPC##_M4:                                                        \
    return RISCV::INV##_M4;                                                    \
  case RISCV::OPC##_M8:                                                        \
    return RISCV::INV##_M8;                                                    \
  case RISCV::OPC##_MF2:                                                       \
    return RISCV::INV##_MF2;                                                   \
  case RISCV::OPC##_MF4:                                                       \
    return RISCV::INV##_MF4;                                                   \
  case RISCV::OPC##_MF8:                                                       \
    return RISCV::INV##_MF8

#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)                                       \
  case RISCV::OPC##_M1_MASK:                                                   \
    return RISCV::INV##_M1_MASK;                                               \
  case RISCV::OPC##_M2_MASK:                                                   \
    return RISCV::INV##_M2_MASK;                                               \
  case RISCV::OPC##_M4_MASK:                                                   \
    return RISCV::INV##_M4_MASK;                                               \
  case RISCV::OPC##_M8_MASK:                                                   \
    return RISCV::INV##_M8_MASK;                                               \
  case RISCV::OPC##_MF2_MASK:                                                  \
    return RISCV::INV##_MF2_MASK;                                              \
  case RISCV::OPC##_MF4_MASK:                                                  \
    return RISCV::INV##_MF4_MASK;                                              \
  case RISCV::OPC##_MF8_MASK:                                                  \
    return RISCV::INV##_MF8_MASK

  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::FADD_H:
    return RISCV::FSUB_H;
  case RISCV::FADD_S:
    return RISCV::FSUB_S;
  case RISCV::FADD_D:
    return RISCV::FSUB_D;
  case RISCV::FSUB_H:
    return RISCV::FADD_H;
  case RISCV::FSUB_S:
    return RISCV::FADD_S;
  case RISCV::FSUB_D:
    return RISCV::FADD_D;
  // ...
  }

#undef RVV_OPC_LMUL_MASK_CASE
#undef RVV_OPC_LMUL_CASE
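// getInverseOpcode feeds reassociation: a chain like (a + b) - c may be
// rewritten using the inverse pairing above, e.g. FADD_S <-> FSUB_S, and the
// RVV macro cases extend the same pairing across every LMUL and masked
// variant of the vector pseudo families.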
                                bool DoRegPressureReduce) {
  // ...
  if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;
  // ...

// ...
                                 bool DoRegPressureReduce) {
  // ...
                           DoRegPressureReduce)) {
    // ...
  }
  // ...
                           DoRegPressureReduce)) {
    // ...
  }
  // ...

// ...
                              bool DoRegPressureReduce) {
  // ...

// ...
                       unsigned CombineOpc) {
  // ...
  if (!MI || MI->getParent() != &MBB || MI->getOpcode() != CombineOpc)
    return false;
  // ...
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;
// ...
                                       unsigned OuterShiftAmt) {
  // ...
  if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
    return false;
  // ...
  case RISCV::SH1ADD_UW:
    return 1;
  case RISCV::SH2ADD_UW:
    return 2;
  case RISCV::SH3ADD_UW:
    return 3;
  // ...

// ...
                                            bool DoRegPressureReduce) const {
  // ...
                                 DoRegPressureReduce);
    // ...
    return RISCV::FMADD_H;
    // ...
    return RISCV::FMADD_S;
    // ...
    return RISCV::FMADD_D;
  // ...
  bool Mul1IsKill = Mul1.isKill();
  bool Mul2IsKill = Mul2.isKill();
  bool AddendIsKill = Addend.isKill();
  // ...
  BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
      // ...
  assert(OuterShiftAmt != 0 && "Unexpected opcode");
  // ...
  assert(InnerShiftAmt >= OuterShiftAmt && "Unexpected shift amount");

  unsigned InnerOpc;
  switch (InnerShiftAmt - OuterShiftAmt) {
  default:
    llvm_unreachable("Unexpected shift amount");
  case 0:
    InnerOpc = RISCV::ADD;
    break;
  case 1:
    InnerOpc = RISCV::SH1ADD;
    break;
  case 2:
    InnerOpc = RISCV::SH2ADD;
    break;
  case 3:
    InnerOpc = RISCV::SH3ADD;
    break;
  }
  // ...
  Register NewVR = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // ...
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
  // ...
                          DelInstrs, InstrIdxForVirtReg);
  for (const auto &[Index, Operand] : enumerate(Desc.operands())) {
    // ...
    unsigned OpType = Operand.OperandType;
    // ...
        ErrInfo = "Expected an immediate operand.";
        return false;
      // ...
      int64_t Imm = MO.getImm();
      // ...
      bool Ok;
      switch (OpType) {
      // ...
#define CASE_OPERAND_UIMM(NUM)                                                 \
  case RISCVOp::OPERAND_UIMM##NUM:                                             \
    Ok = isUInt<NUM>(Imm);                                                     \
    break;
#define CASE_OPERAND_SIMM(NUM)                                                 \
  case RISCVOp::OPERAND_SIMM##NUM:                                             \
    Ok = isInt<NUM>(Imm);                                                      \
    break;
      // ...
        Ok = Imm >= 1 && Imm <= 32;
        break;
      // ...
        Ok = (isUInt<5>(Imm) && Imm != 0) || Imm == -1;
        break;
      // ...
        Ok = Imm >= -15 && Imm <= 16;
        break;
      // ...
        Ok = Ok && Imm != 0;
        break;
      // ...
        Ok = (isUInt<5>(Imm) && Imm != 0) || (Imm >= 0xfffe0 && Imm <= 0xfffff);
        break;
      // ...
        Ok = Imm >= 0 && Imm <= 10;
        break;
      // ...
        Ok = Imm >= 0 && Imm <= 7;
        break;
      // ...
        Ok = Imm >= 1 && Imm <= 10;
        break;
      // ...
        Ok = Imm >= 2 && Imm <= 14;
        break;
      // ...
        Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
        break;
      // ...
        Ok = Imm == 1 || Imm == 2 || Imm == 4;
        break;
      }
      if (!Ok) {
        ErrInfo = "Invalid immediate";
        return false;
      }
      // ...
        ErrInfo = "Expected a non-register operand.";
        return false;
      // ...
        ErrInfo = "Invalid immediate";
        return false;
      // ...
        ErrInfo = "Expected a non-register operand.";
        return false;
      // ...
        ErrInfo = "Invalid immediate";
        return false;
      // ...
        ErrInfo = "Expected a non-register operand.";
        return false;
      // ...
        ErrInfo = "Invalid immediate";
        return false;
      // ...
      int64_t Imm = MO.getImm();
      // ...
        ErrInfo = "Invalid immediate";
        return false;
      // ...
    } else if (!MO.isReg()) {
      ErrInfo = "Expected a register or immediate operand.";
      return false;
    }
    if (!Op.isImm() && !Op.isReg()) {
      ErrInfo = "Invalid operand type for VL operand";
      return false;
    }
    if (Op.isReg() && Op.getReg().isValid()) {
      // ...
      auto *RC = MRI.getRegClass(Op.getReg());
      if (!RISCV::GPRNoX0RegClass.hasSubClassEq(RC)) {
        ErrInfo = "Invalid register class for VL operand";
        return false;
      }
    }
    // ...
      ErrInfo = "VL operand w/o SEW operand?";
      return false;
    // ...
    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "SEW value expected to be an immediate";
      return false;
    }
    // ...
      ErrInfo = "Unexpected SEW value";
      return false;
    // ...
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
    if (!RISCVVType::isValidSEW(SEW)) {
      ErrInfo = "Unexpected SEW value";
      return false;
    }
    // ...
    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "Policy operand expected to be an immediate";
      return false;
    }
    // ...
      ErrInfo = "Invalid Policy Value";
      return false;
    // ...
      ErrInfo = "policy operand w/o VL operand?";
      return false;
    // ...
    unsigned UseOpIdx;
    if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
      ErrInfo = "policy operand w/o tied operand?";
      return false;
    }
  // ...
      !MI.readsRegister(RISCV::FRM, nullptr)) {
    ErrInfo = "dynamic rounding mode should read FRM";
    return false;
  }
  case RISCV::LD_RV32:
    // ...
  case RISCV::SD_RV32:
    // ...
  int64_t NewOffset = OldOffset + Disp;
  // ...
         "Addressing mode not supported for folding");
  // ...
  case RISCV::LD_RV32:
    // ...
  case RISCV::SD_RV32:
    // ...
  OffsetIsScalable = false;
  // ...
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
    return true;
  // ...
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;
  // ...
  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  // ...
  return Base1 == Base2;
                                           int64_t Offset2,
                                           bool OffsetIsScalable2,
                                           unsigned ClusterSize,
                                           unsigned NumBytes) const {
  // ...
  if (!BaseOps1.empty() && !BaseOps2.empty()) {
    // ...
  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
    // ...
  }

  unsigned CacheLineSize =
      BaseOps1.front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
  // ...
  return ClusterSize <= 4 && std::abs(Offset1 - Offset2) < CacheLineSize;
  int64_t OffsetA = 0, OffsetB = 0;
  // ...
  int LowOffset = std::min(OffsetA, OffsetB);
  int HighOffset = std::max(OffsetA, OffsetB);
  LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
  if (LowWidth.hasValue() &&
      LowOffset + (int)LowWidth.getValue() <= HighOffset)
    return true;
std::pair<unsigned, unsigned>
RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
  return std::make_pair(TF & Mask, TF & ~Mask);
}

ArrayRef<std::pair<unsigned, const char *>>
RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace RISCVII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"},
      {MO_TLSDESC_HI, "riscv-tlsdesc-hi"},
      {MO_TLSDESC_LOAD_LO, "riscv-tlsdesc-load-lo"},
      {MO_TLSDESC_ADD_LO, "riscv-tlsdesc-add-lo"},
      {MO_TLSDESC_CALL, "riscv-tlsdesc-call"}};
  return ArrayRef(TargetFlags);
}
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;
  // ...

// ...
                                          unsigned &Flags) const {
  // ...

// ...
  return F.getFnAttribute("fentry-call").getValueAsBool() ||
         F.hasFnAttribute("patchable-function-entry");
}

// ...
  return MI.readsRegister(RegNo, TRI) ||
         MI.getDesc().hasImplicitUseOfPhysReg(RegNo);
}

// ...
  return MI.modifiesRegister(RegNo, TRI) ||
         MI.getDesc().hasImplicitDefOfPhysReg(RegNo);
}

// ...
  if (!MBB.back().isReturn())
    return false;
  // ...

// ...
  if (C.back().isReturn()) {
    // ...
           "The candidate who uses return instruction must be outlined "
           // ...
  }
  // ...
  return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
std::optional<std::unique_ptr<outliner::OutlinedFunction>>
RISCVInstrInfo::getOutliningCandidateInfo(
    const MachineModuleInfo &MMI,
    std::vector<outliner::Candidate> &RepeatedSequenceLocs,
    unsigned MinRepeats) const {
  // ...
  if (RepeatedSequenceLocs.size() < MinRepeats)
    return std::nullopt;
  // ...
  unsigned InstrSizeCExt =
      // ...
  unsigned CallOverhead = 0, FrameOverhead = 0;
  // ...
  unsigned CFICount = 0;
  for (auto &I : Candidate) {
    if (I.isCFIInstruction())
      CFICount++;
  }
  // ...
  std::vector<MCCFIInstruction> CFIInstructions =
      C.getMF()->getFrameInstructions();
  if (CFICount > 0 && CFICount != CFIInstructions.size())
    return std::nullopt;
  // ...
  CallOverhead = 4 + InstrSizeCExt;
  // ...
  FrameOverhead = InstrSizeCExt;
  // ...
  return std::nullopt;
  // ...
  for (auto &C : RepeatedSequenceLocs)
    C.setCallInfo(MOCI, CallOverhead);

  unsigned SequenceSize = 0;
  for (auto &MI : Candidate)
    SequenceSize += getInstSizeInBytes(MI);

  return std::make_unique<outliner::OutlinedFunction>(
      RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
                                                       unsigned Flags) const {
  // ...
  const TargetRegisterInfo *TRI =
      MBB->getParent()->getSubtarget().getRegisterInfo();
  const auto &F = MI.getMF()->getFunction();
  // ...
  if (MI.isCFIInstruction())
    // ...
  for (const auto &MO : MI.operands()) {
    // ...
        (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
         F.hasSection() || F.getSectionPrefix()))
      // ...
  }
  // ...
  MBB.addLiveIn(RISCV::X5);
  // ...
      .addGlobalAddress(M.getNamedValue(MF.getName()),
                        // ...
      .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
                        // ...
3830 if (
MI.getOpcode() == RISCV::ADDI &&
MI.getOperand(1).isReg() &&
3831 MI.getOperand(2).isImm())
3832 return RegImmPair{
MI.getOperand(1).getReg(),
MI.getOperand(2).getImm()};
3834 return std::nullopt;
  std::string GenericComment =
      TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
  if (!GenericComment.empty())
    return GenericComment;

  // ...
    return std::string();
  // ...
    return std::string();

  std::string Comment;
  raw_string_ostream OS(Comment);
  // ...
  switch (OpInfo.OperandType) {
  // ...
    unsigned Imm = Op.getImm();
    // ...
    unsigned Imm = Op.getImm();
    // ...
    unsigned Imm = Op.getImm();
    // ...
    unsigned Log2SEW = Op.getImm();
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
    // ...
    unsigned Policy = Op.getImm();
    // ...
           "Invalid Policy Value");
#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL)                                  \
  RISCV::Pseudo##OP##_##LMUL

#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)                                    \
  RISCV::Pseudo##OP##_##LMUL##_MASK

#define CASE_RVV_OPCODE_LMUL(OP, LMUL)                                         \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL):                                       \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)

#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP)                                       \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8):                                        \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4):                                   \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2):                                   \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1):                                    \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2):                                    \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_UNMASK(OP)                                             \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP):                                            \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_MASK_WIDEN(OP)                                         \
  CASE_RVV_OPCODE_MASK_LMUL(OP, MF8):                                          \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4):                                     \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2):                                     \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M1):                                      \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M2):                                      \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_MASK(OP)                                               \
  CASE_RVV_OPCODE_MASK_WIDEN(OP):                                              \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_WIDEN(OP)                                              \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP):                                            \
  case CASE_RVV_OPCODE_MASK_WIDEN(OP)

#define CASE_RVV_OPCODE(OP)                                                    \
  CASE_RVV_OPCODE_UNMASK(OP):                                                  \
  case CASE_RVV_OPCODE_MASK(OP)
#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL)                                 \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VMA_OPCODE_LMULS(OP, TYPE)                                        \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8):                                       \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4):                                  \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2):                                  \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW)                           \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)                               \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW):                             \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW):                             \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)                              \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW):                                 \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW)                              \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW):                                 \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_VV(OP)                                                \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16):                                     \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16):                          \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32):                                \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)

#define CASE_VFMA_SPLATS(OP)                                                   \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16):                                 \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16):                      \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32):                            \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                           unsigned &SrcOpIdx1,
                                           unsigned &SrcOpIdx2) const {
  const MCInstrDesc &Desc = MI.getDesc();
  if (!Desc.isCommutable())
    return false;

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ:
    // ...
    if (MI.getOperand(2).getReg() == RISCV::X0)
      return false;
    // ...
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  case RISCV::QC_SELECTIEQ:
  case RISCV::QC_SELECTINE:
  case RISCV::QC_SELECTIIEQ:
  case RISCV::QC_SELECTIINE:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  case RISCV::QC_MVEQ:
  case RISCV::QC_MVNE:
  case RISCV::QC_MVLT:
  case RISCV::QC_MVGE:
  case RISCV::QC_MVLTU:
  case RISCV::QC_MVGEU:
  case RISCV::QC_MVEQI:
  case RISCV::QC_MVNEI:
  case RISCV::QC_MVLTI:
  case RISCV::QC_MVGEI:
  case RISCV::QC_MVLTUI:
  case RISCV::QC_MVGEUI:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4);
  case RISCV::TH_MULA:
  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULS:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:
    // ...
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR:
    // ...
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
  // ...
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
  // ...
    unsigned CommutableOpIdx1 = 1;
    unsigned CommutableOpIdx2 = 3;
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                              // ...
  // ...
    if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
      return false;
    if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
      return false;
    // ...
    if (SrcOpIdx1 != CommuteAnyOperandIndex &&
        SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
      return false;
    // ...
    if (SrcOpIdx1 == CommuteAnyOperandIndex ||
        SrcOpIdx2 == CommuteAnyOperandIndex) {
      // ...
      unsigned CommutableOpIdx1 = SrcOpIdx1;
      if (SrcOpIdx1 == SrcOpIdx2) {
        // ...
        CommutableOpIdx1 = 1;
      } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
        CommutableOpIdx1 = SrcOpIdx2;
      }
      // ...
      unsigned CommutableOpIdx2;
      if (CommutableOpIdx1 != 1) {
        // ...
        CommutableOpIdx2 = 1;
      } else {
        Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
        // ...
        if (Op1Reg != MI.getOperand(2).getReg())
          CommutableOpIdx2 = 2;
        else
          CommutableOpIdx2 = 3;
      }
      // ...
      if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                                // ...
    }
#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL)                \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL:                                \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL;                             \
    break;

#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW)          \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW:                        \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW;                     \
    break;

#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)              \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)             \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW)             \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)                               \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16)                     \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16)         \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32)                     \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                           \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16)     \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
// ...
                                                     unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
                                                            : RISCV::TH_MVEQZ));
    // ...
  }
  case RISCV::QC_SELECTIEQ:
  case RISCV::QC_SELECTINE:
  case RISCV::QC_SELECTIIEQ:
  case RISCV::QC_SELECTIINE:
    // ...
  case RISCV::QC_MVEQ:
  case RISCV::QC_MVNE:
  case RISCV::QC_MVLT:
  case RISCV::QC_MVGE:
  case RISCV::QC_MVLTU:
  case RISCV::QC_MVGEU:
  case RISCV::QC_MVEQI:
  case RISCV::QC_MVNEI:
  case RISCV::QC_MVLTI:
  case RISCV::QC_MVGEI:
  case RISCV::QC_MVLTUI:
  case RISCV::QC_MVGEUI: {
    auto &WorkingMI = cloneIfNew(MI);
    // ...
  }
  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR: {
    // ...
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(CC);
    // ...
  }
  // ...
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
    // ...
    switch (MI.getOpcode()) {
    // ...
    }
    // ...
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    // ...
  // ...
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    // ...
    if (OpIdx1 == 3 || OpIdx2 == 3) {
      // ...
      switch (MI.getOpcode()) {
      // ...
      }
      // ...
      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
      // ...
    }
#undef CASE_VMA_CHANGE_OPCODE_COMMON
#undef CASE_VMA_CHANGE_OPCODE_LMULS
#undef CASE_VFMA_CHANGE_OPCODE_COMMON
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
#undef CASE_VFMA_CHANGE_OPCODE_VV
#undef CASE_VFMA_CHANGE_OPCODE_SPLATS

#undef CASE_RVV_OPCODE_UNMASK_LMUL
#undef CASE_RVV_OPCODE_MASK_LMUL
#undef CASE_RVV_OPCODE_LMUL
#undef CASE_RVV_OPCODE_UNMASK_WIDEN
#undef CASE_RVV_OPCODE_UNMASK
#undef CASE_RVV_OPCODE_MASK_WIDEN
#undef CASE_RVV_OPCODE_MASK
#undef CASE_RVV_OPCODE_WIDEN
#undef CASE_RVV_OPCODE

#undef CASE_VMA_OPCODE_COMMON
#undef CASE_VMA_OPCODE_LMULS
#undef CASE_VFMA_OPCODE_COMMON
#undef CASE_VFMA_OPCODE_LMULS_M1
#undef CASE_VFMA_OPCODE_LMULS_MF2
#undef CASE_VFMA_OPCODE_LMULS_MF4
#undef CASE_VFMA_OPCODE_VV
#undef CASE_VFMA_SPLATS
  switch (MI.getOpcode()) {
  // ...
    if (MI.getOperand(1).getReg() == RISCV::X0)
      commuteInstruction(MI);
    // ...
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    // ...
    if (MI.getOpcode() == RISCV::XOR &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      MI.getOperand(1).setReg(RISCV::X0);
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ...
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    // ...
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ...
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDIW));
      return true;
    }
    break;
  // ...
    if (MI.getOperand(1).getReg() == RISCV::X0)
      commuteInstruction(MI);
    // ...
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDIW));
      return true;
    }
    break;
  case RISCV::SH1ADD:
  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD:
  case RISCV::SH3ADD_UW:
    // ...
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.removeOperand(1);
      // ...
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    // ...
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.removeOperand(2);
      unsigned Opc = MI.getOpcode();
      if (Opc == RISCV::SH1ADD_UW || Opc == RISCV::SH2ADD_UW ||
          Opc == RISCV::SH3ADD_UW) {
        // ...
        MI.setDesc(get(RISCV::SLLI_UW));
        return true;
      }
      // ...
      MI.setDesc(get(RISCV::SLLI));
      return true;
    }
    break;
  // ...
    if (MI.getOperand(1).getReg() == RISCV::X0 ||
        MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(1).setReg(RISCV::X0);
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ...
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ...
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ...
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ...
  case RISCV::SLLI_UW:
    // ...
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ...
    if (MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    // ...
    if (MI.getOpcode() == RISCV::ADD_UW &&
        MI.getOperand(1).getReg() == RISCV::X0) {
      MI.removeOperand(1);
      // ...
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ...
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(MI.getOperand(2).getImm() != 0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
    // ...
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      // ...
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ...
    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
      return true;
    }
    break;
  // ...
    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MachineOperand MO0 = MI.getOperand(0);
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      // ...
    }
    break;
  // ...
    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MachineOperand MO0 = MI.getOperand(0);
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      MI.setDesc(get(RISCV::BNE));
      // ...
    }
    break;
  // ...
    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MachineOperand MO0 = MI.getOperand(0);
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      MI.setDesc(get(RISCV::BEQ));
      // ...
    }
    break;
  }
#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL)                                    \
  RISCV::PseudoV##OP##_##LMUL##_TIED

#define CASE_WIDEOP_OPCODE_LMULS(OP)                                           \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF8):                                          \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF4):                                     \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2):                                     \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M1):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M2):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL)                             \
  case RISCV::PseudoV##OP##_##LMUL##_TIED:                                     \
    NewOpc = RISCV::PseudoV##OP##_##LMUL;                                      \
    break;

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)

#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW)                            \
  RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED

#define CASE_FP_WIDEOP_OPCODE_LMULS(OP)                                        \
  CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16):                                  \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16):                             \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32):                             \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32)

#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW)                     \
  case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED:                             \
    NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW;                              \
    break;

#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                 \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32)

#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP)                                    \
  CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16):                                  \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16):                             \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)

#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)
MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
                                                    LiveVariables *LV,
                                                    LiveIntervals *LIS) const {
  MachineInstrBuilder MIB;
  switch (MI.getOpcode()) {
  default:
    return nullptr;
  case CASE_FP_WIDEOP_OPCODE_LMULS(FWADD_WV):
  case CASE_FP_WIDEOP_OPCODE_LMULS(FWSUB_WV): {
    assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
           MI.getNumExplicitOperands() == 7 &&
           "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
    // If the tail policy is undisturbed we can't convert.
    if ((MI.getOperand(RISCVII::getVecPolicyOpNum(MI.getDesc())).getImm() &
         1) == 0)
      return nullptr;
    // clang-format off
    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
    CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(FWADD_WV)
    CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(FWSUB_WV)
    }
    // clang-format on

    MachineBasicBlock &MBB = *MI.getParent();
    MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
              .add(MI.getOperand(0))
              .addReg(MI.getOperand(0).getReg(), RegState::Undef)
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4))
              .add(MI.getOperand(5))
              .add(MI.getOperand(6));
    break;
  }
  case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
    // If the tail policy is undisturbed we can't convert.
    assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
           MI.getNumExplicitOperands() == 6);
    if ((MI.getOperand(5).getImm() & 1) == 0)
      return nullptr;

    // clang-format off
    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADD_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADDU_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUB_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUBU_WV)
    }
    // clang-format on

    MachineBasicBlock &MBB = *MI.getParent();
    MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
              .add(MI.getOperand(0))
              .addReg(MI.getOperand(0).getReg(), RegState::Undef)
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4))
              .add(MI.getOperand(5));
    break;
  }
  }
  MIB.copyImplicitOps(MI);

  if (LV) {
    unsigned NumOps = MI.getNumOperands();
    for (unsigned I = 1; I < NumOps; ++I) {
      MachineOperand &Op = MI.getOperand(I);
      if (Op.isReg() && Op.isKill())
        LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
    }
  }

  if (LIS) {
    SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);

    if (MI.getOperand(0).isEarlyClobber()) {
      // Use operand 1 was tied to early-clobber def operand 0, so its live
      // interval could have ended at an early-clobber slot. Now they are not
      // tied we need to update it to the normal register slot.
      LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
      LiveRange::Segment *S = LI.getSegmentContaining(Idx);
      if (S->end == Idx.getRegSlot(true))
        S->end = Idx.getRegSlot();
    }
  }

  return MIB;
}
#undef CASE_WIDEOP_OPCODE_COMMON
#undef CASE_WIDEOP_OPCODE_LMULS
#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
#undef CASE_FP_WIDEOP_OPCODE_COMMON
#undef CASE_FP_WIDEOP_OPCODE_LMULS
#undef CASE_FP_WIDEOP_OPCODE_LMULS_ALT
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT
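// mulImm multiplies DestReg by a constant Amount in place, preferring shift
// and add based sequences over a real multiply. As a sketch of the Zba path:
// Amount == 6 decomposes as 3 << 1, so the expansion is
//   slli   rd, rd, 1      // rd *= 2
//   sh1add rd, rd, rd     // rd = (rd << 1) + rd, i.e. rd *= 3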
void RISCVInstrInfo::mulImm(MachineFunction &MF, MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator II, const DebugLoc &DL,
                            Register DestReg, uint32_t Amount,
                            MachineInstr::MIFlag Flag) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  if (llvm::has_single_bit<uint32_t>(Amount)) {
    uint32_t ShiftAmount = Log2_32(Amount);
    if (ShiftAmount == 0)
      return;
    BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(ShiftAmount)
        .setMIFlag(Flag);
  } else if (int ShXAmount, ShiftAmount;
             STI.hasStdExtZba() &&
             (ShXAmount = isShifted359(Amount, ShiftAmount)) != 0) {
    // Amount is 3/5/9 times a power of two: use SHXADD, plus SLLI if shifted.
    unsigned Opc;
    switch (ShXAmount) {
    default:
      llvm_unreachable("unexpected result of isShifted359");
    case 1:
      Opc = RISCV::SH1ADD;
      break;
    case 2:
      Opc = RISCV::SH2ADD;
      break;
    case 3:
      Opc = RISCV::SH3ADD;
      break;
    }
    if (ShiftAmount)
      BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
          .addReg(DestReg, RegState::Kill)
          .addImm(ShiftAmount)
          .setMIFlag(Flag);
    BuildMI(MBB, II, DL, get(Opc), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addReg(DestReg)
        .setMIFlag(Flag);
  } else if (llvm::has_single_bit<uint32_t>(Amount - 1)) {
    // Amount == 2^N + 1: shift then add.
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
        .addReg(DestReg)
        .addImm(Log2_32(Amount - 1))
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
        .addReg(ScaledRegister, RegState::Kill)
        .addReg(DestReg, RegState::Kill)
        .setMIFlag(Flag);
  } else if (llvm::has_single_bit<uint32_t>(Amount + 1)) {
    // Amount == 2^N - 1: shift then subtract.
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
        .addReg(DestReg)
        .addImm(Log2_32(Amount + 1))
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, get(RISCV::SUB), DestReg)
        .addReg(ScaledRegister, RegState::Kill)
        .addReg(DestReg, RegState::Kill)
        .setMIFlag(Flag);
  } else if (STI.hasStdExtZmmul()) {
    Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    movImm(MBB, II, DL, N, Amount, Flag);
    BuildMI(MBB, II, DL, get(RISCV::MUL), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addReg(N, RegState::Kill)
        .setMIFlag(Flag);
  } else {
    // No multiply available: walk the set bits of Amount, shifting DestReg
    // and summing the partial products into an accumulator.
    Register Acc;
    uint32_t PrevShiftAmount = 0;
    for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
      if (Amount & (1U << ShiftAmount)) {
        if (ShiftAmount)
          BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
              .addReg(DestReg, RegState::Kill)
              .addImm(ShiftAmount - PrevShiftAmount)
              .setMIFlag(Flag);
        if (Amount >> (ShiftAmount + 1)) {
          // If we don't have an accumulator yet, create it and copy DestReg.
          if (!Acc) {
            Acc = MRI.createVirtualRegister(&RISCV::GPRRegClass);
            BuildMI(MBB, II, DL, get(TargetOpcode::COPY), Acc)
                .addReg(DestReg)
                .setMIFlag(Flag);
          } else {
            BuildMI(MBB, II, DL, get(RISCV::ADD), Acc)
                .addReg(Acc, RegState::Kill)
                .addReg(DestReg)
                .setMIFlag(Flag);
          }
        }
        PrevShiftAmount = ShiftAmount;
      }
    }
    assert(Acc && "Expected valid accumulator");
    BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addReg(Acc, RegState::Kill)
        .setMIFlag(Flag);
  }
}
ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
RISCVInstrInfo::getSerializableMachineMemOperandTargetFlags() const {
  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
      {{MONontemporalBit0, "riscv-nontemporal-domain-bit-0"},
       {MONontemporalBit1, "riscv-nontemporal-domain-bit-1"}};
  return ArrayRef(TargetFlags);
}

unsigned RISCVInstrInfo::getTailDuplicateSize(CodeGenOptLevel OptLevel) const {
  return OptLevel >= CodeGenOptLevel::Aggressive
             ? STI.getTailDupAggressiveThreshold()
             : 2;
}

bool RISCV::isRVVSpill(const MachineInstr &MI) {
  // RVV lacks any support for immediate addressing for stack addresses, so be
  // conservative.
  unsigned Opcode = MI.getOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
      !getLMULForRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
    return false;
  return true;
}

bool RISCV::isVectorCopy(const TargetRegisterInfo *TRI,
                         const MachineInstr &MI) {
  return MI.isCopy() && MI.getOperand(0).getReg().isPhysical() &&
         RISCVRegisterInfo::isRVVRegClass(
             TRI->getMinimalPhysRegClass(MI.getOperand(0).getReg()));
}
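// Segment spills/reloads (Zvlsseg) encode a field count NF and an LMUL in the
// pseudo's name; decoding them tells the frame lowering how many register
// groups of which size a single pseudo moves, e.g. PseudoVSPILL3_M2 spills
// NF=3 groups of LMUL=2.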
std::optional<std::pair<unsigned, unsigned>>
RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVRELOAD2_M1:
    return std::make_pair(2u, 1u);
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVRELOAD2_M2:
    return std::make_pair(2u, 2u);
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVRELOAD2_M4:
    return std::make_pair(2u, 4u);
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVRELOAD3_M1:
    return std::make_pair(3u, 1u);
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVRELOAD3_M2:
    return std::make_pair(3u, 2u);
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVRELOAD4_M1:
    return std::make_pair(4u, 1u);
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVRELOAD4_M2:
    return std::make_pair(4u, 2u);
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVRELOAD5_M1:
    return std::make_pair(5u, 1u);
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVRELOAD6_M1:
    return std::make_pair(6u, 1u);
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVRELOAD7_M1:
    return std::make_pair(7u, 1u);
  case RISCV::PseudoVSPILL8_M1:
  case RISCV::PseudoVRELOAD8_M1:
    return std::make_pair(8u, 1u);
  }
}
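// The machine combiner may only reassociate FP instructions that agree on
// their static rounding mode, so compare the frm operands of both.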
bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
  int16_t MI1FrmOpIdx =
      RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
  int16_t MI2FrmOpIdx =
      RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
  if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
    return false;
  MachineOperand FrmOp1 = MI1.getOperand(MI1FrmOpIdx);
  MachineOperand FrmOp2 = MI2.getOperand(MI2FrmOpIdx);
  return FrmOp1.getImm() == FrmOp2.getImm();
}
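// For RVV instructions with a GPR scalar operand, report how many low bits of
// the scalar are actually demanded: shift-like operations use lg2(SEW) bits,
// narrowing shifts lg2(2*SEW), and ordinary arithmetic SEW bits. Peepholes
// use this to remove redundant sign/zero extensions of the scalar operand.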
std::optional<unsigned>
RISCV::getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW) {
  switch (Opcode) {
  default:
    return std::nullopt;

  // Single-width and scaling shifts, rotates: only the low lg2(SEW) bits of
  // the scalar shift amount are used.
  case RISCV::VSLL_VX:
  case RISCV::VSRL_VX:
  case RISCV::VSRA_VX:
  case RISCV::VSSRL_VX:
  case RISCV::VSSRA_VX:
  case RISCV::VROL_VX:
  case RISCV::VROR_VX:
    return Log2SEW;

  // Narrowing shifts and clips, widening shift: only the low lg2(2*SEW) bits
  // of the scalar shift amount are used.
  case RISCV::VNSRL_WX:
  case RISCV::VNSRA_WX:
  case RISCV::VNCLIPU_WX:
  case RISCV::VNCLIP_WX:
  case RISCV::VWSLL_VX:
    return Log2SEW + 1;

  // Everything else demands the low SEW bits of the scalar operand.
  case RISCV::VADD_VX:
  case RISCV::VSUB_VX:
  case RISCV::VRSUB_VX:
  case RISCV::VWADDU_VX:
  case RISCV::VWSUBU_VX:
  case RISCV::VWADD_VX:
  case RISCV::VWSUB_VX:
  case RISCV::VWADDU_WX:
  case RISCV::VWSUBU_WX:
  case RISCV::VWADD_WX:
  case RISCV::VWSUB_WX:
  case RISCV::VADC_VXM:
  case RISCV::VADC_VIM:
  case RISCV::VMADC_VXM:
  case RISCV::VMADC_VIM:
  case RISCV::VMADC_VX:
  case RISCV::VSBC_VXM:
  case RISCV::VMSBC_VXM:
  case RISCV::VMSBC_VX:
  case RISCV::VAND_VX:
  case RISCV::VOR_VX:
  case RISCV::VXOR_VX:
  case RISCV::VMSEQ_VX:
  case RISCV::VMSNE_VX:
  case RISCV::VMSLTU_VX:
  case RISCV::VMSLT_VX:
  case RISCV::VMSLEU_VX:
  case RISCV::VMSLE_VX:
  case RISCV::VMSGTU_VX:
  case RISCV::VMSGT_VX:
  case RISCV::VMINU_VX:
  case RISCV::VMIN_VX:
  case RISCV::VMAXU_VX:
  case RISCV::VMAX_VX:
  case RISCV::VMUL_VX:
  case RISCV::VMULH_VX:
  case RISCV::VMULHU_VX:
  case RISCV::VMULHSU_VX:
  case RISCV::VDIVU_VX:
  case RISCV::VDIV_VX:
  case RISCV::VREMU_VX:
  case RISCV::VREM_VX:
  case RISCV::VWMUL_VX:
  case RISCV::VWMULU_VX:
  case RISCV::VWMULSU_VX:
  case RISCV::VMACC_VX:
  case RISCV::VNMSAC_VX:
  case RISCV::VMADD_VX:
  case RISCV::VNMSUB_VX:
  case RISCV::VWMACCU_VX:
  case RISCV::VWMACC_VX:
  case RISCV::VWMACCSU_VX:
  case RISCV::VWMACCUS_VX:
  case RISCV::VMERGE_VXM:
  case RISCV::VMV_V_X:
  case RISCV::VSADDU_VX:
  case RISCV::VSADD_VX:
  case RISCV::VSSUBU_VX:
  case RISCV::VSSUB_VX:
  case RISCV::VAADDU_VX:
  case RISCV::VAADD_VX:
  case RISCV::VASUBU_VX:
  case RISCV::VASUB_VX:
  case RISCV::VSMUL_VX:
  case RISCV::VMV_S_X:
  case RISCV::VANDN_VX:
    return 1U << Log2SEW;
  }
}
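// Map an RVV pseudo back to the MC opcode it expands to, or 0 if the opcode
// is not in the pseudo table.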
unsigned RISCV::getRVVMCOpcode(unsigned RVVPseudoOpcode) {
  const RISCVVPseudosTable::PseudoInfo *RVV =
      RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
  if (!RVV)
    return 0;
  return RVV->BaseInstr;
}

unsigned RISCV::getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW) {
  unsigned DestEEW =
      (Desc.TSFlags & RISCVII::DestEEWMask) >> RISCVII::DestEEWShift;
  // EEW = 1
  if (DestEEW == 0)
    return 0;
  // EEW = SEW * 2^(DestEEW - 1)
  unsigned Scaled = Log2SEW + (DestEEW - 1);
  assert(RISCVVType::isValidSEW(1 << Scaled) && "Unexpected EEW");
  return Scaled;
}

// Fold a VL operand to a known constant when possible. VLMaxSentinel is
// deliberately excluded: it stands for an unknown, maximal VL and must not
// take part in a numeric comparison.
static std::optional<int64_t> getEffectiveImm(const MachineOperand &MO) {
  if (MO.isImm() && MO.getImm() != RISCV::VLMaxSentinel)
    return MO.getImm();
  return std::nullopt;
}

/// Given two VL operands, do we know that LHS <= RHS?
bool RISCV::isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS) {
  assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
         (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
  if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
      LHS.getReg() == RHS.getReg())
    return true;
  // Everything is <= VLMAX.
  if (RHS.isImm() && RHS.getImm() == RISCV::VLMaxSentinel)
    return true;
  // Zero is <= everything.
  if (LHS.isImm() && LHS.getImm() == 0)
    return true;
  std::optional<int64_t> LHSImm = getEffectiveImm(LHS);
  std::optional<int64_t> RHSImm = getEffectiveImm(RHS);
  if (!LHSImm || !RHSImm)
    return false;
  return LHSImm <= RHSImm;
}
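// Software pipelining support. analyzeLoopForPipelining below accepts only a
// single-basic-block loop ending in a conditional branch; the instructions
// defining the compare operands are recorded so the pipeliner keeps them in
// stage 0.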
namespace {
class RISCVPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
  const MachineInstr *LHS;
  const MachineInstr *RHS;
  SmallVector<MachineOperand> Cond;

public:
  RISCVPipelinerLoopInfo(const MachineInstr *LHS, const MachineInstr *RHS,
                         const SmallVectorImpl<MachineOperand> &Cond)
      : LHS(LHS), RHS(RHS), Cond(Cond.begin(), Cond.end()) {}

  bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
    // Make the instructions for loop control be placed in stage 0.
    return MI == LHS || MI == RHS;
  }

  std::optional<bool> createTripCountGreaterCondition(
      int TC, MachineBasicBlock &MBB,
      SmallVectorImpl<MachineOperand> &CondParam) override {
    // A branch instruction will be inserted as "if (Cond) goto epilogue".
    // Cond is normalized for such use by analyzeLoopForPipelining.
    CondParam = Cond;
    return {};
  }

  void setPreheader(MachineBasicBlock *NewPreheader) override {}

  void adjustTripCount(int TripCountAdjust) override {}

  void disposed() override {}
};
} // namespace
std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
RISCVInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (analyzeBranch(*LoopBB, TBB, FBB, Cond, /*AllowModify=*/false))
    return nullptr;

  // Infinite loops are not supported.
  if (TBB == LoopBB && FBB == LoopBB)
    return nullptr;

  // Must be a conditional branch.
  if (FBB == nullptr)
    return nullptr;

  assert((TBB == LoopBB || FBB == LoopBB) &&
         "The Loop must be a single-basic-block loop");

  // Normalize Cond for createTripCountGreaterCondition().
  if (TBB == LoopBB)
    reverseBranchCondition(Cond);

  const MachineRegisterInfo &MRI = LoopBB->getParent()->getRegInfo();
  auto FindRegDef = [&MRI](MachineOperand &Op) -> const MachineInstr * {
    if (!Op.isReg())
      return nullptr;
    Register Reg = Op.getReg();
    if (!Reg.isVirtual())
      return nullptr;
    return MRI.getVRegDef(Reg);
  };

  const MachineInstr *LHS = FindRegDef(Cond[1]);
  const MachineInstr *RHS = FindRegDef(Cond[2]);
  if (LHS && LHS->isPHI())
    return nullptr;
  if (RHS && RHS->isPHI())
    return nullptr;

  return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS, Cond);
}
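// Integer and FP division, remainder and square root (scalar and vector) are
// treated as high-latency defs; vector pseudos are first mapped back to their
// MC opcode so a single opcode list covers every LMUL/SEW variant.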
bool RISCVInstrInfo::isHighLatencyDef(int Opc) const {
  unsigned RVVMCOpcode = RISCV::getRVVMCOpcode(Opc);
  Opc = RVVMCOpcode ? RVVMCOpcode : Opc;
  switch (Opc) {
  default:
    return false;
  // Integer div/rem.
  case RISCV::DIV:
  case RISCV::DIVU:
  case RISCV::DIVW:
  case RISCV::DIVUW:
  case RISCV::REM:
  case RISCV::REMU:
  case RISCV::REMW:
  case RISCV::REMUW:
  // Scalar FP div/sqrt.
  case RISCV::FDIV_H:
  case RISCV::FDIV_S:
  case RISCV::FDIV_D:
  case RISCV::FDIV_H_INX:
  case RISCV::FDIV_S_INX:
  case RISCV::FDIV_D_INX:
  case RISCV::FDIV_D_IN32X:
  case RISCV::FSQRT_H:
  case RISCV::FSQRT_S:
  case RISCV::FSQRT_D:
  case RISCV::FSQRT_H_INX:
  case RISCV::FSQRT_S_INX:
  case RISCV::FSQRT_D_INX:
  case RISCV::FSQRT_D_IN32X:
  // Vector integer div/rem.
  case RISCV::VDIV_VV:
  case RISCV::VDIV_VX:
  case RISCV::VDIVU_VV:
  case RISCV::VDIVU_VX:
  case RISCV::VREM_VV:
  case RISCV::VREM_VX:
  case RISCV::VREMU_VV:
  case RISCV::VREMU_VX:
  // Vector FP div/sqrt.
  case RISCV::VFDIV_VV:
  case RISCV::VFDIV_VF:
  case RISCV::VFRDIV_VF:
  case RISCV::VFSQRT_V:
  case RISCV::VFRSQRT7_V:
    return true;
  }
}
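// isVRegCopy: a COPY counts if its destination belongs to an RVV register
// class; LMul == 0 accepts any vector copy, otherwise the class LMUL must
// match, with fractional LMUL classes treated as LMUL 1.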
bool RISCVInstrInfo::isVRegCopy(const MachineInstr *MI, unsigned LMul) const {
  if (MI->getOpcode() != TargetOpcode::COPY)
    return false;

  const MachineRegisterInfo &MRI = MI->getMF()->getRegInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  Register DstReg = MI->getOperand(0).getReg();
  const TargetRegisterClass *RC = DstReg.isVirtual()
                                      ? MRI.getRegClass(DstReg)
                                      : TRI->getMinimalPhysRegClass(DstReg);
  if (!RISCVRegisterInfo::isRVVRegClass(RC))
    return false;
  // LMul == 0 means any kind of vector register copy qualifies.
  if (LMul == 0)
    return true;

  auto [RCLMul, RCFractional] =
      RISCVVType::decodeVLMUL(RISCVRI::getLMul(RC->TSFlags));
  return (!RCFractional && LMul == RCLMul) || (RCFractional && LMul == 1);
}