41#define GEN_CHECK_COMPRESS_INSTR
42#include "RISCVGenCompressInstEmitter.inc"
44#define GET_INSTRINFO_CTOR_DTOR
45#include "RISCVGenInstrInfo.inc"
47#define DEBUG_TYPE "riscv-instr-info"
49 "Number of registers within vector register groups spilled");
51 "Number of registers within vector register groups reloaded");
55 cl::desc(
"Prefer whole register move for vector registers."));
58 "riscv-force-machine-combiner-strategy",
cl::Hidden,
59 cl::desc(
"Force machine combiner to use a specific strategy for machine "
60 "trace metrics evaluation."),
65 "MinInstrCount strategy.")));
69 cl::desc(
"Enable RegSave strategy in machine outliner (save X5 to a "
70 "temporary register when X5 is live across outlined calls)."));
76#define GET_RISCVVPseudosTable_IMPL
77#include "RISCVGenSearchableTables.inc"
83#define GET_RISCVMaskedPseudosTable_IMPL
84#include "RISCVGenSearchableTables.inc"
90 RISCV::ADJCALLSTACKUP),
93#define GET_INSTRINFO_HELPERS
94#include "RISCVGenInstrInfo.inc"
97 if (
STI.hasStdExtZca())
106 int &FrameIndex)
const {
116 case RISCV::VL1RE8_V:
117 case RISCV::VL1RE16_V:
118 case RISCV::VL1RE32_V:
119 case RISCV::VL1RE64_V:
122 case RISCV::VL2RE8_V:
123 case RISCV::VL2RE16_V:
124 case RISCV::VL2RE32_V:
125 case RISCV::VL2RE64_V:
128 case RISCV::VL4RE8_V:
129 case RISCV::VL4RE16_V:
130 case RISCV::VL4RE32_V:
131 case RISCV::VL4RE64_V:
134 case RISCV::VL8RE8_V:
135 case RISCV::VL8RE16_V:
136 case RISCV::VL8RE32_V:
137 case RISCV::VL8RE64_V:
145 switch (
MI.getOpcode()) {
169 case RISCV::VL1RE8_V:
170 case RISCV::VL2RE8_V:
171 case RISCV::VL4RE8_V:
172 case RISCV::VL8RE8_V:
173 if (!
MI.getOperand(1).isFI())
175 FrameIndex =
MI.getOperand(1).getIndex();
178 return MI.getOperand(0).getReg();
181 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
182 MI.getOperand(2).getImm() == 0) {
183 FrameIndex =
MI.getOperand(1).getIndex();
184 return MI.getOperand(0).getReg();
191 int &FrameIndex)
const {
199 switch (
MI.getOpcode()) {
224 if (!
MI.getOperand(1).isFI())
226 FrameIndex =
MI.getOperand(1).getIndex();
229 return MI.getOperand(0).getReg();
232 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
233 MI.getOperand(2).getImm() == 0) {
234 FrameIndex =
MI.getOperand(1).getIndex();
235 return MI.getOperand(0).getReg();
245 case RISCV::VFMV_V_F:
248 case RISCV::VFMV_S_F:
250 return MI.getOperand(1).isUndef();
258 return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
269 assert(
MBBI->getOpcode() == TargetOpcode::COPY &&
270 "Unexpected COPY instruction.");
274 bool FoundDef =
false;
275 bool FirstVSetVLI =
false;
276 unsigned FirstSEW = 0;
279 if (
MBBI->isMetaInstruction())
282 if (RISCVInstrInfo::isVectorConfigInstr(*
MBBI)) {
292 unsigned FirstVType =
MBBI->getOperand(2).getImm();
297 if (FirstLMul != LMul)
302 if (!RISCVInstrInfo::isVLPreservingConfig(*
MBBI))
308 unsigned VType =
MBBI->getOperand(2).getImm();
326 }
else if (
MBBI->isInlineAsm() ||
MBBI->isCall()) {
328 }
else if (
MBBI->getNumDefs()) {
331 if (
MBBI->modifiesRegister(RISCV::VL,
nullptr))
337 if (!MO.isReg() || !MO.isDef())
339 if (!FoundDef &&
TRI->regsOverlap(MO.getReg(), SrcReg)) {
354 if (MO.getReg() != SrcReg)
395 uint16_t SrcEncoding =
TRI->getEncodingValue(SrcReg);
396 uint16_t DstEncoding =
TRI->getEncodingValue(DstReg);
398 assert(!Fractional &&
"It is impossible be fractional lmul here.");
399 unsigned NumRegs = NF * LMulVal;
405 SrcEncoding += NumRegs - 1;
406 DstEncoding += NumRegs - 1;
412 unsigned,
unsigned> {
420 uint16_t Diff = DstEncoding - SrcEncoding;
421 if (
I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
422 DstEncoding % 8 == 7)
424 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
425 if (
I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
426 DstEncoding % 4 == 3)
428 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
429 if (
I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
430 DstEncoding % 2 == 1)
432 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
435 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
440 if (
I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
442 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
443 if (
I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
445 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
446 if (
I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
448 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
451 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
454 while (
I != NumRegs) {
459 auto [LMulCopied, RegClass,
Opc, VVOpc, VIOpc] =
460 GetCopyInfo(SrcEncoding, DstEncoding);
464 if (LMul == LMulCopied &&
467 if (DefMBBI->getOpcode() == VIOpc)
474 RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
476 RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
484 MIB = MIB.add(DefMBBI->getOperand(2));
492 MIB.addImm(Log2SEW ? Log2SEW : 3);
504 SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
505 DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
514 bool RenamableDest,
bool RenamableSrc)
const {
518 if (RISCV::GPRRegClass.
contains(DstReg, SrcReg)) {
525 if (RISCV::GPRF16RegClass.
contains(DstReg, SrcReg)) {
531 if (RISCV::GPRF32RegClass.
contains(DstReg, SrcReg)) {
537 if (RISCV::GPRPairRegClass.
contains(DstReg, SrcReg)) {
539 if (
STI.hasStdExtZdinx()) {
548 if (
STI.hasStdExtP()) {
557 MCRegister EvenReg =
TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
558 MCRegister OddReg =
TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);
560 if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)
562 assert(DstReg != RISCV::X0_Pair &&
"Cannot write to X0_Pair");
566 TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
567 .
addReg(EvenReg, KillFlag)
570 TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
577 if (RISCV::VCSRRegClass.
contains(SrcReg) &&
578 RISCV::GPRRegClass.
contains(DstReg)) {
580 .
addImm(RISCVSysReg::lookupSysRegByName(
TRI->getName(SrcReg))->Encoding)
585 if (RISCV::FPR16RegClass.
contains(DstReg, SrcReg)) {
587 if (
STI.hasStdExtZfh()) {
588 Opc = RISCV::FSGNJ_H;
591 (
STI.hasStdExtZfhmin() ||
STI.hasStdExtZfbfmin()) &&
592 "Unexpected extensions");
594 DstReg =
TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
595 &RISCV::FPR32RegClass);
596 SrcReg =
TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
597 &RISCV::FPR32RegClass);
598 Opc = RISCV::FSGNJ_S;
602 .
addReg(SrcReg, KillFlag);
606 if (RISCV::FPR32RegClass.
contains(DstReg, SrcReg)) {
609 .
addReg(SrcReg, KillFlag);
613 if (RISCV::FPR64RegClass.
contains(DstReg, SrcReg)) {
616 .
addReg(SrcReg, KillFlag);
620 if (RISCV::FPR32RegClass.
contains(DstReg) &&
621 RISCV::GPRRegClass.
contains(SrcReg)) {
623 .
addReg(SrcReg, KillFlag);
627 if (RISCV::GPRRegClass.
contains(DstReg) &&
628 RISCV::FPR32RegClass.
contains(SrcReg)) {
630 .
addReg(SrcReg, KillFlag);
634 if (RISCV::FPR64RegClass.
contains(DstReg) &&
635 RISCV::GPRRegClass.
contains(SrcReg)) {
636 assert(
STI.getXLen() == 64 &&
"Unexpected GPR size");
638 .
addReg(SrcReg, KillFlag);
642 if (RISCV::GPRRegClass.
contains(DstReg) &&
643 RISCV::FPR64RegClass.
contains(SrcReg)) {
644 assert(
STI.getXLen() == 64 &&
"Unexpected GPR size");
646 .
addReg(SrcReg, KillFlag);
652 TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
663 Register SrcReg,
bool IsKill,
int FI,
672 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
673 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
675 }
else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
676 Opcode = RISCV::SH_INX;
677 }
else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
678 Opcode = RISCV::SW_INX;
679 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
680 if (!
STI.is64Bit() &&
STI.hasStdExtZilsd() &&
681 Alignment >=
STI.getZilsdAlign()) {
682 Opcode = RISCV::SD_RV32;
684 Opcode = RISCV::PseudoRV32ZdinxSD;
686 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
688 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
690 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
692 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
693 Opcode = RISCV::VS1R_V;
694 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
695 Opcode = RISCV::VS2R_V;
696 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
697 Opcode = RISCV::VS4R_V;
698 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
699 Opcode = RISCV::VS8R_V;
700 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
701 Opcode = RISCV::PseudoVSPILL2_M1;
702 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
703 Opcode = RISCV::PseudoVSPILL2_M2;
704 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
705 Opcode = RISCV::PseudoVSPILL2_M4;
706 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
707 Opcode = RISCV::PseudoVSPILL3_M1;
708 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
709 Opcode = RISCV::PseudoVSPILL3_M2;
710 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
711 Opcode = RISCV::PseudoVSPILL4_M1;
712 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
713 Opcode = RISCV::PseudoVSPILL4_M2;
714 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
715 Opcode = RISCV::PseudoVSPILL5_M1;
716 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
717 Opcode = RISCV::PseudoVSPILL6_M1;
718 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
719 Opcode = RISCV::PseudoVSPILL7_M1;
720 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
721 Opcode = RISCV::PseudoVSPILL8_M1;
764 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
765 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
767 }
else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
768 Opcode = RISCV::LH_INX;
769 }
else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
770 Opcode = RISCV::LW_INX;
771 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
772 if (!
STI.is64Bit() &&
STI.hasStdExtZilsd() &&
773 Alignment >=
STI.getZilsdAlign()) {
774 Opcode = RISCV::LD_RV32;
776 Opcode = RISCV::PseudoRV32ZdinxLD;
778 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
780 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
782 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
784 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
785 Opcode = RISCV::VL1RE8_V;
786 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
787 Opcode = RISCV::VL2RE8_V;
788 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
789 Opcode = RISCV::VL4RE8_V;
790 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
791 Opcode = RISCV::VL8RE8_V;
792 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
793 Opcode = RISCV::PseudoVRELOAD2_M1;
794 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
795 Opcode = RISCV::PseudoVRELOAD2_M2;
796 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
797 Opcode = RISCV::PseudoVRELOAD2_M4;
798 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
799 Opcode = RISCV::PseudoVRELOAD3_M1;
800 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
801 Opcode = RISCV::PseudoVRELOAD3_M2;
802 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
803 Opcode = RISCV::PseudoVRELOAD4_M1;
804 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
805 Opcode = RISCV::PseudoVRELOAD4_M2;
806 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
807 Opcode = RISCV::PseudoVRELOAD5_M1;
808 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
809 Opcode = RISCV::PseudoVRELOAD6_M1;
810 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
811 Opcode = RISCV::PseudoVRELOAD7_M1;
812 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
813 Opcode = RISCV::PseudoVRELOAD8_M1;
851 if (
Ops.size() != 1 ||
Ops[0] != 1)
854 switch (
MI.getOpcode()) {
856 if (RISCVInstrInfo::isSEXT_W(
MI))
858 if (RISCVInstrInfo::isZEXT_W(
MI))
860 if (RISCVInstrInfo::isZEXT_B(
MI))
867 case RISCV::ZEXT_H_RV32:
868 case RISCV::ZEXT_H_RV64:
875 case RISCV::VMV_X_S: {
878 if (ST.getXLen() < (1U << Log2SEW))
893 case RISCV::VFMV_F_S: {
921 return BuildMI(*
MI.getParent(), InsertPt,
MI.getDebugLoc(),
get(*LoadOpc),
930 return RISCV::PseudoCCLB;
932 return RISCV::PseudoCCLBU;
934 return RISCV::PseudoCCLH;
936 return RISCV::PseudoCCLHU;
938 return RISCV::PseudoCCLW;
940 return RISCV::PseudoCCLWU;
942 return RISCV::PseudoCCLD;
944 return RISCV::PseudoCCQC_E_LB;
945 case RISCV::QC_E_LBU:
946 return RISCV::PseudoCCQC_E_LBU;
948 return RISCV::PseudoCCQC_E_LH;
949 case RISCV::QC_E_LHU:
950 return RISCV::PseudoCCQC_E_LHU;
952 return RISCV::PseudoCCQC_E_LW;
963 if (
MI.getOpcode() != RISCV::PseudoCCMOVGPR)
968 if (!
STI.hasShortForwardBranchILoad() || !PredOpc)
972 if (
Ops.size() != 1 || (
Ops[0] != 1 &&
Ops[0] != 2))
975 bool Invert =
Ops[0] == 2;
984 MI.getDebugLoc(),
get(PredOpc), DestReg);
995 unsigned BCC =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
1001 NewMI.
add({
MI.getOperand(
MI.getNumExplicitOperands() - 2),
1002 MI.getOperand(
MI.getNumExplicitOperands() - 1)});
1011 bool DstIsDead)
const {
1027 bool SrcRenamable =
false;
1031 bool LastItem = ++Num == Seq.
size();
1036 switch (Inst.getOpndKind()) {
1046 .
addReg(SrcReg, SrcRegState)
1053 .
addReg(SrcReg, SrcRegState)
1054 .
addReg(SrcReg, SrcRegState)
1060 .
addReg(SrcReg, SrcRegState)
1068 SrcRenamable = DstRenamable;
1078 case RISCV::CV_BEQIMM:
1079 case RISCV::QC_BEQI:
1080 case RISCV::QC_E_BEQI:
1081 case RISCV::NDS_BBC:
1082 case RISCV::NDS_BEQC:
1086 case RISCV::QC_BNEI:
1087 case RISCV::QC_E_BNEI:
1088 case RISCV::CV_BNEIMM:
1089 case RISCV::NDS_BBS:
1090 case RISCV::NDS_BNEC:
1093 case RISCV::QC_BLTI:
1094 case RISCV::QC_E_BLTI:
1097 case RISCV::QC_BGEI:
1098 case RISCV::QC_E_BGEI:
1101 case RISCV::QC_BLTUI:
1102 case RISCV::QC_E_BLTUI:
1105 case RISCV::QC_BGEUI:
1106 case RISCV::QC_E_BGEUI:
1138 "Unknown conditional branch");
1149 case RISCV::QC_MVEQ:
1150 return RISCV::QC_MVNE;
1151 case RISCV::QC_MVNE:
1152 return RISCV::QC_MVEQ;
1153 case RISCV::QC_MVLT:
1154 return RISCV::QC_MVGE;
1155 case RISCV::QC_MVGE:
1156 return RISCV::QC_MVLT;
1157 case RISCV::QC_MVLTU:
1158 return RISCV::QC_MVGEU;
1159 case RISCV::QC_MVGEU:
1160 return RISCV::QC_MVLTU;
1161 case RISCV::QC_MVEQI:
1162 return RISCV::QC_MVNEI;
1163 case RISCV::QC_MVNEI:
1164 return RISCV::QC_MVEQI;
1165 case RISCV::QC_MVLTI:
1166 return RISCV::QC_MVGEI;
1167 case RISCV::QC_MVGEI:
1168 return RISCV::QC_MVLTI;
1169 case RISCV::QC_MVLTUI:
1170 return RISCV::QC_MVGEUI;
1171 case RISCV::QC_MVGEUI:
1172 return RISCV::QC_MVLTUI;
1177 switch (SelectOpc) {
1196 case RISCV::Select_GPR_Using_CC_Imm5_Zibi:
1206 case RISCV::Select_GPR_Using_CC_SImm5_CV:
1211 return RISCV::CV_BEQIMM;
1213 return RISCV::CV_BNEIMM;
1216 case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
1221 return RISCV::QC_BEQI;
1223 return RISCV::QC_BNEI;
1225 return RISCV::QC_BLTI;
1227 return RISCV::QC_BGEI;
1230 case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
1235 return RISCV::QC_BLTUI;
1237 return RISCV::QC_BGEUI;
1240 case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
1245 return RISCV::QC_E_BEQI;
1247 return RISCV::QC_E_BNEI;
1249 return RISCV::QC_E_BLTI;
1251 return RISCV::QC_E_BGEI;
1254 case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
1259 return RISCV::QC_E_BLTUI;
1261 return RISCV::QC_E_BGEUI;
1264 case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
1269 return RISCV::NDS_BBC;
1271 return RISCV::NDS_BBS;
1274 case RISCV::Select_GPR_Using_CC_UImm7_NDS:
1279 return RISCV::NDS_BEQC;
1281 return RISCV::NDS_BNEC;
1327 case RISCV::CV_BEQIMM:
1328 return RISCV::CV_BNEIMM;
1329 case RISCV::CV_BNEIMM:
1330 return RISCV::CV_BEQIMM;
1331 case RISCV::QC_BEQI:
1332 return RISCV::QC_BNEI;
1333 case RISCV::QC_BNEI:
1334 return RISCV::QC_BEQI;
1335 case RISCV::QC_BLTI:
1336 return RISCV::QC_BGEI;
1337 case RISCV::QC_BGEI:
1338 return RISCV::QC_BLTI;
1339 case RISCV::QC_BLTUI:
1340 return RISCV::QC_BGEUI;
1341 case RISCV::QC_BGEUI:
1342 return RISCV::QC_BLTUI;
1343 case RISCV::QC_E_BEQI:
1344 return RISCV::QC_E_BNEI;
1345 case RISCV::QC_E_BNEI:
1346 return RISCV::QC_E_BEQI;
1347 case RISCV::QC_E_BLTI:
1348 return RISCV::QC_E_BGEI;
1349 case RISCV::QC_E_BGEI:
1350 return RISCV::QC_E_BLTI;
1351 case RISCV::QC_E_BLTUI:
1352 return RISCV::QC_E_BGEUI;
1353 case RISCV::QC_E_BGEUI:
1354 return RISCV::QC_E_BLTUI;
1355 case RISCV::NDS_BBC:
1356 return RISCV::NDS_BBS;
1357 case RISCV::NDS_BBS:
1358 return RISCV::NDS_BBC;
1359 case RISCV::NDS_BEQC:
1360 return RISCV::NDS_BNEC;
1361 case RISCV::NDS_BNEC:
1362 return RISCV::NDS_BEQC;
1370 bool AllowModify)
const {
1371 TBB = FBB =
nullptr;
1376 if (
I ==
MBB.end() || !isUnpredicatedTerminator(*
I))
1382 int NumTerminators = 0;
1383 for (
auto J =
I.getReverse(); J !=
MBB.rend() && isUnpredicatedTerminator(*J);
1386 if (J->getDesc().isUnconditionalBranch() ||
1387 J->getDesc().isIndirectBranch()) {
1394 if (AllowModify && FirstUncondOrIndirectBr !=
MBB.end()) {
1395 while (std::next(FirstUncondOrIndirectBr) !=
MBB.end()) {
1396 std::next(FirstUncondOrIndirectBr)->eraseFromParent();
1399 I = FirstUncondOrIndirectBr;
1403 if (
I->getDesc().isIndirectBranch())
1407 if (
I->isPreISelOpcode())
1411 if (NumTerminators > 2)
1415 if (NumTerminators == 1 &&
I->getDesc().isUnconditionalBranch()) {
1421 if (NumTerminators == 1 &&
I->getDesc().isConditionalBranch()) {
1427 if (NumTerminators == 2 && std::prev(
I)->getDesc().isConditionalBranch() &&
1428 I->getDesc().isUnconditionalBranch()) {
1439 int *BytesRemoved)
const {
1446 if (!
I->getDesc().isUnconditionalBranch() &&
1447 !
I->getDesc().isConditionalBranch())
1453 I->eraseFromParent();
1457 if (
I ==
MBB.begin())
1460 if (!
I->getDesc().isConditionalBranch())
1466 I->eraseFromParent();
1479 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
1481 "RISC-V branch conditions have two components!");
1515 assert(RS &&
"RegScavenger required for long branching");
1517 "new block should be inserted for expanding unconditional branch");
1520 "restore block should be inserted for restoring clobbered registers");
1529 "Branch offsets outside of the signed 32-bit range not supported");
1535 auto II =
MBB.end();
1541 RS->enterBasicBlockEnd(
MBB);
1548 RC = &RISCV::GPRX7RegClass;
1550 RS->scavengeRegisterBackwards(*RC,
MI.getIterator(),
1554 RS->setRegUsed(TmpGPR);
1559 TmpGPR =
STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
1565 if (FrameIndex == -1)
1570 TRI->eliminateFrameIndex(std::prev(
MI.getIterator()),
1573 MI.getOperand(1).setMBB(&RestoreBB);
1577 TRI->eliminateFrameIndex(RestoreBB.
back(),
1587 assert((
Cond.size() == 3) &&
"Invalid branch condition!");
1597 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1598 MI->getOperand(1).getReg() == RISCV::X0) {
1599 Imm =
MI->getOperand(2).getImm();
1604 if (
MI->getOpcode() == RISCV::BSETI &&
MI->getOperand(1).isReg() &&
1605 MI->getOperand(1).getReg() == RISCV::X0 &&
1606 MI->getOperand(2).getImm() == 11) {
1620 if (Reg == RISCV::X0) {
1628 bool IsSigned =
false;
1629 bool IsEquality =
false;
1630 switch (
MI.getOpcode()) {
1666 MI.eraseFromParent();
1692 auto searchConst = [&](int64_t C1) ->
Register {
1694 auto DefC1 = std::find_if(++
II, E, [&](
const MachineInstr &
I) ->
bool {
1697 I.getOperand(0).getReg().isVirtual();
1700 return DefC1->getOperand(0).getReg();
1712 if (
isFromLoadImm(MRI, LHS, C0) && C0 != 0 && LHS.getReg().isVirtual() &&
1713 MRI.
hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
1715 if (
Register RegZ = searchConst(C0 + 1)) {
1723 MI.eraseFromParent();
1733 if (
isFromLoadImm(MRI, RHS, C0) && C0 != 0 && RHS.getReg().isVirtual() &&
1736 if (
Register RegZ = searchConst(C0 - 1)) {
1744 MI.eraseFromParent();
1754 assert(
MI.getDesc().isBranch() &&
"Unexpected opcode!");
1756 int NumOp =
MI.getNumExplicitOperands();
1757 return MI.getOperand(NumOp - 1).getMBB();
1761 int64_t BrOffset)
const {
1762 unsigned XLen =
STI.getXLen();
1769 case RISCV::NDS_BBC:
1770 case RISCV::NDS_BBS:
1771 case RISCV::NDS_BEQC:
1772 case RISCV::NDS_BNEC:
1782 case RISCV::CV_BEQIMM:
1783 case RISCV::CV_BNEIMM:
1784 case RISCV::QC_BEQI:
1785 case RISCV::QC_BNEI:
1786 case RISCV::QC_BGEI:
1787 case RISCV::QC_BLTI:
1788 case RISCV::QC_BLTUI:
1789 case RISCV::QC_BGEUI:
1790 case RISCV::QC_E_BEQI:
1791 case RISCV::QC_E_BNEI:
1792 case RISCV::QC_E_BGEI:
1793 case RISCV::QC_E_BLTI:
1794 case RISCV::QC_E_BLTUI:
1795 case RISCV::QC_E_BGEUI:
1798 case RISCV::PseudoBR:
1800 case RISCV::PseudoJump:
1811 case RISCV::ADD:
return RISCV::PseudoCCADD;
1812 case RISCV::SUB:
return RISCV::PseudoCCSUB;
1813 case RISCV::SLL:
return RISCV::PseudoCCSLL;
1814 case RISCV::SRL:
return RISCV::PseudoCCSRL;
1815 case RISCV::SRA:
return RISCV::PseudoCCSRA;
1816 case RISCV::AND:
return RISCV::PseudoCCAND;
1817 case RISCV::OR:
return RISCV::PseudoCCOR;
1818 case RISCV::XOR:
return RISCV::PseudoCCXOR;
1819 case RISCV::MAX:
return RISCV::PseudoCCMAX;
1820 case RISCV::MAXU:
return RISCV::PseudoCCMAXU;
1821 case RISCV::MIN:
return RISCV::PseudoCCMIN;
1822 case RISCV::MINU:
return RISCV::PseudoCCMINU;
1823 case RISCV::MUL:
return RISCV::PseudoCCMUL;
1824 case RISCV::LUI:
return RISCV::PseudoCCLUI;
1825 case RISCV::QC_LI:
return RISCV::PseudoCCQC_LI;
1826 case RISCV::QC_E_LI:
return RISCV::PseudoCCQC_E_LI;
1828 case RISCV::ADDI:
return RISCV::PseudoCCADDI;
1829 case RISCV::SLLI:
return RISCV::PseudoCCSLLI;
1830 case RISCV::SRLI:
return RISCV::PseudoCCSRLI;
1831 case RISCV::SRAI:
return RISCV::PseudoCCSRAI;
1832 case RISCV::ANDI:
return RISCV::PseudoCCANDI;
1833 case RISCV::ORI:
return RISCV::PseudoCCORI;
1834 case RISCV::XORI:
return RISCV::PseudoCCXORI;
1836 case RISCV::ADDW:
return RISCV::PseudoCCADDW;
1837 case RISCV::SUBW:
return RISCV::PseudoCCSUBW;
1838 case RISCV::SLLW:
return RISCV::PseudoCCSLLW;
1839 case RISCV::SRLW:
return RISCV::PseudoCCSRLW;
1840 case RISCV::SRAW:
return RISCV::PseudoCCSRAW;
1842 case RISCV::ADDIW:
return RISCV::PseudoCCADDIW;
1843 case RISCV::SLLIW:
return RISCV::PseudoCCSLLIW;
1844 case RISCV::SRLIW:
return RISCV::PseudoCCSRLIW;
1845 case RISCV::SRAIW:
return RISCV::PseudoCCSRAIW;
1847 case RISCV::ANDN:
return RISCV::PseudoCCANDN;
1848 case RISCV::ORN:
return RISCV::PseudoCCORN;
1849 case RISCV::XNOR:
return RISCV::PseudoCCXNOR;
1851 case RISCV::NDS_BFOS:
return RISCV::PseudoCCNDS_BFOS;
1852 case RISCV::NDS_BFOZ:
return RISCV::PseudoCCNDS_BFOZ;
1856 return RISCV::INSTRUCTION_LIST_END;
1865 if (!
Reg.isVirtual())
1873 if (!STI.hasShortForwardBranchIMinMax() &&
1874 (
MI->getOpcode() == RISCV::MAX ||
MI->getOpcode() == RISCV::MIN ||
1875 MI->getOpcode() == RISCV::MINU ||
MI->getOpcode() == RISCV::MAXU))
1878 if (!STI.hasShortForwardBranchIMul() &&
MI->getOpcode() == RISCV::MUL)
1885 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1886 MI->getOperand(1).getReg() == RISCV::X0)
1891 if (MO.isFI() || MO.isCPI() || MO.isJTI())
1904 bool DontMoveAcrossStores =
true;
1905 if (!
MI->isSafeToMove(DontMoveAcrossStores))
1913 bool PreferFalse)
const {
1914 assert(
MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1915 "Unknown select instruction");
1916 if (!
STI.hasShortForwardBranchIALU())
1922 bool Invert = !
DefMI;
1930 Register DestReg =
MI.getOperand(0).getReg();
1936 assert(PredOpc != RISCV::INSTRUCTION_LIST_END &&
"Unexpected opcode!");
1943 NewMI.
add(FalseReg);
1951 unsigned BCCOpcode =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
1957 NewMI.
add(
MI.getOperand(
MI.getNumExplicitOperands() - 2));
1958 NewMI.
add(
MI.getOperand(
MI.getNumExplicitOperands() - 1));
1968 if (
DefMI->getParent() !=
MI.getParent())
1972 DefMI->eraseFromParent();
1977 if (
MI.isMetaInstruction())
1980 unsigned Opcode =
MI.getOpcode();
1982 if (Opcode == TargetOpcode::INLINEASM ||
1983 Opcode == TargetOpcode::INLINEASM_BR) {
1985 return getInlineAsmLength(
MI.getOperand(0).getSymbolName(),
1990 if (
STI.hasStdExtZca()) {
1991 if (isCompressibleInst(
MI,
STI))
1998 if (Opcode == TargetOpcode::BUNDLE)
1999 return getInstBundleSize(
MI);
2001 if (
MI.getParent() &&
MI.getParent()->getParent()) {
2002 if (isCompressibleInst(
MI,
STI))
2007 case RISCV::PseudoMV_FPR16INX:
2008 case RISCV::PseudoMV_FPR32INX:
2010 return STI.hasStdExtZca() ? 2 : 4;
2012 case RISCV::PseudoCCMOVGPRNoX0:
2013 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2016 case RISCV::PseudoCCMOVGPR:
2017 case RISCV::PseudoCCADD:
2018 case RISCV::PseudoCCSUB:
2019 case RISCV::PseudoCCSLL:
2020 case RISCV::PseudoCCSRL:
2021 case RISCV::PseudoCCSRA:
2022 case RISCV::PseudoCCAND:
2023 case RISCV::PseudoCCOR:
2024 case RISCV::PseudoCCXOR:
2025 case RISCV::PseudoCCADDI:
2026 case RISCV::PseudoCCANDI:
2027 case RISCV::PseudoCCORI:
2028 case RISCV::PseudoCCXORI:
2029 case RISCV::PseudoCCLUI:
2030 case RISCV::PseudoCCSLLI:
2031 case RISCV::PseudoCCSRLI:
2032 case RISCV::PseudoCCSRAI:
2033 case RISCV::PseudoCCADDW:
2034 case RISCV::PseudoCCSUBW:
2035 case RISCV::PseudoCCSLLW:
2036 case RISCV::PseudoCCSRLW:
2037 case RISCV::PseudoCCSRAW:
2038 case RISCV::PseudoCCADDIW:
2039 case RISCV::PseudoCCSLLIW:
2040 case RISCV::PseudoCCSRLIW:
2041 case RISCV::PseudoCCSRAIW:
2042 case RISCV::PseudoCCANDN:
2043 case RISCV::PseudoCCORN:
2044 case RISCV::PseudoCCXNOR:
2045 case RISCV::PseudoCCMAX:
2046 case RISCV::PseudoCCMIN:
2047 case RISCV::PseudoCCMAXU:
2048 case RISCV::PseudoCCMINU:
2049 case RISCV::PseudoCCMUL:
2050 case RISCV::PseudoCCLB:
2051 case RISCV::PseudoCCLH:
2052 case RISCV::PseudoCCLW:
2053 case RISCV::PseudoCCLHU:
2054 case RISCV::PseudoCCLBU:
2055 case RISCV::PseudoCCLWU:
2056 case RISCV::PseudoCCLD:
2057 case RISCV::PseudoCCQC_LI:
2058 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2061 case RISCV::PseudoCCQC_E_LI:
2062 case RISCV::PseudoCCQC_E_LB:
2063 case RISCV::PseudoCCQC_E_LH:
2064 case RISCV::PseudoCCQC_E_LW:
2065 case RISCV::PseudoCCQC_E_LHU:
2066 case RISCV::PseudoCCQC_E_LBU:
2067 return get(
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm())
2070 case TargetOpcode::STACKMAP:
2073 case TargetOpcode::PATCHPOINT:
2076 case TargetOpcode::STATEPOINT: {
2080 return std::max(NumBytes, 8U);
2082 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
2083 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
2084 case TargetOpcode::PATCHABLE_TAIL_CALL: {
2087 if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
2088 F.hasFnAttribute(
"patchable-function-entry")) {
2090 F.getFnAttributeAsParsedInteger(
"patchable-function-entry");
2092 return (
STI.hasStdExtZca() ? 2 : 4) * Num;
2096 return STI.is64Bit() ? 68 : 44;
2099 return get(Opcode).getSize();
2104 const unsigned Opcode =
MI.getOpcode();
2108 case RISCV::FSGNJ_D:
2109 case RISCV::FSGNJ_S:
2110 case RISCV::FSGNJ_H:
2111 case RISCV::FSGNJ_D_INX:
2112 case RISCV::FSGNJ_D_IN32X:
2113 case RISCV::FSGNJ_S_INX:
2114 case RISCV::FSGNJ_H_INX:
2116 return MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2117 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg();
2121 return (
MI.getOperand(1).isReg() &&
2122 MI.getOperand(1).getReg() == RISCV::X0) ||
2123 (
MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0);
2125 return MI.isAsCheapAsAMove();
2128std::optional<DestSourcePair>
2132 switch (
MI.getOpcode()) {
2138 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2139 MI.getOperand(2).isReg())
2141 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
2142 MI.getOperand(1).isReg())
2147 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isImm() &&
2148 MI.getOperand(2).getImm() == 0)
2152 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
2153 MI.getOperand(1).isReg())
2157 case RISCV::SH1ADD_UW:
2159 case RISCV::SH2ADD_UW:
2161 case RISCV::SH3ADD_UW:
2162 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2163 MI.getOperand(2).isReg())
2166 case RISCV::FSGNJ_D:
2167 case RISCV::FSGNJ_S:
2168 case RISCV::FSGNJ_H:
2169 case RISCV::FSGNJ_D_INX:
2170 case RISCV::FSGNJ_D_IN32X:
2171 case RISCV::FSGNJ_S_INX:
2172 case RISCV::FSGNJ_H_INX:
2174 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2175 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
2179 return std::nullopt;
2187 const auto &SchedModel =
STI.getSchedModel();
2188 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
2200 RISCV::getNamedOperandIdx(Root.
getOpcode(), RISCV::OpName::frm);
2204 return RISCV::getNamedOperandIdx(
MI->getOpcode(),
2205 RISCV::OpName::frm) < 0;
2207 "New instructions require FRM whereas the old one does not have it");
2214 for (
auto *NewMI : InsInstrs) {
2216 if (
static_cast<unsigned>(RISCV::getNamedOperandIdx(
2217 NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
2259bool RISCVInstrInfo::isVectorAssociativeAndCommutative(
const MachineInstr &Inst,
2260 bool Invert)
const {
2261#define OPCODE_LMUL_CASE(OPC) \
2262 case RISCV::OPC##_M1: \
2263 case RISCV::OPC##_M2: \
2264 case RISCV::OPC##_M4: \
2265 case RISCV::OPC##_M8: \
2266 case RISCV::OPC##_MF2: \
2267 case RISCV::OPC##_MF4: \
2268 case RISCV::OPC##_MF8
2270#define OPCODE_LMUL_MASK_CASE(OPC) \
2271 case RISCV::OPC##_M1_MASK: \
2272 case RISCV::OPC##_M2_MASK: \
2273 case RISCV::OPC##_M4_MASK: \
2274 case RISCV::OPC##_M8_MASK: \
2275 case RISCV::OPC##_MF2_MASK: \
2276 case RISCV::OPC##_MF4_MASK: \
2277 case RISCV::OPC##_MF8_MASK
2282 Opcode = *InvOpcode;
2299#undef OPCODE_LMUL_MASK_CASE
2300#undef OPCODE_LMUL_CASE
2303bool RISCVInstrInfo::areRVVInstsReassociable(
const MachineInstr &Root,
2314 const uint64_t TSFlags =
Desc.TSFlags;
2316 auto checkImmOperand = [&](
unsigned OpIdx) {
2320 auto checkRegOperand = [&](
unsigned OpIdx) {
2328 if (!checkRegOperand(1))
2343 bool SeenMI2 =
false;
2344 for (
auto End =
MBB->
rend(), It = It1; It != End; ++It) {
2353 if (It->modifiesRegister(RISCV::V0,
TRI)) {
2354 Register SrcReg = It->getOperand(1).getReg();
2372 if (MI1VReg != SrcReg)
2381 assert(SeenMI2 &&
"Prev is expected to appear before Root");
2421bool RISCVInstrInfo::hasReassociableVectorSibling(
const MachineInstr &Inst,
2422 bool &Commuted)
const {
2426 "Expect the present of passthrough operand.");
2432 Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
2433 areRVVInstsReassociable(Inst, *MI2);
2437 return areRVVInstsReassociable(Inst, *MI1) &&
2438 (isVectorAssociativeAndCommutative(*MI1) ||
2439 isVectorAssociativeAndCommutative(*MI1,
true)) &&
2446 if (!isVectorAssociativeAndCommutative(Inst) &&
2447 !isVectorAssociativeAndCommutative(Inst,
true))
2473 for (
unsigned I = 0;
I < 5; ++
I)
2479 bool &Commuted)
const {
2480 if (isVectorAssociativeAndCommutative(Inst) ||
2481 isVectorAssociativeAndCommutative(Inst,
true))
2482 return hasReassociableVectorSibling(Inst, Commuted);
2488 unsigned OperandIdx = Commuted ? 2 : 1;
2492 int16_t InstFrmOpIdx =
2493 RISCV::getNamedOperandIdx(Inst.
getOpcode(), RISCV::OpName::frm);
2494 int16_t SiblingFrmOpIdx =
2495 RISCV::getNamedOperandIdx(Sibling.
getOpcode(), RISCV::OpName::frm);
2497 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
2502 bool Invert)
const {
2503 if (isVectorAssociativeAndCommutative(Inst, Invert))
2511 Opc = *InverseOpcode;
2556std::optional<unsigned>
2558#define RVV_OPC_LMUL_CASE(OPC, INV) \
2559 case RISCV::OPC##_M1: \
2560 return RISCV::INV##_M1; \
2561 case RISCV::OPC##_M2: \
2562 return RISCV::INV##_M2; \
2563 case RISCV::OPC##_M4: \
2564 return RISCV::INV##_M4; \
2565 case RISCV::OPC##_M8: \
2566 return RISCV::INV##_M8; \
2567 case RISCV::OPC##_MF2: \
2568 return RISCV::INV##_MF2; \
2569 case RISCV::OPC##_MF4: \
2570 return RISCV::INV##_MF4; \
2571 case RISCV::OPC##_MF8: \
2572 return RISCV::INV##_MF8
2574#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
2575 case RISCV::OPC##_M1_MASK: \
2576 return RISCV::INV##_M1_MASK; \
2577 case RISCV::OPC##_M2_MASK: \
2578 return RISCV::INV##_M2_MASK; \
2579 case RISCV::OPC##_M4_MASK: \
2580 return RISCV::INV##_M4_MASK; \
2581 case RISCV::OPC##_M8_MASK: \
2582 return RISCV::INV##_M8_MASK; \
2583 case RISCV::OPC##_MF2_MASK: \
2584 return RISCV::INV##_MF2_MASK; \
2585 case RISCV::OPC##_MF4_MASK: \
2586 return RISCV::INV##_MF4_MASK; \
2587 case RISCV::OPC##_MF8_MASK: \
2588 return RISCV::INV##_MF8_MASK
2592 return std::nullopt;
2594 return RISCV::FSUB_H;
2596 return RISCV::FSUB_S;
2598 return RISCV::FSUB_D;
2600 return RISCV::FADD_H;
2602 return RISCV::FADD_S;
2604 return RISCV::FADD_D;
2621#undef RVV_OPC_LMUL_MASK_CASE
2622#undef RVV_OPC_LMUL_CASE
2627 bool DoRegPressureReduce) {
2654 bool DoRegPressureReduce) {
2661 DoRegPressureReduce)) {
2667 DoRegPressureReduce)) {
2677 bool DoRegPressureReduce) {
2685 unsigned CombineOpc) {
2692 if (!
MI ||
MI->getParent() != &
MBB ||
MI->getOpcode() != CombineOpc)
2706 unsigned OuterShiftAmt) {
2712 if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
2739 case RISCV::SH1ADD_UW:
2741 case RISCV::SH2ADD_UW:
2743 case RISCV::SH3ADD_UW:
2789 bool DoRegPressureReduce)
const {
2798 DoRegPressureReduce);
2806 return RISCV::FMADD_H;
2808 return RISCV::FMADD_S;
2810 return RISCV::FMADD_D;
2855 bool Mul1IsKill = Mul1.
isKill();
2856 bool Mul2IsKill = Mul2.
isKill();
2857 bool AddendIsKill = Addend.
isKill();
2866 BuildMI(*MF, MergedLoc,
TII->get(FusedOpc), DstReg)
2891 assert(OuterShiftAmt != 0 &&
"Unexpected opcode");
2898 assert(InnerShiftAmt >= OuterShiftAmt &&
"Unexpected shift amount");
2901 switch (InnerShiftAmt - OuterShiftAmt) {
2905 InnerOpc = RISCV::ADD;
2908 InnerOpc = RISCV::SH1ADD;
2911 InnerOpc = RISCV::SH2ADD;
2914 InnerOpc = RISCV::SH3ADD;
2932 InstrIdxForVirtReg.
insert(std::make_pair(NewVR, 0));
2949 DelInstrs, InstrIdxForVirtReg);
2976 for (
const auto &[Index, Operand] :
enumerate(
Desc.operands())) {
2978 unsigned OpType = Operand.OperandType;
2984 ErrInfo =
"Expected an immediate operand.";
2987 int64_t Imm = MO.
getImm();
2993#define CASE_OPERAND_UIMM(NUM) \
2994 case RISCVOp::OPERAND_UIMM##NUM: \
2995 Ok = isUInt<NUM>(Imm); \
2997#define CASE_OPERAND_UIMM_LSB_ZEROS(BITS, SUFFIX) \
2998 case RISCVOp::OPERAND_UIMM##BITS##_LSB##SUFFIX: { \
2999 constexpr size_t NumZeros = sizeof(#SUFFIX) - 1; \
3000 Ok = isShiftedUInt<BITS - NumZeros, NumZeros>(Imm); \
3003#define CASE_OPERAND_SIMM(NUM) \
3004 case RISCVOp::OPERAND_SIMM##NUM: \
3005 Ok = isInt<NUM>(Imm); \
3039 Ok = Imm >= 1 && Imm <= 32;
3042 Ok = Imm >= 1 && Imm <= 64;
3063 Ok = (
isUInt<5>(Imm) && Imm != 0) || Imm == -1;
3074 Ok = Imm >= -15 && Imm <= 16;
3102 Ok = Ok && Imm != 0;
3105 Ok = (
isUInt<5>(Imm) && Imm != 0) || (Imm >= 0xfffe0 && Imm <= 0xfffff);
3108 Ok = Imm >= 0 && Imm <= 10;
3111 Ok = Imm >= 0 && Imm <= 7;
3114 Ok = Imm >= 1 && Imm <= 10;
3117 Ok = Imm >= 2 && Imm <= 14;
3126 Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
3161 Ok = Imm == 1 || Imm == 2 || Imm == 4;
3165 ErrInfo =
"Invalid immediate";
3174 ErrInfo =
"Expected a non-register operand.";
3178 ErrInfo =
"Invalid immediate";
3187 ErrInfo =
"Expected a non-register operand.";
3191 ErrInfo =
"Invalid immediate";
3199 ErrInfo =
"Expected a non-register operand.";
3203 ErrInfo =
"Invalid immediate";
3209 int64_t Imm = MO.
getImm();
3212 ErrInfo =
"Invalid immediate";
3215 }
else if (!MO.
isReg()) {
3216 ErrInfo =
"Expected a register or immediate operand.";
3222 ErrInfo =
"Expected a register or immediate operand.";
3232 if (!
Op.isImm() && !
Op.isReg()) {
3233 ErrInfo =
"Invalid operand type for VL operand";
3236 if (
Op.isReg() &&
Op.getReg().isValid()) {
3239 if (!RISCV::GPRNoX0RegClass.hasSubClassEq(RC)) {
3240 ErrInfo =
"Invalid register class for VL operand";
3245 ErrInfo =
"VL operand w/o SEW operand?";
3251 if (!
MI.getOperand(
OpIdx).isImm()) {
3252 ErrInfo =
"SEW value expected to be an immediate";
3257 ErrInfo =
"Unexpected SEW value";
3260 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3262 ErrInfo =
"Unexpected SEW value";
3268 if (!
MI.getOperand(
OpIdx).isImm()) {
3269 ErrInfo =
"Policy operand expected to be an immediate";
3274 ErrInfo =
"Invalid Policy Value";
3278 ErrInfo =
"policy operand w/o VL operand?";
3286 if (!
MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
3287 ErrInfo =
"policy operand w/o tied operand?";
3294 !
MI.readsRegister(RISCV::FRM,
nullptr)) {
3295 ErrInfo =
"dynamic rounding mode should read FRM";
3317 case RISCV::LD_RV32:
3327 case RISCV::SD_RV32:
3343 int64_t NewOffset = OldOffset + Disp;
3365 "Addressing mode not supported for folding");
3438 case RISCV::LD_RV32:
3441 case RISCV::SD_RV32:
3448 OffsetIsScalable =
false;
3464 if (BaseOps1.
front()->isIdenticalTo(*BaseOps2.
front()))
3472 if (MO1->getAddrSpace() != MO2->getAddrSpace())
3475 auto Base1 = MO1->getValue();
3476 auto Base2 = MO2->getValue();
3477 if (!Base1 || !Base2)
3485 return Base1 == Base2;
3491 int64_t Offset2,
bool OffsetIsScalable2,
unsigned ClusterSize,
3492 unsigned NumBytes)
const {
3495 if (!BaseOps1.
empty() && !BaseOps2.
empty()) {
3500 }
else if (!BaseOps1.
empty() || !BaseOps2.
empty()) {
3506 BaseOps1.
front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
3512 return ClusterSize <= 4 && std::abs(Offset1 - Offset2) <
CacheLineSize;
3562 int64_t OffsetA = 0, OffsetB = 0;
3568 int LowOffset = std::min(OffsetA, OffsetB);
3569 int HighOffset = std::max(OffsetA, OffsetB);
3570 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
3572 LowOffset + (
int)LowWidth.
getValue() <= HighOffset)
3579std::pair<unsigned, unsigned>
3582 return std::make_pair(TF & Mask, TF & ~Mask);
3588 static const std::pair<unsigned, const char *> TargetFlags[] = {
3589 {MO_CALL,
"riscv-call"},
3590 {MO_LO,
"riscv-lo"},
3591 {MO_HI,
"riscv-hi"},
3592 {MO_PCREL_LO,
"riscv-pcrel-lo"},
3593 {MO_PCREL_HI,
"riscv-pcrel-hi"},
3594 {MO_GOT_HI,
"riscv-got-hi"},
3595 {MO_TPREL_LO,
"riscv-tprel-lo"},
3596 {MO_TPREL_HI,
"riscv-tprel-hi"},
3597 {MO_TPREL_ADD,
"riscv-tprel-add"},
3598 {MO_TLS_GOT_HI,
"riscv-tls-got-hi"},
3599 {MO_TLS_GD_HI,
"riscv-tls-gd-hi"},
3600 {MO_TLSDESC_HI,
"riscv-tlsdesc-hi"},
3601 {MO_TLSDESC_LOAD_LO,
"riscv-tlsdesc-load-lo"},
3602 {MO_TLSDESC_ADD_LO,
"riscv-tlsdesc-add-lo"},
3603 {MO_TLSDESC_CALL,
"riscv-tlsdesc-call"}};
3611 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
3624 unsigned &Flags)
const {
3644 return F.getFnAttribute(
"fentry-call").getValueAsBool() ||
3645 F.hasFnAttribute(
"patchable-function-entry");
3650 return MI.readsRegister(RegNo,
TRI) ||
3651 MI.getDesc().hasImplicitUseOfPhysReg(RegNo);
3656 return MI.modifiesRegister(RegNo,
TRI) ||
3657 MI.getDesc().hasImplicitDefOfPhysReg(RegNo);
3661 if (!
MBB.back().isReturn())
3700 if (
C.isAvailableAcrossAndOutOfSeq(
Reg,
TRI) &&
3701 C.isAvailableInsideSeq(
Reg,
TRI)) {
3715 if (
C.back().isReturn() &&
3716 !
C.isAvailableAcrossAndOutOfSeq(TailExpandUseReg, RegInfo)) {
3718 LLVM_DEBUG(
dbgs() <<
"Cannot be outlined between: " <<
C.front() <<
"and "
3720 LLVM_DEBUG(
dbgs() <<
"Because the tail-call register is live across "
3721 "the proposed outlined function call\n");
3727 if (
C.back().isReturn()) {
3729 "The candidate who uses return instruction must be outlined "
3736 if (!
C.isAvailableInsideSeq(RISCV::X5, RegInfo))
3740 if (
C.isAvailableAcrossAndOutOfSeq(RISCV::X5, RegInfo))
3750std::optional<std::unique_ptr<outliner::OutlinedFunction>>
3753 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
3754 unsigned MinRepeats)
const {
3762 if (RepeatedSequenceLocs.size() < MinRepeats)
3763 return std::nullopt;
3767 unsigned InstrSizeCExt =
3769 unsigned CallOverhead = 0, FrameOverhead = 0;
3772 unsigned CFICount = 0;
3773 for (
auto &
I : Candidate) {
3774 if (
I.isCFIInstruction())
3785 std::vector<MCCFIInstruction> CFIInstructions =
3786 C.getMF()->getFrameInstructions();
3788 if (CFICount > 0 && CFICount != CFIInstructions.size())
3789 return std::nullopt;
3797 CallOverhead = 4 + InstrSizeCExt;
3804 FrameOverhead = InstrSizeCExt;
3810 return std::nullopt;
3814 for (
auto &
C : RepeatedSequenceLocs) {
3816 if (
C.isAvailableAcrossAndOutOfSeq(RISCV::X5, RegInfo)) {
3818 unsigned CandCallOverhead = 8;
3823 unsigned CandCallOverhead = InstrSizeCExt + 8 + InstrSizeCExt;
3828 for (
auto &
C : RepeatedSequenceLocs)
3829 C.setCallInfo(MOCI, CallOverhead);
3832 unsigned SequenceSize = 0;
3833 for (
auto &
MI : Candidate)
3836 return std::make_unique<outliner::OutlinedFunction>(
3837 RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
3843 unsigned Flags)
const {
3847 MBB->getParent()->getSubtarget().getRegisterInfo();
3848 const auto &
F =
MI.getMF()->getFunction();
3853 if (
MI.isCFIInstruction())
3861 for (
const auto &MO :
MI.operands()) {
3866 (
MI.getMF()->getTarget().getFunctionSections() ||
F.hasComdat() ||
3867 F.hasSection() ||
F.getSectionPrefix()))
3884 MBB.addLiveIn(RISCV::X5);
3899 .addGlobalAddress(M.getNamedValue(MF.
getName()),
3906 assert(SaveReg &&
"Cannot find an available register to save/restore X5.");
3917 .addGlobalAddress(M.getNamedValue(MF.
getName()), 0,
3933 .addGlobalAddress(M.getNamedValue(MF.
getName()), 0,
3944 return std::nullopt;
3948 if (
MI.getOpcode() == RISCV::ADDI &&
MI.getOperand(1).isReg() &&
3949 MI.getOperand(2).isImm())
3950 return RegImmPair{
MI.getOperand(1).getReg(),
MI.getOperand(2).getImm()};
3952 return std::nullopt;
3960 std::string GenericComment =
3962 if (!GenericComment.empty())
3963 return GenericComment;
3967 return std::string();
3969 std::string Comment;
3976 switch (OpInfo.OperandType) {
3979 unsigned Imm =
Op.getImm();
3984 unsigned Imm =
Op.getImm();
3989 unsigned Imm =
Op.getImm();
3995 unsigned Log2SEW =
Op.getImm();
3996 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
4002 unsigned Policy =
Op.getImm();
4004 "Invalid Policy Value");
4010 if (
Op.isImm() &&
Op.getImm() == -1)
4032#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
4033 RISCV::Pseudo##OP##_##LMUL
4035#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
4036 RISCV::Pseudo##OP##_##LMUL##_MASK
4038#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
4039 CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
4040 case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)
4042#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
4043 CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
4044 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
4045 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
4046 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
4047 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
4048 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)
4050#define CASE_RVV_OPCODE_UNMASK(OP) \
4051 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
4052 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)
4054#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
4055 CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
4056 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
4057 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
4058 case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
4059 case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
4060 case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)
4062#define CASE_RVV_OPCODE_MASK(OP) \
4063 CASE_RVV_OPCODE_MASK_WIDEN(OP): \
4064 case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)
4066#define CASE_RVV_OPCODE_WIDEN(OP) \
4067 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
4068 case CASE_RVV_OPCODE_MASK_WIDEN(OP)
4070#define CASE_RVV_OPCODE(OP) \
4071 CASE_RVV_OPCODE_UNMASK(OP): \
4072 case CASE_RVV_OPCODE_MASK(OP)
4076#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
4077 RISCV::PseudoV##OP##_##TYPE##_##LMUL
4079#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
4080 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
4081 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
4082 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
4083 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
4084 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
4085 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
4086 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)
4089#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
4090 RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW
4092#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
4093 CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
4094 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
4095 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
4096 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)
4098#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
4099 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
4100 case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)
4102#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
4103 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
4104 case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)
4106#define CASE_VFMA_OPCODE_VV(OP) \
4107 CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
4108 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16): \
4109 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
4110 case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)
4112#define CASE_VFMA_SPLATS(OP) \
4113 CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
4114 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16): \
4115 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
4116 case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
4120 unsigned &SrcOpIdx1,
4121 unsigned &SrcOpIdx2)
const {
4123 if (!
Desc.isCommutable())
4126 switch (
MI.getOpcode()) {
4127 case RISCV::TH_MVEQZ:
4128 case RISCV::TH_MVNEZ:
4132 if (
MI.getOperand(2).getReg() == RISCV::X0)
4135 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4136 case RISCV::QC_SELECTIEQ:
4137 case RISCV::QC_SELECTINE:
4138 case RISCV::QC_SELECTIIEQ:
4139 case RISCV::QC_SELECTIINE:
4140 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4141 case RISCV::QC_MVEQ:
4142 case RISCV::QC_MVNE:
4143 case RISCV::QC_MVLT:
4144 case RISCV::QC_MVGE:
4145 case RISCV::QC_MVLTU:
4146 case RISCV::QC_MVGEU:
4147 case RISCV::QC_MVEQI:
4148 case RISCV::QC_MVNEI:
4149 case RISCV::QC_MVLTI:
4150 case RISCV::QC_MVGEI:
4151 case RISCV::QC_MVLTUI:
4152 case RISCV::QC_MVGEUI:
4153 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4);
4154 case RISCV::TH_MULA:
4155 case RISCV::TH_MULAW:
4156 case RISCV::TH_MULAH:
4157 case RISCV::TH_MULS:
4158 case RISCV::TH_MULSW:
4159 case RISCV::TH_MULSH:
4161 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
4162 case RISCV::PseudoCCMOVGPRNoX0:
4163 case RISCV::PseudoCCMOVGPR:
4165 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4206 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
4233 unsigned CommutableOpIdx1 = 1;
4234 unsigned CommutableOpIdx2 = 3;
4235 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4256 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
4258 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
4262 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
4263 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
4269 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
4270 SrcOpIdx2 == CommuteAnyOperandIndex) {
4273 unsigned CommutableOpIdx1 = SrcOpIdx1;
4274 if (SrcOpIdx1 == SrcOpIdx2) {
4277 CommutableOpIdx1 = 1;
4278 }
else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
4280 CommutableOpIdx1 = SrcOpIdx2;
4285 unsigned CommutableOpIdx2;
4286 if (CommutableOpIdx1 != 1) {
4288 CommutableOpIdx2 = 1;
4290 Register Op1Reg =
MI.getOperand(CommutableOpIdx1).getReg();
4295 if (Op1Reg !=
MI.getOperand(2).getReg())
4296 CommutableOpIdx2 = 2;
4298 CommutableOpIdx2 = 3;
4303 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4316#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
4317 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
4318 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
4321#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
4322 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
4323 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
4324 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
4325 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
4326 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
4327 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
4328 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
4331#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
4332 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
4333 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \
4336#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
4337 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
4338 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
4339 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
4340 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)
4342#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
4343 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
4344 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)
4346#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
4347 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
4348 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)
4350#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
4351 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
4352 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16) \
4353 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
4354 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)
4356#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
4357 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
4358 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16) \
4359 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
4360 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
4366 unsigned OpIdx2)
const {
4369 return *
MI.getParent()->getParent()->CloneMachineInstr(&
MI);
4373 switch (
MI.getOpcode()) {
4374 case RISCV::TH_MVEQZ:
4375 case RISCV::TH_MVNEZ: {
4376 auto &WorkingMI = cloneIfNew(
MI);
4377 WorkingMI.setDesc(
get(
MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
4378 : RISCV::TH_MVEQZ));
4382 case RISCV::QC_SELECTIEQ:
4383 case RISCV::QC_SELECTINE:
4384 case RISCV::QC_SELECTIIEQ:
4385 case RISCV::QC_SELECTIINE:
4387 case RISCV::QC_MVEQ:
4388 case RISCV::QC_MVNE:
4389 case RISCV::QC_MVLT:
4390 case RISCV::QC_MVGE:
4391 case RISCV::QC_MVLTU:
4392 case RISCV::QC_MVGEU:
4393 case RISCV::QC_MVEQI:
4394 case RISCV::QC_MVNEI:
4395 case RISCV::QC_MVLTI:
4396 case RISCV::QC_MVGEI:
4397 case RISCV::QC_MVLTUI:
4398 case RISCV::QC_MVGEUI: {
4399 auto &WorkingMI = cloneIfNew(
MI);
4404 case RISCV::PseudoCCMOVGPRNoX0:
4405 case RISCV::PseudoCCMOVGPR: {
4407 unsigned BCC =
MI.getOperand(
MI.getNumExplicitOperands() - 3).getImm();
4409 auto &WorkingMI = cloneIfNew(
MI);
4410 WorkingMI.getOperand(
MI.getNumExplicitOperands() - 3).setImm(BCC);
4434 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4435 assert((OpIdx1 == 3 || OpIdx2 == 3) &&
"Unexpected opcode index");
4437 switch (
MI.getOpcode()) {
4460 auto &WorkingMI = cloneIfNew(
MI);
4461 WorkingMI.setDesc(
get(
Opc));
4471 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4474 if (OpIdx1 == 3 || OpIdx2 == 3) {
4476 switch (
MI.getOpcode()) {
4487 auto &WorkingMI = cloneIfNew(
MI);
4488 WorkingMI.setDesc(
get(
Opc));
4500#undef CASE_VMA_CHANGE_OPCODE_COMMON
4501#undef CASE_VMA_CHANGE_OPCODE_LMULS
4502#undef CASE_VFMA_CHANGE_OPCODE_COMMON
4503#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
4504#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
4505#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
4506#undef CASE_VFMA_CHANGE_OPCODE_VV
4507#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
4509#undef CASE_RVV_OPCODE_UNMASK_LMUL
4510#undef CASE_RVV_OPCODE_MASK_LMUL
4511#undef CASE_RVV_OPCODE_LMUL
4512#undef CASE_RVV_OPCODE_UNMASK_WIDEN
4513#undef CASE_RVV_OPCODE_UNMASK
4514#undef CASE_RVV_OPCODE_MASK_WIDEN
4515#undef CASE_RVV_OPCODE_MASK
4516#undef CASE_RVV_OPCODE_WIDEN
4517#undef CASE_RVV_OPCODE
4519#undef CASE_VMA_OPCODE_COMMON
4520#undef CASE_VMA_OPCODE_LMULS
4521#undef CASE_VFMA_OPCODE_COMMON
4522#undef CASE_VFMA_OPCODE_LMULS_M1
4523#undef CASE_VFMA_OPCODE_LMULS_MF2
4524#undef CASE_VFMA_OPCODE_LMULS_MF4
4525#undef CASE_VFMA_OPCODE_VV
4526#undef CASE_VFMA_SPLATS
4529 switch (
MI.getOpcode()) {
4537 if (
MI.getOperand(1).getReg() == RISCV::X0)
4538 commuteInstruction(
MI);
4540 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4541 MI.getOperand(2).ChangeToImmediate(0);
4542 MI.setDesc(
get(RISCV::ADDI));
4546 if (
MI.getOpcode() == RISCV::XOR &&
4547 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4548 MI.getOperand(1).setReg(RISCV::X0);
4549 MI.getOperand(2).ChangeToImmediate(0);
4550 MI.setDesc(
get(RISCV::ADDI));
4557 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4558 MI.setDesc(
get(RISCV::ADDI));
4564 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4565 MI.getOperand(2).ChangeToImmediate(0);
4566 MI.setDesc(
get(RISCV::ADDI));
4572 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4573 MI.getOperand(2).ChangeToImmediate(0);
4574 MI.setDesc(
get(RISCV::ADDIW));
4581 if (
MI.getOperand(1).getReg() == RISCV::X0)
4582 commuteInstruction(
MI);
4584 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4585 MI.getOperand(2).ChangeToImmediate(0);
4586 MI.setDesc(
get(RISCV::ADDIW));
4591 case RISCV::SH1ADD_UW:
4593 case RISCV::SH2ADD_UW:
4595 case RISCV::SH3ADD_UW:
4597 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4598 MI.removeOperand(1);
4600 MI.setDesc(
get(RISCV::ADDI));
4604 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4605 MI.removeOperand(2);
4606 unsigned Opc =
MI.getOpcode();
4607 if (
Opc == RISCV::SH1ADD_UW ||
Opc == RISCV::SH2ADD_UW ||
4608 Opc == RISCV::SH3ADD_UW) {
4610 MI.setDesc(
get(RISCV::SLLI_UW));
4614 MI.setDesc(
get(RISCV::SLLI));
4628 if (
MI.getOperand(1).getReg() == RISCV::X0 ||
4629 MI.getOperand(2).getReg() == RISCV::X0) {
4630 MI.getOperand(1).setReg(RISCV::X0);
4631 MI.getOperand(2).ChangeToImmediate(0);
4632 MI.setDesc(
get(RISCV::ADDI));
4638 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4639 MI.getOperand(2).setImm(0);
4640 MI.setDesc(
get(RISCV::ADDI));
4648 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4649 MI.getOperand(2).ChangeToImmediate(0);
4650 MI.setDesc(
get(RISCV::ADDI));
4654 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4655 MI.getOperand(2).ChangeToImmediate(0);
4656 MI.setDesc(
get(RISCV::ADDI));
4664 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4665 MI.getOperand(2).ChangeToImmediate(0);
4666 MI.setDesc(
get(RISCV::ADDI));
4676 case RISCV::SLLI_UW:
4678 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4679 MI.getOperand(2).setImm(0);
4680 MI.setDesc(
get(RISCV::ADDI));
4688 if (
MI.getOperand(1).getReg() == RISCV::X0 &&
4689 MI.getOperand(2).getReg() == RISCV::X0) {
4690 MI.getOperand(2).ChangeToImmediate(0);
4691 MI.setDesc(
get(RISCV::ADDI));
4695 if (
MI.getOpcode() == RISCV::ADD_UW &&
4696 MI.getOperand(1).getReg() == RISCV::X0) {
4697 MI.removeOperand(1);
4699 MI.setDesc(
get(RISCV::ADDI));
4705 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4706 MI.getOperand(2).setImm(
MI.getOperand(2).getImm() != 0);
4707 MI.setDesc(
get(RISCV::ADDI));
4713 case RISCV::ZEXT_H_RV32:
4714 case RISCV::ZEXT_H_RV64:
4717 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4719 MI.setDesc(
get(RISCV::ADDI));
4728 if (
MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4729 MI.getOperand(2).ChangeToImmediate(0);
4730 MI.setDesc(
get(RISCV::ADDI));
4737 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4739 MI.removeOperand(0);
4740 MI.insert(
MI.operands_begin() + 1, {MO0});
4745 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4747 MI.removeOperand(0);
4748 MI.insert(
MI.operands_begin() + 1, {MO0});
4749 MI.setDesc(
get(RISCV::BNE));
4754 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4756 MI.removeOperand(0);
4757 MI.insert(
MI.operands_begin() + 1, {MO0});
4758 MI.setDesc(
get(RISCV::BEQ));
4766#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
4767 RISCV::PseudoV##OP##_##LMUL##_TIED
4769#define CASE_WIDEOP_OPCODE_LMULS(OP) \
4770 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
4771 case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
4772 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
4773 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
4774 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
4775 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
4777#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
4778 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
4779 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
4782#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4783 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
4784 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
4785 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
4786 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
4787 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
4788 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
4791#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
4792 RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED
4794#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \
4795 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4796 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4797 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
4798 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4799 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
4800 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4801 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
4802 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
4803 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32) \
4805#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
4806 case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
4807 NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
4810#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4811 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4812 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4813 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
4814 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4815 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
4816 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4817 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
4818 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
4819 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
4821#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP) \
4822 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4823 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4824 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4825 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4826 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)
4828#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP) \
4829 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4830 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4831 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4832 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4833 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)
4840 switch (
MI.getOpcode()) {
4848 MI.getNumExplicitOperands() == 7 &&
4849 "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
4856 switch (
MI.getOpcode()) {
4868 .
add(
MI.getOperand(0))
4870 .
add(
MI.getOperand(1))
4871 .
add(
MI.getOperand(2))
4872 .
add(
MI.getOperand(3))
4873 .
add(
MI.getOperand(4))
4874 .
add(
MI.getOperand(5))
4875 .
add(
MI.getOperand(6));
4884 MI.getNumExplicitOperands() == 6);
4891 switch (
MI.getOpcode()) {
4903 .
add(
MI.getOperand(0))
4905 .
add(
MI.getOperand(1))
4906 .
add(
MI.getOperand(2))
4907 .
add(
MI.getOperand(3))
4908 .
add(
MI.getOperand(4))
4909 .
add(
MI.getOperand(5));
4916 unsigned NumOps =
MI.getNumOperands();
4919 if (
Op.isReg() &&
Op.isKill())
4927 if (
MI.getOperand(0).isEarlyClobber()) {
4941#undef CASE_WIDEOP_OPCODE_COMMON
4942#undef CASE_WIDEOP_OPCODE_LMULS
4943#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
4944#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
4945#undef CASE_FP_WIDEOP_OPCODE_COMMON
4946#undef CASE_FP_WIDEOP_OPCODE_LMULS
4947#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
4948#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
4957 if (ShiftAmount == 0)
4963 }
else if (
int ShXAmount, ShiftAmount;
4965 (ShXAmount =
isShifted359(Amount, ShiftAmount)) != 0) {
4968 switch (ShXAmount) {
4970 Opc = RISCV::SH1ADD;
4973 Opc = RISCV::SH2ADD;
4976 Opc = RISCV::SH3ADD;
5012 }
else if (
STI.hasStdExtZmmul()) {
5022 for (
uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
5023 if (Amount & (1U << ShiftAmount)) {
5027 .
addImm(ShiftAmount - PrevShiftAmount)
5029 if (Amount >> (ShiftAmount + 1)) {
5043 PrevShiftAmount = ShiftAmount;
5046 assert(Acc &&
"Expected valid accumulator");
5056 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
5064 ?
STI.getTailDupAggressiveThreshold()
5071 unsigned Opcode =
MI.getOpcode();
5072 if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
5081 return MI.isCopy() &&
MI.getOperand(0).getReg().isPhysical() &&
5083 TRI->getMinimalPhysRegClass(
MI.getOperand(0).getReg()));
5086std::optional<std::pair<unsigned, unsigned>>
5090 return std::nullopt;
5091 case RISCV::PseudoVSPILL2_M1:
5092 case RISCV::PseudoVRELOAD2_M1:
5093 return std::make_pair(2u, 1u);
5094 case RISCV::PseudoVSPILL2_M2:
5095 case RISCV::PseudoVRELOAD2_M2:
5096 return std::make_pair(2u, 2u);
5097 case RISCV::PseudoVSPILL2_M4:
5098 case RISCV::PseudoVRELOAD2_M4:
5099 return std::make_pair(2u, 4u);
5100 case RISCV::PseudoVSPILL3_M1:
5101 case RISCV::PseudoVRELOAD3_M1:
5102 return std::make_pair(3u, 1u);
5103 case RISCV::PseudoVSPILL3_M2:
5104 case RISCV::PseudoVRELOAD3_M2:
5105 return std::make_pair(3u, 2u);
5106 case RISCV::PseudoVSPILL4_M1:
5107 case RISCV::PseudoVRELOAD4_M1:
5108 return std::make_pair(4u, 1u);
5109 case RISCV::PseudoVSPILL4_M2:
5110 case RISCV::PseudoVRELOAD4_M2:
5111 return std::make_pair(4u, 2u);
5112 case RISCV::PseudoVSPILL5_M1:
5113 case RISCV::PseudoVRELOAD5_M1:
5114 return std::make_pair(5u, 1u);
5115 case RISCV::PseudoVSPILL6_M1:
5116 case RISCV::PseudoVRELOAD6_M1:
5117 return std::make_pair(6u, 1u);
5118 case RISCV::PseudoVSPILL7_M1:
5119 case RISCV::PseudoVRELOAD7_M1:
5120 return std::make_pair(7u, 1u);
5121 case RISCV::PseudoVSPILL8_M1:
5122 case RISCV::PseudoVRELOAD8_M1:
5123 return std::make_pair(8u, 1u);
5128 int16_t MI1FrmOpIdx =
5129 RISCV::getNamedOperandIdx(MI1.
getOpcode(), RISCV::OpName::frm);
5130 int16_t MI2FrmOpIdx =
5131 RISCV::getNamedOperandIdx(MI2.
getOpcode(), RISCV::OpName::frm);
5132 if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
5139std::optional<unsigned>
5143 return std::nullopt;
5146 case RISCV::VSLL_VX:
5147 case RISCV::VSRL_VX:
5148 case RISCV::VSRA_VX:
5150 case RISCV::VSSRL_VX:
5151 case RISCV::VSSRA_VX:
5153 case RISCV::VROL_VX:
5154 case RISCV::VROR_VX:
5159 case RISCV::VNSRL_WX:
5160 case RISCV::VNSRA_WX:
5162 case RISCV::VNCLIPU_WX:
5163 case RISCV::VNCLIP_WX:
5165 case RISCV::VWSLL_VX:
5170 case RISCV::VADD_VX:
5171 case RISCV::VSUB_VX:
5172 case RISCV::VRSUB_VX:
5174 case RISCV::VWADDU_VX:
5175 case RISCV::VWSUBU_VX:
5176 case RISCV::VWADD_VX:
5177 case RISCV::VWSUB_VX:
5178 case RISCV::VWADDU_WX:
5179 case RISCV::VWSUBU_WX:
5180 case RISCV::VWADD_WX:
5181 case RISCV::VWSUB_WX:
5183 case RISCV::VADC_VXM:
5184 case RISCV::VADC_VIM:
5185 case RISCV::VMADC_VXM:
5186 case RISCV::VMADC_VIM:
5187 case RISCV::VMADC_VX:
5188 case RISCV::VSBC_VXM:
5189 case RISCV::VMSBC_VXM:
5190 case RISCV::VMSBC_VX:
5192 case RISCV::VAND_VX:
5194 case RISCV::VXOR_VX:
5196 case RISCV::VMSEQ_VX:
5197 case RISCV::VMSNE_VX:
5198 case RISCV::VMSLTU_VX:
5199 case RISCV::VMSLT_VX:
5200 case RISCV::VMSLEU_VX:
5201 case RISCV::VMSLE_VX:
5202 case RISCV::VMSGTU_VX:
5203 case RISCV::VMSGT_VX:
5205 case RISCV::VMINU_VX:
5206 case RISCV::VMIN_VX:
5207 case RISCV::VMAXU_VX:
5208 case RISCV::VMAX_VX:
5210 case RISCV::VMUL_VX:
5211 case RISCV::VMULH_VX:
5212 case RISCV::VMULHU_VX:
5213 case RISCV::VMULHSU_VX:
5215 case RISCV::VDIVU_VX:
5216 case RISCV::VDIV_VX:
5217 case RISCV::VREMU_VX:
5218 case RISCV::VREM_VX:
5220 case RISCV::VWMUL_VX:
5221 case RISCV::VWMULU_VX:
5222 case RISCV::VWMULSU_VX:
5224 case RISCV::VMACC_VX:
5225 case RISCV::VNMSAC_VX:
5226 case RISCV::VMADD_VX:
5227 case RISCV::VNMSUB_VX:
5229 case RISCV::VWMACCU_VX:
5230 case RISCV::VWMACC_VX:
5231 case RISCV::VWMACCSU_VX:
5232 case RISCV::VWMACCUS_VX:
5234 case RISCV::VMERGE_VXM:
5236 case RISCV::VMV_V_X:
5238 case RISCV::VSADDU_VX:
5239 case RISCV::VSADD_VX:
5240 case RISCV::VSSUBU_VX:
5241 case RISCV::VSSUB_VX:
5243 case RISCV::VAADDU_VX:
5244 case RISCV::VAADD_VX:
5245 case RISCV::VASUBU_VX:
5246 case RISCV::VASUB_VX:
5248 case RISCV::VSMUL_VX:
5250 case RISCV::VMV_S_X:
5252 case RISCV::VANDN_VX:
5253 return 1U << Log2SEW;
5259 RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
5262 return RVV->BaseInstr;
5272 unsigned Scaled = Log2SEW + (DestEEW - 1);
5286 return std::nullopt;
5291 assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
5292 (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
5293 if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
5294 LHS.getReg() == RHS.getReg())
5298 if (LHS.isImm() && LHS.getImm() == 0)
5304 if (!LHSImm || !RHSImm)
5306 return LHSImm <= RHSImm;
5318 : LHS(LHS), RHS(RHS),
Cond(
Cond.begin(),
Cond.end()) {}
5320 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
5330 std::optional<bool> createTripCountGreaterCondition(
5331 int TC, MachineBasicBlock &
MBB,
5332 SmallVectorImpl<MachineOperand> &CondParam)
override {
5340 void setPreheader(MachineBasicBlock *NewPreheader)
override {}
5342 void adjustTripCount(
int TripCountAdjust)
override {}
5346std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
5354 if (
TBB == LoopBB && FBB == LoopBB)
5361 assert((
TBB == LoopBB || FBB == LoopBB) &&
5362 "The Loop must be a single-basic-block loop");
5373 if (!Reg.isVirtual())
5380 if (LHS && LHS->isPHI())
5382 if (RHS && RHS->isPHI())
5385 return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS,
Cond);
5391 Opc = RVVMCOpcode ? RVVMCOpcode :
Opc;
5408 case RISCV::FDIV_H_INX:
5409 case RISCV::FDIV_S_INX:
5410 case RISCV::FDIV_D_INX:
5411 case RISCV::FDIV_D_IN32X:
5412 case RISCV::FSQRT_H:
5413 case RISCV::FSQRT_S:
5414 case RISCV::FSQRT_D:
5415 case RISCV::FSQRT_H_INX:
5416 case RISCV::FSQRT_S_INX:
5417 case RISCV::FSQRT_D_INX:
5418 case RISCV::FSQRT_D_IN32X:
5420 case RISCV::VDIV_VV:
5421 case RISCV::VDIV_VX:
5422 case RISCV::VDIVU_VV:
5423 case RISCV::VDIVU_VX:
5424 case RISCV::VREM_VV:
5425 case RISCV::VREM_VX:
5426 case RISCV::VREMU_VV:
5427 case RISCV::VREMU_VX:
5429 case RISCV::VFDIV_VV:
5430 case RISCV::VFDIV_VF:
5431 case RISCV::VFRDIV_VF:
5432 case RISCV::VFSQRT_V:
5433 case RISCV::VFRSQRT7_V:
5439 if (
MI->getOpcode() != TargetOpcode::COPY)
5444 Register DstReg =
MI->getOperand(0).getReg();
5447 :
TRI->getMinimalPhysRegClass(DstReg);
5457 auto [RCLMul, RCFractional] =
5459 return (!RCFractional && LMul == RCLMul) || (RCFractional && LMul == 1);
5463 if (
MI.memoperands_empty())
5478 if (MO.getReg().isPhysical())
5481 if (MO.getReg().isPhysical())
5483 bool SawStore =
false;
5486 if (
II->definesRegister(PhysReg,
nullptr))
5489 if (
II->definesRegister(PhysReg,
nullptr) ||
5490 II->readsRegister(PhysReg,
nullptr))
5492 if (
II->mayStore()) {
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg, unsigned NumRegs)
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
@ MachineOutlinerRegSave
Emit a call and tail-call.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
SmallVector< int16_t, MAX_SRC_OPERANDS_NUM > OperandIndices
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Register const TargetRegisterInfo * TRI
Promote Memory to Register
This file provides utility analysis objects describing memory locations.
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static bool cannotInsertTailCall(const MachineBasicBlock &MBB)
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP)
#define CASE_FP_WIDEOP_OPCODE_LMULS(OP)
#define CASE_OPERAND_SIMM(NUM)
static std::optional< unsigned > getLMULForRVVWholeLoadStore(unsigned Opcode)
#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern)
std::optional< unsigned > getFoldedOpcode(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, const RISCVSubtarget &ST)
#define RVV_OPC_LMUL_CASE(OPC, INV)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs)
static unsigned getAddendOperandIdx(unsigned Pattern)
#define CASE_RVV_OPCODE_UNMASK(OP)
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
#define CASE_VFMA_SPLATS(OP)
unsigned getPredicatedOpcode(unsigned Opcode)
#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP)
#define CASE_WIDEOP_OPCODE_LMULS(OP)
static bool isMIReadsReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, MCRegister RegNo)
#define OPCODE_LMUL_MASK_CASE(OPC)
#define CASE_OPERAND_UIMM_LSB_ZEROS(BITS, SUFFIX)
static bool isFSUB(unsigned Opc)
#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
#define CASE_RVV_OPCODE(OP)
static std::optional< int64_t > getEffectiveImm(const MachineOperand &MO)
#define CASE_VFMA_OPCODE_VV(OP)
static cl::opt< bool > OutlinerEnableRegSave("riscv-outliner-regsave", cl::init(true), cl::Hidden, cl::desc("Enable RegSave strategy in machine outliner (save X5 to a " "temporary register when X5 is live across outlined calls)."))
MachineOutlinerConstructionID
#define CASE_RVV_OPCODE_WIDEN(OP)
static unsigned getLoadPredicatedOpcode(unsigned Opcode)
static unsigned getSHXADDUWShiftAmount(unsigned Opc)
#define CASE_VMA_OPCODE_LMULS(OP, TYPE)
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVVType::VLMUL LMul)
static bool isFMUL(unsigned Opc)
static unsigned getInverseXqcicmOpcode(unsigned Opcode)
static bool getFPPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
#define OPCODE_LMUL_CASE(OPC)
#define CASE_OPERAND_UIMM(NUM)
static Register findRegisterToSaveX5To(outliner::Candidate &C, const TargetRegisterInfo &TRI)
static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB, const MachineOperand &MO, unsigned OuterShiftAmt)
Utility routine that checks if.
static bool isCandidatePatchable(const MachineBasicBlock &MBB)
static bool isFADD(unsigned Opc)
static void genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg)
static bool isLoadImm(const MachineInstr *MI, int64_t &Imm)
static bool isMIModifiesReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, MCRegister RegNo)
#define CASE_RVV_OPCODE_LMUL(OP, LMUL)
static bool canCombineFPFusedMultiply(const MachineInstr &Root, const MachineOperand &MO, bool DoRegPressureReduce)
static bool getSHXADDPatterns(const MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns)
static bool getFPFusedMultiplyPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
static cl::opt< MachineTraceStrategy > ForceMachineCombinerStrategy("riscv-force-machine-combiner-strategy", cl::Hidden, cl::desc("Force machine combiner to use a specific strategy for machine " "trace metrics evaluation."), cl::init(MachineTraceStrategy::TS_NumStrategies), cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local", "Local strategy."), clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr", "MinInstrCount strategy.")))
static unsigned getSHXADDShiftAmount(unsigned Opc)
#define CASE_RVV_OPCODE_MASK(OP)
#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)
static MachineInstr * canFoldAsPredicatedOp(Register Reg, const MachineRegisterInfo &MRI, const TargetInstrInfo *TII, const RISCVSubtarget &STI)
Identify instructions that can be folded into a CCMOV instruction, and return the defining instructio...
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, unsigned CombineOpc=0)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
bool empty() const
empty - Check if the array is empty.
static LLVM_ABI DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)
Attempts to merge LocA and LocB into a single location; see DebugLoc::getMergedLocation for more deta...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LiveInterval - This class represents the liveness of a register, or stack slot.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
LLVM_ABI void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
MCInstBuilder & addReg(MCRegister Reg)
Add a new register operand.
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
This holds information about one operand of a machine instruction, indicating the register class for ...
Wrapper class representing physical registers. Should be passed by value.
const FeatureBitset & getFeatureBits() const
MachineInstrBundleIterator< const MachineInstr > const_iterator
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineInstrBundleIterator< const MachineInstr, true > const_reverse_iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setStackID(int ObjectIdx, uint8_t ID)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isReturn(QueryType Type=AnyInBundle) const
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
filtered_mop_range all_defs()
Returns an iterator range over all operands that are (explicit or implicit) register defs.
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
LLVM_ABI bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
filtered_mop_range all_uses()
Returns an iterator range over all operands that are (explicit or implicit) register uses.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
bool isNonTemporal() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
@ MO_Immediate
Immediate operand.
@ MO_Register
Register operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI void clearKillFlags(Register Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
bool isReserved(MCRegister PhysReg) const
isReserved - Returns true when PhysReg is a reserved register.
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool hasOneUse(Register RegNo) const
hasOneUse - Return true if there is exactly one instruction using the specified register.
LLVM_ABI void clearVirtRegs()
clearVirtRegs - Remove all virtual registers (after physreg assignment).
const TargetRegisterInfo * getTargetRegisterInfo() const
LLVM_ABI bool isConstantPhysReg(MCRegister PhysReg) const
Returns true if PhysReg is unallocatable and constant throughout the function.
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
LLVM_ABI void replaceRegWith(Register FromReg, Register ToReg)
replaceRegWith - Replace all instances of FromReg with ToReg in the machine function.
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
A Module instance is used to store all the information related to an LLVM module.
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
static bool isSafeToMove(const MachineInstr &From, const MachineBasicBlock::iterator &To)
Return true if moving From down to To won't cause any physical register reads or writes to be clobber...
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const override
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
void mulImm(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, uint32_t Amt, MachineInstr::MIFlag Flag) const
Generate code to multiply the value in DestReg by Amt - handles all the common optimizations for this...
static bool isPairableLdStInstOpc(unsigned Opc)
Return true if pairing the given load or store may be paired with another.
RISCVInstrInfo(const RISCVSubtarget &STI)
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const override
static bool isLdStSafeToPair(const MachineInstr &LdSt, const TargetRegisterInfo *TRI)
void copyPhysRegVector(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc, const TargetRegisterClass *RegClass) const
bool isReMaterializableImpl(const MachineInstr &MI) const override
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool isVRegCopy(const MachineInstr *MI, unsigned LMul=0) const
Return true if MI is a COPY to a vector register of a specific LMul, or any kind of vector registers ...
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const
unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const override
void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const override
const RISCVSubtarget & STI
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
std::optional< unsigned > getInverseOpcode(unsigned Opcode) const override
bool simplifyInstruction(MachineInstr &MI) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
MachineTraceStrategy getMachineCombinerTraceStrategy() const override
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MCInst getNop() const override
bool analyzeCandidate(outliner::Candidate &C) const
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
bool requiresNTLHint(const MachineInstr &MI) const
Return true if the instruction requires an NTL hint to be emitted.
void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, int FrameIndex, MachineInstr *&CopyMI, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc)
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
CombinerObjective getCombinerObjective(unsigned Pattern) const override
bool isHighLatencyDef(int Opc) const override
static bool evaluateCondBranch(RISCVCC::CondCode CC, int64_t C0, int64_t C1)
Return the result of the evaluation of C0 CC C1, where CC is a RISCVCC::CondCode.
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
bool optimizeCondBranch(MachineInstr &MI) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
static bool isFromLoadImm(const MachineRegisterInfo &MRI, const MachineOperand &Op, int64_t &Imm)
Return true if the operand is a load immediate instruction and sets Imm to the immediate value.
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
int getBranchRelaxationScratchFrameIndex() const
const RISCVRegisterInfo * getRegisterInfo() const override
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
SlotIndex - An opaque wrapper around machine indexes.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
MI-level Statepoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given statepoint should emit.
StringRef - Represent a constant reference to a string, i.e.
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const
The returned array encodes the operand index for each parameter because the operands may be commuted;...
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when \P Inst has reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo & getMCAsmInfo() const
Return target specific asm information.
const uint8_t TSFlags
Configurable target specific flags.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Target - Wrapper for Target specific information.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getZero()
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
self_iterator getIterator()
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
CondCode getInverseBranchCondition(CondCode)
unsigned getInverseBranchOpcode(unsigned BCC)
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
static bool isValidRoundingMode(unsigned Mode)
static StringRef roundingModeToString(RoundingMode RndMode)
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
static bool usesMaskPolicy(uint64_t TSFlags)
static bool hasRoundModeOp(uint64_t TSFlags)
static unsigned getVLOpNum(const MCInstrDesc &Desc)
static bool hasVLOp(uint64_t TSFlags)
static MCRegister getTailExpandUseRegNo(const FeatureBitset &FeatureBits)
static int getFRMOpNum(const MCInstrDesc &Desc)
static int getVXRMOpNum(const MCInstrDesc &Desc)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool usesVXRM(uint64_t TSFlags)
static bool isRVVWideningReduction(uint64_t TSFlags)
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
SmallVector< Inst, 8 > InstSeq
@ OPERAND_UIMMLOG2XLEN_NONZERO
@ OPERAND_SIMM12_LSB00000
@ OPERAND_FIRST_RISCV_IMM
@ OPERAND_UIMM10_LSB00_NONZERO
@ OPERAND_SIMM10_LSB0000_NONZERO
@ OPERAND_ATOMIC_ORDERING
static unsigned getNF(uint8_t TSFlags)
static RISCVVType::VLMUL getLMul(uint8_t TSFlags)
static bool isTailAgnostic(unsigned VType)
LLVM_ABI void printXSfmmVType(unsigned VType, raw_ostream &OS)
LLVM_ABI std::pair< unsigned, bool > decodeVLMUL(VLMUL VLMul)
static bool isValidSEW(unsigned SEW)
static bool isValidVType(unsigned VType)
LLVM_ABI void printVType(unsigned VType, raw_ostream &OS)
static bool isValidXSfmmVType(unsigned VTypeI)
static unsigned getSEW(unsigned VType)
static VLMUL getVLMUL(unsigned VType)
static bool isValidRoundingMode(unsigned Mode)
static StringRef roundingModeToString(RoundingMode RndMode)
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2)
bool isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS)
Given two VL operands, do we know that LHS <= RHS?
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
unsigned getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW)
std::optional< unsigned > getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
static constexpr unsigned RVVBitsPerBlock
bool isRVVSpill(const MachineInstr &MI)
static constexpr unsigned RVVBytesPerBlock
static constexpr int64_t VLMaxSentinel
bool isVectorCopy(const TargetRegisterInfo *TRI, const MachineInstr &MI)
Return true if MI is a copy that will be lowered to one or more vmvNr.vs.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
@ TS_Local
Select the trace that contains only the current basic block.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static const MachineMemOperand::Flags MONontemporalBit1
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
RegState
Flags to represent properties of register accesses.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ Define
Register definition.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
bool isValidAtomicOrdering(Int I)
constexpr RegState getKillRegState(bool B)
static const MachineMemOperand::Flags MONontemporalBit0
constexpr RegState getDeadRegState(bool B)
unsigned M1(unsigned Val)
constexpr bool has_single_bit(T Value) noexcept
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
constexpr RegState getRenamableRegState(bool B)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr RegState getDefRegState(bool B)
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
CodeGenOptLevel
Code generation optimization level.
int isShifted359(T Value, int &Shift)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr bool isShiftedInt(int64_t x)
Checks if a signed integer is an N bit number shifted left by S.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
constexpr bool isShiftedUInt(uint64_t x)
Checks if an unsigned integer is an N bit number shifted left by S.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static bool isRVVRegClass(const TargetRegisterClass *RC)
Used to describe a register and immediate addition.
An individual sequence of instructions to be replaced with a call to an outlined function.
MachineFunction * getMF() const
The information necessary to create an outlined function for some class of candidate.