41#define GEN_CHECK_COMPRESS_INSTR
42#include "RISCVGenCompressInstEmitter.inc"
44#define GET_INSTRINFO_CTOR_DTOR
45#include "RISCVGenInstrInfo.inc"
47#define DEBUG_TYPE "riscv-instr-info"
49 "Number of registers within vector register groups spilled");
51 "Number of registers within vector register groups reloaded");
55 cl::desc(
"Prefer whole register move for vector registers."));
58 "riscv-force-machine-combiner-strategy",
cl::Hidden,
59 cl::desc(
"Force machine combiner to use a specific strategy for machine "
60 "trace metrics evaluation."),
65 "MinInstrCount strategy.")));
71#define GET_RISCVVPseudosTable_IMPL
72#include "RISCVGenSearchableTables.inc"
78#define GET_RISCVMaskedPseudosTable_IMPL
79#include "RISCVGenSearchableTables.inc"
85 RISCV::ADJCALLSTACKUP),
88#define GET_INSTRINFO_HELPERS
89#include "RISCVGenInstrInfo.inc"
92 if (
STI.hasStdExtZca())
101 int &FrameIndex)
const {
111 case RISCV::VL1RE8_V:
112 case RISCV::VL1RE16_V:
113 case RISCV::VL1RE32_V:
114 case RISCV::VL1RE64_V:
117 case RISCV::VL2RE8_V:
118 case RISCV::VL2RE16_V:
119 case RISCV::VL2RE32_V:
120 case RISCV::VL2RE64_V:
123 case RISCV::VL4RE8_V:
124 case RISCV::VL4RE16_V:
125 case RISCV::VL4RE32_V:
126 case RISCV::VL4RE64_V:
129 case RISCV::VL8RE8_V:
130 case RISCV::VL8RE16_V:
131 case RISCV::VL8RE32_V:
132 case RISCV::VL8RE64_V:
140 switch (
MI.getOpcode()) {
164 case RISCV::VL1RE8_V:
165 case RISCV::VL2RE8_V:
166 case RISCV::VL4RE8_V:
167 case RISCV::VL8RE8_V:
168 if (!
MI.getOperand(1).isFI())
170 FrameIndex =
MI.getOperand(1).getIndex();
173 return MI.getOperand(0).getReg();
176 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
177 MI.getOperand(2).getImm() == 0) {
178 FrameIndex =
MI.getOperand(1).getIndex();
179 return MI.getOperand(0).getReg();
186 int &FrameIndex)
const {
194 switch (
MI.getOpcode()) {
219 if (!
MI.getOperand(1).isFI())
221 FrameIndex =
MI.getOperand(1).getIndex();
224 return MI.getOperand(0).getReg();
227 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
228 MI.getOperand(2).getImm() == 0) {
229 FrameIndex =
MI.getOperand(1).getIndex();
230 return MI.getOperand(0).getReg();
240 case RISCV::VFMV_V_F:
243 case RISCV::VFMV_S_F:
245 return MI.getOperand(1).isUndef();
253 return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
264 assert(
MBBI->getOpcode() == TargetOpcode::COPY &&
265 "Unexpected COPY instruction.");
269 bool FoundDef =
false;
270 bool FirstVSetVLI =
false;
271 unsigned FirstSEW = 0;
274 if (
MBBI->isMetaInstruction())
277 if (RISCVInstrInfo::isVectorConfigInstr(*
MBBI)) {
287 unsigned FirstVType =
MBBI->getOperand(2).getImm();
292 if (FirstLMul != LMul)
297 if (!RISCVInstrInfo::isVLPreservingConfig(*
MBBI))
303 unsigned VType =
MBBI->getOperand(2).getImm();
321 }
else if (
MBBI->isInlineAsm() ||
MBBI->isCall()) {
323 }
else if (
MBBI->getNumDefs()) {
326 if (
MBBI->modifiesRegister(RISCV::VL,
nullptr))
332 if (!MO.isReg() || !MO.isDef())
334 if (!FoundDef &&
TRI->regsOverlap(MO.getReg(), SrcReg)) {
349 if (MO.getReg() != SrcReg)
390 uint16_t SrcEncoding =
TRI->getEncodingValue(SrcReg);
391 uint16_t DstEncoding =
TRI->getEncodingValue(DstReg);
393 assert(!Fractional &&
"It is impossible be fractional lmul here.");
394 unsigned NumRegs = NF * LMulVal;
400 SrcEncoding += NumRegs - 1;
401 DstEncoding += NumRegs - 1;
407 unsigned,
unsigned> {
415 uint16_t Diff = DstEncoding - SrcEncoding;
416 if (
I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
417 DstEncoding % 8 == 7)
419 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
420 if (
I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
421 DstEncoding % 4 == 3)
423 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
424 if (
I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
425 DstEncoding % 2 == 1)
427 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
430 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
435 if (
I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
437 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
438 if (
I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
440 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
441 if (
I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
443 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
446 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
449 while (
I != NumRegs) {
454 auto [LMulCopied, RegClass,
Opc, VVOpc, VIOpc] =
455 GetCopyInfo(SrcEncoding, DstEncoding);
459 if (LMul == LMulCopied &&
462 if (DefMBBI->getOpcode() == VIOpc)
469 RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
471 RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
479 MIB = MIB.add(DefMBBI->getOperand(2));
487 MIB.addImm(Log2SEW ? Log2SEW : 3);
499 SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
500 DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
509 bool RenamableDest,
bool RenamableSrc)
const {
513 if (RISCV::GPRRegClass.
contains(DstReg, SrcReg)) {
520 if (RISCV::GPRF16RegClass.
contains(DstReg, SrcReg)) {
526 if (RISCV::GPRF32RegClass.
contains(DstReg, SrcReg)) {
532 if (RISCV::GPRPairRegClass.
contains(DstReg, SrcReg)) {
534 if (
STI.hasStdExtZdinx()) {
543 if (
STI.hasStdExtP()) {
552 MCRegister EvenReg =
TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
553 MCRegister OddReg =
TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);
555 if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)
557 assert(DstReg != RISCV::X0_Pair &&
"Cannot write to X0_Pair");
561 TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
562 .
addReg(EvenReg, KillFlag)
565 TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
572 if (RISCV::VCSRRegClass.
contains(SrcReg) &&
573 RISCV::GPRRegClass.
contains(DstReg)) {
575 .
addImm(RISCVSysReg::lookupSysRegByName(
TRI->getName(SrcReg))->Encoding)
580 if (RISCV::FPR16RegClass.
contains(DstReg, SrcReg)) {
582 if (
STI.hasStdExtZfh()) {
583 Opc = RISCV::FSGNJ_H;
586 (
STI.hasStdExtZfhmin() ||
STI.hasStdExtZfbfmin()) &&
587 "Unexpected extensions");
589 DstReg =
TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
590 &RISCV::FPR32RegClass);
591 SrcReg =
TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
592 &RISCV::FPR32RegClass);
593 Opc = RISCV::FSGNJ_S;
597 .
addReg(SrcReg, KillFlag);
601 if (RISCV::FPR32RegClass.
contains(DstReg, SrcReg)) {
604 .
addReg(SrcReg, KillFlag);
608 if (RISCV::FPR64RegClass.
contains(DstReg, SrcReg)) {
611 .
addReg(SrcReg, KillFlag);
615 if (RISCV::FPR32RegClass.
contains(DstReg) &&
616 RISCV::GPRRegClass.
contains(SrcReg)) {
618 .
addReg(SrcReg, KillFlag);
622 if (RISCV::GPRRegClass.
contains(DstReg) &&
623 RISCV::FPR32RegClass.
contains(SrcReg)) {
625 .
addReg(SrcReg, KillFlag);
629 if (RISCV::FPR64RegClass.
contains(DstReg) &&
630 RISCV::GPRRegClass.
contains(SrcReg)) {
631 assert(
STI.getXLen() == 64 &&
"Unexpected GPR size");
633 .
addReg(SrcReg, KillFlag);
637 if (RISCV::GPRRegClass.
contains(DstReg) &&
638 RISCV::FPR64RegClass.
contains(SrcReg)) {
639 assert(
STI.getXLen() == 64 &&
"Unexpected GPR size");
641 .
addReg(SrcReg, KillFlag);
647 TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
658 Register SrcReg,
bool IsKill,
int FI,
667 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
668 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
670 }
else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
671 Opcode = RISCV::SH_INX;
672 }
else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
673 Opcode = RISCV::SW_INX;
674 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
675 if (!
STI.is64Bit() &&
STI.hasStdExtZilsd() &&
676 Alignment >=
STI.getZilsdAlign()) {
677 Opcode = RISCV::SD_RV32;
679 Opcode = RISCV::PseudoRV32ZdinxSD;
681 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
683 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
685 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
687 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
688 Opcode = RISCV::VS1R_V;
689 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
690 Opcode = RISCV::VS2R_V;
691 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
692 Opcode = RISCV::VS4R_V;
693 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
694 Opcode = RISCV::VS8R_V;
695 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
696 Opcode = RISCV::PseudoVSPILL2_M1;
697 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
698 Opcode = RISCV::PseudoVSPILL2_M2;
699 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
700 Opcode = RISCV::PseudoVSPILL2_M4;
701 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
702 Opcode = RISCV::PseudoVSPILL3_M1;
703 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
704 Opcode = RISCV::PseudoVSPILL3_M2;
705 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
706 Opcode = RISCV::PseudoVSPILL4_M1;
707 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
708 Opcode = RISCV::PseudoVSPILL4_M2;
709 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
710 Opcode = RISCV::PseudoVSPILL5_M1;
711 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
712 Opcode = RISCV::PseudoVSPILL6_M1;
713 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
714 Opcode = RISCV::PseudoVSPILL7_M1;
715 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
716 Opcode = RISCV::PseudoVSPILL8_M1;
759 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
760 Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
762 }
else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
763 Opcode = RISCV::LH_INX;
764 }
else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
765 Opcode = RISCV::LW_INX;
766 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
767 if (!
STI.is64Bit() &&
STI.hasStdExtZilsd() &&
768 Alignment >=
STI.getZilsdAlign()) {
769 Opcode = RISCV::LD_RV32;
771 Opcode = RISCV::PseudoRV32ZdinxLD;
773 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
775 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
777 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
779 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
780 Opcode = RISCV::VL1RE8_V;
781 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
782 Opcode = RISCV::VL2RE8_V;
783 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
784 Opcode = RISCV::VL4RE8_V;
785 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
786 Opcode = RISCV::VL8RE8_V;
787 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
788 Opcode = RISCV::PseudoVRELOAD2_M1;
789 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
790 Opcode = RISCV::PseudoVRELOAD2_M2;
791 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
792 Opcode = RISCV::PseudoVRELOAD2_M4;
793 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
794 Opcode = RISCV::PseudoVRELOAD3_M1;
795 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
796 Opcode = RISCV::PseudoVRELOAD3_M2;
797 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
798 Opcode = RISCV::PseudoVRELOAD4_M1;
799 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
800 Opcode = RISCV::PseudoVRELOAD4_M2;
801 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
802 Opcode = RISCV::PseudoVRELOAD5_M1;
803 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
804 Opcode = RISCV::PseudoVRELOAD6_M1;
805 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
806 Opcode = RISCV::PseudoVRELOAD7_M1;
807 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
808 Opcode = RISCV::PseudoVRELOAD8_M1;
846 if (
Ops.size() != 1 ||
Ops[0] != 1)
849 switch (
MI.getOpcode()) {
851 if (RISCVInstrInfo::isSEXT_W(
MI))
853 if (RISCVInstrInfo::isZEXT_W(
MI))
855 if (RISCVInstrInfo::isZEXT_B(
MI))
862 case RISCV::ZEXT_H_RV32:
863 case RISCV::ZEXT_H_RV64:
870 case RISCV::VMV_X_S: {
873 if (ST.getXLen() < (1U << Log2SEW))
888 case RISCV::VFMV_F_S: {
915 return BuildMI(*
MI.getParent(), InsertPt,
MI.getDebugLoc(),
get(*LoadOpc),
924 return RISCV::PseudoCCLB;
926 return RISCV::PseudoCCLBU;
928 return RISCV::PseudoCCLH;
930 return RISCV::PseudoCCLHU;
932 return RISCV::PseudoCCLW;
934 return RISCV::PseudoCCLWU;
936 return RISCV::PseudoCCLD;
938 return RISCV::PseudoCCQC_E_LB;
939 case RISCV::QC_E_LBU:
940 return RISCV::PseudoCCQC_E_LBU;
942 return RISCV::PseudoCCQC_E_LH;
943 case RISCV::QC_E_LHU:
944 return RISCV::PseudoCCQC_E_LHU;
946 return RISCV::PseudoCCQC_E_LW;
957 if (
MI.getOpcode() != RISCV::PseudoCCMOVGPR)
962 if (!
STI.hasShortForwardBranchILoad() || !PredOpc)
966 if (
Ops.size() != 1 || (
Ops[0] != 4 &&
Ops[0] != 5))
969 bool Invert =
Ops[0] == 5;
973 if (!
MRI.constrainRegClass(DestReg, PreviousClass))
978 MI.getDebugLoc(),
get(PredOpc), DestReg)
979 .
add({
MI.getOperand(1),
MI.getOperand(2)});
1003 bool DstIsDead)
const {
1019 bool SrcRenamable =
false;
1023 bool LastItem = ++Num == Seq.
size();
1028 switch (Inst.getOpndKind()) {
1038 .
addReg(SrcReg, SrcRegState)
1045 .
addReg(SrcReg, SrcRegState)
1046 .
addReg(SrcReg, SrcRegState)
1052 .
addReg(SrcReg, SrcRegState)
1060 SrcRenamable = DstRenamable;
1070 case RISCV::CV_BEQIMM:
1071 case RISCV::QC_BEQI:
1072 case RISCV::QC_E_BEQI:
1073 case RISCV::NDS_BBC:
1074 case RISCV::NDS_BEQC:
1078 case RISCV::QC_BNEI:
1079 case RISCV::QC_E_BNEI:
1080 case RISCV::CV_BNEIMM:
1081 case RISCV::NDS_BBS:
1082 case RISCV::NDS_BNEC:
1085 case RISCV::QC_BLTI:
1086 case RISCV::QC_E_BLTI:
1089 case RISCV::QC_BGEI:
1090 case RISCV::QC_E_BGEI:
1093 case RISCV::QC_BLTUI:
1094 case RISCV::QC_E_BLTUI:
1097 case RISCV::QC_BGEUI:
1098 case RISCV::QC_E_BGEUI:
1130 "Unknown conditional branch");
1141 case RISCV::QC_MVEQ:
1142 return RISCV::QC_MVNE;
1143 case RISCV::QC_MVNE:
1144 return RISCV::QC_MVEQ;
1145 case RISCV::QC_MVLT:
1146 return RISCV::QC_MVGE;
1147 case RISCV::QC_MVGE:
1148 return RISCV::QC_MVLT;
1149 case RISCV::QC_MVLTU:
1150 return RISCV::QC_MVGEU;
1151 case RISCV::QC_MVGEU:
1152 return RISCV::QC_MVLTU;
1153 case RISCV::QC_MVEQI:
1154 return RISCV::QC_MVNEI;
1155 case RISCV::QC_MVNEI:
1156 return RISCV::QC_MVEQI;
1157 case RISCV::QC_MVLTI:
1158 return RISCV::QC_MVGEI;
1159 case RISCV::QC_MVGEI:
1160 return RISCV::QC_MVLTI;
1161 case RISCV::QC_MVLTUI:
1162 return RISCV::QC_MVGEUI;
1163 case RISCV::QC_MVGEUI:
1164 return RISCV::QC_MVLTUI;
1169 switch (SelectOpc) {
1188 case RISCV::Select_GPR_Using_CC_Imm5_Zibi:
1198 case RISCV::Select_GPR_Using_CC_SImm5_CV:
1203 return RISCV::CV_BEQIMM;
1205 return RISCV::CV_BNEIMM;
1208 case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
1213 return RISCV::QC_BEQI;
1215 return RISCV::QC_BNEI;
1217 return RISCV::QC_BLTI;
1219 return RISCV::QC_BGEI;
1222 case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
1227 return RISCV::QC_BLTUI;
1229 return RISCV::QC_BGEUI;
1232 case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
1237 return RISCV::QC_E_BEQI;
1239 return RISCV::QC_E_BNEI;
1241 return RISCV::QC_E_BLTI;
1243 return RISCV::QC_E_BGEI;
1246 case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
1251 return RISCV::QC_E_BLTUI;
1253 return RISCV::QC_E_BGEUI;
1256 case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
1261 return RISCV::NDS_BBC;
1263 return RISCV::NDS_BBS;
1266 case RISCV::Select_GPR_Using_CC_UImm7_NDS:
1271 return RISCV::NDS_BEQC;
1273 return RISCV::NDS_BNEC;
1302 bool AllowModify)
const {
1303 TBB = FBB =
nullptr;
1308 if (
I ==
MBB.end() || !isUnpredicatedTerminator(*
I))
1314 int NumTerminators = 0;
1315 for (
auto J =
I.getReverse(); J !=
MBB.rend() && isUnpredicatedTerminator(*J);
1318 if (J->getDesc().isUnconditionalBranch() ||
1319 J->getDesc().isIndirectBranch()) {
1326 if (AllowModify && FirstUncondOrIndirectBr !=
MBB.end()) {
1327 while (std::next(FirstUncondOrIndirectBr) !=
MBB.end()) {
1328 std::next(FirstUncondOrIndirectBr)->eraseFromParent();
1331 I = FirstUncondOrIndirectBr;
1335 if (
I->getDesc().isIndirectBranch())
1339 if (
I->isPreISelOpcode())
1343 if (NumTerminators > 2)
1347 if (NumTerminators == 1 &&
I->getDesc().isUnconditionalBranch()) {
1353 if (NumTerminators == 1 &&
I->getDesc().isConditionalBranch()) {
1359 if (NumTerminators == 2 && std::prev(
I)->getDesc().isConditionalBranch() &&
1360 I->getDesc().isUnconditionalBranch()) {
1371 int *BytesRemoved)
const {
1378 if (!
I->getDesc().isUnconditionalBranch() &&
1379 !
I->getDesc().isConditionalBranch())
1385 I->eraseFromParent();
1389 if (
I ==
MBB.begin())
1392 if (!
I->getDesc().isConditionalBranch())
1398 I->eraseFromParent();
1411 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
1413 "RISC-V branch conditions have two components!");
1447 assert(RS &&
"RegScavenger required for long branching");
1449 "new block should be inserted for expanding unconditional branch");
1452 "restore block should be inserted for restoring clobbered registers");
1461 "Branch offsets outside of the signed 32-bit range not supported");
1466 Register ScratchReg =
MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
1467 auto II =
MBB.end();
1473 RS->enterBasicBlockEnd(
MBB);
1475 if (
STI.hasStdExtZicfilp())
1476 RC = &RISCV::GPRX7RegClass;
1478 RS->scavengeRegisterBackwards(*RC,
MI.getIterator(),
1482 RS->setRegUsed(TmpGPR);
1487 TmpGPR =
STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
1489 if (
STI.hasStdExtZicfilp())
1493 if (FrameIndex == -1)
1498 TRI->eliminateFrameIndex(std::prev(
MI.getIterator()),
1501 MI.getOperand(1).setMBB(&RestoreBB);
1505 TRI->eliminateFrameIndex(RestoreBB.
back(),
1509 MRI.replaceRegWith(ScratchReg, TmpGPR);
1510 MRI.clearVirtRegs();
1515 assert((
Cond.size() == 3) &&
"Invalid branch condition!");
1520 Cond[0].setImm(RISCV::BNE);
1523 Cond[0].setImm(RISCV::BNEI);
1526 Cond[0].setImm(RISCV::BEQ);
1529 Cond[0].setImm(RISCV::BEQI);
1532 Cond[0].setImm(RISCV::BGE);
1535 Cond[0].setImm(RISCV::BLT);
1538 Cond[0].setImm(RISCV::BGEU);
1541 Cond[0].setImm(RISCV::BLTU);
1543 case RISCV::CV_BEQIMM:
1544 Cond[0].setImm(RISCV::CV_BNEIMM);
1546 case RISCV::CV_BNEIMM:
1547 Cond[0].setImm(RISCV::CV_BEQIMM);
1549 case RISCV::QC_BEQI:
1550 Cond[0].setImm(RISCV::QC_BNEI);
1552 case RISCV::QC_BNEI:
1553 Cond[0].setImm(RISCV::QC_BEQI);
1555 case RISCV::QC_BGEI:
1556 Cond[0].setImm(RISCV::QC_BLTI);
1558 case RISCV::QC_BLTI:
1559 Cond[0].setImm(RISCV::QC_BGEI);
1561 case RISCV::QC_BGEUI:
1562 Cond[0].setImm(RISCV::QC_BLTUI);
1564 case RISCV::QC_BLTUI:
1565 Cond[0].setImm(RISCV::QC_BGEUI);
1567 case RISCV::QC_E_BEQI:
1568 Cond[0].setImm(RISCV::QC_E_BNEI);
1570 case RISCV::QC_E_BNEI:
1571 Cond[0].setImm(RISCV::QC_E_BEQI);
1573 case RISCV::QC_E_BGEI:
1574 Cond[0].setImm(RISCV::QC_E_BLTI);
1576 case RISCV::QC_E_BLTI:
1577 Cond[0].setImm(RISCV::QC_E_BGEI);
1579 case RISCV::QC_E_BGEUI:
1580 Cond[0].setImm(RISCV::QC_E_BLTUI);
1582 case RISCV::QC_E_BLTUI:
1583 Cond[0].setImm(RISCV::QC_E_BGEUI);
1585 case RISCV::NDS_BBC:
1586 Cond[0].setImm(RISCV::NDS_BBS);
1588 case RISCV::NDS_BBS:
1589 Cond[0].setImm(RISCV::NDS_BBC);
1591 case RISCV::NDS_BEQC:
1592 Cond[0].setImm(RISCV::NDS_BNEC);
1594 case RISCV::NDS_BNEC:
1595 Cond[0].setImm(RISCV::NDS_BEQC);
1605 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1606 MI->getOperand(1).getReg() == RISCV::X0) {
1607 Imm =
MI->getOperand(2).getImm();
1612 if (
MI->getOpcode() == RISCV::BSETI &&
MI->getOperand(1).isReg() &&
1613 MI->getOperand(1).getReg() == RISCV::X0 &&
1614 MI->getOperand(2).getImm() == 11) {
1628 if (Reg == RISCV::X0) {
1632 return Reg.isVirtual() &&
isLoadImm(
MRI.getVRegDef(Reg), Imm);
1636 bool IsSigned =
false;
1637 bool IsEquality =
false;
1638 switch (
MI.getOpcode()) {
1674 MI.eraseFromParent();
1700 auto searchConst = [&](int64_t C1) ->
Register {
1702 auto DefC1 = std::find_if(++
II, E, [&](
const MachineInstr &
I) ->
bool {
1705 I.getOperand(0).getReg().isVirtual();
1708 return DefC1->getOperand(0).getReg();
1721 MRI.hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
1723 if (
Register RegZ = searchConst(C0 + 1)) {
1730 MRI.clearKillFlags(RegZ);
1731 MI.eraseFromParent();
1742 MRI.hasOneUse(RHS.getReg())) {
1744 if (
Register RegZ = searchConst(C0 - 1)) {
1751 MRI.clearKillFlags(RegZ);
1752 MI.eraseFromParent();
1762 assert(
MI.getDesc().isBranch() &&
"Unexpected opcode!");
1764 int NumOp =
MI.getNumExplicitOperands();
1765 return MI.getOperand(NumOp - 1).getMBB();
1769 int64_t BrOffset)
const {
1770 unsigned XLen =
STI.getXLen();
1777 case RISCV::NDS_BBC:
1778 case RISCV::NDS_BBS:
1779 case RISCV::NDS_BEQC:
1780 case RISCV::NDS_BNEC:
1790 case RISCV::CV_BEQIMM:
1791 case RISCV::CV_BNEIMM:
1792 case RISCV::QC_BEQI:
1793 case RISCV::QC_BNEI:
1794 case RISCV::QC_BGEI:
1795 case RISCV::QC_BLTI:
1796 case RISCV::QC_BLTUI:
1797 case RISCV::QC_BGEUI:
1798 case RISCV::QC_E_BEQI:
1799 case RISCV::QC_E_BNEI:
1800 case RISCV::QC_E_BGEI:
1801 case RISCV::QC_E_BLTI:
1802 case RISCV::QC_E_BLTUI:
1803 case RISCV::QC_E_BGEUI:
1806 case RISCV::PseudoBR:
1808 case RISCV::PseudoJump:
1819 case RISCV::ADD:
return RISCV::PseudoCCADD;
1820 case RISCV::SUB:
return RISCV::PseudoCCSUB;
1821 case RISCV::SLL:
return RISCV::PseudoCCSLL;
1822 case RISCV::SRL:
return RISCV::PseudoCCSRL;
1823 case RISCV::SRA:
return RISCV::PseudoCCSRA;
1824 case RISCV::AND:
return RISCV::PseudoCCAND;
1825 case RISCV::OR:
return RISCV::PseudoCCOR;
1826 case RISCV::XOR:
return RISCV::PseudoCCXOR;
1827 case RISCV::MAX:
return RISCV::PseudoCCMAX;
1828 case RISCV::MAXU:
return RISCV::PseudoCCMAXU;
1829 case RISCV::MIN:
return RISCV::PseudoCCMIN;
1830 case RISCV::MINU:
return RISCV::PseudoCCMINU;
1831 case RISCV::MUL:
return RISCV::PseudoCCMUL;
1832 case RISCV::LUI:
return RISCV::PseudoCCLUI;
1833 case RISCV::QC_LI:
return RISCV::PseudoCCQC_LI;
1834 case RISCV::QC_E_LI:
return RISCV::PseudoCCQC_E_LI;
1836 case RISCV::ADDI:
return RISCV::PseudoCCADDI;
1837 case RISCV::SLLI:
return RISCV::PseudoCCSLLI;
1838 case RISCV::SRLI:
return RISCV::PseudoCCSRLI;
1839 case RISCV::SRAI:
return RISCV::PseudoCCSRAI;
1840 case RISCV::ANDI:
return RISCV::PseudoCCANDI;
1841 case RISCV::ORI:
return RISCV::PseudoCCORI;
1842 case RISCV::XORI:
return RISCV::PseudoCCXORI;
1844 case RISCV::ADDW:
return RISCV::PseudoCCADDW;
1845 case RISCV::SUBW:
return RISCV::PseudoCCSUBW;
1846 case RISCV::SLLW:
return RISCV::PseudoCCSLLW;
1847 case RISCV::SRLW:
return RISCV::PseudoCCSRLW;
1848 case RISCV::SRAW:
return RISCV::PseudoCCSRAW;
1850 case RISCV::ADDIW:
return RISCV::PseudoCCADDIW;
1851 case RISCV::SLLIW:
return RISCV::PseudoCCSLLIW;
1852 case RISCV::SRLIW:
return RISCV::PseudoCCSRLIW;
1853 case RISCV::SRAIW:
return RISCV::PseudoCCSRAIW;
1855 case RISCV::ANDN:
return RISCV::PseudoCCANDN;
1856 case RISCV::ORN:
return RISCV::PseudoCCORN;
1857 case RISCV::XNOR:
return RISCV::PseudoCCXNOR;
1859 case RISCV::NDS_BFOS:
return RISCV::PseudoCCNDS_BFOS;
1860 case RISCV::NDS_BFOZ:
return RISCV::PseudoCCNDS_BFOZ;
1864 return RISCV::INSTRUCTION_LIST_END;
1873 if (!
Reg.isVirtual())
1875 if (!
MRI.hasOneNonDBGUse(
Reg))
1881 if (!STI.hasShortForwardBranchIMinMax() &&
1882 (
MI->getOpcode() == RISCV::MAX ||
MI->getOpcode() == RISCV::MIN ||
1883 MI->getOpcode() == RISCV::MINU ||
MI->getOpcode() == RISCV::MAXU))
1886 if (!STI.hasShortForwardBranchIMul() &&
MI->getOpcode() == RISCV::MUL)
1893 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1894 MI->getOperand(1).getReg() == RISCV::X0)
1899 if (MO.isFI() || MO.isCPI() || MO.isJTI())
1909 if (MO.getReg().isPhysical() && !
MRI.isConstantPhysReg(MO.getReg()))
1912 bool DontMoveAcrossStores =
true;
1913 if (!
MI->isSafeToMove(DontMoveAcrossStores))
1921 bool PreferFalse)
const {
1922 assert(
MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1923 "Unknown select instruction");
1924 if (!
STI.hasShortForwardBranchIALU())
1930 bool Invert = !
DefMI;
1938 Register DestReg =
MI.getOperand(0).getReg();
1940 if (!
MRI.constrainRegClass(DestReg, PreviousClass))
1944 assert(PredOpc != RISCV::INSTRUCTION_LIST_END &&
"Unexpected opcode!");
1951 NewMI.
add(
MI.getOperand(1));
1952 NewMI.
add(
MI.getOperand(2));
1961 NewMI.
add(FalseReg);
1976 if (
DefMI->getParent() !=
MI.getParent())
1980 DefMI->eraseFromParent();
1985 if (
MI.isMetaInstruction())
1988 unsigned Opcode =
MI.getOpcode();
1990 if (Opcode == TargetOpcode::INLINEASM ||
1991 Opcode == TargetOpcode::INLINEASM_BR) {
1993 return getInlineAsmLength(
MI.getOperand(0).getSymbolName(),
1998 if (
STI.hasStdExtZca()) {
1999 if (isCompressibleInst(
MI,
STI))
2006 if (Opcode == TargetOpcode::BUNDLE)
2007 return getInstBundleLength(
MI);
2009 if (
MI.getParent() &&
MI.getParent()->getParent()) {
2010 if (isCompressibleInst(
MI,
STI))
2015 case RISCV::PseudoMV_FPR16INX:
2016 case RISCV::PseudoMV_FPR32INX:
2018 return STI.hasStdExtZca() ? 2 : 4;
2019 case TargetOpcode::STACKMAP:
2022 case TargetOpcode::PATCHPOINT:
2025 case TargetOpcode::STATEPOINT: {
2029 return std::max(NumBytes, 8U);
2031 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
2032 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
2033 case TargetOpcode::PATCHABLE_TAIL_CALL: {
2036 if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
2037 F.hasFnAttribute(
"patchable-function-entry")) {
2039 if (
F.getFnAttribute(
"patchable-function-entry")
2041 .getAsInteger(10, Num))
2042 return get(Opcode).getSize();
2045 return (
STI.hasStdExtZca() ? 2 : 4) * Num;
2049 return STI.is64Bit() ? 68 : 44;
2052 return get(Opcode).getSize();
2056unsigned RISCVInstrInfo::getInstBundleLength(
const MachineInstr &
MI)
const {
2060 while (++
I != E &&
I->isInsideBundle()) {
2061 assert(!
I->isBundle() &&
"No nested bundle!");
2068 const unsigned Opcode =
MI.getOpcode();
2072 case RISCV::FSGNJ_D:
2073 case RISCV::FSGNJ_S:
2074 case RISCV::FSGNJ_H:
2075 case RISCV::FSGNJ_D_INX:
2076 case RISCV::FSGNJ_D_IN32X:
2077 case RISCV::FSGNJ_S_INX:
2078 case RISCV::FSGNJ_H_INX:
2080 return MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2081 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg();
2085 return (
MI.getOperand(1).isReg() &&
2086 MI.getOperand(1).getReg() == RISCV::X0) ||
2087 (
MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0);
2089 return MI.isAsCheapAsAMove();
2092std::optional<DestSourcePair>
2096 switch (
MI.getOpcode()) {
2102 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2103 MI.getOperand(2).isReg())
2105 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
2106 MI.getOperand(1).isReg())
2111 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isImm() &&
2112 MI.getOperand(2).getImm() == 0)
2116 if (
MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0 &&
2117 MI.getOperand(1).isReg())
2121 case RISCV::SH1ADD_UW:
2123 case RISCV::SH2ADD_UW:
2125 case RISCV::SH3ADD_UW:
2126 if (
MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0 &&
2127 MI.getOperand(2).isReg())
2130 case RISCV::FSGNJ_D:
2131 case RISCV::FSGNJ_S:
2132 case RISCV::FSGNJ_H:
2133 case RISCV::FSGNJ_D_INX:
2134 case RISCV::FSGNJ_D_IN32X:
2135 case RISCV::FSGNJ_S_INX:
2136 case RISCV::FSGNJ_H_INX:
2138 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
2139 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
2143 return std::nullopt;
2151 const auto &SchedModel =
STI.getSchedModel();
2152 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
2164 RISCV::getNamedOperandIdx(Root.
getOpcode(), RISCV::OpName::frm);
2168 return RISCV::getNamedOperandIdx(
MI->getOpcode(),
2169 RISCV::OpName::frm) < 0;
2171 "New instructions require FRM whereas the old one does not have it");
2178 for (
auto *NewMI : InsInstrs) {
2180 if (
static_cast<unsigned>(RISCV::getNamedOperandIdx(
2181 NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
2223bool RISCVInstrInfo::isVectorAssociativeAndCommutative(
const MachineInstr &Inst,
2224 bool Invert)
const {
2225#define OPCODE_LMUL_CASE(OPC) \
2226 case RISCV::OPC##_M1: \
2227 case RISCV::OPC##_M2: \
2228 case RISCV::OPC##_M4: \
2229 case RISCV::OPC##_M8: \
2230 case RISCV::OPC##_MF2: \
2231 case RISCV::OPC##_MF4: \
2232 case RISCV::OPC##_MF8
2234#define OPCODE_LMUL_MASK_CASE(OPC) \
2235 case RISCV::OPC##_M1_MASK: \
2236 case RISCV::OPC##_M2_MASK: \
2237 case RISCV::OPC##_M4_MASK: \
2238 case RISCV::OPC##_M8_MASK: \
2239 case RISCV::OPC##_MF2_MASK: \
2240 case RISCV::OPC##_MF4_MASK: \
2241 case RISCV::OPC##_MF8_MASK
2246 Opcode = *InvOpcode;
2263#undef OPCODE_LMUL_MASK_CASE
2264#undef OPCODE_LMUL_CASE
2267bool RISCVInstrInfo::areRVVInstsReassociable(
const MachineInstr &Root,
2274 const TargetRegisterInfo *
TRI =
MRI->getTargetRegisterInfo();
2278 const uint64_t TSFlags =
Desc.TSFlags;
2280 auto checkImmOperand = [&](
unsigned OpIdx) {
2284 auto checkRegOperand = [&](
unsigned OpIdx) {
2292 if (!checkRegOperand(1))
2307 bool SeenMI2 =
false;
2308 for (
auto End =
MBB->
rend(), It = It1; It != End; ++It) {
2317 if (It->modifiesRegister(RISCV::V0,
TRI)) {
2318 Register SrcReg = It->getOperand(1).getReg();
2336 if (MI1VReg != SrcReg)
2345 assert(SeenMI2 &&
"Prev is expected to appear before Root");
2385bool RISCVInstrInfo::hasReassociableVectorSibling(
const MachineInstr &Inst,
2386 bool &Commuted)
const {
2390 "Expect the present of passthrough operand.");
2396 Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
2397 areRVVInstsReassociable(Inst, *MI2);
2401 return areRVVInstsReassociable(Inst, *MI1) &&
2402 (isVectorAssociativeAndCommutative(*MI1) ||
2403 isVectorAssociativeAndCommutative(*MI1,
true)) &&
2410 if (!isVectorAssociativeAndCommutative(Inst) &&
2411 !isVectorAssociativeAndCommutative(Inst,
true))
2423 MI1 =
MRI.getUniqueVRegDef(Op1.
getReg());
2425 MI2 =
MRI.getUniqueVRegDef(Op2.
getReg());
2437 for (
unsigned I = 0;
I < 5; ++
I)
2443 bool &Commuted)
const {
2444 if (isVectorAssociativeAndCommutative(Inst) ||
2445 isVectorAssociativeAndCommutative(Inst,
true))
2446 return hasReassociableVectorSibling(Inst, Commuted);
2452 unsigned OperandIdx = Commuted ? 2 : 1;
2456 int16_t InstFrmOpIdx =
2457 RISCV::getNamedOperandIdx(Inst.
getOpcode(), RISCV::OpName::frm);
2458 int16_t SiblingFrmOpIdx =
2459 RISCV::getNamedOperandIdx(Sibling.
getOpcode(), RISCV::OpName::frm);
2461 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
2466 bool Invert)
const {
2467 if (isVectorAssociativeAndCommutative(Inst, Invert))
2475 Opc = *InverseOpcode;
2520std::optional<unsigned>
2522#define RVV_OPC_LMUL_CASE(OPC, INV) \
2523 case RISCV::OPC##_M1: \
2524 return RISCV::INV##_M1; \
2525 case RISCV::OPC##_M2: \
2526 return RISCV::INV##_M2; \
2527 case RISCV::OPC##_M4: \
2528 return RISCV::INV##_M4; \
2529 case RISCV::OPC##_M8: \
2530 return RISCV::INV##_M8; \
2531 case RISCV::OPC##_MF2: \
2532 return RISCV::INV##_MF2; \
2533 case RISCV::OPC##_MF4: \
2534 return RISCV::INV##_MF4; \
2535 case RISCV::OPC##_MF8: \
2536 return RISCV::INV##_MF8
2538#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
2539 case RISCV::OPC##_M1_MASK: \
2540 return RISCV::INV##_M1_MASK; \
2541 case RISCV::OPC##_M2_MASK: \
2542 return RISCV::INV##_M2_MASK; \
2543 case RISCV::OPC##_M4_MASK: \
2544 return RISCV::INV##_M4_MASK; \
2545 case RISCV::OPC##_M8_MASK: \
2546 return RISCV::INV##_M8_MASK; \
2547 case RISCV::OPC##_MF2_MASK: \
2548 return RISCV::INV##_MF2_MASK; \
2549 case RISCV::OPC##_MF4_MASK: \
2550 return RISCV::INV##_MF4_MASK; \
2551 case RISCV::OPC##_MF8_MASK: \
2552 return RISCV::INV##_MF8_MASK
2556 return std::nullopt;
2558 return RISCV::FSUB_H;
2560 return RISCV::FSUB_S;
2562 return RISCV::FSUB_D;
2564 return RISCV::FADD_H;
2566 return RISCV::FADD_S;
2568 return RISCV::FADD_D;
2585#undef RVV_OPC_LMUL_MASK_CASE
2586#undef RVV_OPC_LMUL_CASE
2591 bool DoRegPressureReduce) {
2607 if (DoRegPressureReduce && !
MRI.hasOneNonDBGUse(
MI->getOperand(0).getReg()))
2618 bool DoRegPressureReduce) {
2625 DoRegPressureReduce)) {
2631 DoRegPressureReduce)) {
2641 bool DoRegPressureReduce) {
2649 unsigned CombineOpc) {
2656 if (!
MI ||
MI->getParent() != &
MBB ||
MI->getOpcode() != CombineOpc)
2659 if (!
MRI.hasOneNonDBGUse(
MI->getOperand(0).getReg()))
2670 unsigned OuterShiftAmt) {
2676 if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
2703 case RISCV::SH1ADD_UW:
2705 case RISCV::SH2ADD_UW:
2707 case RISCV::SH3ADD_UW:
2753 bool DoRegPressureReduce)
const {
2762 DoRegPressureReduce);
2770 return RISCV::FMADD_H;
2772 return RISCV::FMADD_S;
2774 return RISCV::FMADD_D;
2819 bool Mul1IsKill = Mul1.
isKill();
2820 bool Mul2IsKill = Mul2.
isKill();
2821 bool AddendIsKill = Addend.
isKill();
2830 BuildMI(*MF, MergedLoc,
TII->get(FusedOpc), DstReg)
2855 assert(OuterShiftAmt != 0 &&
"Unexpected opcode");
2862 assert(InnerShiftAmt >= OuterShiftAmt &&
"Unexpected shift amount");
2865 switch (InnerShiftAmt - OuterShiftAmt) {
2869 InnerOpc = RISCV::ADD;
2872 InnerOpc = RISCV::SH1ADD;
2875 InnerOpc = RISCV::SH2ADD;
2878 InnerOpc = RISCV::SH3ADD;
2886 Register NewVR =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
2896 InstrIdxForVirtReg.
insert(std::make_pair(NewVR, 0));
2913 DelInstrs, InstrIdxForVirtReg);
2940 for (
const auto &[Index, Operand] :
enumerate(
Desc.operands())) {
2942 unsigned OpType = Operand.OperandType;
2948 ErrInfo =
"Expected an immediate operand.";
2951 int64_t Imm = MO.
getImm();
2957#define CASE_OPERAND_UIMM(NUM) \
2958 case RISCVOp::OPERAND_UIMM##NUM: \
2959 Ok = isUInt<NUM>(Imm); \
2961#define CASE_OPERAND_UIMM_LSB_ZEROS(BITS, SUFFIX) \
2962 case RISCVOp::OPERAND_UIMM##BITS##_LSB##SUFFIX: { \
2963 constexpr size_t NumZeros = sizeof(#SUFFIX) - 1; \
2964 Ok = isShiftedUInt<BITS - NumZeros, NumZeros>(Imm); \
2967#define CASE_OPERAND_SIMM(NUM) \
2968 case RISCVOp::OPERAND_SIMM##NUM: \
2969 Ok = isInt<NUM>(Imm); \
3003 Ok = Imm >= 1 && Imm <= 32;
3027 Ok = (
isUInt<5>(Imm) && Imm != 0) || Imm == -1;
3037 Ok = Imm >= -15 && Imm <= 16;
3065 Ok = Ok && Imm != 0;
3068 Ok = (
isUInt<5>(Imm) && Imm != 0) || (Imm >= 0xfffe0 && Imm <= 0xfffff);
3071 Ok = Imm >= 0 && Imm <= 10;
3074 Ok = Imm >= 0 && Imm <= 7;
3077 Ok = Imm >= 1 && Imm <= 10;
3080 Ok = Imm >= 2 && Imm <= 14;
3089 Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
3124 Ok = Imm == 1 || Imm == 2 || Imm == 4;
3128 ErrInfo =
"Invalid immediate";
3137 ErrInfo =
"Expected a non-register operand.";
3141 ErrInfo =
"Invalid immediate";
3150 ErrInfo =
"Expected a non-register operand.";
3154 ErrInfo =
"Invalid immediate";
3162 ErrInfo =
"Expected a non-register operand.";
3166 ErrInfo =
"Invalid immediate";
3172 int64_t Imm = MO.
getImm();
3175 ErrInfo =
"Invalid immediate";
3178 }
else if (!MO.
isReg()) {
3179 ErrInfo =
"Expected a register or immediate operand.";
3189 if (!
Op.isImm() && !
Op.isReg()) {
3190 ErrInfo =
"Invalid operand type for VL operand";
3193 if (
Op.isReg() &&
Op.getReg().isValid()) {
3195 auto *RC =
MRI.getRegClass(
Op.getReg());
3196 if (!RISCV::GPRNoX0RegClass.hasSubClassEq(RC)) {
3197 ErrInfo =
"Invalid register class for VL operand";
3202 ErrInfo =
"VL operand w/o SEW operand?";
3208 if (!
MI.getOperand(
OpIdx).isImm()) {
3209 ErrInfo =
"SEW value expected to be an immediate";
3214 ErrInfo =
"Unexpected SEW value";
3217 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3219 ErrInfo =
"Unexpected SEW value";
3225 if (!
MI.getOperand(
OpIdx).isImm()) {
3226 ErrInfo =
"Policy operand expected to be an immediate";
3231 ErrInfo =
"Invalid Policy Value";
3235 ErrInfo =
"policy operand w/o VL operand?";
3243 if (!
MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
3244 ErrInfo =
"policy operand w/o tied operand?";
3251 !
MI.readsRegister(RISCV::FRM,
nullptr)) {
3252 ErrInfo =
"dynamic rounding mode should read FRM";
3274 case RISCV::LD_RV32:
3284 case RISCV::SD_RV32:
3300 int64_t NewOffset = OldOffset + Disp;
3322 "Addressing mode not supported for folding");
3395 case RISCV::LD_RV32:
3398 case RISCV::SD_RV32:
3405 OffsetIsScalable =
false;
3421 if (BaseOps1.
front()->isIdenticalTo(*BaseOps2.
front()))
3429 if (MO1->getAddrSpace() != MO2->getAddrSpace())
3432 auto Base1 = MO1->getValue();
3433 auto Base2 = MO2->getValue();
3434 if (!Base1 || !Base2)
3442 return Base1 == Base2;
3448 int64_t Offset2,
bool OffsetIsScalable2,
unsigned ClusterSize,
3449 unsigned NumBytes)
const {
3452 if (!BaseOps1.
empty() && !BaseOps2.
empty()) {
3457 }
else if (!BaseOps1.
empty() || !BaseOps2.
empty()) {
3463 BaseOps1.
front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
3469 return ClusterSize <= 4 && std::abs(Offset1 - Offset2) <
CacheLineSize;
3519 int64_t OffsetA = 0, OffsetB = 0;
3525 int LowOffset = std::min(OffsetA, OffsetB);
3526 int HighOffset = std::max(OffsetA, OffsetB);
3527 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
3529 LowOffset + (
int)LowWidth.
getValue() <= HighOffset)
3536std::pair<unsigned, unsigned>
3539 return std::make_pair(TF & Mask, TF & ~Mask);
3545 static const std::pair<unsigned, const char *> TargetFlags[] = {
3546 {MO_CALL,
"riscv-call"},
3547 {MO_LO,
"riscv-lo"},
3548 {MO_HI,
"riscv-hi"},
3549 {MO_PCREL_LO,
"riscv-pcrel-lo"},
3550 {MO_PCREL_HI,
"riscv-pcrel-hi"},
3551 {MO_GOT_HI,
"riscv-got-hi"},
3552 {MO_TPREL_LO,
"riscv-tprel-lo"},
3553 {MO_TPREL_HI,
"riscv-tprel-hi"},
3554 {MO_TPREL_ADD,
"riscv-tprel-add"},
3555 {MO_TLS_GOT_HI,
"riscv-tls-got-hi"},
3556 {MO_TLS_GD_HI,
"riscv-tls-gd-hi"},
3557 {MO_TLSDESC_HI,
"riscv-tlsdesc-hi"},
3558 {MO_TLSDESC_LOAD_LO,
"riscv-tlsdesc-load-lo"},
3559 {MO_TLSDESC_ADD_LO,
"riscv-tlsdesc-add-lo"},
3560 {MO_TLSDESC_CALL,
"riscv-tlsdesc-call"}};
3568 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
3581 unsigned &Flags)
const {
3600 return F.getFnAttribute(
"fentry-call").getValueAsBool() ||
3601 F.hasFnAttribute(
"patchable-function-entry");
3606 return MI.readsRegister(RegNo,
TRI) ||
3607 MI.getDesc().hasImplicitUseOfPhysReg(RegNo);
3612 return MI.modifiesRegister(RegNo,
TRI) ||
3613 MI.getDesc().hasImplicitDefOfPhysReg(RegNo);
3617 if (!
MBB.back().isReturn())
3643 if (
C.back().isReturn() &&
3644 !
C.isAvailableAcrossAndOutOfSeq(TailExpandUseReg, RegInfo)) {
3646 LLVM_DEBUG(
dbgs() <<
"Cannot be outlined between: " <<
C.front() <<
"and "
3648 LLVM_DEBUG(
dbgs() <<
"Because the tail-call register is live across "
3649 "the proposed outlined function call\n");
3655 if (
C.back().isReturn()) {
3657 "The candidate who uses return instruction must be outlined "
3669 return !
C.isAvailableAcrossAndOutOfSeq(RISCV::X5, RegInfo);
3672std::optional<std::unique_ptr<outliner::OutlinedFunction>>
3675 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
3676 unsigned MinRepeats)
const {
3684 if (RepeatedSequenceLocs.size() < MinRepeats)
3685 return std::nullopt;
3689 unsigned InstrSizeCExt =
3691 unsigned CallOverhead = 0, FrameOverhead = 0;
3694 unsigned CFICount = 0;
3695 for (
auto &
I : Candidate) {
3696 if (
I.isCFIInstruction())
3707 std::vector<MCCFIInstruction> CFIInstructions =
3708 C.getMF()->getFrameInstructions();
3710 if (CFICount > 0 && CFICount != CFIInstructions.size())
3711 return std::nullopt;
3719 CallOverhead = 4 + InstrSizeCExt;
3726 FrameOverhead = InstrSizeCExt;
3732 return std::nullopt;
3734 for (
auto &
C : RepeatedSequenceLocs)
3735 C.setCallInfo(MOCI, CallOverhead);
3737 unsigned SequenceSize = 0;
3738 for (
auto &
MI : Candidate)
3741 return std::make_unique<outliner::OutlinedFunction>(
3742 RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
3748 unsigned Flags)
const {
3752 MBB->getParent()->getSubtarget().getRegisterInfo();
3753 const auto &
F =
MI.getMF()->getFunction();
3758 if (
MI.isCFIInstruction())
3766 for (
const auto &MO :
MI.operands()) {
3771 (
MI.getMF()->getTarget().getFunctionSections() ||
F.hasComdat() ||
3772 F.hasSection() ||
F.getSectionPrefix()))
3789 MBB.addLiveIn(RISCV::X5);
3804 .addGlobalAddress(M.getNamedValue(MF.
getName()),
3812 .addGlobalAddress(M.getNamedValue(MF.
getName()), 0,
3823 return std::nullopt;
3827 if (
MI.getOpcode() == RISCV::ADDI &&
MI.getOperand(1).isReg() &&
3828 MI.getOperand(2).isImm())
3829 return RegImmPair{
MI.getOperand(1).getReg(),
MI.getOperand(2).getImm()};
3831 return std::nullopt;
3839 std::string GenericComment =
3841 if (!GenericComment.empty())
3842 return GenericComment;
3846 return std::string();
3848 std::string Comment;
3855 switch (OpInfo.OperandType) {
3858 unsigned Imm =
Op.getImm();
3863 unsigned Imm =
Op.getImm();
3868 unsigned Imm =
Op.getImm();
3874 unsigned Log2SEW =
Op.getImm();
3875 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
3881 unsigned Policy =
Op.getImm();
3883 "Invalid Policy Value");
3889 if (
Op.isImm() &&
Op.getImm() == -1)
3911#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
3912 RISCV::Pseudo##OP##_##LMUL
3914#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
3915 RISCV::Pseudo##OP##_##LMUL##_MASK
3917#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
3918 CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
3919 case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)
3921#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
3922 CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
3923 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
3924 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
3925 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
3926 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
3927 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)
3929#define CASE_RVV_OPCODE_UNMASK(OP) \
3930 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
3931 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)
3933#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
3934 CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
3935 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
3936 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
3937 case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
3938 case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
3939 case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)
3941#define CASE_RVV_OPCODE_MASK(OP) \
3942 CASE_RVV_OPCODE_MASK_WIDEN(OP): \
3943 case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)
3945#define CASE_RVV_OPCODE_WIDEN(OP) \
3946 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
3947 case CASE_RVV_OPCODE_MASK_WIDEN(OP)
3949#define CASE_RVV_OPCODE(OP) \
3950 CASE_RVV_OPCODE_UNMASK(OP): \
3951 case CASE_RVV_OPCODE_MASK(OP)
3955#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
3956 RISCV::PseudoV##OP##_##TYPE##_##LMUL
3958#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
3959 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
3960 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
3961 case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
3962 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
3963 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
3964 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
3965 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)
3968#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
3969 RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW
3971#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
3972 CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
3973 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
3974 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
3975 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)
3977#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
3978 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
3979 case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)
3981#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
3982 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
3983 case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)
3985#define CASE_VFMA_OPCODE_VV(OP) \
3986 CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
3987 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16): \
3988 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
3989 case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)
3991#define CASE_VFMA_SPLATS(OP) \
3992 CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
3993 case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16): \
3994 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
3995 case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
3999 unsigned &SrcOpIdx1,
4000 unsigned &SrcOpIdx2)
const {
4002 if (!
Desc.isCommutable())
4005 switch (
MI.getOpcode()) {
4006 case RISCV::TH_MVEQZ:
4007 case RISCV::TH_MVNEZ:
4011 if (
MI.getOperand(2).getReg() == RISCV::X0)
4014 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4015 case RISCV::QC_SELECTIEQ:
4016 case RISCV::QC_SELECTINE:
4017 case RISCV::QC_SELECTIIEQ:
4018 case RISCV::QC_SELECTIINE:
4019 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
4020 case RISCV::QC_MVEQ:
4021 case RISCV::QC_MVNE:
4022 case RISCV::QC_MVLT:
4023 case RISCV::QC_MVGE:
4024 case RISCV::QC_MVLTU:
4025 case RISCV::QC_MVGEU:
4026 case RISCV::QC_MVEQI:
4027 case RISCV::QC_MVNEI:
4028 case RISCV::QC_MVLTI:
4029 case RISCV::QC_MVGEI:
4030 case RISCV::QC_MVLTUI:
4031 case RISCV::QC_MVGEUI:
4032 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4);
4033 case RISCV::TH_MULA:
4034 case RISCV::TH_MULAW:
4035 case RISCV::TH_MULAH:
4036 case RISCV::TH_MULS:
4037 case RISCV::TH_MULSW:
4038 case RISCV::TH_MULSH:
4040 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
4041 case RISCV::PseudoCCMOVGPRNoX0:
4042 case RISCV::PseudoCCMOVGPR:
4044 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
4071 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
4098 unsigned CommutableOpIdx1 = 1;
4099 unsigned CommutableOpIdx2 = 3;
4100 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4121 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
4123 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
4127 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
4128 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
4134 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
4135 SrcOpIdx2 == CommuteAnyOperandIndex) {
4138 unsigned CommutableOpIdx1 = SrcOpIdx1;
4139 if (SrcOpIdx1 == SrcOpIdx2) {
4142 CommutableOpIdx1 = 1;
4143 }
else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
4145 CommutableOpIdx1 = SrcOpIdx2;
4150 unsigned CommutableOpIdx2;
4151 if (CommutableOpIdx1 != 1) {
4153 CommutableOpIdx2 = 1;
4155 Register Op1Reg =
MI.getOperand(CommutableOpIdx1).getReg();
4160 if (Op1Reg !=
MI.getOperand(2).getReg())
4161 CommutableOpIdx2 = 2;
4163 CommutableOpIdx2 = 3;
4168 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
4181#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
4182 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
4183 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
4186#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
4187 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
4188 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
4189 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
4190 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
4191 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
4192 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
4193 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
4196#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
4197 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
4198 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \
4201#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
4202 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
4203 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
4204 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
4205 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)
4207#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
4208 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
4209 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)
4211#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
4212 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
4213 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)
4215#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
4216 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
4217 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16) \
4218 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
4219 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)
4221#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
4222 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
4223 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16) \
4224 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
4225 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
4231 unsigned OpIdx2)
const {
4234 return *
MI.getParent()->getParent()->CloneMachineInstr(&
MI);
4238 switch (
MI.getOpcode()) {
4239 case RISCV::TH_MVEQZ:
4240 case RISCV::TH_MVNEZ: {
4241 auto &WorkingMI = cloneIfNew(
MI);
4242 WorkingMI.setDesc(
get(
MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
4243 : RISCV::TH_MVEQZ));
4247 case RISCV::QC_SELECTIEQ:
4248 case RISCV::QC_SELECTINE:
4249 case RISCV::QC_SELECTIIEQ:
4250 case RISCV::QC_SELECTIINE:
4252 case RISCV::QC_MVEQ:
4253 case RISCV::QC_MVNE:
4254 case RISCV::QC_MVLT:
4255 case RISCV::QC_MVGE:
4256 case RISCV::QC_MVLTU:
4257 case RISCV::QC_MVGEU:
4258 case RISCV::QC_MVEQI:
4259 case RISCV::QC_MVNEI:
4260 case RISCV::QC_MVLTI:
4261 case RISCV::QC_MVGEI:
4262 case RISCV::QC_MVLTUI:
4263 case RISCV::QC_MVGEUI: {
4264 auto &WorkingMI = cloneIfNew(
MI);
4269 case RISCV::PseudoCCMOVGPRNoX0:
4270 case RISCV::PseudoCCMOVGPR: {
4274 auto &WorkingMI = cloneIfNew(
MI);
4275 WorkingMI.getOperand(3).setImm(CC);
4299 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4300 assert((OpIdx1 == 3 || OpIdx2 == 3) &&
"Unexpected opcode index");
4302 switch (
MI.getOpcode()) {
4325 auto &WorkingMI = cloneIfNew(
MI);
4326 WorkingMI.setDesc(
get(
Opc));
4336 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
4339 if (OpIdx1 == 3 || OpIdx2 == 3) {
4341 switch (
MI.getOpcode()) {
4352 auto &WorkingMI = cloneIfNew(
MI);
4353 WorkingMI.setDesc(
get(
Opc));
4365#undef CASE_VMA_CHANGE_OPCODE_COMMON
4366#undef CASE_VMA_CHANGE_OPCODE_LMULS
4367#undef CASE_VFMA_CHANGE_OPCODE_COMMON
4368#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
4369#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
4370#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
4371#undef CASE_VFMA_CHANGE_OPCODE_VV
4372#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
4374#undef CASE_RVV_OPCODE_UNMASK_LMUL
4375#undef CASE_RVV_OPCODE_MASK_LMUL
4376#undef CASE_RVV_OPCODE_LMUL
4377#undef CASE_RVV_OPCODE_UNMASK_WIDEN
4378#undef CASE_RVV_OPCODE_UNMASK
4379#undef CASE_RVV_OPCODE_MASK_WIDEN
4380#undef CASE_RVV_OPCODE_MASK
4381#undef CASE_RVV_OPCODE_WIDEN
4382#undef CASE_RVV_OPCODE
4384#undef CASE_VMA_OPCODE_COMMON
4385#undef CASE_VMA_OPCODE_LMULS
4386#undef CASE_VFMA_OPCODE_COMMON
4387#undef CASE_VFMA_OPCODE_LMULS_M1
4388#undef CASE_VFMA_OPCODE_LMULS_MF2
4389#undef CASE_VFMA_OPCODE_LMULS_MF4
4390#undef CASE_VFMA_OPCODE_VV
4391#undef CASE_VFMA_SPLATS
4394 switch (
MI.getOpcode()) {
4402 if (
MI.getOperand(1).getReg() == RISCV::X0)
4403 commuteInstruction(
MI);
4405 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4406 MI.getOperand(2).ChangeToImmediate(0);
4407 MI.setDesc(
get(RISCV::ADDI));
4411 if (
MI.getOpcode() == RISCV::XOR &&
4412 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4413 MI.getOperand(1).setReg(RISCV::X0);
4414 MI.getOperand(2).ChangeToImmediate(0);
4415 MI.setDesc(
get(RISCV::ADDI));
4422 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4423 MI.setDesc(
get(RISCV::ADDI));
4429 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4430 MI.getOperand(2).ChangeToImmediate(0);
4431 MI.setDesc(
get(RISCV::ADDI));
4437 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4438 MI.getOperand(2).ChangeToImmediate(0);
4439 MI.setDesc(
get(RISCV::ADDIW));
4446 if (
MI.getOperand(1).getReg() == RISCV::X0)
4447 commuteInstruction(
MI);
4449 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4450 MI.getOperand(2).ChangeToImmediate(0);
4451 MI.setDesc(
get(RISCV::ADDIW));
4456 case RISCV::SH1ADD_UW:
4458 case RISCV::SH2ADD_UW:
4460 case RISCV::SH3ADD_UW:
4462 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4463 MI.removeOperand(1);
4465 MI.setDesc(
get(RISCV::ADDI));
4469 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4470 MI.removeOperand(2);
4471 unsigned Opc =
MI.getOpcode();
4472 if (
Opc == RISCV::SH1ADD_UW ||
Opc == RISCV::SH2ADD_UW ||
4473 Opc == RISCV::SH3ADD_UW) {
4475 MI.setDesc(
get(RISCV::SLLI_UW));
4479 MI.setDesc(
get(RISCV::SLLI));
4493 if (
MI.getOperand(1).getReg() == RISCV::X0 ||
4494 MI.getOperand(2).getReg() == RISCV::X0) {
4495 MI.getOperand(1).setReg(RISCV::X0);
4496 MI.getOperand(2).ChangeToImmediate(0);
4497 MI.setDesc(
get(RISCV::ADDI));
4503 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4504 MI.getOperand(2).setImm(0);
4505 MI.setDesc(
get(RISCV::ADDI));
4513 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4514 MI.getOperand(2).ChangeToImmediate(0);
4515 MI.setDesc(
get(RISCV::ADDI));
4519 if (
MI.getOperand(2).getReg() == RISCV::X0) {
4520 MI.getOperand(2).ChangeToImmediate(0);
4521 MI.setDesc(
get(RISCV::ADDI));
4529 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4530 MI.getOperand(2).ChangeToImmediate(0);
4531 MI.setDesc(
get(RISCV::ADDI));
4541 case RISCV::SLLI_UW:
4543 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4544 MI.getOperand(2).setImm(0);
4545 MI.setDesc(
get(RISCV::ADDI));
4553 if (
MI.getOperand(1).getReg() == RISCV::X0 &&
4554 MI.getOperand(2).getReg() == RISCV::X0) {
4555 MI.getOperand(2).ChangeToImmediate(0);
4556 MI.setDesc(
get(RISCV::ADDI));
4560 if (
MI.getOpcode() == RISCV::ADD_UW &&
4561 MI.getOperand(1).getReg() == RISCV::X0) {
4562 MI.removeOperand(1);
4564 MI.setDesc(
get(RISCV::ADDI));
4570 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4571 MI.getOperand(2).setImm(
MI.getOperand(2).getImm() != 0);
4572 MI.setDesc(
get(RISCV::ADDI));
4578 case RISCV::ZEXT_H_RV32:
4579 case RISCV::ZEXT_H_RV64:
4582 if (
MI.getOperand(1).getReg() == RISCV::X0) {
4584 MI.setDesc(
get(RISCV::ADDI));
4593 if (
MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg()) {
4594 MI.getOperand(2).ChangeToImmediate(0);
4595 MI.setDesc(
get(RISCV::ADDI));
4602 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4604 MI.removeOperand(0);
4605 MI.insert(
MI.operands_begin() + 1, {MO0});
4610 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4612 MI.removeOperand(0);
4613 MI.insert(
MI.operands_begin() + 1, {MO0});
4614 MI.setDesc(
get(RISCV::BNE));
4619 if (
MI.getOperand(0).getReg() == RISCV::X0) {
4621 MI.removeOperand(0);
4622 MI.insert(
MI.operands_begin() + 1, {MO0});
4623 MI.setDesc(
get(RISCV::BEQ));
4631#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
4632 RISCV::PseudoV##OP##_##LMUL##_TIED
4634#define CASE_WIDEOP_OPCODE_LMULS(OP) \
4635 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
4636 case CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
4637 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
4638 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
4639 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
4640 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
4642#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
4643 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
4644 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
4647#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4648 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
4649 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
4650 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
4651 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
4652 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
4653 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
4656#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
4657 RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED
4659#define CASE_FP_WIDEOP_OPCODE_LMULS(OP) \
4660 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4661 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4662 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
4663 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4664 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
4665 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4666 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
4667 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
4668 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32) \
4670#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
4671 case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
4672 NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
4675#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
4676 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4677 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4678 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
4679 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4680 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
4681 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4682 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
4683 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
4684 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
4686#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP) \
4687 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
4688 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
4689 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
4690 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
4691 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)
4693#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP) \
4694 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
4695 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
4696 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
4697 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
4698 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)
4705 switch (
MI.getOpcode()) {
4713 MI.getNumExplicitOperands() == 7 &&
4714 "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
4721 switch (
MI.getOpcode()) {
4733 .
add(
MI.getOperand(0))
4735 .
add(
MI.getOperand(1))
4736 .
add(
MI.getOperand(2))
4737 .
add(
MI.getOperand(3))
4738 .
add(
MI.getOperand(4))
4739 .
add(
MI.getOperand(5))
4740 .
add(
MI.getOperand(6));
4749 MI.getNumExplicitOperands() == 6);
4756 switch (
MI.getOpcode()) {
4768 .
add(
MI.getOperand(0))
4770 .
add(
MI.getOperand(1))
4771 .
add(
MI.getOperand(2))
4772 .
add(
MI.getOperand(3))
4773 .
add(
MI.getOperand(4))
4774 .
add(
MI.getOperand(5));
4781 unsigned NumOps =
MI.getNumOperands();
4784 if (
Op.isReg() &&
Op.isKill())
4792 if (
MI.getOperand(0).isEarlyClobber()) {
4806#undef CASE_WIDEOP_OPCODE_COMMON
4807#undef CASE_WIDEOP_OPCODE_LMULS
4808#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
4809#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
4810#undef CASE_FP_WIDEOP_OPCODE_COMMON
4811#undef CASE_FP_WIDEOP_OPCODE_LMULS
4812#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
4813#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
4822 if (ShiftAmount == 0)
4828 }
else if (
int ShXAmount, ShiftAmount;
4830 (ShXAmount =
isShifted359(Amount, ShiftAmount)) != 0) {
4833 switch (ShXAmount) {
4835 Opc = RISCV::SH1ADD;
4838 Opc = RISCV::SH2ADD;
4841 Opc = RISCV::SH3ADD;
4856 Register ScaledRegister =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
4867 Register ScaledRegister =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
4877 }
else if (
STI.hasStdExtZmmul()) {
4878 Register N =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
4887 for (
uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
4888 if (Amount & (1U << ShiftAmount)) {
4892 .
addImm(ShiftAmount - PrevShiftAmount)
4894 if (Amount >> (ShiftAmount + 1)) {
4897 Acc =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
4908 PrevShiftAmount = ShiftAmount;
4911 assert(Acc &&
"Expected valid accumulator");
4921 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
4929 ?
STI.getTailDupAggressiveThreshold()
4936 unsigned Opcode =
MI.getOpcode();
4937 if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
4946 return MI.isCopy() &&
MI.getOperand(0).getReg().isPhysical() &&
4948 TRI->getMinimalPhysRegClass(
MI.getOperand(0).getReg()));
4951std::optional<std::pair<unsigned, unsigned>>
4955 return std::nullopt;
4956 case RISCV::PseudoVSPILL2_M1:
4957 case RISCV::PseudoVRELOAD2_M1:
4958 return std::make_pair(2u, 1u);
4959 case RISCV::PseudoVSPILL2_M2:
4960 case RISCV::PseudoVRELOAD2_M2:
4961 return std::make_pair(2u, 2u);
4962 case RISCV::PseudoVSPILL2_M4:
4963 case RISCV::PseudoVRELOAD2_M4:
4964 return std::make_pair(2u, 4u);
4965 case RISCV::PseudoVSPILL3_M1:
4966 case RISCV::PseudoVRELOAD3_M1:
4967 return std::make_pair(3u, 1u);
4968 case RISCV::PseudoVSPILL3_M2:
4969 case RISCV::PseudoVRELOAD3_M2:
4970 return std::make_pair(3u, 2u);
4971 case RISCV::PseudoVSPILL4_M1:
4972 case RISCV::PseudoVRELOAD4_M1:
4973 return std::make_pair(4u, 1u);
4974 case RISCV::PseudoVSPILL4_M2:
4975 case RISCV::PseudoVRELOAD4_M2:
4976 return std::make_pair(4u, 2u);
4977 case RISCV::PseudoVSPILL5_M1:
4978 case RISCV::PseudoVRELOAD5_M1:
4979 return std::make_pair(5u, 1u);
4980 case RISCV::PseudoVSPILL6_M1:
4981 case RISCV::PseudoVRELOAD6_M1:
4982 return std::make_pair(6u, 1u);
4983 case RISCV::PseudoVSPILL7_M1:
4984 case RISCV::PseudoVRELOAD7_M1:
4985 return std::make_pair(7u, 1u);
4986 case RISCV::PseudoVSPILL8_M1:
4987 case RISCV::PseudoVRELOAD8_M1:
4988 return std::make_pair(8u, 1u);
4993 int16_t MI1FrmOpIdx =
4994 RISCV::getNamedOperandIdx(MI1.
getOpcode(), RISCV::OpName::frm);
4995 int16_t MI2FrmOpIdx =
4996 RISCV::getNamedOperandIdx(MI2.
getOpcode(), RISCV::OpName::frm);
4997 if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
5004std::optional<unsigned>
5008 return std::nullopt;
5011 case RISCV::VSLL_VX:
5012 case RISCV::VSRL_VX:
5013 case RISCV::VSRA_VX:
5015 case RISCV::VSSRL_VX:
5016 case RISCV::VSSRA_VX:
5018 case RISCV::VROL_VX:
5019 case RISCV::VROR_VX:
5024 case RISCV::VNSRL_WX:
5025 case RISCV::VNSRA_WX:
5027 case RISCV::VNCLIPU_WX:
5028 case RISCV::VNCLIP_WX:
5030 case RISCV::VWSLL_VX:
5035 case RISCV::VADD_VX:
5036 case RISCV::VSUB_VX:
5037 case RISCV::VRSUB_VX:
5039 case RISCV::VWADDU_VX:
5040 case RISCV::VWSUBU_VX:
5041 case RISCV::VWADD_VX:
5042 case RISCV::VWSUB_VX:
5043 case RISCV::VWADDU_WX:
5044 case RISCV::VWSUBU_WX:
5045 case RISCV::VWADD_WX:
5046 case RISCV::VWSUB_WX:
5048 case RISCV::VADC_VXM:
5049 case RISCV::VADC_VIM:
5050 case RISCV::VMADC_VXM:
5051 case RISCV::VMADC_VIM:
5052 case RISCV::VMADC_VX:
5053 case RISCV::VSBC_VXM:
5054 case RISCV::VMSBC_VXM:
5055 case RISCV::VMSBC_VX:
5057 case RISCV::VAND_VX:
5059 case RISCV::VXOR_VX:
5061 case RISCV::VMSEQ_VX:
5062 case RISCV::VMSNE_VX:
5063 case RISCV::VMSLTU_VX:
5064 case RISCV::VMSLT_VX:
5065 case RISCV::VMSLEU_VX:
5066 case RISCV::VMSLE_VX:
5067 case RISCV::VMSGTU_VX:
5068 case RISCV::VMSGT_VX:
5070 case RISCV::VMINU_VX:
5071 case RISCV::VMIN_VX:
5072 case RISCV::VMAXU_VX:
5073 case RISCV::VMAX_VX:
5075 case RISCV::VMUL_VX:
5076 case RISCV::VMULH_VX:
5077 case RISCV::VMULHU_VX:
5078 case RISCV::VMULHSU_VX:
5080 case RISCV::VDIVU_VX:
5081 case RISCV::VDIV_VX:
5082 case RISCV::VREMU_VX:
5083 case RISCV::VREM_VX:
5085 case RISCV::VWMUL_VX:
5086 case RISCV::VWMULU_VX:
5087 case RISCV::VWMULSU_VX:
5089 case RISCV::VMACC_VX:
5090 case RISCV::VNMSAC_VX:
5091 case RISCV::VMADD_VX:
5092 case RISCV::VNMSUB_VX:
5094 case RISCV::VWMACCU_VX:
5095 case RISCV::VWMACC_VX:
5096 case RISCV::VWMACCSU_VX:
5097 case RISCV::VWMACCUS_VX:
5099 case RISCV::VMERGE_VXM:
5101 case RISCV::VMV_V_X:
5103 case RISCV::VSADDU_VX:
5104 case RISCV::VSADD_VX:
5105 case RISCV::VSSUBU_VX:
5106 case RISCV::VSSUB_VX:
5108 case RISCV::VAADDU_VX:
5109 case RISCV::VAADD_VX:
5110 case RISCV::VASUBU_VX:
5111 case RISCV::VASUB_VX:
5113 case RISCV::VSMUL_VX:
5115 case RISCV::VMV_S_X:
5117 case RISCV::VANDN_VX:
5118 return 1U << Log2SEW;
5124 RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
5127 return RVV->BaseInstr;
5137 unsigned Scaled = Log2SEW + (DestEEW - 1);
5151 return std::nullopt;
5156 assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
5157 (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
5158 if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
5159 LHS.getReg() == RHS.getReg())
5163 if (LHS.isImm() && LHS.getImm() == 0)
5169 if (!LHSImm || !RHSImm)
5171 return LHSImm <= RHSImm;
5183 : LHS(LHS), RHS(RHS),
Cond(
Cond.begin(),
Cond.end()) {}
5185 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
5195 std::optional<bool> createTripCountGreaterCondition(
5196 int TC, MachineBasicBlock &
MBB,
5197 SmallVectorImpl<MachineOperand> &CondParam)
override {
5205 void setPreheader(MachineBasicBlock *NewPreheader)
override {}
5207 void adjustTripCount(
int TripCountAdjust)
override {}
5211std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
5219 if (
TBB == LoopBB && FBB == LoopBB)
5226 assert((
TBB == LoopBB || FBB == LoopBB) &&
5227 "The Loop must be a single-basic-block loop");
5238 if (!Reg.isVirtual())
5240 return MRI.getVRegDef(Reg);
5245 if (LHS && LHS->isPHI())
5247 if (RHS && RHS->isPHI())
5250 return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS,
Cond);
5256 Opc = RVVMCOpcode ? RVVMCOpcode :
Opc;
5273 case RISCV::FDIV_H_INX:
5274 case RISCV::FDIV_S_INX:
5275 case RISCV::FDIV_D_INX:
5276 case RISCV::FDIV_D_IN32X:
5277 case RISCV::FSQRT_H:
5278 case RISCV::FSQRT_S:
5279 case RISCV::FSQRT_D:
5280 case RISCV::FSQRT_H_INX:
5281 case RISCV::FSQRT_S_INX:
5282 case RISCV::FSQRT_D_INX:
5283 case RISCV::FSQRT_D_IN32X:
5285 case RISCV::VDIV_VV:
5286 case RISCV::VDIV_VX:
5287 case RISCV::VDIVU_VV:
5288 case RISCV::VDIVU_VX:
5289 case RISCV::VREM_VV:
5290 case RISCV::VREM_VX:
5291 case RISCV::VREMU_VV:
5292 case RISCV::VREMU_VX:
5294 case RISCV::VFDIV_VV:
5295 case RISCV::VFDIV_VF:
5296 case RISCV::VFRDIV_VF:
5297 case RISCV::VFSQRT_V:
5298 case RISCV::VFRSQRT7_V:
5304 if (
MI->getOpcode() != TargetOpcode::COPY)
5309 Register DstReg =
MI->getOperand(0).getReg();
5311 ?
MRI.getRegClass(DstReg)
5312 :
TRI->getMinimalPhysRegClass(DstReg);
5322 auto [RCLMul, RCFractional] =
5324 return (!RCFractional && LMul == RCLMul) || (RCFractional && LMul == 1);
5328 if (
MI.memoperands_empty())
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg, unsigned NumRegs)
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
SmallVector< int16_t, MAX_SRC_OPERANDS_NUM > OperandIndices
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
Register const TargetRegisterInfo * TRI
Promote Memory to Register
This file provides utility analysis objects describing memory locations.
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
static bool cannotInsertTailCall(const MachineBasicBlock &MBB)
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP)
#define CASE_FP_WIDEOP_OPCODE_LMULS(OP)
#define CASE_OPERAND_SIMM(NUM)
static std::optional< unsigned > getLMULForRVVWholeLoadStore(unsigned Opcode)
#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern)
std::optional< unsigned > getFoldedOpcode(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, const RISCVSubtarget &ST)
#define RVV_OPC_LMUL_CASE(OPC, INV)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs)
static unsigned getAddendOperandIdx(unsigned Pattern)
#define CASE_RVV_OPCODE_UNMASK(OP)
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
#define CASE_VFMA_SPLATS(OP)
unsigned getPredicatedOpcode(unsigned Opcode)
#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP)
#define CASE_WIDEOP_OPCODE_LMULS(OP)
static bool isMIReadsReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, MCRegister RegNo)
#define OPCODE_LMUL_MASK_CASE(OPC)
#define CASE_OPERAND_UIMM_LSB_ZEROS(BITS, SUFFIX)
static bool isFSUB(unsigned Opc)
#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
#define CASE_RVV_OPCODE(OP)
static std::optional< int64_t > getEffectiveImm(const MachineOperand &MO)
#define CASE_VFMA_OPCODE_VV(OP)
MachineOutlinerConstructionID
#define CASE_RVV_OPCODE_WIDEN(OP)
static unsigned getLoadPredicatedOpcode(unsigned Opcode)
static unsigned getSHXADDUWShiftAmount(unsigned Opc)
#define CASE_VMA_OPCODE_LMULS(OP, TYPE)
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVVType::VLMUL LMul)
static bool isFMUL(unsigned Opc)
static unsigned getInverseXqcicmOpcode(unsigned Opcode)
static bool getFPPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
#define OPCODE_LMUL_CASE(OPC)
#define CASE_OPERAND_UIMM(NUM)
static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB, const MachineOperand &MO, unsigned OuterShiftAmt)
Utility routine that checks if.
static bool isCandidatePatchable(const MachineBasicBlock &MBB)
static bool isFADD(unsigned Opc)
static void genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg)
static bool isLoadImm(const MachineInstr *MI, int64_t &Imm)
static bool isMIModifiesReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, MCRegister RegNo)
static bool canCombineFPFusedMultiply(const MachineInstr &Root, const MachineOperand &MO, bool DoRegPressureReduce)
static bool getSHXADDPatterns(const MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns)
static bool getFPFusedMultiplyPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
static cl::opt< MachineTraceStrategy > ForceMachineCombinerStrategy("riscv-force-machine-combiner-strategy", cl::Hidden, cl::desc("Force machine combiner to use a specific strategy for machine " "trace metrics evaluation."), cl::init(MachineTraceStrategy::TS_NumStrategies), cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local", "Local strategy."), clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr", "MinInstrCount strategy.")))
static unsigned getSHXADDShiftAmount(unsigned Opc)
#define CASE_RVV_OPCODE_MASK(OP)
#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)
static MachineInstr * canFoldAsPredicatedOp(Register Reg, const MachineRegisterInfo &MRI, const TargetInstrInfo *TII, const RISCVSubtarget &STI)
Identify instructions that can be folded into a CCMOV instruction, and return the defining instructio...
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, unsigned CombineOpc=0)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
bool empty() const
empty - Check if the array is empty.
static LLVM_ABI DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)
Attempts to merge LocA and LocB into a single location; see DebugLoc::getMergedLocation for more deta...
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LiveInterval - This class represents the liveness of a register, or stack slot.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
LLVM_ABI void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
static LocationSize precise(uint64_t Value)
TypeSize getValue() const
MCInstBuilder & addReg(MCRegister Reg)
Add a new register operand.
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
This holds information about one operand of a machine instruction, indicating the register class for ...
Wrapper class representing physical registers. Should be passed by value.
const FeatureBitset & getFeatureBits() const
MachineInstrBundleIterator< const MachineInstr > const_iterator
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
MachineInstrBundleIterator< MachineInstr > iterator
MachineInstrBundleIterator< const MachineInstr, true > const_reverse_iterator
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setStackID(int ObjectIdx, uint8_t ID)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addUse(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isReturn(QueryType Type=AnyInBundle) const
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
LLVM_ABI unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
LLVM_ABI bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
LLVM_ABI bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
LLVM_ABI const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
bool isNonTemporal() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
@ MO_Immediate
Immediate operand.
@ MO_Register
Register operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
A Module instance is used to store all the information related to an LLVM module.
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstrIdxForVirtReg) const override
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
void mulImm(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, uint32_t Amt, MachineInstr::MIFlag Flag) const
Generate code to multiply the value in DestReg by Amt - handles all the common optimizations for this...
static bool isPairableLdStInstOpc(unsigned Opc)
Return true if pairing the given load or store may be paired with another.
RISCVInstrInfo(const RISCVSubtarget &STI)
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const override
static bool isLdStSafeToPair(const MachineInstr &LdSt, const TargetRegisterInfo *TRI)
void copyPhysRegVector(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc, const TargetRegisterClass *RegClass) const
bool isReMaterializableImpl(const MachineInstr &MI) const override
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool isVRegCopy(const MachineInstr *MI, unsigned LMul=0) const
Return true if MI is a COPY to a vector register of a specific LMul, or any kind of vector registers ...
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const
unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const override
void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const override
const RISCVSubtarget & STI
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
std::optional< unsigned > getInverseOpcode(unsigned Opcode) const override
bool simplifyInstruction(MachineInstr &MI) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
MachineTraceStrategy getMachineCombinerTraceStrategy() const override
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MCInst getNop() const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool analyzeCandidate(outliner::Candidate &C) const
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
bool requiresNTLHint(const MachineInstr &MI) const
Return true if the instruction requires an NTL hint to be emitted.
void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
static RISCVCC::CondCode getCondFromBranchOpc(unsigned Opc)
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
CombinerObjective getCombinerObjective(unsigned Pattern) const override
bool isHighLatencyDef(int Opc) const override
static bool evaluateCondBranch(RISCVCC::CondCode CC, int64_t C0, int64_t C1)
Return the result of the evaluation of C0 CC C1, where CC is a RISCVCC::CondCode.
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
bool optimizeCondBranch(MachineInstr &MI) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
static bool isFromLoadImm(const MachineRegisterInfo &MRI, const MachineOperand &Op, int64_t &Imm)
Return true if the operand is a load immediate instruction and sets Imm to the immediate value.
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
int getBranchRelaxationScratchFrameIndex() const
const RISCVRegisterInfo * getRegisterInfo() const override
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
SlotIndex - An opaque wrapper around machine indexes.
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
MI-level Statepoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given statepoint should emit.
StringRef - Represent a constant reference to a string, i.e.
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< Register, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const
The returned array encodes the operand index for each parameter because the operands may be commuted;...
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when \P Inst has reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
const uint8_t TSFlags
Configurable target specific flags.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetInstrInfo * getInstrInfo() const
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Target - Wrapper for Target specific information.
static constexpr TypeSize getFixed(ScalarTy ExactSize)
static constexpr TypeSize getZero()
static constexpr TypeSize getScalable(ScalarTy MinimumSize)
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
CondCode getInverseBranchCondition(CondCode)
unsigned getBrCond(CondCode CC, unsigned SelectOpc=0)
static bool isValidRoundingMode(unsigned Mode)
static StringRef roundingModeToString(RoundingMode RndMode)
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
static bool usesMaskPolicy(uint64_t TSFlags)
static bool hasRoundModeOp(uint64_t TSFlags)
static unsigned getVLOpNum(const MCInstrDesc &Desc)
static bool hasVLOp(uint64_t TSFlags)
static MCRegister getTailExpandUseRegNo(const FeatureBitset &FeatureBits)
static int getFRMOpNum(const MCInstrDesc &Desc)
static int getVXRMOpNum(const MCInstrDesc &Desc)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool usesVXRM(uint64_t TSFlags)
static bool isRVVWideningReduction(uint64_t TSFlags)
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
SmallVector< Inst, 8 > InstSeq
@ OPERAND_UIMMLOG2XLEN_NONZERO
@ OPERAND_SIMM12_LSB00000
@ OPERAND_FIRST_RISCV_IMM
@ OPERAND_UIMM10_LSB00_NONZERO
@ OPERAND_SIMM10_LSB0000_NONZERO
@ OPERAND_ATOMIC_ORDERING
static unsigned getNF(uint8_t TSFlags)
static RISCVVType::VLMUL getLMul(uint8_t TSFlags)
static bool isTailAgnostic(unsigned VType)
LLVM_ABI void printXSfmmVType(unsigned VType, raw_ostream &OS)
LLVM_ABI std::pair< unsigned, bool > decodeVLMUL(VLMUL VLMul)
static bool isValidSEW(unsigned SEW)
LLVM_ABI void printVType(unsigned VType, raw_ostream &OS)
static bool isValidXSfmmVType(unsigned VTypeI)
static unsigned getSEW(unsigned VType)
static VLMUL getVLMUL(unsigned VType)
static bool isValidRoundingMode(unsigned Mode)
static StringRef roundingModeToString(RoundingMode RndMode)
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2)
bool isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS)
Given two VL operands, do we know that LHS <= RHS?
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
unsigned getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW)
std::optional< unsigned > getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
static constexpr unsigned RVVBitsPerBlock
bool isRVVSpill(const MachineInstr &MI)
static constexpr unsigned RVVBytesPerBlock
static constexpr int64_t VLMaxSentinel
bool isVectorCopy(const TargetRegisterInfo *TRI, const MachineInstr &MI)
Return true if MI is a copy that will be lowered to one or more vmvNr.vs.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
@ TS_Local
Select the trace that contains only the current basic block.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static const MachineMemOperand::Flags MONontemporalBit1
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
constexpr bool isInt(int64_t x)
Checks if an integer fits into the given bit width.
RegState
Flags to represent properties of register accesses.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ Define
Register definition.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), such that A is the 0-based index of the item in the sequence, and B, C, ..., are the values from the original input ranges.
bool isValidAtomicOrdering(Int I)
constexpr RegState getKillRegState(bool B)
static const MachineMemOperand::Flags MONontemporalBit0
constexpr RegState getDeadRegState(bool B)
constexpr bool has_single_bit(T Value) noexcept
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
constexpr RegState getRenamableRegState(bool B)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
LLVM_ABI void report_fatal_error(Error Err, bool gen_crash_diag=true)
constexpr RegState getDefRegState(bool B)
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
constexpr bool isUInt(uint64_t x)
Checks if an unsigned integer fits into the given bit width.
CodeGenOptLevel
Code generation optimization level.
int isShifted359(T Value, int &Shift)
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
constexpr bool isShiftedInt(int64_t x)
Checks if a signed integer is an N bit number shifted left by S.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent to: C.erase(remove_if(C.begin(), C.end(), pred), C.end());
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value, returning the original object being addressed.
constexpr bool isShiftedUInt(uint64_t x)
Checks if an unsigned integer is an N bit number shifted left by S.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
This struct is a compact representation of a valid (non-zero power of two) alignment.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
static bool isRVVRegClass(const TargetRegisterClass *RC)
Used to describe a register and immediate addition.
An individual sequence of instructions to be replaced with a call to an outlined function.
MachineFunction * getMF() const
The information necessary to create an outlined function for some class of candidate.