#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"
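// The TableGen-generated .inc files above supply the compress-instruction
// checks, the RISCVGenInstrInfo ctor/dtor, and the named-operand accessors
// used throughout this file.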
#define DEBUG_TYPE "riscv-instr-info"

STATISTIC(NumVRegSpilled,
          "Number of registers within vector register groups spilled");
STATISTIC(NumVRegReloaded,
          "Number of registers within vector register groups reloaded");

static cl::opt<bool> PreferWholeRegisterMove(
    "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
    cl::desc("Prefer whole register move for vector registers."));

static cl::opt<MachineTraceStrategy> ForceMachineCombinerStrategy(
    "riscv-force-machine-combiner-strategy", cl::Hidden,
    cl::desc("Force machine combiner to use a specific strategy for machine "
             "trace metrics evaluation."),
    cl::init(MachineTraceStrategy::TS_NumStrategies),
    cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local",
                          "Local strategy."),
               clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr",
                          "MinInstrCount strategy.")));

#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

RISCVInstrInfo::RISCVInstrInfo(RISCVSubtarget &STI)
    : RISCVGenInstrInfo(STI, RISCV::ADJCALLSTACKDOWN, RISCV::ADJCALLSTACKUP),
      STI(STI) {}

#define GET_INSTRINFO_HELPERS
#include "RISCVGenInstrInfo.inc"
  if (STI.hasStdExtZca())
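// isLoadFromStackSlot: recognize instructions that load a register from a
// stack slot, returning the destination register and the frame index.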
                                             int &FrameIndex) const {
  case RISCV::VL1RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL1RE64_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL2RE64_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL4RE64_V:
  case RISCV::VL8RE8_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL8RE64_V:
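// Whole-register vector loads (VL<N>R) are grouped by register-group size
// (LMUL 1/2/4/8); the element width in the mnemonic does not change how many
// bytes are transferred.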
  switch (MI.getOpcode()) {
  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
    if (!MI.getOperand(1).isFI())
      return Register();
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
                                            int &FrameIndex) const {
  switch (MI.getOpcode()) {
    if (!MI.getOperand(1).isFI())
      return Register();
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
    if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0) {
      FrameIndex = MI.getOperand(1).getIndex();
      return MI.getOperand(0).getReg();
  case RISCV::VFMV_V_F:
  case RISCV::VFMV_S_F:
    return MI.getOperand(1).isUndef();
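// A forward copy of a register tuple clobbers its own source exactly when
// the destination starts strictly inside [SrcReg, SrcReg + NumRegs).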
  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");
  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;
    if (MBBI->isMetaInstruction())
    if (RISCVInstrInfo::isVectorConfigInstr(*MBBI)) {
        unsigned FirstVType = MBBI->getOperand(2).getImm();
        if (FirstLMul != LMul)
      if (!RISCVInstrInfo::isVLPreservingConfig(*MBBI))
      unsigned VType = MBBI->getOperand(2).getImm();
    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
    } else if (MBBI->getNumDefs()) {
      if (MBBI->modifiesRegister(RISCV::VL, nullptr))
        if (!MO.isReg() || !MO.isDef())
        if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {
        if (MO.getReg() != SrcReg)
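// copyPhysRegVector: lower a vector register-group copy by walking the
// physical register encodings and emitting the widest whole-register moves
// (or VMV_V_V/VMV_V_I pseudos when the source definition is known) that the
// encoding alignment permits.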
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  uint16_t DstEncoding = TRI->getEncodingValue(DstReg);
  assert(!Fractional && "It is impossible be fractional lmul here.");
  unsigned NumRegs = NF * LMulVal;
    SrcEncoding += NumRegs - 1;
    DstEncoding += NumRegs - 1;
                         unsigned, unsigned> {
    uint16_t Diff = DstEncoding - SrcEncoding;
    if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
        DstEncoding % 8 == 7)
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
        DstEncoding % 4 == 3)
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
        DstEncoding % 2 == 1)
              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
    if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
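// Copy loop: each iteration moves the largest legal chunk at the current
// encodings, then advances (or retreats, for an overlapping reversed copy)
// by the number of registers just copied.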
  while (I != NumRegs) {
    auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
        GetCopyInfo(SrcEncoding, DstEncoding);
    if (LMul == LMulCopied &&
      if (DefMBBI->getOpcode() == VIOpc)
        RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
        RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
      MIB = MIB.add(DefMBBI->getOperand(2));
      MIB.addImm(Log2SEW ? Log2SEW : 3);
    SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
    DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
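// copyPhysReg: dispatch on the register classes of the two operands, handling
// GPRs, GPR pairs (Zdinx on RV32), VCSR reads, FPRs of each width, and
// GPR<->FPR moves before falling back to vector register groups.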
                                 bool RenamableDest, bool RenamableSrc) const {
  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
  if (RISCV::GPRF16RegClass.contains(DstReg, SrcReg)) {
  if (RISCV::GPRF32RegClass.contains(DstReg, SrcReg)) {
  if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {
    if (STI.isRV32() && STI.hasStdExtZdinx()) {
    MCRegister EvenReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_even);
    MCRegister OddReg = TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd);
    if (OddReg == RISCV::DUMMY_REG_PAIR_WITH_X0)
    assert(DstReg != RISCV::X0_Pair && "Cannot write to X0_Pair");
            TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
        .addReg(EvenReg, KillFlag)
            TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {
        .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)
  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    if (STI.hasStdExtZfh()) {
      Opc = RISCV::FSGNJ_H;
             (STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
                 "Unexpected extensions");
      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;
        .addReg(SrcReg, KillFlag);
  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
        .addReg(SrcReg, KillFlag);
  if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
        .addReg(SrcReg, KillFlag);
  if (RISCV::FPR32RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
        .addReg(SrcReg, KillFlag);
  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR32RegClass.contains(SrcReg)) {
        .addReg(SrcReg, KillFlag);
  if (RISCV::FPR64RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");
        .addReg(SrcReg, KillFlag);
  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR64RegClass.contains(SrcReg)) {
    assert(STI.getXLen() == 64 && "Unexpected GPR size");
        .addReg(SrcReg, KillFlag);
      TRI->getCommonMinimalPhysRegClass(SrcReg, DstReg);
                                           Register SrcReg, bool IsKill, int FI,
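// storeRegToStackSlot: choose the spill opcode from the register class.
// Scalar classes use plain stores, single vector groups use whole-register
// stores (VS<N>R_V), and segment tuples use PseudoVSPILL<NF>_M<LMUL>.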
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
                                                                : RISCV::SD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxSD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSH;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSW;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSD;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS1R_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS2R_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS4R_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS8R_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = RegInfo.getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
                                                                : RISCV::LD;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LH_INX;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LW_INX;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxLD;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLH;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLW;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLD;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL1RE8_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL2RE8_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL4RE8_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL8RE8_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
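// foldMemoryOperandImpl: fold a stack-slot reload into the instruction's
// only register use, e.g. turning a sign/zero extend or a scalar move out of
// a vector (VMV_X_S / VFMV_F_S) into a suitably sized load from the slot.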
  if (Ops.size() != 1 || Ops[0] != 1)
  switch (MI.getOpcode()) {
    if (RISCVInstrInfo::isSEXT_W(MI))
    if (RISCVInstrInfo::isZEXT_W(MI))
    if (RISCVInstrInfo::isZEXT_B(MI))
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
  case RISCV::VMV_X_S: {
    if (ST.getXLen() < (1U << Log2SEW))
  case RISCV::VFMV_F_S: {
  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(*LoadOpc),
  case RISCV::LB:
    return RISCV::PseudoCCLB;
  case RISCV::LBU:
    return RISCV::PseudoCCLBU;
  case RISCV::LH:
    return RISCV::PseudoCCLH;
  case RISCV::LHU:
    return RISCV::PseudoCCLHU;
  case RISCV::LW:
    return RISCV::PseudoCCLW;
  case RISCV::LWU:
    return RISCV::PseudoCCLWU;
  case RISCV::LD:
    return RISCV::PseudoCCLD;
  if (MI.getOpcode() != RISCV::PseudoCCMOVGPR)
  if (!STI.hasShortForwardBranchILoad() || !PredOpc)
  if (Ops.size() != 1 || (Ops[0] != 4 && Ops[0] != 5))
  bool Invert = Ops[0] == 5;
  if (!MRI.constrainRegClass(DestReg, PreviousClass))
                 MI.getDebugLoc(), get(PredOpc), DestReg)
      .add({MI.getOperand(1), MI.getOperand(2)});
                            bool DstIsDead) const {
  bool SrcRenamable = false;
    bool LastItem = ++Num == Seq.size();
    switch (Inst.getOpndKind()) {
          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)
          .addReg(SrcReg, SrcRegState)
    SrcRenamable = DstRenamable;
  case RISCV::CV_BEQIMM:
  case RISCV::QC_BEQI:
  case RISCV::QC_E_BEQI:
  case RISCV::NDS_BBC:
  case RISCV::NDS_BEQC:
  case RISCV::QC_BNEI:
  case RISCV::QC_E_BNEI:
  case RISCV::CV_BNEIMM:
  case RISCV::NDS_BBS:
  case RISCV::NDS_BNEC:
  case RISCV::QC_BLTI:
  case RISCV::QC_E_BLTI:
  case RISCV::QC_BGEI:
  case RISCV::QC_E_BGEI:
  case RISCV::QC_BLTUI:
  case RISCV::QC_E_BLTUI:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BGEUI:
                     "Unknown conditional branch");
  case RISCV::QC_MVEQ:
    return RISCV::QC_MVNE;
  case RISCV::QC_MVNE:
    return RISCV::QC_MVEQ;
  case RISCV::QC_MVLT:
    return RISCV::QC_MVGE;
  case RISCV::QC_MVGE:
    return RISCV::QC_MVLT;
  case RISCV::QC_MVLTU:
    return RISCV::QC_MVGEU;
  case RISCV::QC_MVGEU:
    return RISCV::QC_MVLTU;
  case RISCV::QC_MVEQI:
    return RISCV::QC_MVNEI;
  case RISCV::QC_MVNEI:
    return RISCV::QC_MVEQI;
  case RISCV::QC_MVLTI:
    return RISCV::QC_MVGEI;
  case RISCV::QC_MVGEI:
    return RISCV::QC_MVLTI;
  case RISCV::QC_MVLTUI:
    return RISCV::QC_MVGEUI;
  case RISCV::QC_MVGEUI:
    return RISCV::QC_MVLTUI;
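// Map each Select_* pseudo to the conditional-branch opcode implementing its
// condition, covering the Zibi, CORE-V (CV), Qualcomm (QC), and Andes (NDS)
// vendor branch forms.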
  switch (SelectOpc) {
  case RISCV::Select_GPR_Using_CC_Imm5_Zibi:
  case RISCV::Select_GPR_Using_CC_SImm5_CV:
      return RISCV::CV_BEQIMM;
      return RISCV::CV_BNEIMM;
  case RISCV::Select_GPRNoX0_Using_CC_SImm5NonZero_QC:
      return RISCV::QC_BEQI;
      return RISCV::QC_BNEI;
      return RISCV::QC_BLTI;
      return RISCV::QC_BGEI;
  case RISCV::Select_GPRNoX0_Using_CC_UImm5NonZero_QC:
      return RISCV::QC_BLTUI;
      return RISCV::QC_BGEUI;
  case RISCV::Select_GPRNoX0_Using_CC_SImm16NonZero_QC:
      return RISCV::QC_E_BEQI;
      return RISCV::QC_E_BNEI;
      return RISCV::QC_E_BLTI;
      return RISCV::QC_E_BGEI;
  case RISCV::Select_GPRNoX0_Using_CC_UImm16NonZero_QC:
      return RISCV::QC_E_BLTUI;
      return RISCV::QC_E_BGEUI;
  case RISCV::Select_GPR_Using_CC_UImmLog2XLen_NDS:
      return RISCV::NDS_BBC;
      return RISCV::NDS_BBS;
  case RISCV::Select_GPR_Using_CC_UImm7_NDS:
      return RISCV::NDS_BEQC;
      return RISCV::NDS_BNEC;
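// analyzeBranch: scan the terminators from the end of the block, counting
// them and remembering the first unconditional or indirect branch so that,
// with AllowModify, everything after it can be erased.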
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
    I = FirstUncondOrIndirectBr;
  if (I->getDesc().isIndirectBranch())
  if (I->isPreISelOpcode())
  if (NumTerminators > 2)
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
                                   int *BytesRemoved) const {
  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
  I->eraseFromParent();
  if (I == MBB.begin())
  if (!I->getDesc().isConditionalBranch())
  I->eraseFromParent();
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
         "RISC-V branch conditions have two components!");
  assert(RS && "RegScavenger required for long branching");
         "new block should be inserted for expanding unconditional branch");
         "restore block should be inserted for restoring clobbered registers");
1430 "Branch offsets outside of the signed 32-bit range not supported");
1435 Register ScratchReg =
MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
1436 auto II =
MBB.end();
1442 RS->enterBasicBlockEnd(
MBB);
1444 if (
STI.hasStdExtZicfilp())
1445 RC = &RISCV::GPRX7RegClass;
1447 RS->scavengeRegisterBackwards(*RC,
MI.getIterator(),
1451 RS->setRegUsed(TmpGPR);
1456 TmpGPR =
STI.hasStdExtE() ? RISCV::X9 : RISCV::X27;
1458 if (
STI.hasStdExtZicfilp())
1462 if (FrameIndex == -1)
1467 TRI->eliminateFrameIndex(std::prev(
MI.getIterator()),
1470 MI.getOperand(1).setMBB(&RestoreBB);
1474 TRI->eliminateFrameIndex(RestoreBB.
back(),
1478 MRI.replaceRegWith(ScratchReg, TmpGPR);
1479 MRI.clearVirtRegs();
  assert((Cond.size() == 3) && "Invalid branch condition!");
    Cond[0].setImm(RISCV::BNE);
    Cond[0].setImm(RISCV::BNEI);
    Cond[0].setImm(RISCV::BEQ);
    Cond[0].setImm(RISCV::BEQI);
    Cond[0].setImm(RISCV::BGE);
    Cond[0].setImm(RISCV::BLT);
    Cond[0].setImm(RISCV::BGEU);
    Cond[0].setImm(RISCV::BLTU);
  case RISCV::CV_BEQIMM:
    Cond[0].setImm(RISCV::CV_BNEIMM);
  case RISCV::CV_BNEIMM:
    Cond[0].setImm(RISCV::CV_BEQIMM);
  case RISCV::QC_BEQI:
    Cond[0].setImm(RISCV::QC_BNEI);
  case RISCV::QC_BNEI:
    Cond[0].setImm(RISCV::QC_BEQI);
  case RISCV::QC_BGEI:
    Cond[0].setImm(RISCV::QC_BLTI);
  case RISCV::QC_BLTI:
    Cond[0].setImm(RISCV::QC_BGEI);
  case RISCV::QC_BGEUI:
    Cond[0].setImm(RISCV::QC_BLTUI);
  case RISCV::QC_BLTUI:
    Cond[0].setImm(RISCV::QC_BGEUI);
  case RISCV::QC_E_BEQI:
    Cond[0].setImm(RISCV::QC_E_BNEI);
  case RISCV::QC_E_BNEI:
    Cond[0].setImm(RISCV::QC_E_BEQI);
  case RISCV::QC_E_BGEI:
    Cond[0].setImm(RISCV::QC_E_BLTI);
  case RISCV::QC_E_BLTI:
    Cond[0].setImm(RISCV::QC_E_BGEI);
  case RISCV::QC_E_BGEUI:
    Cond[0].setImm(RISCV::QC_E_BLTUI);
  case RISCV::QC_E_BLTUI:
    Cond[0].setImm(RISCV::QC_E_BGEUI);
  case RISCV::NDS_BBC:
    Cond[0].setImm(RISCV::NDS_BBS);
  case RISCV::NDS_BBS:
    Cond[0].setImm(RISCV::NDS_BBC);
  case RISCV::NDS_BEQC:
    Cond[0].setImm(RISCV::NDS_BNEC);
  case RISCV::NDS_BNEC:
    Cond[0].setImm(RISCV::NDS_BEQC);
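// isLoadImm: match an "ADDI rd, x0, imm" materialization (x0 itself loads 0)
// so the branch optimization below can fold small constants into the
// comparison.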
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0) {
    Imm = MI->getOperand(2).getImm();
  if (Reg == RISCV::X0) {
  return Reg.isVirtual() && isLoadImm(MRI.getVRegDef(Reg), Imm);
  bool IsSigned = false;
  bool IsEquality = false;
  switch (MI.getOpcode()) {
  MI.eraseFromParent();
  auto searchConst = [&](int64_t C1) -> Register {
    auto DefC1 = std::find_if(++II, E, [&](const MachineInstr &I) -> bool {
             I.getOperand(0).getReg().isVirtual();
    return DefC1->getOperand(0).getReg();
      MRI.hasOneUse(LHS.getReg()) && (IsSigned || C0 != -1)) {
    if (Register RegZ = searchConst(C0 + 1)) {
      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();
      MRI.hasOneUse(RHS.getReg())) {
    if (Register RegZ = searchConst(C0 - 1)) {
      MRI.clearKillFlags(RegZ);
      MI.eraseFromParent();
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
                                           int64_t BrOffset) const {
  unsigned XLen = STI.getXLen();
  case RISCV::NDS_BBC:
  case RISCV::NDS_BBS:
  case RISCV::NDS_BEQC:
  case RISCV::NDS_BNEC:
  case RISCV::CV_BEQIMM:
  case RISCV::CV_BNEIMM:
  case RISCV::QC_BEQI:
  case RISCV::QC_BNEI:
  case RISCV::QC_BGEI:
  case RISCV::QC_BLTI:
  case RISCV::QC_BLTUI:
  case RISCV::QC_BGEUI:
  case RISCV::QC_E_BEQI:
  case RISCV::QC_E_BNEI:
  case RISCV::QC_E_BGEI:
  case RISCV::QC_E_BLTI:
  case RISCV::QC_E_BLTUI:
  case RISCV::QC_E_BGEUI:
  case RISCV::PseudoBR:
  case RISCV::PseudoJump:
  case RISCV::ADD:   return RISCV::PseudoCCADD;
  case RISCV::SUB:   return RISCV::PseudoCCSUB;
  case RISCV::SLL:   return RISCV::PseudoCCSLL;
  case RISCV::SRL:   return RISCV::PseudoCCSRL;
  case RISCV::SRA:   return RISCV::PseudoCCSRA;
  case RISCV::AND:   return RISCV::PseudoCCAND;
  case RISCV::OR:    return RISCV::PseudoCCOR;
  case RISCV::XOR:   return RISCV::PseudoCCXOR;
  case RISCV::MAX:   return RISCV::PseudoCCMAX;
  case RISCV::MAXU:  return RISCV::PseudoCCMAXU;
  case RISCV::MIN:   return RISCV::PseudoCCMIN;
  case RISCV::MINU:  return RISCV::PseudoCCMINU;
  case RISCV::MUL:   return RISCV::PseudoCCMUL;
  case RISCV::LUI:   return RISCV::PseudoCCLUI;
  case RISCV::QC_LI:   return RISCV::PseudoCCQC_LI;
  case RISCV::QC_E_LI: return RISCV::PseudoCCQC_E_LI;

  case RISCV::ADDI:  return RISCV::PseudoCCADDI;
  case RISCV::SLLI:  return RISCV::PseudoCCSLLI;
  case RISCV::SRLI:  return RISCV::PseudoCCSRLI;
  case RISCV::SRAI:  return RISCV::PseudoCCSRAI;
  case RISCV::ANDI:  return RISCV::PseudoCCANDI;
  case RISCV::ORI:   return RISCV::PseudoCCORI;
  case RISCV::XORI:  return RISCV::PseudoCCXORI;

  case RISCV::ADDW:  return RISCV::PseudoCCADDW;
  case RISCV::SUBW:  return RISCV::PseudoCCSUBW;
  case RISCV::SLLW:  return RISCV::PseudoCCSLLW;
  case RISCV::SRLW:  return RISCV::PseudoCCSRLW;
  case RISCV::SRAW:  return RISCV::PseudoCCSRAW;

  case RISCV::ADDIW: return RISCV::PseudoCCADDIW;
  case RISCV::SLLIW: return RISCV::PseudoCCSLLIW;
  case RISCV::SRLIW: return RISCV::PseudoCCSRLIW;
  case RISCV::SRAIW: return RISCV::PseudoCCSRAIW;

  case RISCV::ANDN:  return RISCV::PseudoCCANDN;
  case RISCV::ORN:   return RISCV::PseudoCCORN;
  case RISCV::XNOR:  return RISCV::PseudoCCXNOR;

  case RISCV::NDS_BFOS: return RISCV::PseudoCCNDS_BFOS;
  case RISCV::NDS_BFOZ: return RISCV::PseudoCCNDS_BFOZ;
  }

  return RISCV::INSTRUCTION_LIST_END;
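// canFoldAsPredicatedOp: decide whether the single user of the condition can
// become a PseudoCC* predicated op; MIN/MAX/MUL are only allowed when the
// matching short-forward-branch subtarget feature is present.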
  if (!Reg.isVirtual())
  if (!MRI.hasOneNonDBGUse(Reg))
  if (!STI.hasShortForwardBranchIMinMax() &&
      (MI->getOpcode() == RISCV::MAX || MI->getOpcode() == RISCV::MIN ||
       MI->getOpcode() == RISCV::MINU || MI->getOpcode() == RISCV::MAXU))
  if (!STI.hasShortForwardBranchIMul() && MI->getOpcode() == RISCV::MUL)
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0)
    if (MO.isFI() || MO.isCPI() || MO.isJTI())
    if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))
  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(DontMoveAcrossStores))
                                   unsigned &TrueOp, unsigned &FalseOp,
                                   bool &Optimizable) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  Cond.push_back(MI.getOperand(1));
  Cond.push_back(MI.getOperand(2));
  Cond.push_back(MI.getOperand(3));
  Optimizable = STI.hasShortForwardBranchIALU();
                                                bool PreferFalse) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  if (!STI.hasShortForwardBranchIALU())
  bool Invert = !DefMI;
  Register DestReg = MI.getOperand(0).getReg();
  if (!MRI.constrainRegClass(DestReg, PreviousClass))
  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");
  NewMI.add(MI.getOperand(1));
  NewMI.add(MI.getOperand(2));
  NewMI.add(FalseReg);
  if (DefMI->getParent() != MI.getParent())
  DefMI->eraseFromParent();
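// getInstSizeInBytes: size an instruction, accounting for inline asm,
// compression to 2 bytes under Zca, bundles, patchable entry points, and
// the expanded sizes of the pseudos handled in the switch below.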
  if (MI.isMetaInstruction())
  unsigned Opcode = MI.getOpcode();
  if (Opcode == TargetOpcode::INLINEASM ||
      Opcode == TargetOpcode::INLINEASM_BR) {
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
  if (!MI.memoperands_empty()) {
    if (STI.hasStdExtZca()) {
      if (isCompressibleInst(MI, STI))
  if (Opcode == TargetOpcode::BUNDLE)
    return getInstBundleLength(MI);
  if (MI.getParent() && MI.getParent()->getParent()) {
    if (isCompressibleInst(MI, STI))
  case RISCV::PseudoMV_FPR16INX:
  case RISCV::PseudoMV_FPR32INX:
    return STI.hasStdExtZca() ? 2 : 4;
  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
  case TargetOpcode::STATEPOINT: {
    return std::max(NumBytes, 8U);
  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
  case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
  case TargetOpcode::PATCHABLE_TAIL_CALL: {
    if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
        F.hasFnAttribute("patchable-function-entry")) {
      if (F.getFnAttribute("patchable-function-entry")
              .getValueAsString()
              .getAsInteger(10, Num))
        return get(Opcode).getSize();
      return (STI.hasStdExtZca() ? 2 : 4) * Num;
    return STI.is64Bit() ? 68 : 44;
  return get(Opcode).getSize();
unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
  const unsigned Opcode = MI.getOpcode();
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
    return (MI.getOperand(1).isReg() &&
            MI.getOperand(1).getReg() == RISCV::X0) ||
           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
  return MI.isAsCheapAsAMove();
std::optional<DestSourcePair>
  switch (MI.getOpcode()) {
    if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).isReg())
    if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
        MI.getOperand(1).isReg())
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0)
    if (MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0 &&
        MI.getOperand(1).isReg())
  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:
    if (MI.getOperand(1).isReg() && MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).isReg())
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
  return std::nullopt;
  const auto &SchedModel = STI.getSchedModel();
  return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
      RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);
    return RISCV::getNamedOperandIdx(MI->getOpcode(),
                                     RISCV::OpName::frm) < 0;
         "New instructions require FRM whereas the old one does not have it");
  for (auto *NewMI : InsInstrs) {
    if (static_cast<unsigned>(RISCV::getNamedOperandIdx(
            NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
                                                       bool Invert) const {
#define OPCODE_LMUL_CASE(OPC)                                                  \
  case RISCV::OPC##_M1:                                                        \
  case RISCV::OPC##_M2:                                                        \
  case RISCV::OPC##_M4:                                                        \
  case RISCV::OPC##_M8:                                                        \
  case RISCV::OPC##_MF2:                                                       \
  case RISCV::OPC##_MF4:                                                       \
  case RISCV::OPC##_MF8

#define OPCODE_LMUL_MASK_CASE(OPC)                                             \
  case RISCV::OPC##_M1_MASK:                                                   \
  case RISCV::OPC##_M2_MASK:                                                   \
  case RISCV::OPC##_M4_MASK:                                                   \
  case RISCV::OPC##_M8_MASK:                                                   \
  case RISCV::OPC##_MF2_MASK:                                                  \
  case RISCV::OPC##_MF4_MASK:                                                  \
  case RISCV::OPC##_MF8_MASK

    Opcode = *InvOpcode;

#undef OPCODE_LMUL_MASK_CASE
#undef OPCODE_LMUL_CASE
bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,
  const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
  const uint64_t TSFlags = Desc.TSFlags;
  auto checkImmOperand = [&](unsigned OpIdx) {
  auto checkRegOperand = [&](unsigned OpIdx) {
  if (!checkRegOperand(1))
  bool SeenMI2 = false;
  for (auto End = MBB->rend(), It = It1; It != End; ++It) {
    if (It->modifiesRegister(RISCV::V0, TRI)) {
      Register SrcReg = It->getOperand(1).getReg();
      if (MI1VReg != SrcReg)
  assert(SeenMI2 && "Prev is expected to appear before Root");
bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
                                                  bool &Commuted) const {
         "Expect the present of passthrough operand.");
  Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
             areRVVInstsReassociable(Inst, *MI2);
  return areRVVInstsReassociable(Inst, *MI1) &&
         (isVectorAssociativeAndCommutative(*MI1) ||
          isVectorAssociativeAndCommutative(*MI1, true)) &&
  if (!isVectorAssociativeAndCommutative(Inst) &&
      !isVectorAssociativeAndCommutative(Inst, true))
  MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  MI2 = MRI.getUniqueVRegDef(Op2.getReg());
  for (unsigned I = 0; I < 5; ++I)
                                             bool &Commuted) const {
  if (isVectorAssociativeAndCommutative(Inst) ||
      isVectorAssociativeAndCommutative(Inst, true))
    return hasReassociableVectorSibling(Inst, Commuted);
  unsigned OperandIdx = Commuted ? 2 : 1;
  int16_t InstFrmOpIdx =
      RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
  int16_t SiblingFrmOpIdx =
      RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);
  return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
                                                 bool Invert) const {
  if (isVectorAssociativeAndCommutative(Inst, Invert))
    Opc = *InverseOpcode;
std::optional<unsigned>
#define RVV_OPC_LMUL_CASE(OPC, INV)                                            \
  case RISCV::OPC##_M1:                                                        \
    return RISCV::INV##_M1;                                                    \
  case RISCV::OPC##_M2:                                                        \
    return RISCV::INV##_M2;                                                    \
  case RISCV::OPC##_M4:                                                        \
    return RISCV::INV##_M4;                                                    \
  case RISCV::OPC##_M8:                                                        \
    return RISCV::INV##_M8;                                                    \
  case RISCV::OPC##_MF2:                                                       \
    return RISCV::INV##_MF2;                                                   \
  case RISCV::OPC##_MF4:                                                       \
    return RISCV::INV##_MF4;                                                   \
  case RISCV::OPC##_MF8:                                                       \
    return RISCV::INV##_MF8

#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)                                       \
  case RISCV::OPC##_M1_MASK:                                                   \
    return RISCV::INV##_M1_MASK;                                               \
  case RISCV::OPC##_M2_MASK:                                                   \
    return RISCV::INV##_M2_MASK;                                               \
  case RISCV::OPC##_M4_MASK:                                                   \
    return RISCV::INV##_M4_MASK;                                               \
  case RISCV::OPC##_M8_MASK:                                                   \
    return RISCV::INV##_M8_MASK;                                               \
  case RISCV::OPC##_MF2_MASK:                                                  \
    return RISCV::INV##_MF2_MASK;                                              \
  case RISCV::OPC##_MF4_MASK:                                                  \
    return RISCV::INV##_MF4_MASK;                                              \
  case RISCV::OPC##_MF8_MASK:                                                  \
    return RISCV::INV##_MF8_MASK

    return std::nullopt;
    return RISCV::FSUB_H;
    return RISCV::FSUB_S;
    return RISCV::FSUB_D;
    return RISCV::FADD_H;
    return RISCV::FADD_S;
    return RISCV::FADD_D;

#undef RVV_OPC_LMUL_MASK_CASE
#undef RVV_OPC_LMUL_CASE
                              bool DoRegPressureReduce) {
  if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
                              bool DoRegPressureReduce) {
                        DoRegPressureReduce)) {
                        DoRegPressureReduce)) {
                              bool DoRegPressureReduce) {
                            unsigned CombineOpc) {
  if (!MI || MI->getParent() != &MBB || MI->getOpcode() != CombineOpc)
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
                                unsigned OuterShiftAmt) {
  if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:
                                 bool DoRegPressureReduce) const {
                                     DoRegPressureReduce);
    return RISCV::FMADD_H;
    return RISCV::FMADD_S;
    return RISCV::FMADD_D;
  bool Mul1IsKill = Mul1.isKill();
  bool Mul2IsKill = Mul2.isKill();
  bool AddendIsKill = Addend.isKill();
      BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
  assert(OuterShiftAmt != 0 && "Unexpected opcode");
  assert(InnerShiftAmt >= OuterShiftAmt && "Unexpected shift amount");
  switch (InnerShiftAmt - OuterShiftAmt) {
    InnerOpc = RISCV::ADD;
    InnerOpc = RISCV::SH1ADD;
    InnerOpc = RISCV::SH2ADD;
    InnerOpc = RISCV::SH3ADD;
  Register NewVR = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
                          DelInstrs, InstrIdxForVirtReg);
  for (const auto &[Index, Operand] : enumerate(Desc.operands())) {
    unsigned OpType = Operand.OperandType;
        ErrInfo = "Expected an immediate operand.";
      int64_t Imm = MO.getImm();
#define CASE_OPERAND_UIMM(NUM)                                                 \
  case RISCVOp::OPERAND_UIMM##NUM:                                             \
    Ok = isUInt<NUM>(Imm);                                                     \
    break;
#define CASE_OPERAND_SIMM(NUM)                                                 \
  case RISCVOp::OPERAND_SIMM##NUM:                                             \
    Ok = isInt<NUM>(Imm);                                                      \
    break;
        Ok = Imm >= 1 && Imm <= 32;
        Ok = (isUInt<5>(Imm) && Imm != 0) || Imm == -1;
        Ok = Imm >= -15 && Imm <= 16;
        Ok = Ok && Imm != 0;
        Ok = (isUInt<5>(Imm) && Imm != 0) || (Imm >= 0xfffe0 && Imm <= 0xfffff);
        Ok = Imm >= 0 && Imm <= 10;
        Ok = Imm >= 0 && Imm <= 7;
        Ok = Imm >= 1 && Imm <= 10;
        Ok = Imm >= 2 && Imm <= 14;
        Ok = Imm >= 0 && Imm <= 48 && Imm % 16 == 0;
        Ok = Imm == 1 || Imm == 2 || Imm == 4;
          ErrInfo = "Invalid immediate";
        ErrInfo = "Expected a non-register operand.";
        ErrInfo = "Invalid immediate";
        ErrInfo = "Expected a non-register operand.";
        ErrInfo = "Invalid immediate";
        ErrInfo = "Expected a non-register operand.";
        ErrInfo = "Invalid immediate";
      int64_t Imm = MO.getImm();
        ErrInfo = "Invalid immediate";
    } else if (!MO.isReg()) {
      ErrInfo = "Expected a register or immediate operand.";
    if (!Op.isImm() && !Op.isReg()) {
      ErrInfo = "Invalid operand type for VL operand";
    if (Op.isReg() && Op.getReg().isValid()) {
      auto *RC = MRI.getRegClass(Op.getReg());
      if (!RISCV::GPRNoX0RegClass.hasSubClassEq(RC)) {
        ErrInfo = "Invalid register class for VL operand";
      ErrInfo = "VL operand w/o SEW operand?";
    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "SEW value expected to be an immediate";
      ErrInfo = "Unexpected SEW value";
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
      ErrInfo = "Unexpected SEW value";
    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "Policy operand expected to be an immediate";
      ErrInfo = "Invalid Policy Value";
      ErrInfo = "policy operand w/o VL operand?";
    if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
      ErrInfo = "policy operand w/o tied operand?";
        !MI.readsRegister(RISCV::FRM, nullptr)) {
      ErrInfo = "dynamic rounding mode should read FRM";
  case RISCV::LD_RV32:
  case RISCV::SD_RV32:
  int64_t NewOffset = OldOffset + Disp;
         "Addressing mode not supported for folding");
  case RISCV::LD_RV32:
  case RISCV::SD_RV32:
  OffsetIsScalable = false;
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
  return Base1 == Base2;
    int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize,
    unsigned NumBytes) const {
  if (!BaseOps1.empty() && !BaseOps2.empty()) {
  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
      BaseOps1.front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
  return ClusterSize <= 4 && std::abs(Offset1 - Offset2) < CacheLineSize;
  int64_t OffsetA = 0, OffsetB = 0;
  int LowOffset = std::min(OffsetA, OffsetB);
  int HighOffset = std::max(OffsetA, OffsetB);
  LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      LowOffset + (int)LowWidth.getValue() <= HighOffset)
std::pair<unsigned, unsigned>
  return std::make_pair(TF & Mask, TF & ~Mask);
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"},
      {MO_TLSDESC_HI, "riscv-tlsdesc-hi"},
      {MO_TLSDESC_LOAD_LO, "riscv-tlsdesc-load-lo"},
      {MO_TLSDESC_ADD_LO, "riscv-tlsdesc-add-lo"},
      {MO_TLSDESC_CALL, "riscv-tlsdesc-call"}};
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
                                      unsigned &Flags) const {
  return F.getFnAttribute("fentry-call").getValueAsBool() ||
         F.hasFnAttribute("patchable-function-entry");
  return MI.readsRegister(RegNo, TRI) ||
         MI.getDesc().hasImplicitUseOfPhysReg(RegNo);
  return MI.modifiesRegister(RegNo, TRI) ||
         MI.getDesc().hasImplicitDefOfPhysReg(RegNo);
  if (!MBB.back().isReturn())
  if (C.back().isReturn()) {
           "The candidate who uses return instruction must be outlined "
  return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
std::optional<std::unique_ptr<outliner::OutlinedFunction>>
    std::vector<outliner::Candidate> &RepeatedSequenceLocs,
    unsigned MinRepeats) const {
  if (RepeatedSequenceLocs.size() < MinRepeats)
    return std::nullopt;
  unsigned InstrSizeCExt =
  unsigned CallOverhead = 0, FrameOverhead = 0;
  unsigned CFICount = 0;
  for (auto &I : Candidate) {
    if (I.isCFIInstruction())
  std::vector<MCCFIInstruction> CFIInstructions =
      C.getMF()->getFrameInstructions();
  if (CFICount > 0 && CFICount != CFIInstructions.size())
    return std::nullopt;
    CallOverhead = 4 + InstrSizeCExt;
    FrameOverhead = InstrSizeCExt;
    return std::nullopt;
  for (auto &C : RepeatedSequenceLocs)
    C.setCallInfo(MOCI, CallOverhead);
  unsigned SequenceSize = 0;
  for (auto &MI : Candidate)
  return std::make_unique<outliner::OutlinedFunction>(
      RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
                                                  unsigned Flags) const {
      MBB->getParent()->getSubtarget().getRegisterInfo();
  const auto &F = MI.getMF()->getFunction();
  if (MI.isCFIInstruction())
  for (const auto &MO : MI.operands()) {
        (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
         F.hasSection() || F.getSectionPrefix()))
  MBB.addLiveIn(RISCV::X5);
      .addGlobalAddress(M.getNamedValue(MF.getName()),
      .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
  return std::nullopt;
  if (MI.getOpcode() == RISCV::ADDI && MI.getOperand(1).isReg() &&
      MI.getOperand(2).isImm())
    return RegImmPair{MI.getOperand(1).getReg(), MI.getOperand(2).getImm()};
  return std::nullopt;
  std::string GenericComment =
  if (!GenericComment.empty())
    return GenericComment;
    return std::string();
    return std::string();
  std::string Comment;
    switch (OpInfo.OperandType) {
      unsigned Imm = Op.getImm();
      unsigned Imm = Op.getImm();
      unsigned Imm = Op.getImm();
      unsigned Log2SEW = Op.getImm();
      unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
      unsigned Policy = Op.getImm();
             "Invalid Policy Value");
#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL)                                  \
  RISCV::Pseudo##OP##_##LMUL

#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)                                    \
  RISCV::Pseudo##OP##_##LMUL##_MASK

#define CASE_RVV_OPCODE_LMUL(OP, LMUL)                                         \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL):                                       \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)

#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP)                                       \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8):                                        \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4):                                   \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2):                                   \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1):                                    \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2):                                    \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_UNMASK(OP)                                             \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP):                                            \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_MASK_WIDEN(OP)                                         \
  CASE_RVV_OPCODE_MASK_LMUL(OP, MF8):                                          \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4):                                     \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2):                                     \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M1):                                      \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M2):                                      \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_MASK(OP)                                               \
  CASE_RVV_OPCODE_MASK_WIDEN(OP):                                              \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_WIDEN(OP)                                              \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP):                                            \
  case CASE_RVV_OPCODE_MASK_WIDEN(OP)

#define CASE_RVV_OPCODE(OP)                                                    \
  CASE_RVV_OPCODE_UNMASK(OP):                                                  \
  case CASE_RVV_OPCODE_MASK(OP)
#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL)                                 \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VMA_OPCODE_LMULS(OP, TYPE)                                        \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8):                                       \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4):                                  \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2):                                  \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M1):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW)                           \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)                               \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW):                             \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW):                             \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)                              \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW):                                 \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW)                              \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW):                                 \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_VV(OP)                                                \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16):                                     \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VV, E16):                          \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32):                                \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)

#define CASE_VFMA_SPLATS(OP)                                                   \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16):                                 \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP##_ALT, VFPR16, E16):                      \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32):                            \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
                                           unsigned &SrcOpIdx1,
                                           unsigned &SrcOpIdx2) const {
  if (!Desc.isCommutable())
  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ:
    if (MI.getOperand(2).getReg() == RISCV::X0)
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  case RISCV::QC_SELECTIEQ:
  case RISCV::QC_SELECTINE:
  case RISCV::QC_SELECTIIEQ:
  case RISCV::QC_SELECTIINE:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  case RISCV::QC_MVEQ:
  case RISCV::QC_MVNE:
  case RISCV::QC_MVLT:
  case RISCV::QC_MVGE:
  case RISCV::QC_MVLTU:
  case RISCV::QC_MVGEU:
  case RISCV::QC_MVEQI:
  case RISCV::QC_MVNEI:
  case RISCV::QC_MVLTI:
  case RISCV::QC_MVGEI:
  case RISCV::QC_MVLTUI:
  case RISCV::QC_MVGEUI:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 4);
  case RISCV::TH_MULA:
  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULS:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR:
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
    unsigned CommutableOpIdx1 = 1;
    unsigned CommutableOpIdx2 = 3;
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
    if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
    if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
    if (SrcOpIdx1 != CommuteAnyOperandIndex &&
        SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
    if (SrcOpIdx1 == CommuteAnyOperandIndex ||
        SrcOpIdx2 == CommuteAnyOperandIndex) {
      unsigned CommutableOpIdx1 = SrcOpIdx1;
      if (SrcOpIdx1 == SrcOpIdx2) {
        CommutableOpIdx1 = 1;
      } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
        CommutableOpIdx1 = SrcOpIdx2;
      unsigned CommutableOpIdx2;
      if (CommutableOpIdx1 != 1) {
        CommutableOpIdx2 = 1;
        Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
        if (Op1Reg != MI.getOperand(2).getReg())
          CommutableOpIdx2 = 2;
          CommutableOpIdx2 = 3;
      if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL)                \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL:                                \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL;                             \
    break;

#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW)          \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW:                        \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW;                     \
    break;

#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)              \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)             \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW)             \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)                               \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16)                     \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VV, E16)         \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32)                     \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                           \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP##_ALT, NEWOP##_ALT, VFPR16, E16)     \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
                                                     unsigned OpIdx2) const {
    return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
                                                            : RISCV::TH_MVEQZ));
  case RISCV::QC_SELECTIEQ:
  case RISCV::QC_SELECTINE:
  case RISCV::QC_SELECTIIEQ:
  case RISCV::QC_SELECTIINE:
  case RISCV::QC_MVEQ:
  case RISCV::QC_MVNE:
  case RISCV::QC_MVLT:
  case RISCV::QC_MVGE:
  case RISCV::QC_MVLTU:
  case RISCV::QC_MVGEU:
  case RISCV::QC_MVEQI:
  case RISCV::QC_MVNEI:
  case RISCV::QC_MVLTI:
  case RISCV::QC_MVGEI:
  case RISCV::QC_MVLTUI:
  case RISCV::QC_MVGEUI: {
    auto &WorkingMI = cloneIfNew(MI);
  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(CC);
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
    switch (MI.getOpcode()) {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    if (OpIdx1 == 3 || OpIdx2 == 3) {
      switch (MI.getOpcode()) {
      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
#undef CASE_VMA_CHANGE_OPCODE_COMMON
#undef CASE_VMA_CHANGE_OPCODE_LMULS
#undef CASE_VFMA_CHANGE_OPCODE_COMMON
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_M1
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF2
#undef CASE_VFMA_CHANGE_OPCODE_LMULS_MF4
#undef CASE_VFMA_CHANGE_OPCODE_VV
#undef CASE_VFMA_CHANGE_OPCODE_SPLATS

#undef CASE_RVV_OPCODE_UNMASK_LMUL
#undef CASE_RVV_OPCODE_MASK_LMUL
#undef CASE_RVV_OPCODE_LMUL
#undef CASE_RVV_OPCODE_UNMASK_WIDEN
#undef CASE_RVV_OPCODE_UNMASK
#undef CASE_RVV_OPCODE_MASK_WIDEN
#undef CASE_RVV_OPCODE_MASK
#undef CASE_RVV_OPCODE_WIDEN
#undef CASE_RVV_OPCODE

#undef CASE_VMA_OPCODE_COMMON
#undef CASE_VMA_OPCODE_LMULS
#undef CASE_VFMA_OPCODE_COMMON
#undef CASE_VFMA_OPCODE_LMULS_M1
#undef CASE_VFMA_OPCODE_LMULS_MF2
#undef CASE_VFMA_OPCODE_LMULS_MF4
#undef CASE_VFMA_OPCODE_VV
#undef CASE_VFMA_SPLATS
  switch (MI.getOpcode()) {
    if (MI.getOperand(1).getReg() == RISCV::X0)
      commuteInstruction(MI);
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOpcode() == RISCV::XOR &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      MI.getOperand(1).setReg(RISCV::X0);
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDIW));
    if (MI.getOperand(1).getReg() == RISCV::X0)
      commuteInstruction(MI);
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDIW));
  case RISCV::SH1ADD_UW:
  case RISCV::SH2ADD_UW:
  case RISCV::SH3ADD_UW:
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.removeOperand(1);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.removeOperand(2);
      unsigned Opc = MI.getOpcode();
      if (Opc == RISCV::SH1ADD_UW || Opc == RISCV::SH2ADD_UW ||
          Opc == RISCV::SH3ADD_UW) {
        MI.setDesc(get(RISCV::SLLI_UW));
      MI.setDesc(get(RISCV::SLLI));
    if (MI.getOperand(1).getReg() == RISCV::X0 ||
        MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(1).setReg(RISCV::X0);
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
  case RISCV::SLLI_UW:
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(1).getReg() == RISCV::X0 &&
        MI.getOperand(2).getReg() == RISCV::X0) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOpcode() == RISCV::ADD_UW &&
        MI.getOperand(1).getReg() == RISCV::X0) {
      MI.removeOperand(1);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.getOperand(2).setImm(MI.getOperand(2).getImm() != 0);
      MI.setDesc(get(RISCV::ADDI));
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
    if (MI.getOperand(1).getReg() == RISCV::X0) {
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(1).getReg() == MI.getOperand(2).getReg()) {
      MI.getOperand(2).ChangeToImmediate(0);
      MI.setDesc(get(RISCV::ADDI));
    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      MI.setDesc(get(RISCV::BNE));
    if (MI.getOperand(0).getReg() == RISCV::X0) {
      MI.removeOperand(0);
      MI.insert(MI.operands_begin() + 1, {MO0});
      MI.setDesc(get(RISCV::BEQ));
#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL)                                    \
  RISCV::PseudoV##OP##_##LMUL##_TIED

#define CASE_WIDEOP_OPCODE_LMULS(OP)                                           \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF8):                                          \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF4):                                     \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2):                                     \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M1):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M2):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL)                             \
  case RISCV::PseudoV##OP##_##LMUL##_TIED:                                     \
    NewOpc = RISCV::PseudoV##OP##_##LMUL;                                      \
    break;

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)

#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW)                            \
  RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED

#define CASE_FP_WIDEOP_OPCODE_LMULS(OP)                                        \
  CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16):                                  \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16):                             \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32):                             \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32)

#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW)                     \
  case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED:                             \
    NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW;                              \
    break;

#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                 \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32)

#define CASE_FP_WIDEOP_OPCODE_LMULS_ALT(OP)                                    \
  CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16):                                  \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16):                             \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16)

#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_ALT(OP)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)
  switch (MI.getOpcode()) {
           MI.getNumExplicitOperands() == 7 &&
           "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
    switch (MI.getOpcode()) {
        .add(MI.getOperand(0))
        .add(MI.getOperand(1))
        .add(MI.getOperand(2))
        .add(MI.getOperand(3))
        .add(MI.getOperand(4))
        .add(MI.getOperand(5))
        .add(MI.getOperand(6));
           MI.getNumExplicitOperands() == 6);
    switch (MI.getOpcode()) {
        .add(MI.getOperand(0))
        .add(MI.getOperand(1))
        .add(MI.getOperand(2))
        .add(MI.getOperand(3))
        .add(MI.getOperand(4))
        .add(MI.getOperand(5));
  unsigned NumOps = MI.getNumOperands();
    if (Op.isReg() && Op.isKill())
  if (MI.getOperand(0).isEarlyClobber()) {

#undef CASE_WIDEOP_OPCODE_COMMON
#undef CASE_WIDEOP_OPCODE_LMULS
#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
#undef CASE_FP_WIDEOP_OPCODE_COMMON
#undef CASE_FP_WIDEOP_OPCODE_LMULS
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
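// mulImm: multiply a register by a constant without a hardware multiply when
// profitable: a single shift for powers of two, SH1ADD/SH2ADD/SH3ADD (Zba)
// for 3, 5, or 9 times a power of two, a MUL when Zmmul is available, and
// otherwise a shift-and-add accumulation over the set bits of the constant.
// For example, Amount = 6 becomes SH1ADD (3 * x) followed by SLLI by 1.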
void RISCVInstrInfo::mulImm(MachineFunction &MF, MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator II, const DebugLoc &DL,
                            Register DestReg, uint32_t Amount,
                            MachineInstr::MIFlag Flag) const {
  MachineRegisterInfo &MRI = MF.getRegInfo();
  if (llvm::has_single_bit<uint32_t>(Amount)) {
    uint32_t ShiftAmount = Log2_32(Amount);
    if (ShiftAmount == 0)
      return;
    BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(ShiftAmount)
        .setMIFlag(Flag);
  } else if (int ShXAmount, ShiftAmount;
             (ShXAmount = isShifted359(Amount, ShiftAmount)) != 0) {
    // Amount is (3 | 5 | 9) << ShiftAmount: lower to SHXADD plus a shift.
    unsigned Opc;
    switch (ShXAmount) {
    case 1:
      Opc = RISCV::SH1ADD;
      break;
    case 2:
      Opc = RISCV::SH2ADD;
      break;
    case 3:
      Opc = RISCV::SH3ADD;
      break;
    }
    if (ShiftAmount)
      BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
          .addReg(DestReg, RegState::Kill)
          .addImm(ShiftAmount)
          .setMIFlag(Flag);
    BuildMI(MBB, II, DL, get(Opc), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addReg(DestReg)
        .setMIFlag(Flag);
  } else if (isPowerOf2_32(Amount - 1)) {
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
        .addReg(DestReg)
        .addImm(Log2_32(Amount - 1))
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
        .addReg(ScaledRegister, RegState::Kill)
        .addReg(DestReg, RegState::Kill)
        .setMIFlag(Flag);
  } else if (isPowerOf2_32(Amount + 1)) {
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    BuildMI(MBB, II, DL, get(RISCV::SLLI), ScaledRegister)
        .addReg(DestReg)
        .addImm(Log2_32(Amount + 1))
        .setMIFlag(Flag);
    BuildMI(MBB, II, DL, get(RISCV::SUB), DestReg)
        .addReg(ScaledRegister, RegState::Kill)
        .addReg(DestReg, RegState::Kill)
        .setMIFlag(Flag);
  } else if (STI.hasStdExtZmmul()) {
    Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    movImm(MBB, II, DL, N, Amount, Flag);
    BuildMI(MBB, II, DL, get(RISCV::MUL), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addReg(N, RegState::Kill)
        .setMIFlag(Flag);
  } else {
    Register Acc;
    uint32_t PrevShiftAmount = 0;
    for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
      if (Amount & (1U << ShiftAmount)) {
        if (ShiftAmount)
          BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
              .addReg(DestReg, RegState::Kill)
              .addImm(ShiftAmount - PrevShiftAmount)
              .setMIFlag(Flag);
        if (Amount >> (ShiftAmount + 1)) {
          // If we don't have an accumulator yet, create it and copy DestReg.
          if (!Acc) {
            Acc = MRI.createVirtualRegister(&RISCV::GPRRegClass);
            BuildMI(MBB, II, DL, get(TargetOpcode::COPY), Acc)
                .addReg(DestReg)
                .setMIFlag(Flag);
          } else {
            BuildMI(MBB, II, DL, get(RISCV::ADD), Acc)
                .addReg(Acc, RegState::Kill)
                .addReg(DestReg)
                .setMIFlag(Flag);
          }
        }
        PrevShiftAmount = ShiftAmount;
      }
    }
    assert(Acc && "Expected valid accumulator");
    BuildMI(MBB, II, DL, get(RISCV::ADD), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addReg(Acc, RegState::Kill)
        .setMIFlag(Flag);
  }
}
ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
RISCVInstrInfo::getSerializableMachineMemOperandTargetFlags() const {
  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
      {{MONontemporalBit0, "riscv-nontemporal-domain-bit-0"},
       {MONontemporalBit1, "riscv-nontemporal-domain-bit-1"}};
  return ArrayRef(TargetFlags);
}

unsigned RISCVInstrInfo::getTailDuplicateSize(CodeGenOptLevel OptLevel) const {
  return OptLevel >= CodeGenOptLevel::Aggressive
             ? STI.getTailDupAggressiveThreshold()
             : 2;
}
bool RISCV::isRVVSpill(const MachineInstr &MI) {
  // RVV lacks any support for immediate addressing for stack addresses, so be
  // conservative.
  unsigned Opcode = MI.getOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
      !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
    return false;
  return true;
}
std::optional<std::pair<unsigned, unsigned>>
RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVRELOAD2_M1:
    return std::make_pair(2u, 1u);
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVRELOAD2_M2:
    return std::make_pair(2u, 2u);
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVRELOAD2_M4:
    return std::make_pair(2u, 4u);
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVRELOAD3_M1:
    return std::make_pair(3u, 1u);
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVRELOAD3_M2:
    return std::make_pair(3u, 2u);
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVRELOAD4_M1:
    return std::make_pair(4u, 1u);
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVRELOAD4_M2:
    return std::make_pair(4u, 2u);
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVRELOAD5_M1:
    return std::make_pair(5u, 1u);
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVRELOAD6_M1:
    return std::make_pair(6u, 1u);
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVRELOAD7_M1:
    return std::make_pair(7u, 1u);
  case RISCV::PseudoVSPILL8_M1:
  case RISCV::PseudoVRELOAD8_M1:
    return std::make_pair(8u, 1u);
  }
}
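
// Illustrative use: PseudoVSPILL3_M2 yields {3, 2}, i.e. a segment spill of
// NF = 3 register groups of LMUL = 2 registers each, so callers size the frame
// object (and the spill/reload expansion loop) for 3 * 2 vector registers.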
bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
  int16_t MI1FrmOpIdx =
      RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
  int16_t MI2FrmOpIdx =
      RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
  if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
    return false;
  const MachineOperand &FrmOp1 = MI1.getOperand(MI1FrmOpIdx);
  const MachineOperand &FrmOp2 = MI2.getOperand(MI2FrmOpIdx);
  return FrmOp1.getImm() == FrmOp2.getImm();
}
std::optional<unsigned>
RISCV::getVectorLowDemandedScalarBits(unsigned Opcode, unsigned Log2SEW) {
  switch (Opcode) {
  default:
    return std::nullopt;

  // 11.6. Vector Single-Width Shift Instructions
  case RISCV::VSLL_VX:
  case RISCV::VSRL_VX:
  case RISCV::VSRA_VX:
  // 12.4. Vector Single-Width Scaling Shift Instructions
  case RISCV::VSSRL_VX:
  case RISCV::VSSRA_VX:
  // Zvbb
  case RISCV::VROL_VX:
  case RISCV::VROR_VX:
    // Only the low lg2(SEW) bits of the shift-amount scalar are demanded.
    return Log2SEW;

  // 11.7. Vector Narrowing Integer Right Shift Instructions
  case RISCV::VNSRL_WX:
  case RISCV::VNSRA_WX:
  // 12.5. Vector Narrowing Fixed-Point Clip Instructions
  case RISCV::VNCLIPU_WX:
  case RISCV::VNCLIP_WX:
  // Zvbb
  case RISCV::VWSLL_VX:
    // Only the low lg2(2 * SEW) bits of the shift-amount scalar are demanded.
    return Log2SEW + 1;

  // 11.1. Vector Single-Width Integer Add and Subtract
  case RISCV::VADD_VX:
  case RISCV::VSUB_VX:
  case RISCV::VRSUB_VX:
  // 11.2. Vector Widening Integer Add/Subtract
  case RISCV::VWADDU_VX:
  case RISCV::VWSUBU_VX:
  case RISCV::VWADD_VX:
  case RISCV::VWSUB_VX:
  case RISCV::VWADDU_WX:
  case RISCV::VWSUBU_WX:
  case RISCV::VWADD_WX:
  case RISCV::VWSUB_WX:
  // 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
  case RISCV::VADC_VXM:
  case RISCV::VADC_VIM:
  case RISCV::VMADC_VXM:
  case RISCV::VMADC_VIM:
  case RISCV::VMADC_VX:
  case RISCV::VSBC_VXM:
  case RISCV::VMSBC_VXM:
  case RISCV::VMSBC_VX:
  // 11.5. Vector Bitwise Logical Instructions
  case RISCV::VAND_VX:
  case RISCV::VOR_VX:
  case RISCV::VXOR_VX:
  // 11.8. Vector Integer Compare Instructions
  case RISCV::VMSEQ_VX:
  case RISCV::VMSNE_VX:
  case RISCV::VMSLTU_VX:
  case RISCV::VMSLT_VX:
  case RISCV::VMSLEU_VX:
  case RISCV::VMSLE_VX:
  case RISCV::VMSGTU_VX:
  case RISCV::VMSGT_VX:
  // 11.9. Vector Integer Min/Max Instructions
  case RISCV::VMINU_VX:
  case RISCV::VMIN_VX:
  case RISCV::VMAXU_VX:
  case RISCV::VMAX_VX:
  // 11.10. Vector Single-Width Integer Multiply Instructions
  case RISCV::VMUL_VX:
  case RISCV::VMULH_VX:
  case RISCV::VMULHU_VX:
  case RISCV::VMULHSU_VX:
  // 11.11. Vector Integer Divide Instructions
  case RISCV::VDIVU_VX:
  case RISCV::VDIV_VX:
  case RISCV::VREMU_VX:
  case RISCV::VREM_VX:
  // 11.12. Vector Widening Integer Multiply Instructions
  case RISCV::VWMUL_VX:
  case RISCV::VWMULU_VX:
  case RISCV::VWMULSU_VX:
  // 11.13. Vector Single-Width Integer Multiply-Add Instructions
  case RISCV::VMACC_VX:
  case RISCV::VNMSAC_VX:
  case RISCV::VMADD_VX:
  case RISCV::VNMSUB_VX:
  // 11.14. Vector Widening Integer Multiply-Add Instructions
  case RISCV::VWMACCU_VX:
  case RISCV::VWMACC_VX:
  case RISCV::VWMACCSU_VX:
  case RISCV::VWMACCUS_VX:
  // 11.15. Vector Integer Merge Instructions
  case RISCV::VMERGE_VXM:
  // 11.16. Vector Integer Move Instructions
  case RISCV::VMV_V_X:
  // 12.1. Vector Single-Width Saturating Add and Subtract
  case RISCV::VSADDU_VX:
  case RISCV::VSADD_VX:
  case RISCV::VSSUBU_VX:
  case RISCV::VSSUB_VX:
  // 12.2. Vector Single-Width Averaging Add and Subtract
  case RISCV::VAADDU_VX:
  case RISCV::VAADD_VX:
  case RISCV::VASUBU_VX:
  case RISCV::VASUB_VX:
  // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
  case RISCV::VSMUL_VX:
  // 16.1. Integer Scalar Move Instructions
  case RISCV::VMV_S_X:
  // Zvbb
  case RISCV::VANDN_VX:
    return 1U << Log2SEW;
  }
}
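
// Example (illustrative): with SEW = 32, i.e. Log2SEW = 5, VSLL_VX demands
// only the low 5 bits of the scalar shift amount, VNSRL_WX demands 6 bits
// (its source is 2 * SEW wide), and VADD_VX demands all 32 bits.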
unsigned RISCV::getRVVMCOpcode(unsigned RVVPseudoOpcode) {
  const RISCVVPseudosTable::PseudoInfo *RVV =
      RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
  if (!RVV)
    return 0;
  return RVV->BaseInstr;
}
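
// Illustrative: getRVVMCOpcode(RISCV::PseudoVADD_VV_M1) returns
// RISCV::VADD_VV, the MC opcode shared by all LMUL variants of the pseudo;
// opcodes without a pseudo-table entry return 0.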
unsigned RISCV::getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW) {
  unsigned DestEEW =
      (Desc.TSFlags & RISCVII::DestEEWMask) >> RISCVII::DestEEWShift;
  // EEW = 1.
  if (DestEEW == 0)
    return 0;
  // DestEEW is encoded as a power-of-two scale of SEW: EEW = SEW * 2^(enc - 1).
  unsigned Scaled = Log2SEW + (DestEEW - 1);
  assert(RISCVVType::isValidSEW(1 << Scaled) && "Unexpected EEW");
  return Scaled;
}

// Fold a VL operand to an immediate when possible. (Minimal reconstruction:
// the upstream helper may additionally handle the VLMAX sentinel and virtual
// registers defined by immediate loads.)
static std::optional<int64_t> getEffectiveImm(const MachineOperand &MO) {
  if (MO.isImm())
    return MO.getImm();
  return std::nullopt;
}

/// Given two VL operands, do we know that LHS <= RHS?
bool RISCV::isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS) {
  assert((LHS.isImm() || LHS.getParent()->getMF()->getRegInfo().isSSA()) &&
         (RHS.isImm() || RHS.getParent()->getMF()->getRegInfo().isSSA()));
  if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
      LHS.getReg() == RHS.getReg())
    return true;
  if (LHS.isImm() && LHS.getImm() == 0)
    return true;
  std::optional<int64_t> LHSImm = getEffectiveImm(LHS);
  std::optional<int64_t> RHSImm = getEffectiveImm(RHS);
  if (!LHSImm || !RHSImm)
    return false;
  return LHSImm <= RHSImm;
}
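
// Example (illustrative): a VL of 0 is trivially <= any VL, the same virtual
// register is <= itself, and two immediates compare directly; a caller may
// shrink a vector op's VL only when isVLKnownLE(NewVL, OldVL) holds.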
namespace {
class RISCVPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
  const MachineInstr *LHS;
  const MachineInstr *RHS;
  SmallVector<MachineOperand, 3> Cond;

public:
  RISCVPipelinerLoopInfo(const MachineInstr *LHS, const MachineInstr *RHS,
                         const SmallVectorImpl<MachineOperand> &Cond)
      : LHS(LHS), RHS(RHS), Cond(Cond.begin(), Cond.end()) {}

  bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
    // Keep the instructions that feed the loop-control branch in stage 0.
    if (LHS && MI == LHS)
      return true;
    if (RHS && MI == RHS)
      return true;
    return false;
  }

  std::optional<bool> createTripCountGreaterCondition(
      int TC, MachineBasicBlock &MBB,
      SmallVectorImpl<MachineOperand> &CondParam) override {
    // A branch instruction will be inserted as "if (Cond) goto epilogue".
    // Cond is already normalized for that use, so hand it back unchanged.
    CondParam = Cond;
    return {};
  }

  void setPreheader(MachineBasicBlock *NewPreheader) override {}

  void adjustTripCount(int TripCountAdjust) override {}
};
} // namespace
std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
RISCVInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (analyzeBranch(*LoopBB, TBB, FBB, Cond, /*AllowModify=*/false))
    return nullptr;
  // Infinite loops are not supported.
  if (TBB == LoopBB && FBB == LoopBB)
    return nullptr;
  // Must be a conditional branch.
  if (FBB == nullptr)
    return nullptr;
  assert((TBB == LoopBB || FBB == LoopBB) &&
         "The Loop must be a single-basic-block loop");
  // Look through the virtual registers feeding the branch condition to their
  // defining instructions.
  const MachineRegisterInfo &MRI = LoopBB->getParent()->getRegInfo();
  auto FindRegDef = [&MRI](MachineOperand &Op) -> const MachineInstr * {
    if (!Op.isReg())
      return nullptr;
    Register Reg = Op.getReg();
    if (!Reg.isVirtual())
      return nullptr;
    return MRI.getVRegDef(Reg);
  };
  const MachineInstr *LHS = FindRegDef(Cond[1]);
  const MachineInstr *RHS = FindRegDef(Cond[2]);
  if (LHS && LHS->isPHI())
    return nullptr;
  if (RHS && RHS->isPHI())
    return nullptr;
  return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS, Cond);
}
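
// Contract sketch (commentary, not upstream code): MachinePipeliner uses
// shouldIgnoreForPipelining() to pin the compare feeding the backedge branch
// to stage 0, and an empty result from createTripCountGreaterCondition()
// signals that the trip count is not statically known, so the normalized Cond
// must be emitted as a runtime guard around the pipelined kernel.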
bool RISCVInstrInfo::isHighLatencyDef(int Opc) const {
  // Map RVV pseudos back to their MC opcode so one case list covers all LMULs.
  unsigned RVVMCOpcode = RISCV::getRVVMCOpcode(Opc);
  Opc = RVVMCOpcode ? RVVMCOpcode : Opc;
  switch (Opc) {
  default:
    return false;
  // Integer div/rem.
  case RISCV::DIV:
  case RISCV::DIVU:
  case RISCV::DIVW:
  case RISCV::DIVUW:
  case RISCV::REM:
  case RISCV::REMU:
  case RISCV::REMW:
  case RISCV::REMUW:
  // Floating-point div/sqrt.
  case RISCV::FDIV_H:
  case RISCV::FDIV_S:
  case RISCV::FDIV_D:
  case RISCV::FDIV_H_INX:
  case RISCV::FDIV_S_INX:
  case RISCV::FDIV_D_INX:
  case RISCV::FDIV_D_IN32X:
  case RISCV::FSQRT_H:
  case RISCV::FSQRT_S:
  case RISCV::FSQRT_D:
  case RISCV::FSQRT_H_INX:
  case RISCV::FSQRT_S_INX:
  case RISCV::FSQRT_D_INX:
  case RISCV::FSQRT_D_IN32X:
  // Vector div/rem.
  case RISCV::VDIV_VV:
  case RISCV::VDIV_VX:
  case RISCV::VDIVU_VV:
  case RISCV::VDIVU_VX:
  case RISCV::VREM_VV:
  case RISCV::VREM_VX:
  case RISCV::VREMU_VV:
  case RISCV::VREMU_VX:
  // Vector fdiv/fsqrt.
  case RISCV::VFDIV_VV:
  case RISCV::VFDIV_VF:
  case RISCV::VFRDIV_VF:
  case RISCV::VFSQRT_V:
  case RISCV::VFRSQRT7_V:
    return true;
  }
}
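
// Illustrative consequence: flagging VDIV_VV and friends as high-latency defs
// is a scheduling hint only; it mirrors the treatment of scalar DIV/REM and FP
// divide/sqrt above and biases heuristics that try to hide long-latency
// results, without affecting correctness.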