#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"

static cl::opt<bool> PreferWholeRegisterMove(
    "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
    cl::desc("Prefer whole register move for vector registers."));

static cl::opt<MachineTraceStrategy> ForceMachineCombinerStrategy(
    "riscv-force-machine-combiner-strategy", cl::Hidden,
    cl::desc("Force machine combiner to use a specific strategy for machine "
             "trace metrics evaluation."),
    cl::init(MachineTraceStrategy::TS_NumStrategies),
    cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local",
                          "Local strategy."),
               clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr",
                          "MinInstrCount strategy.")));
#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
90 int &FrameIndex)
const {
97 unsigned &MemBytes)
const {
98 switch (
MI.getOpcode()) {
123 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
124 MI.getOperand(2).getImm() == 0) {
125 FrameIndex =
MI.getOperand(1).getIndex();
126 return MI.getOperand(0).getReg();
Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {
  // ...
}

Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex,
                                            unsigned &MemBytes) const {
  switch (MI.getOpcode()) {
  // ... (store opcodes that set MemBytes elided)
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
  // ...
}
  // (vector splats of scalars are trivially rematerializable only when the
  //  passthru operand is undef)
  case RISCV::VFMV_V_F:
  // ...
  case RISCV::VFMV_S_F:
    return MI.getOperand(1).isUndef();

static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
}
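// Illustrative sketch (editorial addition, not from the original file; names
// ending in "Example" are hypothetical): the overlap test above, checked on
// concrete encodings. Copying a register tuple upward in ascending order
// overwrites a source register before it is read exactly when the destination
// start lands inside the source range.
namespace {
constexpr bool forwardClobberExample(unsigned Dst, unsigned Src, unsigned N) {
  return Dst > Src && (Dst - Src) < N;
}
static_assert(forwardClobberExample(10, 8, 4),
              "v8..v11 -> v10..v13: v10/v11 are overwritten before being read");
static_assert(!forwardClobberExample(12, 8, 4), "disjoint ranges are safe");
static_assert(!forwardClobberExample(4, 8, 4), "backward copies are safe");
} // namespace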
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
                                   const MachineBasicBlock &MBB,
                                   MachineBasicBlock::const_iterator MBBI,
                                   MachineBasicBlock::const_iterator &DefMBBI,
                                   RISCVII::VLMUL LMul) {
  // ...
  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");
  Register SrcReg = MBBI->getOperand(1).getReg();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;
  while (MBBI != MBB.begin()) {
    --MBBI;
    if (MBBI->isMetaInstruction())
      continue;
    if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
        MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
        MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
      // ...
        unsigned FirstVType = MBBI->getOperand(2).getImm();
        // ...
        if (FirstLMul != LMul)
          return false;
      // ...
      if (MBBI->getOperand(0).getReg() != RISCV::X0)
        return false;
      if (MBBI->getOperand(1).isImm())
        return false;
      if (MBBI->getOperand(1).getReg() != RISCV::X0)
        return false;
      // ...
      unsigned VType = MBBI->getOperand(2).getImm();
      // ...
    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
      return false;
    } else if (MBBI->getNumDefs()) {
      // Check all the instructions which can change the VL.
      if (MBBI->modifiesRegister(RISCV::VL, /*TRI=*/nullptr))
        return false;
      // Only converting whole register copies to vmv.v.v when the defining
      // value dominates the whole register.
      for (const MachineOperand &MO : MBBI->operands()) {
        if (!MO.isReg() || !MO.isDef())
          continue;
        if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {
          // ...
        }
        // ...
        if (MO.getReg() != SrcReg)
          return false;
      }
    }
  }
  return false;
}
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  uint16_t DstEncoding = TRI->getEncodingValue(DstReg);
  // ...
  assert(!Fractional && "It is impossible be fractional lmul here.");
  unsigned NumRegs = NF * LMulVal;
  // ... (for a reversed copy, start from the last register of the tuple)
    SrcEncoding += NumRegs - 1;
    DstEncoding += NumRegs - 1;

  unsigned I = 0;
  auto GetCopyInfo = [&](uint16_t SrcEncoding, uint16_t DstEncoding)
      -> std::tuple<RISCVII::VLMUL, const TargetRegisterClass &, unsigned,
                    unsigned, unsigned> {
    if (ReversedCopy) {
      // For reversed copying, if there are enough aligned registers (8/4/2),
      // we can do a larger copy (M8/M4/M2).
      uint16_t Diff = DstEncoding - SrcEncoding;
      if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
          DstEncoding % 8 == 7)
        return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
                RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
      if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
          DstEncoding % 4 == 3)
        return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
                RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
      if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
          DstEncoding % 2 == 1)
        return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
                RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
      // Otherwise fall back to LMUL_1 copying.
      return {RISCVII::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
              RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
    }

    // For forward copying, if the source and destination register encodings
    // are aligned to 8/4/2, we can do an LMUL8/4/2 copy.
    if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
      return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
      return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
      return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
    // Otherwise fall back to LMUL_1 copying.
    return {RISCVII::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
  };
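// Worked example (editorial addition, not from the original file; names
// ending in "Example" are hypothetical): how the alignment checks above pick
// the largest legal whole-register move for a forward copy.
namespace {
constexpr unsigned copyStepExample(unsigned Src, unsigned Dst, unsigned Left) {
  // Mirrors the forward-copy arm of GetCopyInfo: take the widest group whose
  // size still fits and whose encodings are group-aligned.
  return (Left >= 8 && Src % 8 == 0 && Dst % 8 == 0)   ? 8
         : (Left >= 4 && Src % 4 == 0 && Dst % 4 == 0) ? 4
         : (Left >= 2 && Src % 2 == 0 && Dst % 2 == 0) ? 2
                                                       : 1;
}
// v8..v11 -> v12..v15: both starts are 4-aligned, so one vmv4r.v suffices.
static_assert(copyStepExample(8, 12, 4) == 4, "single LMUL_4 whole-reg move");
// v9..v12 -> v13..v16: unaligned starts force an initial LMUL_1 move.
static_assert(copyStepExample(9, 13, 4) == 1, "begin with vmv1r.v");
} // namespace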
393 if (&RegClass == &RISCV::VRRegClass)
395 return TRI->getMatchingSuperReg(Reg, RISCV::sub_vrm1_0, &RegClass);
397 while (
I != NumRegs) {
402 auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
403 GetCopyInfo(SrcEncoding, DstEncoding);
407 if (LMul == LMulCopied &&
410 if (DefMBBI->getOpcode() == VIOpc)
416 MCRegister ActualSrcReg = FindRegWithEncoding(
417 RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
418 MCRegister ActualDstReg = FindRegWithEncoding(
419 RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
427 MIB = MIB.add(DefMBBI->getOperand(2));
435 MIB.addImm(Log2SEW ? Log2SEW : 3);
442 SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
443 DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 const DebugLoc &DL, MCRegister DstReg,
                                 MCRegister SrcReg, bool KillSrc,
                                 bool RenamableDest, bool RenamableSrc) const {
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    // ... (addi DstReg, SrcReg, 0)
    return;
  }

  if (RISCV::GPRF16RegClass.contains(DstReg, SrcReg)) {
    // ...
    return;
  }

  if (RISCV::GPRF32RegClass.contains(DstReg, SrcReg)) {
    // ...
    return;
  }

  if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {
    // Emit one ADDI per 32-bit half of the pair.
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
            TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
        .addReg(TRI->getSubReg(SrcReg, RISCV::sub_gpr_even),
                getKillRegState(KillSrc))
        .addImm(0);
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
            TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
        .addReg(TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd),
                getKillRegState(KillSrc))
        .addImm(0);
    return;
  }

  // Handle copy from csr.
  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
        .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)
        .addReg(RISCV::X0);
    return;
  }

  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    unsigned Opc;
    if (STI.hasStdExtZfh()) {
      Opc = RISCV::FSGNJ_H;
    } else {
      assert(STI.hasStdExtF() &&
             (STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
             "Unexpected extensions");
      // Zfhmin/Zfbfmin don't have FSGNJ_H, replace FSGNJ_H with FSGNJ_S.
      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;
    }
    // ... (fsgnj Opc DstReg, SrcReg, SrcReg)
    return;
  }

  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
    // ... (fsgnj.s DstReg, SrcReg, SrcReg)
    return;
  }

  if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
    // ... (fsgnj.d DstReg, SrcReg, SrcReg)
    return;
  }

  if (RISCV::FPR32RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    // ... (fmv.w.x)
    return;
  }

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR32RegClass.contains(SrcReg)) {
    // ... (fmv.x.w)
    return;
  }

  if (RISCV::FPR64RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    // ... (fmv.d.x)
    return;
  }

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR64RegClass.contains(SrcReg)) {
    // ... (fmv.x.d)
    return;
  }

  // VR -> VR copies.
  static const TargetRegisterClass *RVVRegClasses[] = {
      &RISCV::VRRegClass,     &RISCV::VRM2RegClass,   &RISCV::VRM4RegClass,
      &RISCV::VRM8RegClass,   &RISCV::VRN2M1RegClass, &RISCV::VRN2M2RegClass,
      &RISCV::VRN2M4RegClass, &RISCV::VRN3M1RegClass, &RISCV::VRN3M2RegClass,
      &RISCV::VRN4M1RegClass, &RISCV::VRN4M2RegClass, &RISCV::VRN5M1RegClass,
      &RISCV::VRN6M1RegClass, &RISCV::VRN7M1RegClass, &RISCV::VRN8M1RegClass};
  for (const auto &RegClass : RVVRegClasses) {
    if (RegClass->contains(DstReg, SrcReg)) {
      // ... (copyPhysRegVector)
      return;
    }
  }

  llvm_unreachable("Impossible reg-to-reg copy");
}
void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI,
                                         Register VReg) const {
  // ...
  unsigned Opcode;
  bool IsScalableVector = true;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
                                                             : RISCV::SD;
    IsScalableVector = false;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SH_INX;
    IsScalableVector = false;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::SW_INX;
    IsScalableVector = false;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxSD;
    IsScalableVector = false;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSH;
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSW;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSD;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS1R_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS2R_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS4R_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS8R_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
  else
    llvm_unreachable("Can't store this register to stack slot");

  if (IsScalableVector) {
    // ...
  }
  // ...
}
void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          Register DstReg, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI,
                                          Register VReg) const {
  // ...
  unsigned Opcode;
  bool IsScalableVector = true;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
                                                             : RISCV::LD;
    IsScalableVector = false;
  } else if (RISCV::GPRF16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LH_INX;
    IsScalableVector = false;
  } else if (RISCV::GPRF32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::LW_INX;
    IsScalableVector = false;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxLD;
    IsScalableVector = false;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLH;
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLW;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLD;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL1RE8_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL2RE8_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL4RE8_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL8RE8_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
  else
    llvm_unreachable("Can't load this register from stack slot");

  if (IsScalableVector) {
    // ...
  }
  // ...
}
  if (Ops.size() != 1 || Ops[0] != 1)
    return nullptr;

  unsigned LoadOpc;
  switch (MI.getOpcode()) {
  // ...
    LoadOpc = RISCV::LWU;
    break;
  // ...
    LoadOpc = RISCV::LBU;
    break;
  // ...
    LoadOpc = RISCV::FLH;
    break;
  // ...
    LoadOpc = RISCV::FLW;
    break;
  // ...
    LoadOpc = RISCV::FLD;
    break;
  // ...
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
    LoadOpc = RISCV::LHU;
    break;
  }
  // ...
  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(LoadOpc),
                 DstReg)
      .addFrameIndex(FrameIndex)
      .addImm(0);
void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register DstReg, uint64_t Val,
                            MachineInstr::MIFlag Flag, bool DstIsDead) const {
  // ...
  if (!isUInt<32>(Val))
    report_fatal_error("Should only materialize 32-bit constants for RV32");
  // All instructions in the sequence are sign-extended, so interpret the
  // 32-bit value as a signed 64-bit value.
  Val = SignExtend64<32>(Val);
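// Numeric sketch (editorial addition, not from the original file; names
// ending in "Example" are hypothetical, and <cstdint> is assumed to be in
// scope via the existing includes): SignExtend64<32> reinterprets the low 32
// bits as a signed value, matching how LUI/ADDIW materialization sequences
// see them on RV64. The int32_t cast below relies on the two's-complement
// behavior every supported compiler provides.
namespace {
constexpr int64_t signExtend32Example(uint64_t V) {
  return static_cast<int64_t>(static_cast<int32_t>(V & 0xffffffffu));
}
static_assert(signExtend32Example(0x7fffffffULL) == 0x7fffffffLL,
              "positive 32-bit values are unchanged");
static_assert(signExtend32Example(0x80000000ULL) == -2147483648LL,
              "bit 31 set -> negative 64-bit value");
} // namespace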
  bool SrcRenamable = false;
  // ...
  for (const RISCVMatInt::Inst &Inst : Seq) {
    bool LastItem = ++Num == Seq.size();
    // ...
    switch (Inst.getOpndKind()) {
    // ...
        .addReg(SrcReg, SrcRegState)
    // ...
        .addReg(SrcReg, SrcRegState)
        .addReg(SrcReg, SrcRegState)
    // ...
        .addReg(SrcReg, SrcRegState)
    // ...
    }
    // Only the first instruction has X0 as its source.
    SrcReg = DstReg;
    SrcRenamable = DstRenamable;
  }
}

static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // ...
  case RISCV::CV_BEQIMM:
  // ...
  case RISCV::CV_BNEIMM:
  // ...
    llvm_unreachable("Unknown conditional branch");
  // ...
}

unsigned RISCVCC::getBrCond(RISCVCC::CondCode CC, bool Imm) {
  switch (CC) {
  // ...
  case RISCVCC::COND_EQ:
    return Imm ? RISCV::CV_BEQIMM : RISCV::BEQ;
  case RISCVCC::COND_NE:
    return Imm ? RISCV::CV_BNEIMM : RISCV::BNE;
  // ...
  }
}
bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  Cond.clear();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;

  // Count the number of terminators and find the first unconditional or
  // indirect branch.
  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    NumTerminators++;
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      FirstUncondOrIndirectBr = J.getReverse();
    }
  }

  // If AllowModify is true, we can erase any terminators after
  // FirstUncondOrIndirectBr.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      NumTerminators--;
    }
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
    return true;

  // We can't handle Generic branch opcodes from Global ISel.
  if (I->isPreISelOpcode())
    return true;

  // We can't handle blocks with more than 2 terminators.
  if (NumTerminators > 2)
    return true;

  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    TBB = getBranchDestBlock(*I);
    return false;
  }

  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    parseCondBranch(&*I, TBB, Cond);
    return false;
  }

  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
    parseCondBranch(&*std::prev(I), TBB, Cond);
    FBB = getBranchDestBlock(*I);
    return false;
  }

  // Otherwise, we can't handle this.
  return true;
}
unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  // ...
  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    return 0;

  // Remove the branch.
  // ...
  I->eraseFromParent();
  // ...
  if (!I->getDesc().isConditionalBranch())
    return 1;

  // Remove the branch.
  // ...
  I->eraseFromParent();
  return 2;
}

// (insertBranch)
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "RISC-V branch conditions have two components!");
void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                          MachineBasicBlock &DestBB,
                                          MachineBasicBlock &RestoreBB,
                                          const DebugLoc &DL, int64_t BrOffset,
                                          RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(RestoreBB.empty() &&
         "restore block should be inserted for restoring clobbered registers");
  // ...
  if (!isInt<32>(BrOffset))
    report_fatal_error(
        "Branch offsets outside of the signed 32-bit range not supported");

  // ...
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
  // ...
  if (TmpGPR != RISCV::NoRegister)
    RS->setRegUsed(TmpGPR);
  else {
    // The case when there is no scavenged register needs special handling.
    // Pick s11 (x27) because it doesn't make a difference.
    TmpGPR = RISCV::X27;
    // ...
    if (FrameIndex == -1)
      report_fatal_error("underestimated function size");
    // ...
    TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
                             /*SpAdj=*/0, /*FIOperandNum=*/1);
    MI.getOperand(1).setMBB(&RestoreBB);
    // ...
    TRI->eliminateFrameIndex(RestoreBB.back(),
                             /*SpAdj=*/0, /*FIOperandNum=*/1);
  }

  MRI.replaceRegWith(ScratchReg, TmpGPR);
  MRI.clearVirtRegs();
}

bool RISCVInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert((Cond.size() == 3) && "Invalid branch condition!");
  // ...
}
  auto isLoadImm = [](const MachineInstr *MI, int64_t &Imm) -> bool {
    if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
        MI->getOperand(1).getReg() == RISCV::X0) {
      Imm = MI->getOperand(2).getImm();
      return true;
    }
    return false;
  };
  // Either a load from immediate instruction or X0.
  auto isFromLoadImm = [&](const MachineOperand &Op, int64_t &Imm) -> bool {
    if (!Op.isReg())
      return false;
    Register Reg = Op.getReg();
    return Reg.isVirtual() && isLoadImm(MRI.getVRegDef(Reg), Imm);
  };
  // ...
  auto searchConst = [&](int64_t C1) -> Register {
    MachineBasicBlock::reverse_iterator II(&MI), E = MBB->rend();
    auto DefC1 = std::find_if(++II, E, [&](const MachineInstr &I) -> bool {
      int64_t Imm;
      return isLoadImm(&I, Imm) && Imm == C1 &&
             I.getOperand(0).getReg().isVirtual();
    });
    if (DefC1 != E)
      return DefC1->getOperand(0).getReg();
    return Register();
  };

  bool Modify = false;
  int64_t C0;
  if (isFromLoadImm(LHS, C0) && MRI.hasOneUse(LHS.getReg())) {
    // Might be case 1: rewrite against an existing "li C0 + 1".
    // ...
    if (Register RegZ = searchConst(C0 + 1)) {
      // ...
      // We might extend the live range of Z, clear its kill flag to account
      // for this.
      MRI.clearKillFlags(RegZ);
      Modify = true;
    }
  } else if (isFromLoadImm(RHS, C0) && MRI.hasOneUse(RHS.getReg())) {
    // Might be case 2: rewrite against an existing "li C0 - 1".
    // ...
    if (Register RegZ = searchConst(C0 - 1)) {
      // ...
      MRI.clearKillFlags(RegZ);
      Modify = true;
    }
  }

  if (!Modify)
    return false;

  // Build the new branch and remove the old one.
  // ...
  MI.eraseFromParent();
  return true;
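// Arithmetic behind the rewrite above (editorial addition, not from the
// original file; names ending in "Example" are hypothetical): for integers,
// "x < C" is equivalent to "x <= C - 1" and "x <= C" to "x < C + 1", so a
// branch against a materialized constant C can reuse a nearby "li C+1" or
// "li C-1" by flipping the condition and swapping operands.
namespace {
constexpr bool branchImmAdjustExample() {
  for (int X = -3; X <= 3; ++X) {
    const int C = 1;
    if ((X < C) != (X <= C - 1))
      return false;
    if ((X <= C) != (X < C + 1))
      return false;
  }
  return true;
}
static_assert(branchImmAdjustExample(),
              "x < C <=> x <= C-1  and  x <= C <=> x < C+1");
} // namespace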
MachineBasicBlock *
RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  // The branch target is always the last operand.
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
}

bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  // ...
  switch (BranchOp) {
  default:
    llvm_unreachable("Unexpected opcode!");
  // ... (BEQ/BNE/BLT/BGE/BLTU/BGEU)
  case RISCV::CV_BEQIMM:
  case RISCV::CV_BNEIMM:
    return isIntN(13, BrOffset);
  // ... (JAL)
  case RISCV::PseudoBR:
    return isIntN(21, BrOffset);
  case RISCV::PseudoJump:
    // ...
  }
}
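// Range-check sketch (editorial addition, not from the original file; names
// ending in "Example" are hypothetical): B-type conditional branches encode a
// 13-bit signed offset (+-4 KiB), while JAL/PseudoBR encode a 21-bit one
// (+-1 MiB). This mirrors the semantics of llvm::isIntN for N < 64.
namespace {
constexpr bool fitsSignedNBitsExample(unsigned N, long long X) {
  return X >= -(1LL << (N - 1)) && X < (1LL << (N - 1));
}
static_assert(fitsSignedNBitsExample(13, 4094) &&
                  !fitsSignedNBitsExample(13, 4096),
              "B-type reach is +-4 KiB");
static_assert(fitsSignedNBitsExample(21, (1 << 20) - 2) &&
                  !fitsSignedNBitsExample(21, 1 << 20),
              "JAL reach is +-1 MiB");
} // namespace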
  case RISCV::ADD:   return RISCV::PseudoCCADD;
  case RISCV::SUB:   return RISCV::PseudoCCSUB;
  case RISCV::SLL:   return RISCV::PseudoCCSLL;
  case RISCV::SRL:   return RISCV::PseudoCCSRL;
  case RISCV::SRA:   return RISCV::PseudoCCSRA;
  case RISCV::AND:   return RISCV::PseudoCCAND;
  case RISCV::OR:    return RISCV::PseudoCCOR;
  case RISCV::XOR:   return RISCV::PseudoCCXOR;

  case RISCV::ADDI:  return RISCV::PseudoCCADDI;
  case RISCV::SLLI:  return RISCV::PseudoCCSLLI;
  case RISCV::SRLI:  return RISCV::PseudoCCSRLI;
  case RISCV::SRAI:  return RISCV::PseudoCCSRAI;
  case RISCV::ANDI:  return RISCV::PseudoCCANDI;
  case RISCV::ORI:   return RISCV::PseudoCCORI;
  case RISCV::XORI:  return RISCV::PseudoCCXORI;

  case RISCV::ADDW:  return RISCV::PseudoCCADDW;
  case RISCV::SUBW:  return RISCV::PseudoCCSUBW;
  case RISCV::SLLW:  return RISCV::PseudoCCSLLW;
  case RISCV::SRLW:  return RISCV::PseudoCCSRLW;
  case RISCV::SRAW:  return RISCV::PseudoCCSRAW;

  case RISCV::ADDIW: return RISCV::PseudoCCADDIW;
  case RISCV::SLLIW: return RISCV::PseudoCCSLLIW;
  case RISCV::SRLIW: return RISCV::PseudoCCSRLIW;
  case RISCV::SRAIW: return RISCV::PseudoCCSRAIW;

  case RISCV::ANDN:  return RISCV::PseudoCCANDN;
  case RISCV::ORN:   return RISCV::PseudoCCORN;
  case RISCV::XNOR:  return RISCV::PseudoCCXNOR;
  }

  return RISCV::INSTRUCTION_LIST_END;
static MachineInstr *canFoldAsPredicatedOp(Register Reg,
                                           const MachineRegisterInfo &MRI,
                                           const TargetInstrInfo *TII) {
  if (!Reg.isVirtual())
    return nullptr;
  if (!MRI.hasOneNonDBGUse(Reg))
    return nullptr;
  MachineInstr *MI = MRI.getVRegDef(Reg);
  // ...
  // Don't predicate the li idiom.
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0)
    return nullptr;
  // Check if MI has any other defs or physreg uses.
  for (const MachineOperand &MO : llvm::drop_begin(MI->operands())) {
    // ...
    if (MO.isFI() || MO.isCPI() || MO.isJTI())
      return nullptr;
    // ...
    if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))
      return nullptr;
  }
  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(DontMoveAcrossStores))
    return nullptr;
  return MI;
}

bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   unsigned &TrueOp, unsigned &FalseOp,
                                   bool &Optimizable) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  // ...
  Cond.push_back(MI.getOperand(1));
  Cond.push_back(MI.getOperand(2));
  Cond.push_back(MI.getOperand(3));
  // ...
  Optimizable = STI.hasShortForwardBranchOpt();
  return false;
}

MachineInstr *
RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
                               SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                               bool PreferFalse) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  if (!STI.hasShortForwardBranchOpt())
    return nullptr;
  // ...
  bool Invert = !DefMI;
  // ...
  Register DestReg = MI.getOperand(0).getReg();
  // ...
  if (!MRI.constrainRegClass(DestReg, PreviousClass))
    return nullptr;

  unsigned PredOpc = getPredicatedOpcode(DefMI->getOpcode());
  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");

  // ...
  // Copy the condition portion.
  NewMI.add(MI.getOperand(1));
  NewMI.add(MI.getOperand(2));
  // ...
  NewMI.add(FalseReg);
  // ...
}
unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  if (MI.isMetaInstruction())
    return 0;

  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::INLINEASM ||
      Opcode == TargetOpcode::INLINEASM_BR) {
    const MachineFunction &MF = *MI.getParent()->getParent();
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                              *MF.getTarget().getMCAsmInfo());
  }

  if (!MI.memoperands_empty()) {
    // ... (non-temporal loads/stores prefixed with ntl.all)
    if (isCompressibleInst(MI, STI))
      return 4; // c.ntl.all + c.load/c.store
    // ...
  }

  if (Opcode == TargetOpcode::BUNDLE)
    return getInstBundleLength(MI);

  if (MI.getParent() && MI.getParent()->getParent()) {
    if (isCompressibleInst(MI, STI))
      return 2;
  }

  switch (Opcode) {
  case RISCV::PseudoMV_FPR16INX:
  case RISCV::PseudoMV_FPR32INX:
    // MV is always compressible to either c.mv or c.li rd, 0.
    return STI.hasStdExtCOrZca() ? 2 : 4;
  case TargetOpcode::STACKMAP:
    // The upper bound for a stackmap intrinsic is the full length of its
    // shadow.
    return StackMapOpers(&MI).getNumPatchBytes();
  case TargetOpcode::PATCHPOINT:
    // The size of the patchpoint intrinsic is the number of bytes requested.
    return PatchPointOpers(&MI).getNumPatchBytes();
  case TargetOpcode::STATEPOINT: {
    // The size of the statepoint intrinsic is the number of bytes requested.
    unsigned NumBytes = StatepointOpers(&MI).getNumPatchBytes();
    // No patch bytes means at most a PseudoCall is emitted.
    return std::max(NumBytes, 8U);
  }
  case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
  case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
  case TargetOpcode::PATCHABLE_TAIL_CALL: {
    const MachineFunction &MF = *MI.getParent()->getParent();
    const Function &F = MF.getFunction();
    if (Opcode == TargetOpcode::PATCHABLE_FUNCTION_ENTER &&
        F.hasFnAttribute("patchable-function-entry")) {
      unsigned Num;
      if (F.getFnAttribute("patchable-function-entry")
              .getValueAsString()
              .getAsInteger(10, Num))
        return get(Opcode).getSize();
      // ...
    }
    // ...
  }
  default:
    return get(Opcode).getSize();
  }
}

unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}

bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  const unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
  // ... (ADDI/ORI/XORI)
    return (MI.getOperand(1).isReg() &&
            MI.getOperand(1).getReg() == RISCV::X0) ||
           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
  }
  return MI.isAsCheapAsAMove();
}
std::optional<DestSourcePair>
RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  // ...
  switch (MI.getOpcode()) {
  default:
    break;
  // ... (ADDI rd, rs, 0)
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0)
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  }
  return std::nullopt;
}

MachineTraceStrategy RISCVInstrInfo::getMachineCombinerTraceStrategy() const {
  if (ForceMachineCombinerStrategy.getNumOccurrences() == 0) {
    // The option is unused; pick the strategy from the scheduling model.
    const auto &SchedModel = STI.getSchedModel();
    return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
               ? MachineTraceStrategy::TS_MinInstrCount
               : MachineTraceStrategy::TS_Local;
  }
  return ForceMachineCombinerStrategy;
}

void RISCVInstrInfo::finalizeInsInstrs(
    MachineInstr &Root, unsigned &Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs) const {
  int16_t FrmOpIdx =
      RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);
  if (FrmOpIdx < 0) {
    assert(all_of(InsInstrs,
                  [](MachineInstr *MI) {
                    return RISCV::getNamedOperandIdx(
                               MI->getOpcode(), RISCV::OpName::frm) < 0;
                  }) &&
           "New instructions require FRM whereas the old one does not have it");
    return;
  }
  // ...
  for (auto *NewMI : InsInstrs) {
    // We'd already added the FRM operand.
    if (static_cast<unsigned>(RISCV::getNamedOperandIdx(
            NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
      continue;
    // ...
  }
}
bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
                                                       bool Invert) const {
#define OPCODE_LMUL_CASE(OPC)                                                  \
  case RISCV::OPC##_M1:                                                        \
  case RISCV::OPC##_M2:                                                        \
  case RISCV::OPC##_M4:                                                        \
  case RISCV::OPC##_M8:                                                        \
  case RISCV::OPC##_MF2:                                                       \
  case RISCV::OPC##_MF4:                                                       \
  case RISCV::OPC##_MF8

#define OPCODE_LMUL_MASK_CASE(OPC)                                             \
  case RISCV::OPC##_M1_MASK:                                                   \
  case RISCV::OPC##_M2_MASK:                                                   \
  case RISCV::OPC##_M4_MASK:                                                   \
  case RISCV::OPC##_M8_MASK:                                                   \
  case RISCV::OPC##_MF2_MASK:                                                  \
  case RISCV::OPC##_MF4_MASK:                                                  \
  case RISCV::OPC##_MF8_MASK

  unsigned Opcode = Inst.getOpcode();
  if (Invert) {
    // ...
    Opcode = *InvOpcode;
  }
  // ...

#undef OPCODE_LMUL_MASK_CASE
#undef OPCODE_LMUL_CASE
}
bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,
                                             const MachineInstr &Prev) const {
  // ...
  auto checkImmOperand = [&](unsigned OpIdx) {
    return Root.getOperand(OpIdx).getImm() == Prev.getOperand(OpIdx).getImm();
  };

  auto checkRegOperand = [&](unsigned OpIdx) {
    return Root.getOperand(OpIdx).getReg() == Prev.getOperand(OpIdx).getReg();
  };

  // ...
  if (!checkRegOperand(1))
    return false;
  // ...
  bool SeenMI2 = false;
  // ...
    if (It->modifiesRegister(RISCV::V0, TRI)) {
      Register SrcReg = It->getOperand(1).getReg();
      // ...
      if (MI1VReg != SrcReg)
        return false;
      // ...
    }
  // ...
  assert(SeenMI2 && "Prev is expected to appear before Root");
  return true;
}

bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
                                                  bool &Commuted) const {
  // ...
  assert(RISCVII::isFirstDefTiedToFirstUse(get(Inst.getOpcode())) &&
         "Expect the present of passthrough operand.");
  // ...
  // If only one operand has the same or inverse opcode and it's the second
  // source operand, the operands must be commuted.
  Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
             areRVVInstsReassociable(Inst, *MI2);
  if (Commuted)
    std::swap(MI1, MI2);

  return areRVVInstsReassociable(Inst, *MI1) &&
         (isVectorAssociativeAndCommutative(*MI1) ||
          isVectorAssociativeAndCommutative(*MI1, /*Invert=*/true)) &&
         // ...
}

bool RISCVInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  if (!isVectorAssociativeAndCommutative(Inst) &&
      !isVectorAssociativeAndCommutative(Inst, true))
    return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
  // ...
  if (Op1.isReg() && Op1.getReg().isVirtual())
    MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  if (Op2.isReg() && Op2.getReg().isVirtual())
    MI2 = MRI.getUniqueVRegDef(Op2.getReg());
  // ...
  for (unsigned I = 0; I < 5; ++I)
    // ...
}

bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                            bool &Commuted) const {
  if (isVectorAssociativeAndCommutative(Inst) ||
      isVectorAssociativeAndCommutative(Inst, true))
    return hasReassociableVectorSibling(Inst, Commuted);
  // ...
  unsigned OperandIdx = Commuted ? 2 : 1;
  // ...
  int16_t InstFrmOpIdx =
      RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
  int16_t SiblingFrmOpIdx =
      RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);

  return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
         RISCV::hasEqualFRM(Inst, Sibling);
}

bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
                                                 bool Invert) const {
  if (isVectorAssociativeAndCommutative(Inst, Invert))
    return true;
  // ...
  if (Invert) {
    // ...
    Opc = *InverseOpcode;
  }
  // ...
}
std::optional<unsigned>
RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
#define RVV_OPC_LMUL_CASE(OPC, INV)                                            \
  case RISCV::OPC##_M1:                                                        \
    return RISCV::INV##_M1;                                                    \
  case RISCV::OPC##_M2:                                                        \
    return RISCV::INV##_M2;                                                    \
  case RISCV::OPC##_M4:                                                        \
    return RISCV::INV##_M4;                                                    \
  case RISCV::OPC##_M8:                                                        \
    return RISCV::INV##_M8;                                                    \
  case RISCV::OPC##_MF2:                                                       \
    return RISCV::INV##_MF2;                                                   \
  case RISCV::OPC##_MF4:                                                       \
    return RISCV::INV##_MF4;                                                   \
  case RISCV::OPC##_MF8:                                                       \
    return RISCV::INV##_MF8

#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)                                       \
  case RISCV::OPC##_M1_MASK:                                                   \
    return RISCV::INV##_M1_MASK;                                               \
  case RISCV::OPC##_M2_MASK:                                                   \
    return RISCV::INV##_M2_MASK;                                               \
  case RISCV::OPC##_M4_MASK:                                                   \
    return RISCV::INV##_M4_MASK;                                               \
  case RISCV::OPC##_M8_MASK:                                                   \
    return RISCV::INV##_M8_MASK;                                               \
  case RISCV::OPC##_MF2_MASK:                                                  \
    return RISCV::INV##_MF2_MASK;                                              \
  case RISCV::OPC##_MF4_MASK:                                                  \
    return RISCV::INV##_MF4_MASK;                                              \
  case RISCV::OPC##_MF8_MASK:                                                  \
    return RISCV::INV##_MF8_MASK

  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::FADD_H:
    return RISCV::FSUB_H;
  case RISCV::FADD_S:
    return RISCV::FSUB_S;
  case RISCV::FADD_D:
    return RISCV::FSUB_D;
  case RISCV::FSUB_H:
    return RISCV::FADD_H;
  case RISCV::FSUB_S:
    return RISCV::FADD_S;
  case RISCV::FSUB_D:
    return RISCV::FADD_D;
  // ... (ADD/SUB, MUL, and RVV_OPC_LMUL_CASE instantiations elided)
  }

#undef RVV_OPC_LMUL_MASK_CASE
#undef RVV_OPC_LMUL_CASE
}
static bool canCombineFPFusedMultiply(const MachineInstr &Root,
                                      const MachineOperand &MO,
                                      bool DoRegPressureReduce) {
  // ...
  // Do not combine instructions from different basic blocks.
  if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;
  // ...
}

static bool getFPFusedMultiplyPatterns(MachineInstr &Root,
                                       SmallVectorImpl<unsigned> &Patterns,
                                       bool DoRegPressureReduce) {
  unsigned Opc = Root.getOpcode();
  bool IsFAdd = isFADD(Opc);
  if (!IsFAdd && !isFSUB(Opc))
    return false;
  if (canCombineFPFusedMultiply(Root, Root.getOperand(1),
                                DoRegPressureReduce)) {
    // ...
  }
  if (canCombineFPFusedMultiply(Root, Root.getOperand(2),
                                DoRegPressureReduce)) {
    // ...
  }
  // ...
}

static bool getFPPatterns(MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
                          bool DoRegPressureReduce) {
  return getFPFusedMultiplyPatterns(Root, Patterns, DoRegPressureReduce);
}

static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO,
                       unsigned CombineOpc) {
  // ...
  if (!MI || MI->getParent() != &MBB || MI->getOpcode() != CombineOpc)
    return false;
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;
  // ...
}

static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB,
                                      const MachineOperand &MO,
                                      unsigned OuterShiftAmt) {
  // ...
  if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
    return false;
  // ...
}

// (getMachineCombinerPatterns / getFPFusedMultiplyOpcode)
  // ...
  case RISCV::FADD_H:
    return RISCV::FMADD_H;
  case RISCV::FADD_S:
    return RISCV::FMADD_S;
  case RISCV::FADD_D:
    return RISCV::FMADD_D;
  // ...

// (combineFPFusedMultiply)
  bool Mul1IsKill = Mul1.isKill();
  bool Mul2IsKill = Mul2.isKill();
  bool AddendIsKill = Addend.isKill();
  // ...
  BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
      // ...

static void genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              SmallVectorImpl<MachineInstr *> &DelInstrs,
                              DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) {
  unsigned OuterShiftAmt = getSHXADDShiftAmount(Root.getOpcode());
  assert(OuterShiftAmt != 0 && "Unexpected opcode");
  // ...
  assert(InnerShiftAmt >= OuterShiftAmt && "Unexpected shift amount");

  unsigned InnerOpc;
  switch (InnerShiftAmt - OuterShiftAmt) {
  default:
    llvm_unreachable("Unexpected shift amount");
  case 0:
    InnerOpc = RISCV::ADD;
    break;
  case 1:
    InnerOpc = RISCV::SH1ADD;
    break;
  case 2:
    InnerOpc = RISCV::SH2ADD;
    break;
  case 3:
    InnerOpc = RISCV::SH3ADD;
    break;
  }
  // ...
  Register NewVR = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // ...
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
  // ...
}

// (genAlternativeCodeSequence)
  // ...
    genShXAddAddShift(Root, 1, InsInstrs, DelInstrs, InstrIdxForVirtReg);
bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
                                       StringRef &ErrInfo) const {
  // ...
  for (const auto &[Index, Operand] : enumerate(Desc.operands())) {
    unsigned OpType = Operand.OperandType;
    if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
        OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
      const MachineOperand &MO = MI.getOperand(Index);
      if (MO.isReg()) {
        ErrInfo = "Expected a non-register operand.";
        return false;
      }
      if (MO.isImm()) {
        int64_t Imm = MO.getImm();
        bool Ok = true;
        switch (OpType) {
        // ...
#define CASE_OPERAND_UIMM(NUM)                                                 \
  case RISCVOp::OPERAND_UIMM##NUM:                                             \
    Ok = isUInt<NUM>(Imm);                                                     \
    break;
        // ... (the matching OPERAND_* case labels were lost in extraction;
        //      the checks below are shown in source order)
          Ok = isShiftedUInt<1, 1>(Imm);  break;
          Ok = isShiftedUInt<4, 1>(Imm);  break;
          Ok = isShiftedUInt<5, 1>(Imm);  break;
          Ok = isShiftedUInt<5, 2>(Imm);  break;
          Ok = isShiftedUInt<6, 2>(Imm);  break;
          Ok = isShiftedUInt<5, 3>(Imm);  break;
          Ok = isUInt<8>(Imm) && Imm >= 32;  break;
          Ok = isShiftedUInt<6, 3>(Imm);  break;
          Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);  break;
          Ok = isShiftedUInt<8, 2>(Imm) && (Imm != 0);  break;
          Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;  break;
          Ok = Imm != 0 && isInt<6>(Imm);  break;
          Ok = isUInt<10>(Imm);  break;
          Ok = isUInt<11>(Imm);  break;
          Ok = isInt<12>(Imm);  break;
          Ok = isShiftedInt<7, 5>(Imm);  break;
          Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);  break;
          Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
          Ok = Ok && Imm != 0;  break;
          Ok = (isUInt<5>(Imm) && Imm != 0) ||
               (Imm >= 0xfffe0 && Imm <= 0xfffff);  break;
          Ok = Imm >= 0 && Imm <= 10;  break;
          Ok = Imm >= 0 && Imm <= 7;   break;
          Ok = Imm >= 1 && Imm <= 10;  break;
          Ok = Imm >= 2 && Imm <= 14;  break;
          Ok = (Imm & 0xf) == 0;  break;
          Ok = isUInt<2>(Imm);  break;
        }
        if (!Ok) {
          ErrInfo = "Invalid immediate";
          return false;
        }
      }
    }
  }

  // ...
    // (VL operand)
    if (!Op.isImm() && !Op.isReg()) {
      ErrInfo = "Invalid operand type for VL operand";
      return false;
    }
    if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
      auto *RC = MRI.getRegClass(Op.getReg());
      if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
        ErrInfo = "Invalid register class for VL operand";
        return false;
      }
    }
    if (!RISCVII::hasSEWOp(TSFlags)) {
      ErrInfo = "VL operand w/o SEW operand?";
      return false;
    }
  // ...
    // (SEW operand)
    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "SEW value expected to be an immediate";
      return false;
    }
    uint64_t Log2SEW = MI.getOperand(OpIdx).getImm();
    if (Log2SEW > 31) {
      ErrInfo = "Unexpected SEW value";
      return false;
    }
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
    if (!RISCVVType::isValidSEW(SEW)) {
      ErrInfo = "Unexpected SEW value";
      return false;
    }
  // ...
    // (policy operand)
    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "Policy operand expected to be an immediate";
      return false;
    }
    uint64_t Policy = MI.getOperand(OpIdx).getImm();
    if (Policy > (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) {
      ErrInfo = "Invalid Policy Value";
      return false;
    }
    if (!RISCVII::hasVLOp(TSFlags)) {
      ErrInfo = "policy operand w/o VL operand?";
      return false;
    }

    // ...
    unsigned UseOpIdx;
    if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
      ErrInfo = "policy operand w/o tied operand?";
      return false;
    }
  // ...
  if (/* FRM operand is DYN */
      !MI.readsRegister(RISCV::FRM, /*TRI=*/nullptr)) {
    ErrInfo = "dynamic rounding mode should read FRM";
    return false;
  }
// (canFoldIntoAddrMode)
  // ...
  int64_t NewOffset = OldOffset + Disp;
  if (!STI.is64Bit())
    NewOffset = SignExtend64<32>(NewOffset);

  if (!isInt<12>(NewOffset))
    return false;
  // ...
    llvm_unreachable("Addressing mode not supported for folding");
  // ...

// (getMemOperandsWithOffsetWidth)
  // ...
  OffsetIsScalable = false;
  // ...

static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  ArrayRef<const MachineOperand *> BaseOps1,
                                  const MachineInstr &MI2,
                                  ArrayRef<const MachineOperand *> BaseOps2) {
  // Only examine the first "base" operand of each instruction, on the
  // assumption that it represents the real base address of the memory access.
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
    return true;
  // ...
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  // ...
  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}

bool RISCVInstrInfo::shouldClusterMemOps(
    ArrayRef<const MachineOperand *> BaseOps1, int64_t Offset1,
    bool OffsetIsScalable1, ArrayRef<const MachineOperand *> BaseOps2,
    int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize,
    unsigned NumBytes) const {
  // If the mem ops (to be clustered) do not have the same base ptr, then they
  // should not be clustered.
  if (!BaseOps1.empty() && !BaseOps2.empty()) {
    // ...
  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
    // If only one base op is empty, they do not have the same base ptr.
    return false;
  }

  unsigned CacheLineSize =
      BaseOps1.front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
  // ...
  // Cluster if the memory operations are on the same or a neighbouring cache
  // line, but limit the maximum ClusterSize to avoid creating too much
  // additional register pressure.
  return ClusterSize <= 4 && std::abs(Offset1 - Offset2) < CacheLineSize;
}
// (areMemAccessesTriviallyDisjoint)
  int64_t OffsetA = 0, OffsetB = 0;
  LocationSize WidthA = 0, WidthB = 0;
  // ...
      int LowOffset = std::min(OffsetA, OffsetB);
      int HighOffset = std::max(OffsetA, OffsetB);
      LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowWidth.hasValue() &&
          LowOffset + (int)LowWidth.getValue() <= HighOffset)
        return true;
std::pair<unsigned, unsigned>
RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
  return std::make_pair(TF & Mask, TF & ~Mask);
}

ArrayRef<std::pair<unsigned, const char *>>
RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace RISCVII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"},
      {MO_TLSDESC_HI, "riscv-tlsdesc-hi"},
      {MO_TLSDESC_LOAD_LO, "riscv-tlsdesc-load-lo"},
      {MO_TLSDESC_ADD_LO, "riscv-tlsdesc-add-lo"},
      {MO_TLSDESC_CALL, "riscv-tlsdesc-call"}};
  return ArrayRef(TargetFlags);
}

bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
  const Function &F = MF.getFunction();

  // Can F be deduplicated by the linker? If it can, don't outline from it.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;
  // ...
}

bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                            unsigned &Flags) const {
  return TargetInstrInfo::isMBBSafeToOutlineFrom(MBB, Flags);
}

static bool isCandidatePatchable(const MachineBasicBlock &MBB) {
  // ...
  return F.getFnAttribute("fentry-call").getValueAsBool() ||
         F.hasFnAttribute("patchable-function-entry");
}

static bool isMIReadsReg(const MachineInstr &MI, const TargetRegisterInfo *TRI,
                         unsigned RegNo) {
  return MI.readsRegister(RegNo, TRI) ||
         MI.getDesc().hasImplicitUseOfPhysReg(RegNo);
}

static bool isMIModifiesReg(const MachineInstr &MI,
                            const TargetRegisterInfo *TRI, unsigned RegNo) {
  return MI.modifiesRegister(RegNo, TRI) ||
         MI.getDesc().hasImplicitDefOfPhysReg(RegNo);
}
// (cannotInsertTailCall)
  // ...
  unsigned TailExpandUseRegNo = /* ... */;
  // ...

static std::optional<MachineOutlinerConstructionID>
analyzeCandidate(outliner::Candidate &C) {
  // If the last instruction is a return, we can rely on the verification
  // already performed in getOutliningTypeImpl.
  if (C.back().isReturn()) {
    assert(C.back().isCall() &&
           "The candidate who uses return instruction must be outlined "
           "using tail call");
    return MachineOutlinerTailCall;
  }

  auto CandidateUsesX5 = [](outliner::Candidate &C) {
    // ...
    if (std::any_of(C.begin(), C.end(), [TRI](const MachineInstr &MI) {
          return isMIModifiesReg(MI, TRI, RISCV::X5);
        }))
      return true;
    return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
  };

  if (!CandidateUsesX5(C))
    return MachineOutlinerDefault;

  return std::nullopt;
}

std::optional<std::unique_ptr<outliner::OutlinedFunction>>
RISCVInstrInfo::getOutliningCandidateInfo(
    const MachineModuleInfo &MMI,
    std::vector<outliner::Candidate> &RepeatedSequenceLocs,
    unsigned MinRepeats) const {
  // ...
  if (!CandidateInfo)
    RepeatedSequenceLocs.clear();

  // If the sequence doesn't have enough candidates left, then we're done.
  if (RepeatedSequenceLocs.size() < MinRepeats)
    return std::nullopt;

  unsigned InstrSizeCExt =
      Candidate.getMF()->getSubtarget<RISCVSubtarget>().hasStdExtCOrZca() ? 2
                                                                          : 4;
  unsigned CallOverhead = 0, FrameOverhead = 0;
  switch (MOCI) {
  case MachineOutlinerDefault:
    // call t0, function = 8 bytes.
    CallOverhead = 8;
    // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
    FrameOverhead = InstrSizeCExt;
    break;
  case MachineOutlinerTailCall:
    // tail call = auipc + jalr in the worst case without linker relaxation.
    CallOverhead = 4 + InstrSizeCExt;
    // Using tail call we move the ret instruction from caller to callee.
    FrameOverhead = 0;
    break;
  }

  for (auto &C : RepeatedSequenceLocs)
    C.setCallInfo(MOCI, CallOverhead);

  unsigned SequenceSize = 0;
  for (auto &MI : Candidate)
    SequenceSize += getInstSizeInBytes(MI);

  return std::make_unique<outliner::OutlinedFunction>(
      RepeatedSequenceLocs, SequenceSize, FrameOverhead, MOCI);
}
outliner::InstrType
RISCVInstrInfo::getOutliningTypeImpl(const MachineModuleInfo &MMI,
                                     MachineBasicBlock::iterator &MBBI,
                                     unsigned Flags) const {
  MachineInstr &MI = *MBBI;
  const auto &F = MI.getMF()->getFunction();

  // Positions generally can't safely be outlined.
  if (MI.isPosition()) {
    // We can manually strip out CFI instructions later.
    if (MI.isCFIInstruction())
      // If the current function has exception handling code, we can't outline
      // and strip these CFI instructions since it may break .eh_frame section
      // needed in unwinding.
      return F.needsUnwindTableEntry() ? outliner::InstrType::Illegal
                                       : outliner::InstrType::Invisible;
    // ...
  }

  // Make sure the operands don't reference something unsafe.
  for (const auto &MO : MI.operands()) {
    // ...
    // pcrel-hi and pcrel-lo can't be put in separate sections, filter that
    // out.
    if (MO.getTargetFlags() == RISCVII::MO_PCREL_LO &&
        (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
         F.hasSection() || F.getSectionPrefix()))
      return outliner::InstrType::Illegal;
  }

  return outliner::InstrType::Legal;
}

void RISCVInstrInfo::buildOutlinedFrame(
    MachineBasicBlock &MBB, MachineFunction &MF,
    const outliner::OutlinedFunction &OF) const {
  // Strip out any CFI instructions.
  bool Changed = true;
  while (Changed) {
    Changed = false;
    auto I = MBB.begin();
    auto E = MBB.end();
    for (; I != E; ++I) {
      if (I->isCFIInstruction()) {
        I->removeFromParent();
        Changed = true;
        break;
      }
    }
  }
  // ...
}

// (insertOutlinedCall: build call t0/tail pseudo against the outlined symbol)
  // ...
      .addGlobalAddress(M.getNamedValue(MF.getName()), 0, RISCVII::MO_CALL);
  // ...
      .addGlobalAddress(M.getNamedValue(MF.getName()), 0, RISCVII::MO_CALL);
  // ...

std::optional<RegImmPair>
RISCVInstrInfo::isAddImmediate(const MachineInstr &MI, Register Reg) const {
  // ...
  if (MI.getOpcode() == RISCV::ADDI && MI.getOperand(1).isReg() &&
      MI.getOperand(2).isImm())
    return RegImmPair{MI.getOperand(1).getReg(), MI.getOperand(2).getImm()};

  return std::nullopt;
}
std::string RISCVInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {
  // Print a generic comment for this operand if there is one.
  std::string GenericComment =
      TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
  if (!GenericComment.empty())
    return GenericComment;

  // If not, we must have an immediate operand.
  if (!Op.isImm())
    return std::string();

  const MCInstrDesc &Desc = MI.getDesc();
  if (OpIdx >= Desc.getNumOperands())
    return std::string();

  std::string Comment;
  raw_string_ostream OS(Comment);
  // ...
    // (vtype operand)
    unsigned Imm = Op.getImm();
    RISCVVType::printVType(Imm, OS);
  // ...
    // (SEW operand)
    unsigned Log2SEW = Op.getImm();
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
    OS << "e" << SEW;
  // ...
    // (policy operand)
    unsigned Policy = Op.getImm();
    assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
           "Invalid Policy Value");
  // ...
  return Comment;
}
#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL)                                  \
  RISCV::Pseudo##OP##_##LMUL

#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)                                    \
  RISCV::Pseudo##OP##_##LMUL##_MASK

#define CASE_RVV_OPCODE_LMUL(OP, LMUL)                                         \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL):                                       \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)

#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP)                                       \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8):                                        \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4):                                   \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2):                                   \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1):                                    \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2):                                    \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_UNMASK(OP)                                             \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP):                                            \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_MASK_WIDEN(OP)                                         \
  CASE_RVV_OPCODE_MASK_LMUL(OP, MF8):                                          \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4):                                     \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2):                                     \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M1):                                      \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M2):                                      \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_MASK(OP)                                               \
  CASE_RVV_OPCODE_MASK_WIDEN(OP):                                              \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_WIDEN(OP)                                              \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP):                                            \
  case CASE_RVV_OPCODE_MASK_WIDEN(OP)

#define CASE_RVV_OPCODE(OP)                                                    \
  CASE_RVV_OPCODE_UNMASK(OP):                                                  \
  case CASE_RVV_OPCODE_MASK(OP)

#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL)                                 \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VMA_OPCODE_LMULS_M1(OP, TYPE)                                     \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, M1):                                        \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VMA_OPCODE_LMULS_MF2(OP, TYPE)                                    \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2):                                       \
  case CASE_VMA_OPCODE_LMULS_M1(OP, TYPE)

#define CASE_VMA_OPCODE_LMULS_MF4(OP, TYPE)                                    \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4):                                       \
  case CASE_VMA_OPCODE_LMULS_MF2(OP, TYPE)

#define CASE_VMA_OPCODE_LMULS(OP, TYPE)                                        \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8):                                       \
  case CASE_VMA_OPCODE_LMULS_MF4(OP, TYPE)

// VFMA instructions are SEW-specific.
#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW)                           \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)                               \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW):                             \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW):                             \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)                              \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW):                                 \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW)                              \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW):                                 \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_VV(OP)                                                \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16):                                     \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32):                                \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)

#define CASE_VFMA_SPLATS(OP)                                                   \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16):                                 \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32):                            \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
3334 unsigned &SrcOpIdx1,
3335 unsigned &SrcOpIdx2)
const {
3337 if (!
Desc.isCommutable())
3340 switch (
MI.getOpcode()) {
3341 case RISCV::TH_MVEQZ:
3342 case RISCV::TH_MVNEZ:
3346 if (
MI.getOperand(2).getReg() == RISCV::X0)
3349 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
3350 case RISCV::TH_MULA:
3351 case RISCV::TH_MULAW:
3352 case RISCV::TH_MULAH:
3353 case RISCV::TH_MULS:
3354 case RISCV::TH_MULSW:
3355 case RISCV::TH_MULSH:
3357 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
3358 case RISCV::PseudoCCMOVGPRNoX0:
3359 case RISCV::PseudoCCMOVGPR:
3361 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
3388 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
3409 if ((
MI.getOperand(
MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
3414 unsigned CommutableOpIdx1 = 1;
3415 unsigned CommutableOpIdx2 = 3;
3416 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3429 if ((
MI.getOperand(
MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
3436 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
3438 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
3442 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
3443 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
3449 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
3450 SrcOpIdx2 == CommuteAnyOperandIndex) {
3453 unsigned CommutableOpIdx1 = SrcOpIdx1;
3454 if (SrcOpIdx1 == SrcOpIdx2) {
3457 CommutableOpIdx1 = 1;
3458 }
else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
3460 CommutableOpIdx1 = SrcOpIdx2;
3465 unsigned CommutableOpIdx2;
3466 if (CommutableOpIdx1 != 1) {
3468 CommutableOpIdx2 = 1;
3470 Register Op1Reg =
MI.getOperand(CommutableOpIdx1).getReg();
3475 if (Op1Reg !=
MI.getOperand(2).getReg())
3476 CommutableOpIdx2 = 2;
3478 CommutableOpIdx2 = 3;
3483 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL)                \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL:                                \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL;                             \
    break;

#define CASE_VMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)                    \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

#define CASE_VMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)                   \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2)                       \
  CASE_VMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)

#define CASE_VMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)                   \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4)                       \
  CASE_VMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)

#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8)                       \
  CASE_VMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)

#define CASE_VMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                            \
  CASE_VMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16)                       \
  CASE_VMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32)                       \
  CASE_VMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64)

// VFMA depends on SEW.
#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW)          \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW:                        \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW;                     \
    break;

#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)              \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)             \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)                               \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16)                     \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32)                     \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW)             \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE, SEW)                 \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8, SEW)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                           \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                     bool NewMI,
                                                     unsigned OpIdx1,
                                                     unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
                                                            : RISCV::TH_MVEQZ));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR: {
    // CCMOV can be commuted by inverting the condition.
    auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
    CC = RISCVCC::getOppositeBranchCondition(CC);
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(CC);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  // ... (VFMA splats/VV: commuting multiplicand with addend switches opcode)
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
    unsigned Opc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
    // ... (CASE_*_CHANGE_OPCODE_* instantiations elided)
    }

    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  // ...
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    // If one of the operands is the addend we need to change opcode;
    // otherwise we're just swapping two of the multiplicands.
    if (OpIdx1 == 3 || OpIdx2 == 3) {
      unsigned Opc;
      switch (MI.getOpcode()) {
      default:
        llvm_unreachable("Unexpected opcode");
      // ...
      }

      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
      return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                     OpIdx1, OpIdx2);
    }
    // Let the default code handle it.
    break;
  }

  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

#undef CASE_RVV_OPCODE_UNMASK_LMUL
#undef CASE_RVV_OPCODE_MASK_LMUL
#undef CASE_RVV_OPCODE_LMUL
#undef CASE_RVV_OPCODE_UNMASK_WIDEN
#undef CASE_RVV_OPCODE_UNMASK
#undef CASE_RVV_OPCODE_MASK_WIDEN
#undef CASE_RVV_OPCODE_MASK
#undef CASE_RVV_OPCODE_WIDEN
#undef CASE_RVV_OPCODE

#undef CASE_VMA_OPCODE_COMMON
#undef CASE_VMA_OPCODE_LMULS_M1
#undef CASE_VMA_OPCODE_LMULS_MF2
#undef CASE_VMA_OPCODE_LMULS_MF4
#undef CASE_VMA_OPCODE_LMULS
#undef CASE_VFMA_OPCODE_COMMON
#undef CASE_VFMA_OPCODE_LMULS_M1
#undef CASE_VFMA_OPCODE_LMULS_MF2
#undef CASE_VFMA_OPCODE_LMULS_MF4
#undef CASE_VFMA_OPCODE_VV
#undef CASE_VFMA_SPLATS
// clang-format off
#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL)                                    \
  RISCV::PseudoV##OP##_##LMUL##_TIED

#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP)                                       \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF4):                                          \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2):                                     \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M1):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M2):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_OPCODE_LMULS(OP)                                           \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF8):                                          \
  case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)

#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL)                             \
  case RISCV::PseudoV##OP##_##LMUL##_TIED:                                     \
    NewOpc = RISCV::PseudoV##OP##_##LMUL;                                      \
    break;

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)                                \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)

// FP widening ops are SEW-specific.
#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW)                            \
  RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED

#define CASE_FP_WIDEOP_OPCODE_LMULS_MF4(OP)                                    \
  CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16):                                  \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16):                             \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32):                             \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32)

#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW)                     \
  case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED:                             \
    NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW;                              \
    break;

#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32)

#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                 \
  CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
// clang-format on
MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
                                                    LiveVariables *LV,
                                                    LiveIntervals *LIS) const {
  MachineInstrBuilder MIB;
  switch (MI.getOpcode()) {
  default:
    return nullptr;
  case CASE_FP_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV):
  case CASE_FP_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV): {
    assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
           MI.getNumExplicitOperands() == 7 &&
           "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
    // If the tail policy is undisturbed we can't convert.
    // ...
    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
    CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWADD_WV)
    CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWSUB_WV)
    }

    MachineBasicBlock &MBB = *MI.getParent();
    MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
              .add(MI.getOperand(0))
              .addReg(MI.getOperand(0).getReg(), RegState::Undef)
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4))
              .add(MI.getOperand(5))
              .add(MI.getOperand(6));
    break;
  }
  case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
    // If the tail policy is undisturbed we can't convert.
    assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
           MI.getNumExplicitOperands() == 6);
    if ((MI.getOperand(5).getImm() & 1) == 0)
      return nullptr;

    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADD_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADDU_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUB_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUBU_WV)
    }

    MachineBasicBlock &MBB = *MI.getParent();
    MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
              .add(MI.getOperand(0))
              .addReg(MI.getOperand(0).getReg(), RegState::Undef)
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4))
              .add(MI.getOperand(5));
    break;
  }
  }
  MIB.copyImplicitOps(MI);

  if (LV) {
    unsigned NumOps = MI.getNumOperands();
    for (unsigned I = 1; I < NumOps; ++I) {
      MachineOperand &Op = MI.getOperand(I);
      if (Op.isReg() && Op.isKill())
        LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
    }
  }

  if (LIS) {
    SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);

    if (MI.getOperand(0).isEarlyClobber()) {
      // Use operand 1 was tied to early-clobber def operand 0, so its live
      // interval could have ended at an early-clobber slot. Now that they are
      // not tied we need to update it to the normal register slot.
      LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
      LiveRange::Segment *S = LI.getSegmentContaining(Idx);
      if (S->end == Idx.getRegSlot(true))
        S->end = Idx.getRegSlot();
    }
  }

  return MIB;
}

#undef CASE_WIDEOP_OPCODE_COMMON
#undef CASE_WIDEOP_OPCODE_LMULS_MF4
#undef CASE_WIDEOP_OPCODE_LMULS
#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
#undef CASE_FP_WIDEOP_OPCODE_COMMON
#undef CASE_FP_WIDEOP_OPCODE_LMULS_MF4
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
  if (llvm::has_single_bit<uint32_t>(Amount)) {
    uint32_t ShiftAmount = Log2_32(Amount);
    if (ShiftAmount == 0)
      return;
    // ... (slli DestReg, DestReg, ShiftAmount)
  } else if (STI.hasStdExtZba() &&
             ((Amount % 3 == 0 && isPowerOf2_64(Amount / 3)) ||
              (Amount % 5 == 0 && isPowerOf2_64(Amount / 5)) ||
              (Amount % 9 == 0 && isPowerOf2_64(Amount / 9)))) {
    // We can use Zba SHXADD + SLLI instructions for multiply in some cases.
    unsigned Opc;
    uint32_t ShiftAmount;
    if (Amount % 9 == 0) {
      Opc = RISCV::SH3ADD;
      ShiftAmount = Log2_64(Amount / 9);
    } else if (Amount % 5 == 0) {
      Opc = RISCV::SH2ADD;
      ShiftAmount = Log2_64(Amount / 5);
    } else if (Amount % 3 == 0) {
      Opc = RISCV::SH1ADD;
      ShiftAmount = Log2_64(Amount / 3);
    }
    // ...
  } else if (llvm::has_single_bit<uint32_t>(Amount - 1)) {
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ... (slli + add)
  } else if (llvm::has_single_bit<uint32_t>(Amount + 1)) {
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ... (slli + sub)
  } else if (STI.hasStdExtZmmul()) {
    Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ... (movImm + mul)
  } else {
    Register Acc;
    uint32_t PrevShiftAmount = 0;
    for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
      if (Amount & (1U << ShiftAmount)) {
        if (ShiftAmount)
          // ... (slli DestReg by the delta)
              .addImm(ShiftAmount - PrevShiftAmount)
        // ...
        if (Amount >> (ShiftAmount + 1)) {
          // If we don't have an accumulator yet, create it and copy DestReg.
          if (!Acc) {
            Acc = MRI.createVirtualRegister(&RISCV::GPRRegClass);
            // ...
          }
          // ...
        }
        PrevShiftAmount = ShiftAmount;
      }
    }
    assert(Acc && "Expected valid accumulator");
    // ... (add DestReg, DestReg, Acc)
  }
ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
RISCVInstrInfo::getSerializableMachineMemOperandTargetFlags() const {
  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
      {{MONontemporalBit0, "riscv-nontemporal-domain-bit-0"},
       {MONontemporalBit1, "riscv-nontemporal-domain-bit-1"}};
  return ArrayRef(TargetFlags);
}

// Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
bool RISCV::isSEXT_W(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
}

// Returns true if this is the zext.w pattern, add.uw rd, rs1, x0.
bool RISCV::isZEXT_W(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ADD_UW && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0;
}

// Returns true if this is the zext.b pattern, andi rd, rs1, 255.
bool RISCV::isZEXT_B(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ANDI && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
}
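// Semantics sketch (editorial addition, not from the original file; names
// ending in "Example" are hypothetical, and the int32_t cast relies on the
// two's-complement behavior every supported compiler provides): why those
// three instruction patterns are extension idioms. "addiw rd, rs, 0"
// sign-extends the low 32 bits, while "andi rd, rs, 255" zero-extends the
// low byte.
namespace {
constexpr int64_t addiw0Example(uint64_t Rs) {
  return static_cast<int32_t>(Rs); // result is sext(rs[31:0])
}
static_assert(addiw0Example(0x00000000ffffffffULL) == -1,
              "sext.w of 0xffffffff is -1");
constexpr uint64_t andi255Example(uint64_t Rs) { return Rs & 255; }
static_assert(andi255Example(0xffffffffffffff9bULL) == 0x9b,
              "zext.b keeps only the low byte");
} // namespace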
static bool isRVVWholeLoadStore(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
  // ... (VS1R_V/VS2R_V/VS4R_V/VS8R_V)
  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL1RE64_V:
  case RISCV::VL2RE64_V:
  case RISCV::VL4RE64_V:
  case RISCV::VL8RE64_V:
    return true;
  }
}

bool RISCV::isRVVSpill(const MachineInstr &MI) {
  // RVV lacks any support for immediate addressing for stack addresses, so be
  // conservative.
  unsigned Opcode = MI.getOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
      !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
    return false;
  return true;
}
std::optional<std::pair<unsigned, unsigned>>
RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVRELOAD2_M1:
    return std::make_pair(2u, 1u);
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVRELOAD2_M2:
    return std::make_pair(2u, 2u);
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVRELOAD2_M4:
    return std::make_pair(2u, 4u);
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVRELOAD3_M1:
    return std::make_pair(3u, 1u);
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVRELOAD3_M2:
    return std::make_pair(3u, 2u);
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVRELOAD4_M1:
    return std::make_pair(4u, 1u);
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVRELOAD4_M2:
    return std::make_pair(4u, 2u);
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVRELOAD5_M1:
    return std::make_pair(5u, 1u);
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVRELOAD6_M1:
    return std::make_pair(6u, 1u);
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVRELOAD7_M1:
    return std::make_pair(7u, 1u);
  case RISCV::PseudoVSPILL8_M1:
  case RISCV::PseudoVRELOAD8_M1:
    return std::make_pair(8u, 1u);
  }
}
bool RISCV::isFaultFirstLoad(const MachineInstr &MI) {
  return MI.getNumExplicitDefs() == 2 &&
         MI.modifiesRegister(RISCV::VL, /*TRI=*/nullptr) && !MI.isInlineAsm();
}

bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
  int16_t MI1FrmOpIdx =
      RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
  int16_t MI2FrmOpIdx =
      RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
  if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
    return false;
  // ...
}
std::optional<unsigned>
RISCV::getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW) {
  switch (Opcode) {
  default:
    return std::nullopt;

  // 11.6. Vector Single-Width Shift Instructions
  case RISCV::VSLL_VX:
  case RISCV::VSRL_VX:
  case RISCV::VSRA_VX:
  // 12.4. Vector Single-Width Scaling Shift Instructions
  case RISCV::VSSRL_VX:
  case RISCV::VSSRA_VX:
    // Only the low lg2(SEW) bits of the shift-amount value are used.
    return Log2SEW;

  // 11.7. Vector Narrowing Integer Right Shift Instructions
  case RISCV::VNSRL_WX:
  case RISCV::VNSRA_WX:
  // 12.5. Vector Narrowing Fixed-Point Clip Instructions
  case RISCV::VNCLIPU_WX:
  case RISCV::VNCLIP_WX:
    // Only the low lg2(2*SEW) bits of the shift-amount value are used.
    return Log2SEW + 1;

  // 11.1. Vector Single-Width Integer Add and Subtract
  case RISCV::VADD_VX:
  case RISCV::VSUB_VX:
  case RISCV::VRSUB_VX:
  // 11.2. Vector Widening Integer Add/Subtract
  case RISCV::VWADDU_VX:
  case RISCV::VWSUBU_VX:
  case RISCV::VWADD_VX:
  case RISCV::VWSUB_VX:
  case RISCV::VWADDU_WX:
  case RISCV::VWSUBU_WX:
  case RISCV::VWADD_WX:
  case RISCV::VWSUB_WX:
  // 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
  case RISCV::VADC_VXM:
  case RISCV::VADC_VIM:
  case RISCV::VMADC_VXM:
  case RISCV::VMADC_VIM:
  case RISCV::VMADC_VX:
  case RISCV::VSBC_VXM:
  case RISCV::VMSBC_VXM:
  case RISCV::VMSBC_VX:
  // 11.5. Vector Bitwise Logical Instructions
  case RISCV::VAND_VX:
  case RISCV::VOR_VX:
  case RISCV::VXOR_VX:
  // 11.8. Vector Integer Compare Instructions
  case RISCV::VMSEQ_VX:
  case RISCV::VMSNE_VX:
  case RISCV::VMSLTU_VX:
  case RISCV::VMSLT_VX:
  case RISCV::VMSLEU_VX:
  case RISCV::VMSLE_VX:
  case RISCV::VMSGTU_VX:
  case RISCV::VMSGT_VX:
  // 11.9. Vector Integer Min/Max Instructions
  case RISCV::VMINU_VX:
  case RISCV::VMIN_VX:
  case RISCV::VMAXU_VX:
  case RISCV::VMAX_VX:
  // 11.10. Vector Single-Width Integer Multiply Instructions
  case RISCV::VMUL_VX:
  case RISCV::VMULH_VX:
  case RISCV::VMULHU_VX:
  case RISCV::VMULHSU_VX:
  // 11.11. Vector Integer Divide Instructions
  case RISCV::VDIVU_VX:
  case RISCV::VDIV_VX:
  case RISCV::VREMU_VX:
  case RISCV::VREM_VX:
  // 11.12. Vector Widening Integer Multiply Instructions
  case RISCV::VWMUL_VX:
  case RISCV::VWMULU_VX:
  case RISCV::VWMULSU_VX:
  // 11.13. Vector Single-Width Integer Multiply-Add Instructions
  case RISCV::VMACC_VX:
  case RISCV::VNMSAC_VX:
  case RISCV::VMADD_VX:
  case RISCV::VNMSUB_VX:
  // 11.14. Vector Widening Integer Multiply-Add Instructions
  case RISCV::VWMACCU_VX:
  case RISCV::VWMACC_VX:
  case RISCV::VWMACCSU_VX:
  case RISCV::VWMACCUS_VX:
  // 11.15. Vector Integer Merge Instructions
  case RISCV::VMERGE_VXM:
  // 11.16. Vector Integer Move Instructions
  case RISCV::VMV_V_X:
  // 12.1. Vector Single-Width Saturating Add and Subtract
  case RISCV::VSADDU_VX:
  case RISCV::VSADD_VX:
  case RISCV::VSSUBU_VX:
  case RISCV::VSSUB_VX:
  // 12.2. Vector Single-Width Averaging Add and Subtract
  case RISCV::VAADDU_VX:
  case RISCV::VAADD_VX:
  case RISCV::VASUBU_VX:
  case RISCV::VASUB_VX:
  // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
  case RISCV::VSMUL_VX:
  // 16.1. Integer Scalar Move Instructions
  case RISCV::VMV_S_X:
    return 1U << Log2SEW;
  }
}

unsigned RISCV::getRVVMCOpcode(unsigned RVVPseudoOpcode) {
  const RISCVVPseudosTable::PseudoInfo *RVV =
      RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
  if (!RVV)
    return 0;
  return RVV->BaseInstr;
}
// (destination EEW helper)
  // ...
  unsigned Scaled = Log2SEW + (DestEEW - 1);
  // ...

/// Given two VL operands, do we know that LHS <= RHS?
bool RISCV::isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS) {
  if (LHS.isReg() && RHS.isReg() && LHS.getReg().isVirtual() &&
      LHS.getReg() == RHS.getReg())
    return true;
  // ...
  if (!LHS.isImm() || !RHS.isImm())
    return false;
  return LHS.getImm() <= RHS.getImm();
}
namespace {
class RISCVPipelinerLoopInfo : public TargetInstrInfo::PipelinerLoopInfo {
  MachineOperand LHS;
  MachineOperand RHS;
  SmallVector<MachineOperand, 3> Cond;

public:
  RISCVPipelinerLoopInfo(const MachineOperand &LHS, const MachineOperand &RHS,
                         const SmallVectorImpl<MachineOperand> &Cond)
      : LHS(LHS), RHS(RHS), Cond(Cond.begin(), Cond.end()) {}

  bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
    // ...
  }

  std::optional<bool> createTripCountGreaterCondition(
      int TC, MachineBasicBlock &MBB,
      SmallVectorImpl<MachineOperand> &CondParam) override {
    // ...
  }

  void adjustTripCount(int TripCountAdjust) override {}

  void disposed() override {}
};
} // namespace

std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
RISCVInstrInfo::analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const {
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  if (analyzeBranch(*LoopBB, TBB, FBB, Cond, /*AllowModify=*/false))
    return nullptr;

  // Infinite loops are not supported.
  if (TBB == LoopBB && FBB == LoopBB)
    return nullptr;

  // Must be a conditional branch.
  if (TBB != LoopBB && FBB != LoopBB)
    return nullptr;

  assert((TBB == LoopBB || FBB == LoopBB) &&
         "The Loop must be a single-basic-block loop");

  // ...
  auto FindRegDef = [&MRI](MachineOperand &Op) -> const MachineInstr * {
    if (!Op.isReg())
      return nullptr;
    Register Reg = Op.getReg();
    if (!Reg.isVirtual())
      return nullptr;
    return MRI.getVRegDef(Reg);
  };
  // ...
  return std::make_unique<RISCVPipelinerLoopInfo>(LHS, RHS, Cond);
}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg, unsigned NumRegs)
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, unsigned CombineOpc, unsigned ZeroReg=0, bool CheckZeroReg=false)
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
static ARCCC::CondCode getOppositeBranchCondition(ARCCC::CondCode CC)
Return the inverse of passed condition, i.e. turning COND_E to COND_NE.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
static M68k::CondCode getCondFromBranchOpc(unsigned BrOpc)
unsigned const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
static bool cannotInsertTailCall(const MachineBasicBlock &MBB)
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
static bool isRVVWholeLoadStore(unsigned Opcode)
#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern)
#define RVV_OPC_LMUL_CASE(OPC, INV)
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs)
static unsigned getAddendOperandIdx(unsigned Pattern)
#define CASE_RVV_OPCODE_UNMASK(OP)
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static std::optional< MachineOutlinerConstructionID > analyzeCandidate(outliner::Candidate &C)
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
#define CASE_VFMA_SPLATS(OP)
unsigned getPredicatedOpcode(unsigned Opcode)
static void genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg)
#define CASE_WIDEOP_OPCODE_LMULS(OP)
#define OPCODE_LMUL_MASK_CASE(OPC)
static bool isFSUB(unsigned Opc)
#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
#define CASE_RVV_OPCODE(OP)
#define CASE_VFMA_OPCODE_VV(OP)
MachineOutlinerConstructionID
#define CASE_RVV_OPCODE_WIDEN(OP)
#define CASE_VMA_OPCODE_LMULS(OP, TYPE)
static bool isFMUL(unsigned Opc)
static bool getFPPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
#define OPCODE_LMUL_CASE(OPC)
#define CASE_OPERAND_UIMM(NUM)
static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB, const MachineOperand &MO, unsigned OuterShiftAmt)
Utility routine that checks if.
static bool isCandidatePatchable(const MachineBasicBlock &MBB)
static bool isMIReadsReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, unsigned RegNo)
static bool isFADD(unsigned Opc)
#define CASE_FP_WIDEOP_OPCODE_LMULS_MF4(OP)
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVII::VLMUL LMul)
static bool isMIModifiesReg(const MachineInstr &MI, const TargetRegisterInfo *TRI, unsigned RegNo)
static MachineInstr * canFoldAsPredicatedOp(Register Reg, const MachineRegisterInfo &MRI, const TargetInstrInfo *TII)
Identify instructions that can be folded into a CCMOV instruction, and return the defining instruction.
static bool canCombineFPFusedMultiply(const MachineInstr &Root, const MachineOperand &MO, bool DoRegPressureReduce)
static bool getSHXADDPatterns(const MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns)
static bool getFPFusedMultiplyPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
static cl::opt< MachineTraceStrategy > ForceMachineCombinerStrategy("riscv-force-machine-combiner-strategy", cl::Hidden, cl::desc("Force machine combiner to use a specific strategy for machine " "trace metrics evaluation."), cl::init(MachineTraceStrategy::TS_NumStrategies), cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local", "Local strategy."), clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr", "MinInstrCount strategy.")))
static unsigned getSHXADDShiftAmount(unsigned Opc)
#define CASE_RVV_OPCODE_MASK(OP)
#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
This file declares the machine register scavenger class.
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
This file defines the SmallVector class.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory), i.e. a start pointer and a length.
const T & front() const
front - Get the first element.
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
static DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)
When two instructions are combined into a single instruction we also need to combine the original locations into a single location.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
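The machine-combiner entries above thread a DenseMap from each new virtual register to an instruction index; a minimal sketch of the insert() contract (the helper and its names are illustrative, not from this file):

#include "llvm/ADT/DenseMap.h"
// insert() returns {iterator, bool}; the bool is false if the key already
// existed, in which case the stored value is left untouched.
static void recordVRegIdx(llvm::DenseMap<unsigned, unsigned> &InstrIdxForVirtReg,
                          unsigned VReg, unsigned Idx) {
  auto [It, Inserted] = InstrIdxForVirtReg.insert({VReg, Idx});
  if (!Inserted)
    It->second = Idx;
}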
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LiveInterval - This class represents the liveness of a register, or stack slot.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
TypeSize getValue() const
MCInstBuilder & addReg(MCRegister Reg)
Add a new register operand.
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
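A sketch of how these builder calls chain: this is the shape of a getNop() implementation returning the canonical RISC-V NOP, addi x0, x0, 0 (assuming the TableGen-generated RISCV::ADDI and RISCV::X0 enums):

#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstBuilder.h"
llvm::MCInst Nop = llvm::MCInstBuilder(RISCV::ADDI)
                       .addReg(RISCV::X0) // rd
                       .addReg(RISCV::X0) // rs1
                       .addImm(0);        // imm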
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer control flow to some other block.
This holds information about one operand of a machine instruction, indicating the register class for register operands, etc.
uint8_t OperandType
Information about the type of the operand.
Wrapper class representing physical registers. Should be passed by value.
const FeatureBitset & getFeatureBits() const
unsigned pred_size() const
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
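A common pattern in branch analysis is to look only at the last real instruction of a block; a minimal sketch (helper name is illustrative):

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
// Debug values at the end of the block are skipped by getLastNonDebugInstr().
static bool endsInConditionalBranch(llvm::MachineBasicBlock &MBB) {
  llvm::MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  return I != MBB.end() && I->isConditionalBranch();
}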
Instructions::const_iterator const_instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setStackID(int ObjectIdx, uint8_t ID)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do so.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isReturn(QueryType Type=AnyInBundle) const
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore, etc.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the information describing the memory reference is not available.
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
bool isNonTemporal() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
static MachineOperand CreateImm(int64_t Val)
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags (isKill, isUndef and isDead).
@ MO_Immediate
Immediate operand.
@ MO_Register
Register operand.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
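RISC-V's branch analysis packs a conditional branch into the Cond vector as [condition code, LHS, RHS]; a sketch of that convention (the helper is illustrative; Branch is assumed to be a BEQ/BNE-style instruction):

#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
static void packBranchCond(const llvm::MachineInstr &Branch, int64_t CC,
                           llvm::SmallVectorImpl<llvm::MachineOperand> &Cond) {
  Cond.push_back(llvm::MachineOperand::CreateImm(CC)); // RISCVCC::CondCode
  Cond.push_back(Branch.getOperand(0));                // compared LHS register
  Cond.push_back(Branch.getOperand(1));                // compared RHS register
}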
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
A Module instance is used to store all the information related to an LLVM module.
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
void mulImm(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, uint32_t Amt, MachineInstr::MIFlag Flag) const
Generate code to multiply the value in DestReg by Amt - handles all the common optimizations for this idiom.
bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const override
RISCVInstrInfo(RISCVSubtarget &STI)
void copyPhysRegVector(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc, const TargetRegisterClass *RegClass) const
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const override
const MCInstrDesc & getBrCond(RISCVCC::CondCode CC, bool Imm=false) const
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const
unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const override
void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const override
const RISCVSubtarget & STI
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
std::optional< unsigned > getInverseOpcode(unsigned Opcode) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
virtual outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
MachineTraceStrategy getMachineCombinerTraceStrategy() const override
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MCInst getNop() const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
CombinerObjective getCombinerObjective(unsigned Pattern) const override
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
bool optimizeCondBranch(MachineInstr &MI) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const override
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-specific information for each MachineFunction.
int getBranchRelaxationScratchFrameIndex() const
bool hasStdExtCOrZca() const
unsigned getTailDupAggressiveThreshold() const
const RISCVRegisterInfo * getRegisterInfo() const override
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
void setRegUsed(Register Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Tell the scavenger a register is used.
Register scavengeRegisterBackwards(const TargetRegisterClass &RC, MachineBasicBlock::iterator To, bool RestoreAfter, int SPAdj, bool AllowSpill=true)
Make a register of the specific register class available from the current position backwards to the place before To.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
SlotIndex - An opaque wrapper around machine indexes.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all SmallPtrSet instances.
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
MI-level Statepoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given statepoint should emit.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when Inst has reassociable operands in the same MBB.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could replace the original code sequence.
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specific information in Flags.
virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const
The returned array encodes the operand index for each parameter because the operands may be commuted.
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target specify whether the instruction is actually trivially rematerializable, taking into consideration its operands.
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when Inst has a reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
const uint8_t TSFlags
Configurable target specific flags.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Target - Wrapper for Target specific information.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
CondCode getOppositeBranchCondition(CondCode)
unsigned getBrCond(CondCode CC, bool Imm=false)
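With that encoding, reversing a branch reduces to rewriting the condition-code immediate in Cond[0]; a minimal sketch assuming the RISC-V headers are in scope:

static bool reverseCond(llvm::SmallVectorImpl<llvm::MachineOperand> &Cond) {
  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
  Cond[0].setImm(RISCVCC::getOppositeBranchCondition(CC));
  return false; // TargetInstrInfo convention: false means the reversal succeeded
}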
static bool isValidRoundingMode(unsigned Mode)
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
static bool usesMaskPolicy(uint64_t TSFlags)
static bool hasRoundModeOp(uint64_t TSFlags)
static unsigned getVLOpNum(const MCInstrDesc &Desc)
static bool hasVLOp(uint64_t TSFlags)
static int getFRMOpNum(const MCInstrDesc &Desc)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool usesVXRM(uint64_t TSFlags)
static unsigned getTailExpandUseRegNo(const FeatureBitset &FeatureBits)
static bool isRVVWideningReduction(uint64_t TSFlags)
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
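movImm is driven by this helper: it computes the cheapest instruction sequence that materializes a constant, and the caller emits one MachineInstr per element. A cost-query sketch (assuming STI is the RISC-V subtarget, which derives from MCSubtargetInfo):

#include "MCTargetDesc/RISCVMatInt.h"
// Number of scalar instructions needed to put Val into a GPR on this subtarget.
static unsigned materializationCost(int64_t Val, const llvm::MCSubtargetInfo &STI) {
  return llvm::RISCVMatInt::generateInstSeq(Val, STI).size();
}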
@ OPERAND_UIMMLOG2XLEN_NONZERO
@ OPERAND_SIMM12_LSB00000
@ OPERAND_FIRST_RISCV_IMM
@ OPERAND_UIMM10_LSB00_NONZERO
@ OPERAND_SIMM10_LSB0000_NONZERO
static RISCVII::VLMUL getLMul(uint64_t TSFlags)
static unsigned getNF(uint64_t TSFlags)
static bool isTailAgnostic(unsigned VType)
static RISCVII::VLMUL getVLMUL(unsigned VType)
std::pair< unsigned, bool > decodeVLMUL(RISCVII::VLMUL VLMUL)
static bool isValidSEW(unsigned SEW)
void printVType(unsigned VType, raw_ostream &OS)
static unsigned getSEW(unsigned VType)
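These helpers unpack the VTYPE immediate carried by the vsetvli pseudos; a sketch of decoding it, mirroring the checks in isConvertibleToVMV_V_V (VType is assumed to be the vsetvli's vtype operand, with the llvm namespace open as in this file):

static void decodeVType(unsigned VType) {
  unsigned SEW = RISCVVType::getSEW(VType);
  auto [LMulVal, Fractional] =
      RISCVVType::decodeVLMUL(RISCVVType::getVLMUL(VType)); // e.g. m2 -> {2, false}
  bool TailAgnostic = RISCVVType::isTailAgnostic(VType);
  (void)SEW; (void)LMulVal; (void)Fractional; (void)TailAgnostic;
}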
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2)
bool isVLKnownLE(const MachineOperand &LHS, const MachineOperand &RHS)
Given two VL operands, do we know that LHS <= RHS?
std::optional< unsigned > getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW)
int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex)
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
unsigned getDestLog2EEW(const MCInstrDesc &Desc, unsigned Log2SEW)
bool isSEXT_W(const MachineInstr &MI)
bool isFaultFirstLoad(const MachineInstr &MI)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
bool isZEXT_B(const MachineInstr &MI)
bool isRVVSpill(const MachineInstr &MI)
static constexpr int64_t VLMaxSentinel
bool isZEXT_W(const MachineInstr &MI)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
@ TS_Local
Select the trace that contains only the current basic block.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
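Both range helpers keep pass code terse; a sketch that checks whether every operand after the first (typically the def) is a register (helper name is illustrative):

#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineInstr.h"
static bool usesAreAllRegs(const llvm::MachineInstr &MI) {
  return llvm::all_of(llvm::drop_begin(MI.operands()),
                      [](const llvm::MachineOperand &MO) { return MO.isReg(); });
}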
static const MachineMemOperand::Flags MONontemporalBit1
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
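For scalar GPRs, copyPhysReg lowers a COPY to addi rd, rs, 0; a sketch of the BuildMI call that emits it, assuming MBB, MBBI, DL, DstReg, SrcReg and KillSrc are in scope as in copyPhysReg and get() is this TargetInstrInfo's descriptor lookup:

// ADDI DstReg, SrcReg, 0 is the canonical RISC-V register-to-register move.
BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
    .addReg(SrcReg, getKillRegState(KillSrc))
    .addImm(0);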
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B,...
static const MachineMemOperand::Flags MONontemporalBit0
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value, returning the original object being addressed.
unsigned getDeadRegState(bool B)
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
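mulImm leans on these to strength-reduce multiplies; a minimal sketch of the power-of-two case only (helper name is illustrative; the real lowering also handles other shapes):

#include "llvm/Support/MathExtras.h"
#include <cstdint>
#include <optional>
static std::optional<unsigned> shiftAmountForMul(uint64_t Amt) {
  if (llvm::isPowerOf2_64(Amt))
    return llvm::Log2_64(Amt); // replace the multiply with SLLI by log2(Amt)
  return std::nullopt;
}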
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
CodeGenOptLevel
Code generation optimization level.
unsigned getKillRegState(bool B)
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
unsigned getRenamableRegState(bool B)
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
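Immediate-range checks throughout the file follow the same shape; a small worked example for the 12-bit signed immediates used by ADDI-class instructions:

#include "llvm/Support/MathExtras.h"
static_assert(llvm::SignExtend64<12>(0xFFF) == -1,
              "an all-ones 12-bit field sign-extends to -1");
static_assert(llvm::isIntN(12, 2047) && !llvm::isIntN(12, 2048),
              "simm12 covers [-2048, 2047]");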
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
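Spill code ties these pieces together when building the memory operand for a stack-slot access; a sketch of the store side, assuming MF and FrameIndex are in scope as in storeRegToStackSlot:

llvm::MachineFrameInfo &MFI = MF.getFrameInfo();
llvm::MachineMemOperand *MMO = MF.getMachineMemOperand(
    llvm::MachinePointerInfo::getFixedStack(MF, FrameIndex),
    llvm::MachineMemOperand::MOStore, MFI.getObjectSize(FrameIndex),
    MFI.getObjectAlign(FrameIndex));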
Used to describe a register and immediate addition.
An individual sequence of instructions to be replaced with a call to an outlined function.
MachineFunction * getMF() const
The information necessary to create an outlined function for some class of candidate.
unsigned FrameConstructionID
Target-defined identifier for constructing a frame for this function.