#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"
static cl::opt<bool> PreferWholeRegisterMove(
    "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
    cl::desc("Prefer whole register move for vector registers."));

static cl::opt<MachineTraceStrategy> ForceMachineCombinerStrategy(
    "riscv-force-machine-combiner-strategy", cl::Hidden,
    cl::desc("Force machine combiner to use a specific strategy for machine "
             "trace metrics evaluation."),
    cl::init(MachineTraceStrategy::TS_NumStrategies),
    cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local",
                          "Local strategy."),
               clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr",
                          "MinInstrCount strategy.")));
#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"

#define GET_RISCVMaskedPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
Register RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
  unsigned Dummy;
  return isLoadFromStackSlot(MI, FrameIndex, Dummy);
}

Register RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex,
                                             unsigned &MemBytes) const {
  switch (MI.getOpcode()) {
  // ... (load opcodes select MemBytes; any other opcode returns no register)
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
  // ...
}

Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {
  unsigned Dummy;
  return isStoreToStackSlot(MI, FrameIndex, Dummy);
}

Register RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex,
                                            unsigned &MemBytes) const {
  switch (MI.getOpcode()) {
  // ... (store opcodes select MemBytes)
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
  // ...
}
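// For example, a frame access of the form `LW $rd, %stack.N, 0` matches the
// check above: operand 1 is the frame index, operand 2 is the zero offset, so
// FrameIndex is set to N and $rd is returned as the register being filled.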
// ...
  if (MI.getOperand(1).isUndef() &&
      // ... (only rematerialize before vl/vtype implicit uses are added)
      !MI.hasRegisterImplicitUseOperand(RISCV::VTYPE))
    // ...

static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
}
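// Worked example: copying the 4-register tuple v4_v7 to v6_v9 one register at
// a time in forward order would overwrite v6 and v7 (as destinations) before
// they are read (as sources), so forwardCopyWillClobberTuple(6, 4, 4) returns
// true and the copy is emitted in reverse below.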
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
                                   const MachineBasicBlock &MBB,
                                   MachineBasicBlock::const_iterator MBBI,
                                   MachineBasicBlock::const_iterator &DefMBBI,
                                   RISCVII::VLMUL LMul) {
  // ...
  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");
  Register SrcReg = MBBI->getOperand(1).getReg();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;
  while (MBBI != MBB.begin()) {
    --MBBI;
    if (MBBI->isMetaInstruction())
      continue;

    if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
        MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
        MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
      if (!FoundDef) {
        if (!FirstVSetVLI) {
          FirstVSetVLI = true;
          unsigned FirstVType = MBBI->getOperand(2).getImm();
          // ... (the first vsetvli must have the same LMUL as the COPY's
          // register class)
          if (FirstLMul != LMul)
            return false;
        }
        // Only permit `vsetvli x0, x0, vtype` between the COPY and the
        // source-defining instruction.
        if (MBBI->getOperand(0).getReg() != RISCV::X0)
          return false;
        if (MBBI->getOperand(1).isImm())
          return false;
        if (MBBI->getOperand(1).getReg() != RISCV::X0)
          return false;
        continue;
      }

      unsigned VType = MBBI->getOperand(2).getImm();
      // ...
    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
      return false;
    } else if (MBBI->getNumDefs()) {
      // Check all instructions that may change VL, e.g. vleff's implicit def.
      if (MBBI->modifiesRegister(RISCV::VL, /*TRI=*/nullptr))
        return false;
      for (const MachineOperand &MO : MBBI->operands()) {
        if (!MO.isReg() || !MO.isDef())
          continue;
        if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {
          // ...
        }
        // ...
        if (MO.getReg() != SrcReg)
          return false;
        // ...
      }
    }
  }
  return false;
}
  uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
  uint16_t DstEncoding = TRI->getEncodingValue(DstReg);
  auto [LMulVal, Fractional] = RISCVVType::decodeVLMUL(LMul);
  assert(!Fractional && "It is impossible be fractional lmul here.");
  unsigned NumRegs = NF * LMulVal;
  bool ReversedCopy =
      forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NumRegs);
  if (ReversedCopy) {
    // If src and dest overlap when copied forwards, copy backwards instead.
    SrcEncoding += NumRegs - 1;
    DstEncoding += NumRegs - 1;
  }

  unsigned I = 0;
  auto GetCopyInfo = [&](uint16_t SrcEncoding, uint16_t DstEncoding)
      -> std::tuple<RISCVII::VLMUL, const TargetRegisterClass &, unsigned,
                    unsigned, unsigned> {
    if (ReversedCopy) {
      // For reversed copying, if there are enough aligned registers (8/4/2),
      // do a larger (LMUL8/4/2) copy. The encoding difference must be at
      // least as large as the copied LMUL to avoid clobbering.
      uint16_t Diff = DstEncoding - SrcEncoding;
      if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
          DstEncoding % 8 == 7)
        return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
                RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
      if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
          DstEncoding % 4 == 3)
        return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
                RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
      if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
          DstEncoding % 2 == 1)
        return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
                RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
      // Otherwise fall back to LMUL1 copying.
      return {RISCVII::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
              RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
    }

    // For forward copying, encodings aligned to 8/4/2 allow a LMUL8/4/2 copy.
    if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
      return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
    if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
      return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
    if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
      return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
              RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
    // Otherwise fall back to LMUL1 copying.
    return {RISCVII::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
  };

  auto FindRegWithEncoding = [TRI](const TargetRegisterClass &RegClass,
                                   uint16_t Encoding) {
    MCRegister Reg = RISCV::V0 + Encoding;
    if (&RegClass == &RISCV::VRRegClass)
      return Reg;
    return TRI->getMatchingSuperReg(Reg, RISCV::sub_vrm1_0, &RegClass);
  };

  while (I != NumRegs) {
    auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
        GetCopyInfo(SrcEncoding, DstEncoding);
    // ...
    if (LMul == LMulCopied &&
        isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
      Opc = VVOpc;
      if (DefMBBI->getOpcode() == VIOpc)
        Opc = VIOpc;
    }

    // Emit the actual copy; for reversed copies, copy backwards.
    MCRegister ActualSrcReg = FindRegWithEncoding(
        RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
    MCRegister ActualDstReg = FindRegWithEncoding(
        RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
    // ...
    if (UseVMV_V_I)
      MIB = MIB.add(DefMBBI->getOperand(2));
    // ...
    SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
    DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
    // ...
  }
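// Illustrative sketch (register numbers chosen for the example): copying v8m8
// to v16m8, where both encodings are 8-aligned, lets GetCopyInfo pick a single
// whole-register move,
//   vmv8r.v v16, v8
// whereas unaligned encodings decompose into LMUL4/2/1 pieces, one per
// iteration of the loop above.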
  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    // ... (emit ADDI DstReg, SrcReg, 0)
    return;
  }

  if (RISCV::GPRPairRegClass.contains(DstReg, SrcReg)) {
    // Emit an ADDI for both halves of the pair.
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
            TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
        .addReg(TRI->getSubReg(SrcReg, RISCV::sub_gpr_even),
                getKillRegState(KillSrc))
        .addImm(0);
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI),
            TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
        .addReg(TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd),
                getKillRegState(KillSrc))
        .addImm(0);
    return;
  }

  // Handle copy from a vector CSR into a GPR.
  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
        .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)
        .addReg(RISCV::X0);
    return;
  }

  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    unsigned Opc;
    if (STI.hasStdExtZfh()) {
      Opc = RISCV::FSGNJ_H;
    } else {
      assert(STI.hasStdExtF() &&
             (STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
             "Unexpected extensions");
      // Zfhmin/Zfbfmin don't have FSGNJ_H; use FSGNJ_S on the super-regs.
      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;
    }
    // ... (emit Opc DstReg, SrcReg, SrcReg)
    return;
  }

  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
    // ... (emit FSGNJ_S)
  }
  if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
    // ... (emit FSGNJ_D)
  }
  if (RISCV::FPR32RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    // ... (emit FMV from GPR to FPR32)
  }
  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR32RegClass.contains(SrcReg)) {
    // ... (emit FMV from FPR32 to GPR)
  }
  if (RISCV::FPR64RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    // ... (emit FMV from GPR to FPR64)
  }
  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR64RegClass.contains(SrcReg)) {
    // ... (emit FMV from FPR64 to GPR)
  }

  static const TargetRegisterClass *RVVRegClasses[] = {
      &RISCV::VRRegClass,     &RISCV::VRM2RegClass,   &RISCV::VRM4RegClass,
      &RISCV::VRM8RegClass,   &RISCV::VRN2M1RegClass, &RISCV::VRN2M2RegClass,
      &RISCV::VRN2M4RegClass, &RISCV::VRN3M1RegClass, &RISCV::VRN3M2RegClass,
      &RISCV::VRN4M1RegClass, &RISCV::VRN4M2RegClass, &RISCV::VRN5M1RegClass,
      &RISCV::VRN6M1RegClass, &RISCV::VRN7M1RegClass, &RISCV::VRN8M1RegClass};
  for (const auto &RegClass : RVVRegClasses) {
    if (RegClass->contains(DstReg, SrcReg)) {
      copyPhysRegVector(MBB, MBBI, DL, DstReg, SrcReg, KillSrc, RegClass);
      return;
    }
  }
void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI,
                                         Register VReg) const {
  // ...
  unsigned Opcode;
  bool IsScalableVector = true;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::SW : RISCV::SD;
    IsScalableVector = false;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxSD;
    IsScalableVector = false;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSH;
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSW;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSD;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS1R_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS2R_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS4R_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS8R_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
  // ...
  if (IsScalableVector) {
    // ... (build the scalable-vector spill with a scalable MachineMemOperand)
  }
  // ...
}
  unsigned Opcode;
  bool IsScalableVector = true;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::LW : RISCV::LD;
    IsScalableVector = false;
  } else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxLD;
    IsScalableVector = false;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLH;
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLW;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLD;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL1RE8_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL2RE8_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL4RE8_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL8RE8_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
  // ...
  if (IsScalableVector) {
    // ... (build the scalable-vector reload with a scalable MachineMemOperand)
  }
  // ...
  // Fold a load from a stack slot into a zero/sign extension of operand 1.
  if (Ops.size() != 1 || Ops[0] != 1)
    return nullptr;

  unsigned LoadOpc;
  switch (MI.getOpcode()) {
  default:
    if (RISCV::isSEXT_W(MI)) {
      LoadOpc = RISCV::LW;
      break;
    }
    if (RISCV::isZEXT_W(MI)) {
      LoadOpc = RISCV::LWU;
      break;
    }
    if (RISCV::isZEXT_B(MI)) {
      LoadOpc = RISCV::LBU;
      break;
    }
    return nullptr;
  // ...
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
    LoadOpc = RISCV::LHU;
    break;
  }
  // ...
  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(LoadOpc),
                 DstReg)
      .addFrameIndex(FrameIndex)
      .addImm(0);
void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register DstReg, uint64_t Val,
                            MachineInstr::MIFlag Flag, bool DstRenamable,
                            bool DstIsDead) const {
  Register SrcReg = RISCV::X0;

  // For RV32, only 32-bit values (signed or unsigned) can be materialized.
  if (!STI.is64Bit() && !isInt<32>(Val)) {
    if (!isUInt<32>(Val))
      report_fatal_error("Should only materialize 32-bit constants for RV32");
    // Sign extend for RISCVMatInt::generateInstSeq.
    Val = SignExtend64<32>(Val);
  }

  RISCVMatInt::InstSeq Seq = RISCVMatInt::generateInstSeq(Val, STI);
  assert(!Seq.empty());

  bool SrcRenamable = false;
  unsigned Num = 0;
  for (const RISCVMatInt::Inst &Inst : Seq) {
    bool LastItem = ++Num == Seq.size();
    // ...
    switch (Inst.getOpndKind()) {
    // ...
        .addReg(SrcReg, SrcRegState)
    // ...
        .addReg(SrcReg, SrcRegState)
        .addReg(SrcReg, SrcRegState)
    // ...
        .addReg(SrcReg, SrcRegState)
    // ...
    }
    // Only the first instruction has X0 as its source.
    SrcReg = DstReg;
    SrcRenamable = DstRenamable;
  }
}
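// Example expansion (assuming no extra extensions): movImm with
// Val = 0x12345678 yields the RISCVMatInt sequence
//   lui  rd, 0x12345
//   addi rd, rd, 0x678
// and only the final instruction of the sequence carries the dead-def state
// for DstReg, per the LastItem handling above.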
// ...
  case RISCV::CV_BEQIMM:
    return RISCVCC::COND_EQ;
  case RISCV::CV_BNEIMM:
    return RISCVCC::COND_NE;
// ...
  assert(LastInst.getDesc().isConditionalBranch() &&
         "Unknown conditional branch");
// ...
  case RISCVCC::COND_EQ:
    return Imm ? RISCV::CV_BEQIMM : RISCV::BEQ;
  case RISCVCC::COND_NE:
    return Imm ? RISCV::CV_BNEIMM : RISCV::BNE;
// ...
bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  // ...
  // If the block has no terminators, it just falls through.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;

  // Count the terminators and find the first unconditional or indirect
  // branch.
  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    NumTerminators++;
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      FirstUncondOrIndirectBr = J.getReverse();
    }
  }

  // If AllowModify is true, erase any terminators after the first
  // unconditional or indirect branch.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      NumTerminators--;
    }
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
    return true;

  // We can't handle generic branch opcodes from GlobalISel.
  if (I->isPreISelOpcode())
    return true;

  // We can't handle blocks with more than 2 terminators.
  if (NumTerminators > 2)
    return true;

  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    TBB = getBranchDestBlock(*I);
    return false;
  }

  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    parseCondBranch(*I, TBB, Cond);
    return false;
  }

  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
    parseCondBranch(*std::prev(I), TBB, Cond);
    FBB = getBranchDestBlock(*I);
    return false;
  }

  // Otherwise, we can't handle this.
  return true;
}
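// On success with a conditional branch, Cond holds three operands in the
// order pushed by parseCondBranch: the condition-code immediate followed by
// the two compared registers. This is the same encoding that
// reverseBranchCondition and insertBranch below expect ("RISC-V branch
// conditions have two components").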
unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  // ...
  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    return 0;

  // Remove the branch.
  I->eraseFromParent();
  // ...
  if (!I->getDesc().isConditionalBranch())
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "RISC-V branch conditions have two components!");
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(RestoreBB.empty() &&
         "restore block should be inserted for restoring clobbered registers");
  // ...
  if (!isInt<32>(BrOffset))
    report_fatal_error(
        "Branch offsets outside of the signed 32-bit range not supported");
  // ...
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
  // ...
  if (TmpGPR != RISCV::NoRegister)
    // ... (use the scavenged register directly)
  else {
    // The case when there is no scavenged register needs special handling.
    TmpGPR = RISCV::X27;
    // ...
    if (FrameIndex == -1)
      report_fatal_error("underestimated function size");
    // ...
    TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
                             /*SPAdj=*/0, /*FIOperandNum=*/1);
    MI.getOperand(1).setMBB(&RestoreBB);
    // ...
    TRI->eliminateFrameIndex(RestoreBB.back(),
                             /*SPAdj=*/0, /*FIOperandNum=*/1);
  }

  MRI.replaceRegWith(ScratchReg, TmpGPR);
  MRI.clearVirtRegs();
}

bool RISCVInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert((Cond.size() == 3) && "Invalid branch condition!");
  // ...
}
  auto isLoadImm = [](const MachineInstr *MI, int64_t &Imm) -> bool {
    if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
        MI->getOperand(1).getReg() == RISCV::X0) {
      Imm = MI->getOperand(2).getImm();
      return true;
    }
    return false;
  };
  // Either a load-immediate instruction or X0.
  auto isFromLoadImm = [&](const MachineOperand &Op, int64_t &Imm) -> bool {
    // ...
    return Reg.isVirtual() && isLoadImm(MRI.getVRegDef(Reg), Imm);
  };
  // ...
  auto searchConst = [&](int64_t C1) -> Register {
    // ...
    auto DefC1 = std::find_if(++II, E, [&](const MachineInstr &I) -> bool {
      int64_t Imm;
      return isLoadImm(&I, Imm) && Imm == C1 &&
             I.getOperand(0).getReg().isVirtual();
    });
    if (DefC1 != E)
      return DefC1->getOperand(0).getReg();
    return Register();
  };

  bool Modify = false;
  int64_t C0;
  if (isFromLoadImm(LHS, C0) && MRI.hasOneUse(LHS.getReg())) {
    // ...
    if (Register RegZ = searchConst(C0 + 1)) {
      // ...
      // We might extend the live range of Z; clear its kill flag to account
      // for this.
      MRI.clearKillFlags(RegZ);
      Modify = true;
    }
  } else if (isFromLoadImm(RHS, C0) && MRI.hasOneUse(RHS.getReg())) {
    // ...
    if (Register RegZ = searchConst(C0 - 1)) {
      // ...
      MRI.clearKillFlags(RegZ);
      Modify = true;
    }
  }
  // ...
  MI.eraseFromParent();
  return true;
}
MachineBasicBlock *
RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  // The branch target is always the last operand.
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
}

bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  switch (BranchOp) {
  default:
    llvm_unreachable("Unexpected opcode!");
  // ... (BEQ/BNE/BLT/BGE/BLTU/BGEU also use the 13-bit range)
  case RISCV::CV_BEQIMM:
  case RISCV::CV_BNEIMM:
    return isIntN(13, BrOffset);
  case RISCV::JAL:
  case RISCV::PseudoBR:
    return isIntN(21, BrOffset);
  case RISCV::PseudoJump:
    // ...
  }
}
unsigned getPredicatedOpcode(unsigned Opcode) {
  switch (Opcode) {
  case RISCV::ADD:   return RISCV::PseudoCCADD;
  case RISCV::SUB:   return RISCV::PseudoCCSUB;
  case RISCV::SLL:   return RISCV::PseudoCCSLL;
  case RISCV::SRL:   return RISCV::PseudoCCSRL;
  case RISCV::SRA:   return RISCV::PseudoCCSRA;
  case RISCV::AND:   return RISCV::PseudoCCAND;
  case RISCV::OR:    return RISCV::PseudoCCOR;
  case RISCV::XOR:   return RISCV::PseudoCCXOR;

  case RISCV::ADDI:  return RISCV::PseudoCCADDI;
  case RISCV::SLLI:  return RISCV::PseudoCCSLLI;
  case RISCV::SRLI:  return RISCV::PseudoCCSRLI;
  case RISCV::SRAI:  return RISCV::PseudoCCSRAI;
  case RISCV::ANDI:  return RISCV::PseudoCCANDI;
  case RISCV::ORI:   return RISCV::PseudoCCORI;
  case RISCV::XORI:  return RISCV::PseudoCCXORI;

  case RISCV::ADDW:  return RISCV::PseudoCCADDW;
  case RISCV::SUBW:  return RISCV::PseudoCCSUBW;
  case RISCV::SLLW:  return RISCV::PseudoCCSLLW;
  case RISCV::SRLW:  return RISCV::PseudoCCSRLW;
  case RISCV::SRAW:  return RISCV::PseudoCCSRAW;

  case RISCV::ADDIW: return RISCV::PseudoCCADDIW;
  case RISCV::SLLIW: return RISCV::PseudoCCSLLIW;
  case RISCV::SRLIW: return RISCV::PseudoCCSRLIW;
  case RISCV::SRAIW: return RISCV::PseudoCCSRAIW;

  case RISCV::ANDN:  return RISCV::PseudoCCANDN;
  case RISCV::ORN:   return RISCV::PseudoCCORN;
  case RISCV::XNOR:  return RISCV::PseudoCCXNOR;
  }

  return RISCV::INSTRUCTION_LIST_END;
}
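// These PseudoCC* opcodes back the short-forward-branch optimization: a
// predicated form such as PseudoCCADD carries the branch condition plus the
// ADD operands in a single pseudo, which is later expanded into a conditional
// branch over one ADD. optimizeSelect below rewrites a foldable input of a
// PseudoCCMOVGPR into the matching PseudoCC* opcode.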
  if (!Reg.isVirtual())
    return nullptr;
  if (!MRI.hasOneNonDBGUse(Reg))
    return nullptr;
  MachineInstr *MI = MRI.getVRegDef(Reg);
  // ...
  // Don't predicate the load-immediate idiom.
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0)
    return nullptr;
  // Check that MI has no other defs or unsafe operands.
  for (const MachineOperand &MO : llvm::drop_begin(MI->operands())) {
    // Reject frame-index operands; PEI can't handle the predicated pseudos.
    if (MO.isFI() || MO.isCPI() || MO.isJTI())
      return nullptr;
    // ...
    // Allow constant physregs only.
    if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))
      return nullptr;
  }
  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(DontMoveAcrossStores))
    return nullptr;
  return MI;
}
bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   unsigned &TrueOp, unsigned &FalseOp,
                                   bool &Optimizable) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  // ...
  Cond.push_back(MI.getOperand(1));
  Cond.push_back(MI.getOperand(2));
  Cond.push_back(MI.getOperand(3));
  // We can only fold when we support short forward branch opt.
  Optimizable = STI.hasShortForwardBranchOpt();
  return false;
}

MachineInstr *
RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
                               SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                               bool PreferFalse) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  if (!STI.hasShortForwardBranchOpt())
    return nullptr;
  // ...
  bool Invert = !DefMI;
  // ...
  Register DestReg = MI.getOperand(0).getReg();
  // ...
  if (!MRI.constrainRegClass(DestReg, PreviousClass))
    return nullptr;

  unsigned PredOpc = getPredicatedOpcode(DefMI->getOpcode());
  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");
  // ...
  // Copy the condition portion.
  NewMI.add(MI.getOperand(1));
  NewMI.add(MI.getOperand(2));
  // ...
  // Copy the false register.
  NewMI.add(FalseReg);
  // ...
}
unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  if (MI.isMetaInstruction())
    return 0;

  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::INLINEASM ||
      Opcode == TargetOpcode::INLINEASM_BR) {
    const MachineFunction &MF = *MI.getParent()->getParent();
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                              *MF.getTarget().getMCAsmInfo());
  }

  if (!MI.memoperands_empty()) {
    // ... (non-temporal accesses are prefixed with an ntl.all hint)
      if (isCompressibleInst(MI, STI))
        return 4; // c.ntl.all + c.load/c.store
    // ...
  }

  if (Opcode == TargetOpcode::BUNDLE)
    return getInstBundleLength(MI);

  if (MI.getParent() && MI.getParent()->getParent()) {
    if (isCompressibleInst(MI, STI))
      return 2;
  }

  switch (Opcode) {
  case TargetOpcode::STACKMAP:
    // The upper bound for a stackmap intrinsic is the full length of its
    // shadow.
    return StackMapOpers(&MI).getNumPatchBytes();
  case TargetOpcode::PATCHPOINT:
    // The size of the patchpoint intrinsic is the number of bytes requested.
    return PatchPointOpers(&MI).getNumPatchBytes();
  case TargetOpcode::STATEPOINT: {
    unsigned NumBytes = StatepointOpers(&MI).getNumPatchBytes();
    // No patch bytes means at most a PseudoCall is emitted.
    return std::max(NumBytes, 8U);
  }
  default:
    return get(Opcode).getSize();
  }
}
unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  MachineBasicBlock::const_instr_iterator I = MI.getIterator();
  MachineBasicBlock::const_instr_iterator E = MI.getParent()->instr_end();
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}
  const unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
  // ... (ADDI/ORI/XORI with X0 source or zero immediate)
    return (MI.getOperand(1).isReg() &&
            MI.getOperand(1).getReg() == RISCV::X0) ||
           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
  }
  return MI.isAsCheapAsAMove();
std::optional<DestSourcePair>
RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  // ...
  switch (MI.getOpcode()) {
  default:
    break;
  case RISCV::ADDI:
    // Operand 1 can be a frame index, but callers expect registers.
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0)
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  }
  return std::nullopt;
}
  // Pick the trace strategy: Local for in-order cores, MinInstrCount when no
  // scheduling model is given or the core is out-of-order.
  const auto &SchedModel = STI.getSchedModel();
  return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
             ? MachineTraceStrategy::TS_MinInstrCount
             : MachineTraceStrategy::TS_Local;
  // ...
  assert(all_of(InsInstrs,
                [](MachineInstr *MI) {
                  return RISCV::getNamedOperandIdx(MI->getOpcode(),
                                                   RISCV::OpName::frm) < 0;
                }) &&
         "New instructions require FRM whereas the old one does not have it");
  // ...
  for (auto *NewMI : InsInstrs) {
    // Skip if we already added the FRM operand.
    if (static_cast<unsigned>(RISCV::getNamedOperandIdx(
            NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
      continue;
    // ...
  }
bool RISCVInstrInfo::isVectorAssociativeAndCommutative(const MachineInstr &Inst,
                                                       bool Invert) const {
#define OPCODE_LMUL_CASE(OPC)                                                  \
  case RISCV::OPC##_M1:                                                        \
  case RISCV::OPC##_M2:                                                        \
  case RISCV::OPC##_M4:                                                        \
  case RISCV::OPC##_M8:                                                        \
  case RISCV::OPC##_MF2:                                                       \
  case RISCV::OPC##_MF4:                                                       \
  case RISCV::OPC##_MF8

#define OPCODE_LMUL_MASK_CASE(OPC)                                             \
  case RISCV::OPC##_M1_MASK:                                                   \
  case RISCV::OPC##_M2_MASK:                                                   \
  case RISCV::OPC##_M4_MASK:                                                   \
  case RISCV::OPC##_M8_MASK:                                                   \
  case RISCV::OPC##_MF2_MASK:                                                  \
  case RISCV::OPC##_MF4_MASK:                                                  \
  case RISCV::OPC##_MF8_MASK

  unsigned Opcode = Inst.getOpcode();
  if (Invert) {
    auto InvOpcode = getInverseOpcode(Opcode);
    if (!InvOpcode)
      return false;
    Opcode = *InvOpcode;
  }
  // ...

#undef OPCODE_LMUL_MASK_CASE
#undef OPCODE_LMUL_CASE
}
bool RISCVInstrInfo::areRVVInstsReassociable(const MachineInstr &Root,
                                             const MachineInstr &Prev) const {
  // ...
  auto checkImmOperand = [&](unsigned OpIdx) {
    return Root.getOperand(OpIdx).getImm() == Prev.getOperand(OpIdx).getImm();
  };
  auto checkRegOperand = [&](unsigned OpIdx) {
    return Root.getOperand(OpIdx).getReg() == Prev.getOperand(OpIdx).getReg();
  };
  // ...
  if (!checkRegOperand(1))
    return false;
  // ...
  bool SeenMI2 = false;
  // ...
  if (It->modifiesRegister(RISCV::V0, TRI)) {
    Register SrcReg = It->getOperand(1).getReg();
    // ...
    if (MI1VReg != SrcReg)
      return false;
    // ...
  }
  // ...
  assert(SeenMI2 && "Prev is expected to appear before Root");
  return true;
}
bool RISCVInstrInfo::hasReassociableVectorSibling(const MachineInstr &Inst,
                                                  bool &Commuted) const {
  // ...
  assert(RISCVII::isFirstDefTiedToFirstUse(get(Inst.getOpcode())) &&
         "Expect the present of passthrough operand.");
  // ...
  // If only one operand has the same or inverse opcode and it's the second
  // source operand, the operands must be commuted.
  Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
             areRVVInstsReassociable(Inst, *MI2);
  if (Commuted)
    std::swap(MI1, MI2);

  return areRVVInstsReassociable(Inst, *MI1) &&
         (isVectorAssociativeAndCommutative(*MI1) ||
          isVectorAssociativeAndCommutative(*MI1, /*Invert=*/true)) &&
         // ... (MI1 must also have reassociable operands and a single use)
}

bool RISCVInstrInfo::hasReassociableOperands(
    const MachineInstr &Inst, const MachineBasicBlock *MBB) const {
  if (!isVectorAssociativeAndCommutative(Inst) &&
      !isVectorAssociativeAndCommutative(Inst, /*Invert=*/true))
    return TargetInstrInfo::hasReassociableOperands(Inst, MBB);
  // ...
  MI1 = MRI.getUniqueVRegDef(Op1.getReg());
  // ...
  MI2 = MRI.getUniqueVRegDef(Op2.getReg());
  // ...
}

// ...
  for (unsigned I = 0; I < 5; ++I)
    // ... (shift each reassociation operand index past the passthru operand)
// ...

bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                            bool &Commuted) const {
  if (isVectorAssociativeAndCommutative(Inst) ||
      isVectorAssociativeAndCommutative(Inst, /*Invert=*/true))
    return hasReassociableVectorSibling(Inst, Commuted);
  // ...
  unsigned OperandIdx = Commuted ? 2 : 1;
  // ...
  int16_t InstFrmOpIdx =
      RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
  int16_t SiblingFrmOpIdx =
      RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);

  return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
         RISCV::hasEqualFRM(Inst, Sibling);
}

bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
                                                 bool Invert) const {
  if (isVectorAssociativeAndCommutative(Inst, Invert))
    return true;
  // ...
    Opc = *InverseOpcode;
  // ...
}
std::optional<unsigned>
RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
#define RVV_OPC_LMUL_CASE(OPC, INV)                                            \
  case RISCV::OPC##_M1:                                                        \
    return RISCV::INV##_M1;                                                    \
  case RISCV::OPC##_M2:                                                        \
    return RISCV::INV##_M2;                                                    \
  case RISCV::OPC##_M4:                                                        \
    return RISCV::INV##_M4;                                                    \
  case RISCV::OPC##_M8:                                                        \
    return RISCV::INV##_M8;                                                    \
  case RISCV::OPC##_MF2:                                                       \
    return RISCV::INV##_MF2;                                                   \
  case RISCV::OPC##_MF4:                                                       \
    return RISCV::INV##_MF4;                                                   \
  case RISCV::OPC##_MF8:                                                       \
    return RISCV::INV##_MF8

#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)                                       \
  case RISCV::OPC##_M1_MASK:                                                   \
    return RISCV::INV##_M1_MASK;                                               \
  case RISCV::OPC##_M2_MASK:                                                   \
    return RISCV::INV##_M2_MASK;                                               \
  case RISCV::OPC##_M4_MASK:                                                   \
    return RISCV::INV##_M4_MASK;                                               \
  case RISCV::OPC##_M8_MASK:                                                   \
    return RISCV::INV##_M8_MASK;                                               \
  case RISCV::OPC##_MF2_MASK:                                                  \
    return RISCV::INV##_MF2_MASK;                                              \
  case RISCV::OPC##_MF4_MASK:                                                  \
    return RISCV::INV##_MF4_MASK;                                              \
  case RISCV::OPC##_MF8_MASK:                                                  \
    return RISCV::INV##_MF8_MASK

  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::FADD_H:
    return RISCV::FSUB_H;
  case RISCV::FADD_S:
    return RISCV::FSUB_S;
  case RISCV::FADD_D:
    return RISCV::FSUB_D;
  case RISCV::FSUB_H:
    return RISCV::FADD_H;
  case RISCV::FSUB_S:
    return RISCV::FADD_S;
  case RISCV::FSUB_D:
    return RISCV::FADD_D;
  // ... (ADD/SUB and the RVV cases via the macros above)
  }

#undef RVV_OPC_LMUL_MASK_CASE
#undef RVV_OPC_LMUL_CASE
}
static bool canCombineFPFusedMultiply(const MachineInstr &Root,
                                      const MachineOperand &MO,
                                      bool DoRegPressureReduce) {
  // ...
  // Don't fuse when the multiply result has more than one use under
  // register-pressure reduction.
  if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;
  // ...
}

static bool getFPFusedMultiplyPatterns(MachineInstr &Root,
                                       SmallVectorImpl<unsigned> &Patterns,
                                       bool DoRegPressureReduce) {
  unsigned Opc = Root.getOpcode();
  bool IsFAdd = isFADD(Opc);
  if (!IsFAdd && !isFSUB(Opc))
    return false;
  bool Added = false;
  if (canCombineFPFusedMultiply(Root, Root.getOperand(1),
                                DoRegPressureReduce)) {
    // ... (record the pattern for a multiply feeding operand 1)
    Added = true;
  }
  if (canCombineFPFusedMultiply(Root, Root.getOperand(2),
                                DoRegPressureReduce)) {
    // ... (record the pattern for a multiply feeding operand 2)
    Added = true;
  }
  return Added;
}

static bool getFPPatterns(MachineInstr &Root,
                          SmallVectorImpl<unsigned> &Patterns,
                          bool DoRegPressureReduce) {
  return getFPFusedMultiplyPatterns(Root, Patterns, DoRegPressureReduce);
}

/// Utility routine that checks if \p MO is defined by an instruction with
/// opcode \p CombineOpc in block \p MBB.
static bool canCombine(const MachineBasicBlock &MBB, MachineOperand &MO,
                       unsigned CombineOpc) {
  // ...
  // It needs to be in the trace (otherwise it won't have a depth).
  if (!MI || MI->getParent() != &MBB || MI->getOpcode() != CombineOpc)
    return false;
  // It must only be used by the instruction we combine with.
  if (!MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;
  return true;
}

static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB,
                                      const MachineOperand &MO,
                                      unsigned OuterShiftAmt) {
  // ...
  if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
    return false;
  // ...
}

bool RISCVInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<unsigned> &Patterns,
    bool DoRegPressureReduce) const {
  // ...
  if (getFPPatterns(Root, Patterns, DoRegPressureReduce))
    return true;
  // ...
}
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern) {
  switch (RootOpc) {
  // ...
  case RISCV::FADD_H:
    return RISCV::FMADD_H;
  case RISCV::FADD_S:
    return RISCV::FMADD_S;
  case RISCV::FADD_D:
    return RISCV::FMADD_D;
  // ...
  }
}

static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev,
                                   unsigned Pattern,
                                   SmallVectorImpl<MachineInstr *> &InsInstrs,
                                   SmallVectorImpl<MachineInstr *> &DelInstrs) {
  // ...
  bool Mul1IsKill = Mul1.isKill();
  bool Mul2IsKill = Mul2.isKill();
  bool AddendIsKill = Addend.isKill();
  // ...
  BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
      // ... (multiplicands and addend with their kill states)
}

static void genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx,
                              SmallVectorImpl<MachineInstr *> &InsInstrs,
                              SmallVectorImpl<MachineInstr *> &DelInstrs,
                              DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) {
  // ...
  assert(OuterShiftAmt != 0 && "Unexpected opcode");
  // ...
  assert(InnerShiftAmt >= OuterShiftAmt && "Unexpected shift amount");

  unsigned InnerOpc;
  switch (InnerShiftAmt - OuterShiftAmt) {
  default:
    llvm_unreachable("Unexpected shift amount");
  case 0:
    InnerOpc = RISCV::ADD;
    break;
  case 1:
    InnerOpc = RISCV::SH1ADD;
    break;
  case 2:
    InnerOpc = RISCV::SH2ADD;
    break;
  case 3:
    InnerOpc = RISCV::SH3ADD;
    break;
  }
  // ...
  Register NewVR = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // ...
  InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
}

// ...
    genShXAddAddShift(Root, /*AddOpIdx=*/..., InsInstrs,
                      DelInstrs, InstrIdxForVirtReg);
// ...
    unsigned OpType = Operand.OperandType;
    // ...
      int64_t Imm = MO.getImm();
      bool Ok;
      switch (OpType) {
      // ...
#define CASE_OPERAND_UIMM(NUM)                                                 \
  case RISCVOp::OPERAND_UIMM##NUM:                                             \
    Ok = isUInt<NUM>(Imm);                                                     \
    break;
      // ...
        Ok = isShiftedUInt<1, 1>(Imm);
        break;
      // ...
        Ok = isShiftedUInt<4, 1>(Imm);
        break;
      // ...
        Ok = isShiftedUInt<5, 1>(Imm);
        break;
      // ...
        Ok = isShiftedUInt<5, 2>(Imm);
        break;
      // ...
        Ok = isShiftedUInt<6, 2>(Imm);
        break;
      // ...
        Ok = isShiftedUInt<5, 3>(Imm);
        break;
      // ...
        Ok = isUInt<8>(Imm) && Imm >= 32;
        break;
      // ...
        Ok = isShiftedUInt<6, 3>(Imm);
        break;
      // ...
        Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
        break;
      // ...
        Ok = isShiftedUInt<8, 2>(Imm) && (Imm != 0);
        break;
      // ...
        Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
        break;
      // ...
        Ok = Imm != 0 && isInt<6>(Imm);
        break;
      // ...
        Ok = isUInt<10>(Imm);
        break;
      // ...
        Ok = isUInt<11>(Imm);
        break;
      // ...
        Ok = isInt<12>(Imm);
        break;
      // ...
        Ok = isShiftedInt<7, 5>(Imm);
        break;
      // ...
        Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
        break;
      // ...
        Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
        Ok = Ok && Imm != 0;
        break;
      // ...
        Ok = (isUInt<5>(Imm) && Imm != 0) ||
             (Imm >= 0xfffe0 && Imm <= 0xfffff);
        break;
      // ...
        Ok = Imm >= 0 && Imm <= 10;
        break;
      // ...
        Ok = Imm >= 0 && Imm <= 7;
        break;
      // ...
        Ok = Imm >= 1 && Imm <= 10;
        break;
      // ...
        Ok = Imm >= 2 && Imm <= 14;
        break;
      // ...
        Ok = (Imm & 0xf) == 0;
        break;
      }
      if (!Ok) {
        ErrInfo = "Invalid immediate";
        return false;
      }
    // ...

  const uint64_t TSFlags = Desc.TSFlags;
  if (RISCVII::hasVLOp(TSFlags)) {
    const MachineOperand &Op = MI.getOperand(RISCVII::getVLOpNum(Desc));
    if (!Op.isImm() && !Op.isReg()) {
      ErrInfo = "Invalid operand type for VL operand";
      return false;
    }
    if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
      const MachineRegisterInfo &MRI =
          MI.getParent()->getParent()->getRegInfo();
      auto *RC = MRI.getRegClass(Op.getReg());
      if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
        ErrInfo = "Invalid register class for VL operand";
        return false;
      }
    }
    if (!RISCVII::hasSEWOp(TSFlags)) {
      ErrInfo = "VL operand w/o SEW operand?";
      return false;
    }
  }
  if (RISCVII::hasSEWOp(TSFlags)) {
    unsigned OpIdx = RISCVII::getSEWOpNum(Desc);
    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "SEW value expected to be an immediate";
      return false;
    }
    uint64_t Log2SEW = MI.getOperand(OpIdx).getImm();
    if (Log2SEW > 31) {
      ErrInfo = "Unexpected SEW value";
      return false;
    }
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
    if (!RISCVVType::isValidSEW(SEW)) {
      ErrInfo = "Unexpected SEW value";
      return false;
    }
  }
  if (RISCVII::hasVecPolicyOp(TSFlags)) {
    unsigned OpIdx = RISCVII::getVecPolicyOpNum(Desc);
    if (!MI.getOperand(OpIdx).isImm()) {
      ErrInfo = "Policy operand expected to be an immediate";
      return false;
    }
    uint64_t Policy = MI.getOperand(OpIdx).getImm();
    if (Policy > (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) {
      ErrInfo = "Invalid Policy Value";
      return false;
    }
    if (!RISCVII::hasVLOp(TSFlags)) {
      ErrInfo = "policy operand w/o VL operand?";
      return false;
    }

    // ...
    unsigned UseOpIdx;
    if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
      ErrInfo = "policy operand w/o tied operand?";
      return false;
    }
  }
  // Only fold when the combined displacement remains a legal 12-bit offset.
  int64_t NewOffset = OldOffset + Disp;
  if (!STI.is64Bit())
    NewOffset = SignExtend64<32>(NewOffset);

  if (!isInt<12>(NewOffset))
    return false;
  // ...
  llvm_unreachable("Addressing mode not supported for folding");
  // ...
  OffsetIsScalable = false;
  // ...

static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  ArrayRef<const MachineOperand *> BaseOps1,
                                  const MachineInstr &MI2,
                                  ArrayRef<const MachineOperand *> BaseOps2) {
  // Only examine the first "base" operand of each instruction, on the
  // assumption that it represents the real base address of the access.
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
    return true;
  // ...
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  // ...
  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}

bool RISCVInstrInfo::shouldClusterMemOps(
    ArrayRef<const MachineOperand *> BaseOps1, int64_t Offset1,
    bool OffsetIsScalable1, ArrayRef<const MachineOperand *> BaseOps2,
    int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize,
    unsigned NumBytes) const {
  // If the mem ops to be clustered don't have the same base pointer, they
  // should not be clustered.
  if (!BaseOps1.empty() && !BaseOps2.empty()) {
    // ...
  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
    // If only one base op list is empty, they can't share a base pointer.
    return false;
  }

  unsigned CacheLineSize =
      BaseOps1.front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
  // Assume a cache line size of 64 bytes if none is set in the subtarget.
  CacheLineSize = CacheLineSize ? CacheLineSize : 64;
  // Cluster only nearby accesses, and cap the cluster size to limit the extra
  // register pressure.
  return ClusterSize <= 4 && std::abs(Offset1 - Offset2) < CacheLineSize;
}

// ...
  int64_t OffsetA = 0, OffsetB = 0;
  // ...
    int LowOffset = std::min(OffsetA, OffsetB);
    int HighOffset = std::max(OffsetA, OffsetB);
    LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
    if (LowWidth.hasValue() &&
        LowOffset + (int)LowWidth.getValue() <= HighOffset)
      return true;
// ...
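// Example of the clustering policy above: two loads from the same base
// register at offsets 0 and 8 satisfy std::abs(Offset1 - Offset2) <
// CacheLineSize for the 64-byte default, so they are clustered as long as no
// more than 4 memory operations are already in the cluster.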
std::pair<unsigned, unsigned>
RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
  return std::make_pair(TF & Mask, TF & ~Mask);
}

ArrayRef<std::pair<unsigned, const char *>>
RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace RISCVII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"},
      {MO_TLSDESC_HI, "riscv-tlsdesc-hi"},
      {MO_TLSDESC_LOAD_LO, "riscv-tlsdesc-load-lo"},
      {MO_TLSDESC_ADD_LO, "riscv-tlsdesc-add-lo"},
      {MO_TLSDESC_CALL, "riscv-tlsdesc-call"}};
  return ArrayRef(TargetFlags);
}
  // Can F be deduplicated by the linker? If it can, don't outline from it.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;
  // ...

bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                            unsigned &Flags) const {
  // ...
}

std::optional<outliner::OutlinedFunction>
RISCVInstrInfo::getOutliningCandidateInfo(
    std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
  // First, filter out candidates where X5 (t0) can't be used to set up the
  // function call.
  auto CannotInsertCall = [](outliner::Candidate &C) {
    // ...
    return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
  };
  // ...
  if (RepeatedSequenceLocs.size() < 2)
    return std::nullopt;

  unsigned SequenceSize = 0;
  for (auto &MI : RepeatedSequenceLocs[0])
    SequenceSize += getInstSizeInBytes(MI);

  // call t0, function = 8 bytes.
  unsigned CallOverhead = 8;
  for (auto &C : RepeatedSequenceLocs)
    C.setCallInfo(MachineOutlinerDefault, CallOverhead);

  // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
  unsigned FrameOverhead = 4;
  if (RepeatedSequenceLocs[0]
          .getMF()
          ->getSubtarget<RISCVSubtarget>()
          .hasStdExtCOrZca())
    FrameOverhead = 2;
  // ...
}

outliner::InstrType
RISCVInstrInfo::getOutliningTypeImpl(MachineBasicBlock::iterator &MBBI,
                                     unsigned Flags) const {
  MachineInstr &MI = *MBBI;
  // ...
  const auto &F = MI.getMF()->getFunction();

  // CFI instructions can be stripped later, unless the function needs an
  // unwind table entry.
  if (MI.isCFIInstruction())
    return F.needsUnwindTableEntry() ? outliner::InstrType::Illegal
                                     : outliner::InstrType::Invisible;
  // ...
  // Don't allow modifying X5, which we use for return addresses of outlined
  // functions.
  if (MI.modifiesRegister(RISCV::X5, TRI) ||
      MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
    return outliner::InstrType::Illegal;

  // Make sure the operands don't reference something unsafe.
  for (const auto &MO : MI.operands()) {
    // pcrel-hi and pcrel-lo can't be put in separate sections.
    if (MO.getTargetFlags() == RISCVII::MO_PCREL_LO &&
        (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
         F.hasSection()))
      return outliner::InstrType::Illegal;
  }

  return outliner::InstrType::Legal;
}

// ... (buildOutlinedFrame strips any CFI instructions:)
  bool Changed = true;
  while (Changed) {
    Changed = false;
    // ...
    for (; I != E; ++I) {
      if (I->isCFIInstruction()) {
        I->removeFromParent();
        Changed = true;
        break;
      }
    }
  }
// ...

MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, outliner::Candidate &C) const {
  // Add a call to the outlined function at the given location.
  It = MBB.insert(It,
                  BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
                      .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
                                        RISCVII::MO_CALL));
  return It;
}
    return std::nullopt;

  // Don't consider ADDIW as a candidate because the caller may not be aware
  // of its sign-extension behaviour.
  if (MI.getOpcode() == RISCV::ADDI && MI.getOperand(1).isReg() &&
      MI.getOperand(2).isImm())
    return RegImmPair{MI.getOperand(1).getReg(), MI.getOperand(2).getImm()};

  return std::nullopt;
}

std::string RISCVInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {
  // Print a generic comment for this operand if there is one.
  std::string GenericComment =
      TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
  if (!GenericComment.empty())
    return GenericComment;

  // If not, we must have an immediate operand.
  if (!Op.isImm())
    return std::string();

  std::string Comment;
  raw_string_ostream OS(Comment);
  // ...
  // Print the full VType operand of vsetvli/vsetivli and the SEW operand of
  // vector codegen pseudos.
  if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
       MI.getOpcode() == RISCV::PseudoVSETVLI ||
       MI.getOpcode() == RISCV::PseudoVSETIVLI ||
       MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
      OpIdx == 2) {
    unsigned Imm = MI.getOperand(OpIdx).getImm();
    RISCVVType::printVType(Imm, OS);
  } else if (/* SEW operand */ ...) {
    unsigned Log2SEW = MI.getOperand(OpIdx).getImm();
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
    // ...
  } else if (/* policy operand */ ...) {
    unsigned Policy = MI.getOperand(OpIdx).getImm();
    assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
           "Invalid Policy Value");
    // ...
  }
  // ...
}
#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL)                                  \
  RISCV::Pseudo##OP##_##LMUL

#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)                                    \
  RISCV::Pseudo##OP##_##LMUL##_MASK

#define CASE_RVV_OPCODE_LMUL(OP, LMUL)                                         \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL):                                       \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)

#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP)                                       \
  CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8):                                        \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4):                                   \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2):                                   \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1):                                    \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2):                                    \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_UNMASK(OP)                                             \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP):                                            \
  case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_MASK_WIDEN(OP)                                         \
  CASE_RVV_OPCODE_MASK_LMUL(OP, MF8):                                          \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4):                                     \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2):                                     \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M1):                                      \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M2):                                      \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)

#define CASE_RVV_OPCODE_MASK(OP)                                               \
  CASE_RVV_OPCODE_MASK_WIDEN(OP):                                              \
  case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)

#define CASE_RVV_OPCODE_WIDEN(OP)                                              \
  CASE_RVV_OPCODE_UNMASK_WIDEN(OP):                                            \
  case CASE_RVV_OPCODE_MASK_WIDEN(OP)

#define CASE_RVV_OPCODE(OP)                                                    \
  CASE_RVV_OPCODE_UNMASK(OP):                                                  \
  case CASE_RVV_OPCODE_MASK(OP)

#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL)                                 \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VMA_OPCODE_LMULS_M1(OP, TYPE)                                     \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, M1):                                        \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4):                                   \
  case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VMA_OPCODE_LMULS_MF2(OP, TYPE)                                    \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2):                                       \
  case CASE_VMA_OPCODE_LMULS_M1(OP, TYPE)

#define CASE_VMA_OPCODE_LMULS_MF4(OP, TYPE)                                    \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4):                                       \
  case CASE_VMA_OPCODE_LMULS_MF2(OP, TYPE)

#define CASE_VMA_OPCODE_LMULS(OP, TYPE)                                        \
  CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8):                                       \
  case CASE_VMA_OPCODE_LMULS_MF4(OP, TYPE)

// VFMA instructions are SEW-specific, so the SEW is part of the pseudo name.
#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW)                           \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)                               \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW):                             \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW):                             \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)                              \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW):                                 \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW)                              \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW):                                 \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)

#define CASE_VFMA_OPCODE_VV(OP)                                                \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16):                                     \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32):                                \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)

#define CASE_VFMA_SPLATS(OP)                                                   \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16):                                 \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32):                            \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                           unsigned &SrcOpIdx1,
                                           unsigned &SrcOpIdx2) const {
  const MCInstrDesc &Desc = MI.getDesc();
  if (!Desc.isCommutable())
    return false;

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ:
    // We can't commute operands if operand 2 (i.e., rs1 in mveqz/mvnez
    // rd,rs1,rs2) is the zero register, as it is not valid as the in/out
    // operand 1.
    if (MI.getOperand(2).getReg() == RISCV::X0)
      return false;
    // Operands 1 and 2 are commutable, if we switch the opcode.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  case RISCV::TH_MULA:
  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULS:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:
    // Operands 2 and 3 are commutable.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR:
    // Operands 4 and 5 are commutable.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
  // ... (VMA multiply-add splats: operands 2 and 3 are commutable)
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
  // ... (vfmacc-style forms:)
    // If the tail policy is undisturbed we can't commute.
    if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
      return false;
    // For these instructions we can only swap operand 1 and operand 3 by
    // changing the opcode.
    unsigned CommutableOpIdx1 = 1;
    unsigned CommutableOpIdx2 = 3;
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                              CommutableOpIdx2))
      return false;
    return true;
  // ... (vfmadd-style forms:)
    // If the tail policy is undisturbed we can't commute.
    if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
      return false;

    // For these instructions we have more freedom. We can commute with the
    // other multiplicand or with the addend/subtrahend/minuend.

    // Any fixed operand must be from source 1, 2 or 3.
    if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
      return false;
    if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
      return false;

    // If both ops are fixed, one must be the tied source.
    if (SrcOpIdx1 != CommuteAnyOperandIndex &&
        SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
      return false;

    // Look for two different register operands assumed to be commutable,
    // regardless of the FMA opcode; the opcode is adjusted later if needed.
    if (SrcOpIdx1 == CommuteAnyOperandIndex ||
        SrcOpIdx2 == CommuteAnyOperandIndex) {
      // At least one of the operands to be commuted is not specified, and
      // this method is free to choose appropriate commutable operands.
      unsigned CommutableOpIdx1 = SrcOpIdx1;
      if (SrcOpIdx1 == SrcOpIdx2) {
        // Both operands are unfixed; set one to the tied source.
        CommutableOpIdx1 = 1;
      } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
        // Only one of the operands is unfixed.
        CommutableOpIdx1 = SrcOpIdx2;
      }

      // CommutableOpIdx1 is well defined now; choose another commutable
      // operand and assign its index to CommutableOpIdx2.
      unsigned CommutableOpIdx2;
      if (CommutableOpIdx1 != 1) {
        // If we haven't already used the tied source, we must use it now.
        CommutableOpIdx2 = 1;
      } else {
        Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();

        // The commuted operands should have different registers; otherwise
        // the commute transformation changes nothing and is useless.
        if (Op1Reg != MI.getOperand(2).getReg())
          CommutableOpIdx2 = 2;
        else
          CommutableOpIdx2 = 3;
      }

      // Assign the found pair of commutable indices to SrcOpIdx1 and
      // SrcOpIdx2.
      if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                                CommutableOpIdx2))
        return false;
    }
    return true;
  }
  // ...
}
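// The later opcode switch is needed because vector FMAs encode which source
// is destructive: commuting the addend of vfmacc (which overwrites its
// addend) with a multiplicand turns it into vfmadd (which overwrites a
// multiplicand). commuteInstructionImpl below performs exactly that opcode
// swap via the CASE_VFMA_CHANGE_OPCODE_* macros.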
#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL)                \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL:                                \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL;                             \
    break;

#define CASE_VMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)                    \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4)                        \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

#define CASE_VMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)                   \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2)                       \
  CASE_VMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)

#define CASE_VMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)                   \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4)                       \
  CASE_VMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)

#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)                       \
  CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8)                       \
  CASE_VMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)

#define CASE_VMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                            \
  CASE_VMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16)                       \
  CASE_VMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32)                       \
  CASE_VMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64)

// VFMA depends on SEW.
#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW)          \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW:                        \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW;                     \
    break;

#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)              \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)             \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)                               \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16)                     \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32)                     \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW)             \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE, SEW)                 \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8, SEW)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                           \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32)                 \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                     bool NewMI,
                                                     unsigned OpIdx1,
                                                     unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
                                                            : RISCV::TH_MVEQZ));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case RISCV::PseudoCCMOVGPRNoX0:
  case RISCV::PseudoCCMOVGPR: {
    // CCMOV can be commuted by inverting the condition.
    auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
    CC = RISCVCC::getOppositeBranchCondition(CC);
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(CC);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  // ... (vfmacc-style forms: toggle between clobbering the addend and
  // clobbering a multiplicand)
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
    unsigned Opc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
      // ... (CASE_VFMA_CHANGE_OPCODE_* expansions)
    }

    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  // ... (vfmadd-style forms:)
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    // If one of the operands is the addend we need to change the opcode;
    // otherwise we're just swapping two multiplicands.
    if (OpIdx1 == 3 || OpIdx2 == 3) {
      unsigned Opc;
      switch (MI.getOpcode()) {
      default:
        llvm_unreachable("Unexpected opcode");
        // ... (CASE_VFMA_CHANGE_OPCODE_* expansions)
      }
      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
      return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                     OpIdx1, OpIdx2);
    }
    break;
  }

  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

#undef CASE_RVV_OPCODE_UNMASK_LMUL
#undef CASE_RVV_OPCODE_MASK_LMUL
#undef CASE_RVV_OPCODE_LMUL
#undef CASE_RVV_OPCODE_UNMASK_WIDEN
#undef CASE_RVV_OPCODE_UNMASK
#undef CASE_RVV_OPCODE_MASK_WIDEN
#undef CASE_RVV_OPCODE_MASK
#undef CASE_RVV_OPCODE_WIDEN
#undef CASE_RVV_OPCODE

#undef CASE_VMA_OPCODE_COMMON
#undef CASE_VMA_OPCODE_LMULS_M1
#undef CASE_VMA_OPCODE_LMULS_MF2
#undef CASE_VMA_OPCODE_LMULS_MF4
#undef CASE_VMA_OPCODE_LMULS
#undef CASE_VFMA_OPCODE_COMMON
#undef CASE_VFMA_OPCODE_LMULS_M1
#undef CASE_VFMA_OPCODE_LMULS_MF2
#undef CASE_VFMA_OPCODE_LMULS_MF4
#undef CASE_VFMA_OPCODE_VV
#undef CASE_VFMA_SPLATS
// clang-format off
#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL)                                    \
  RISCV::PseudoV##OP##_##LMUL##_TIED

#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP)                                       \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF4):                                          \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2):                                     \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M1):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M2):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_OPCODE_LMULS(OP)                                           \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF8):                                          \
  case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)

#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL)                             \
  case RISCV::PseudoV##OP##_##LMUL##_TIED:                                     \
    NewOpc = RISCV::PseudoV##OP##_##LMUL;                                      \
    break;

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)                                \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)

// FP widening ops are SEW-specific.
#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW)                            \
  RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED

#define CASE_FP_WIDEOP_OPCODE_LMULS_MF4(OP)                                    \
  CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16):                                  \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16):                             \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32):                             \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16):                              \
  case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32)

#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW)                     \
  case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED:                             \
    NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW;                              \
    break;

#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32)                            \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16)                             \
  CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32)

#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                 \
  CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
// clang-format on
  switch (MI.getOpcode()) {
  default:
    return nullptr;
  // ... (FP widening tied ops, e.g. vfwadd.wv/vfwsub.wv _TIED pseudos:)
    assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
           MI.getNumExplicitOperands() == 7 &&
           "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
    // ... (bail out if the tail policy is undisturbed)
    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
    // ... (CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4 expansions)
    }
    // ...
    MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
              .add(MI.getOperand(0))
              .addReg(MI.getOperand(0).getReg(), RegState::Undef)
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4))
              .add(MI.getOperand(5))
              .add(MI.getOperand(6));
    break;
  // ... (integer widening tied ops, e.g. vwadd.wv _TIED pseudos:)
    assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
           MI.getNumExplicitOperands() == 6);
    if ((MI.getOperand(5).getImm() & 1) == 0)
      return nullptr;

    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
    // ... (CASE_WIDEOP_CHANGE_OPCODE_LMULS expansions)
    }
    // ...
    MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
              .add(MI.getOperand(0))
              .addReg(MI.getOperand(0).getReg(), RegState::Undef)
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4))
              .add(MI.getOperand(5));
    break;
  }
  // ...
  if (LV) {
    unsigned NumOps = MI.getNumOperands();
    for (unsigned I = 1; I < NumOps; ++I) {
      MachineOperand &Op = MI.getOperand(I);
      if (Op.isReg() && Op.isKill())
        LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
    }
  }

  if (LIS) {
    SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);

    if (MI.getOperand(0).isEarlyClobber()) {
      // Use operand 1 was tied to the early-clobber def operand 0, so its
      // live interval could have ended at an early-clobber slot. Now that
      // they are no longer tied, update it to the normal register slot.
      LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
      LiveRange::Segment *S = LI.getSegmentContaining(Idx);
      if (S->end == Idx.getRegSlot(true))
        S->end = Idx.getRegSlot();
    }
  }
  // ...

#undef CASE_WIDEOP_OPCODE_COMMON
#undef CASE_WIDEOP_OPCODE_LMULS_MF4
#undef CASE_WIDEOP_OPCODE_LMULS
#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
#undef CASE_FP_WIDEOP_OPCODE_COMMON
#undef CASE_FP_WIDEOP_OPCODE_LMULS_MF4
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4
#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
  if (llvm::has_single_bit<uint32_t>(Amount)) {
    uint32_t ShiftAmount = Log2_32(Amount);
    if (ShiftAmount == 0)
      return;
    // ... (emit a single SLLI)
  } else if (STI.hasStdExtZba() &&
             ((Amount % 3 == 0 && isPowerOf2_64(Amount / 3)) ||
              (Amount % 5 == 0 && isPowerOf2_64(Amount / 5)) ||
              (Amount % 9 == 0 && isPowerOf2_64(Amount / 9)))) {
    // We can use Zba SHXADD+SLLI instructions for multiply in some cases.
    unsigned Opc;
    uint32_t ShiftAmount;
    if (Amount % 9 == 0) {
      Opc = RISCV::SH3ADD;
      ShiftAmount = Log2_64(Amount / 9);
    } else if (Amount % 5 == 0) {
      Opc = RISCV::SH2ADD;
      ShiftAmount = Log2_64(Amount / 5);
    } else if (Amount % 3 == 0) {
      Opc = RISCV::SH1ADD;
      ShiftAmount = Log2_64(Amount / 3);
    } else {
      llvm_unreachable("implied by if-clause");
    }
    // ... (emit the SHxADD and, if ShiftAmount != 0, an SLLI)
  } else if (llvm::has_single_bit<uint32_t>(Amount - 1)) {
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ... (SLLI by Log2(Amount - 1), then ADD DestReg)
  } else if (llvm::has_single_bit<uint32_t>(Amount + 1)) {
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ... (SLLI by Log2(Amount + 1), then SUB DestReg)
  } else if (STI.hasStdExtZmmul()) {
    Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ... (materialize Amount into N, then MUL)
  } else {
    Register Acc;
    uint32_t PrevShiftAmount = 0;
    for (uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
      if (Amount & (1U << ShiftAmount)) {
        if (ShiftAmount)
          BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
              .addReg(DestReg, RegState::Kill)
              .addImm(ShiftAmount - PrevShiftAmount)
              .setMIFlag(Flag);
        if (Amount >> (ShiftAmount + 1)) {
          // If we don't have an accumulator yet, create it and copy DestReg.
          if (!Acc) {
            Acc = MRI.createVirtualRegister(&RISCV::GPRRegClass);
            // ... (COPY DestReg into Acc)
          } else {
            // ... (ADD DestReg into Acc)
          }
        }
        PrevShiftAmount = ShiftAmount;
      }
    }
    assert(Acc && "Expected valid accumulator");
    // ... (final ADD of Acc into DestReg)
  }
  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
      // ... (the nontemporal-domain memory-operand flags and their names)
bool RISCV::isSEXT_W(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
}

bool RISCV::isZEXT_W(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ADD_UW && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0;
}

bool RISCV::isZEXT_B(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ANDI && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
}
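// In assembler terms these recognize the canonical extension idioms:
//   sext.w rd, rs  ==  addiw  rd, rs, 0
//   zext.w rd, rs  ==  add.uw rd, rs, x0
//   zext.b rd, rs  ==  andi   rd, rs, 255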
static bool isRVVWholeLoadStore(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
  // ... (VS1R_V/VS2R_V/VS4R_V/VS8R_V)
  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL1RE64_V:
  case RISCV::VL2RE64_V:
  case RISCV::VL4RE64_V:
  case RISCV::VL8RE64_V:
    return true;
  }
}

bool RISCV::isRVVSpill(const MachineInstr &MI) {
  // RVV lacks any support for immediate addressing for stack addresses, so be
  // conservative.
  unsigned Opcode = MI.getOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
      !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
    return false;
  return true;
}
std::optional<std::pair<unsigned, unsigned>>
RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVRELOAD2_M1:
    return std::make_pair(2u, 1u);
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVRELOAD2_M2:
    return std::make_pair(2u, 2u);
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVRELOAD2_M4:
    return std::make_pair(2u, 4u);
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVRELOAD3_M1:
    return std::make_pair(3u, 1u);
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVRELOAD3_M2:
    return std::make_pair(3u, 2u);
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVRELOAD4_M1:
    return std::make_pair(4u, 1u);
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVRELOAD4_M2:
    return std::make_pair(4u, 2u);
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVRELOAD5_M1:
    return std::make_pair(5u, 1u);
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVRELOAD6_M1:
    return std::make_pair(6u, 1u);
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVRELOAD7_M1:
    return std::make_pair(7u, 1u);
  case RISCV::PseudoVSPILL8_M1:
  case RISCV::PseudoVRELOAD8_M1:
    return std::make_pair(8u, 1u);
  }
}
bool RISCV::isFaultFirstLoad(const MachineInstr &MI) {
  return MI.getNumExplicitDefs() == 2 &&
         MI.modifiesRegister(RISCV::VL, /*TRI=*/nullptr) && !MI.isInlineAsm();
}

bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
  int16_t MI1FrmOpIdx =
      RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
  int16_t MI2FrmOpIdx =
      RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
  if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
    return false;
  // ... (compare the two frm immediates)
}
std::optional<unsigned>
RISCV::getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW) {
  switch (Opcode) {
  default:
    return std::nullopt;

  // 11.6. Vector Single-Width Shift Instructions
  case RISCV::VSLL_VX:
  case RISCV::VSRL_VX:
  case RISCV::VSRA_VX:
  // 12.4. Vector Single-Width Scaling Shift Instructions
  case RISCV::VSSRL_VX:
  case RISCV::VSSRA_VX:
    // Only the low lg2(SEW) bits of the shift-amount value are used.
    return Log2SEW;

  // 11.7. Vector Narrowing Integer Right Shift Instructions
  case RISCV::VNSRL_WX:
  case RISCV::VNSRA_WX:
  // 12.5. Vector Narrowing Fixed-Point Clip Instructions
  case RISCV::VNCLIPU_WX:
  case RISCV::VNCLIP_WX:
    // Only the low lg2(2*SEW) bits of the shift-amount value are used.
    return Log2SEW + 1;

  // 11.1. Vector Single-Width Integer Add and Subtract
  case RISCV::VADD_VX:
  case RISCV::VSUB_VX:
  case RISCV::VRSUB_VX:
  // 11.2. Vector Widening Integer Add/Subtract
  case RISCV::VWADDU_VX:
  case RISCV::VWSUBU_VX:
  case RISCV::VWADD_VX:
  case RISCV::VWSUB_VX:
  case RISCV::VWADDU_WX:
  case RISCV::VWSUBU_WX:
  case RISCV::VWADD_WX:
  case RISCV::VWSUB_WX:
  // 11.4. Vector Integer Add-with-Carry / Subtract-with-Borrow Instructions
  case RISCV::VADC_VXM:
  case RISCV::VADC_VIM:
  case RISCV::VMADC_VXM:
  case RISCV::VMADC_VIM:
  case RISCV::VMADC_VX:
  case RISCV::VSBC_VXM:
  case RISCV::VMSBC_VXM:
  case RISCV::VMSBC_VX:
  // 11.5. Vector Bitwise Logical Instructions
  case RISCV::VAND_VX:
  case RISCV::VXOR_VX:
  // 11.8. Vector Integer Compare Instructions
  case RISCV::VMSEQ_VX:
  case RISCV::VMSNE_VX:
  case RISCV::VMSLTU_VX:
  case RISCV::VMSLT_VX:
  case RISCV::VMSLEU_VX:
  case RISCV::VMSLE_VX:
  case RISCV::VMSGTU_VX:
  case RISCV::VMSGT_VX:
  // 11.9. Vector Integer Min/Max Instructions
  case RISCV::VMINU_VX:
  case RISCV::VMIN_VX:
  case RISCV::VMAXU_VX:
  case RISCV::VMAX_VX:
  // 11.10. Vector Single-Width Integer Multiply Instructions
  case RISCV::VMUL_VX:
  case RISCV::VMULH_VX:
  case RISCV::VMULHU_VX:
  case RISCV::VMULHSU_VX:
  // 11.11. Vector Integer Divide Instructions
  case RISCV::VDIVU_VX:
  case RISCV::VDIV_VX:
  case RISCV::VREMU_VX:
  case RISCV::VREM_VX:
  // 11.12. Vector Widening Integer Multiply Instructions
  case RISCV::VWMUL_VX:
  case RISCV::VWMULU_VX:
  case RISCV::VWMULSU_VX:
  // 11.13. Vector Single-Width Integer Multiply-Add Instructions
  case RISCV::VMACC_VX:
  case RISCV::VNMSAC_VX:
  case RISCV::VMADD_VX:
  case RISCV::VNMSUB_VX:
  // 11.14. Vector Widening Integer Multiply-Add Instructions
  case RISCV::VWMACCU_VX:
  case RISCV::VWMACC_VX:
  case RISCV::VWMACCSU_VX:
  case RISCV::VWMACCUS_VX:
  // 11.15. Vector Integer Merge Instructions
  case RISCV::VMERGE_VXM:
  // 11.16. Vector Integer Move Instructions
  case RISCV::VMV_V_X:
  // 12.1. Vector Single-Width Saturating Add and Subtract
  case RISCV::VSADDU_VX:
  case RISCV::VSADD_VX:
  case RISCV::VSSUBU_VX:
  case RISCV::VSSUB_VX:
  // 12.2. Vector Single-Width Averaging Add and Subtract
  case RISCV::VAADDU_VX:
  case RISCV::VAADD_VX:
  case RISCV::VASUBU_VX:
  case RISCV::VASUB_VX:
  // 12.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
  case RISCV::VSMUL_VX:
  // 16.1. Integer Scalar Move Instructions
  case RISCV::VMV_S_X:
    return 1U << Log2SEW;
  }
}
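// Example: for VSLL_VX at SEW = 64 (Log2SEW = 6), only the low 6 bits of the
// scalar shift amount are demanded, so a producer that clobbers the higher
// bits of the GPR can still be folded away by callers of this query.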
unsigned RISCV::getRVVMCOpcode(unsigned RVVPseudoOpcode) {
  const RISCVVPseudosTable::PseudoInfo *RVV =
      RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
  if (!RVV)
    return 0;
  return RVV->BaseInstr;
}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg, unsigned NumRegs)
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, unsigned CombineOpc, unsigned ZeroReg=0, bool CheckZeroReg=false)
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
static ARCCC::CondCode getOppositeBranchCondition(ARCCC::CondCode CC)
Return the inverse of passed condition, i.e. turning COND_E to COND_NE.
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
static M68k::CondCode getCondFromBranchOpc(unsigned BrOpc)
unsigned const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
static bool isRVVWholeLoadStore(unsigned Opcode)
#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern)
#define RVV_OPC_LMUL_CASE(OPC, INV)
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs)
static unsigned getAddendOperandIdx(unsigned Pattern)
#define CASE_RVV_OPCODE_UNMASK(OP)
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
#define CASE_VFMA_SPLATS(OP)
unsigned getPredicatedOpcode(unsigned Opcode)
static void genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg)
#define CASE_WIDEOP_OPCODE_LMULS(OP)
#define OPCODE_LMUL_MASK_CASE(OPC)
static bool isFSUB(unsigned Opc)
#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
#define CASE_RVV_OPCODE(OP)
#define CASE_VFMA_OPCODE_VV(OP)
MachineOutlinerConstructionID
#define CASE_RVV_OPCODE_WIDEN(OP)
#define CASE_VMA_OPCODE_LMULS(OP, TYPE)
static bool isFMUL(unsigned Opc)
static bool getFPPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
#define OPCODE_LMUL_CASE(OPC)
#define CASE_OPERAND_UIMM(NUM)
static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB, const MachineOperand &MO, unsigned OuterShiftAmt)
Utility routine that checks if.
static bool isFADD(unsigned Opc)
#define CASE_FP_WIDEOP_OPCODE_LMULS_MF4(OP)
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVII::VLMUL LMul)
static MachineInstr * canFoldAsPredicatedOp(Register Reg, const MachineRegisterInfo &MRI, const TargetInstrInfo *TII)
Identify instructions that can be folded into a CCMOV instruction, and return the defining instructio...
static bool canCombineFPFusedMultiply(const MachineInstr &Root, const MachineOperand &MO, bool DoRegPressureReduce)
static bool getSHXADDPatterns(const MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns)
static bool getFPFusedMultiplyPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
static cl::opt< MachineTraceStrategy > ForceMachineCombinerStrategy("riscv-force-machine-combiner-strategy", cl::Hidden, cl::desc("Force machine combiner to use a specific strategy for machine " "trace metrics evaluation."), cl::init(MachineTraceStrategy::TS_NumStrategies), cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local", "Local strategy."), clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr", "MinInstrCount strategy.")))
static unsigned getSHXADDShiftAmount(unsigned Opc)
#define CASE_RVV_OPCODE_MASK(OP)
#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
This file defines the SmallVector class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static unsigned getSize(unsigned Kind)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
static DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)
When two instructions are combined into a single instruction we also need to combine the original loc...
This class represents an Operation in the Expression.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LiveInterval - This class represents the liveness of a register, or stack slot.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
TypeSize getValue() const
MCInstBuilder & addReg(unsigned Reg)
Add a new register operand.
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer control flow to some other block.
Wrapper class representing physical registers. Should be passed by value.
unsigned pred_size() const
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
Instructions::const_iterator const_instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setStackID(int ObjectIdx, uint8_t ID)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do so.
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
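These chainable helpers all decorate the MachineInstr that BuildMI (listed further below) creates, so a typical frame-index load can be emitted in one expression. A hedged sketch in the style of loadRegFromStackSlot; MBB, MBBI, DL, TII, DstReg, FI and MMO are assumed to be in scope:

  #include "llvm/CodeGen/MachineInstrBuilder.h"

  // DstReg = LW <fi#FI>, 0 - the frame index is rewritten to a real base
  // register later, during prolog/epilog insertion.
  llvm::BuildMI(MBB, MBBI, DL, TII->get(RISCV::LW), DstReg)
      .addFrameIndex(FI)     // stack-slot base
      .addImm(0)             // offset
      .addMemOperand(MMO);   // describes the stack access to the optimizers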
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore, etc.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the information describing the memory reference is not available.
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
void clearKillInfo()
Clears kill flags on all operands.
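A recurring pattern combines several of these queries: classify what kind of access an instruction makes before trusting its memory operand. A minimal sketch (hasKnownStackAccess and its policy are illustrative):

  #include "llvm/CodeGen/MachineInstr.h"
  #include "llvm/CodeGen/MachineMemOperand.h"

  static bool hasKnownStackAccess(const llvm::MachineInstr &MI) {
    // Only meaningful for loads/stores with exactly one memory operand.
    if (!MI.mayLoadOrStore() || !MI.hasOneMemOperand())
      return false;
    const llvm::MachineMemOperand *MMO = *MI.memoperands_begin();
    // Reject ordered/volatile references; require word alignment.
    return !MI.hasOrderedMemoryRef() && MMO->getAlign().value() >= 4;
  }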
A description of a memory reference used in the backend.
bool isNonTemporal() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
static MachineOperand CreateImm(int64_t Val)
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags (isKill, isUndef and isDead).
@ MO_Immediate
Immediate operand.
@ MO_Register
Register operand.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
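The Create* factories build operands that are not yet attached to any instruction, and the is* predicates classify them afterwards. A short sketch (makeOperands and the chosen values are illustrative):

  #include <cassert>
  #include "llvm/CodeGen/MachineOperand.h"
  #include "llvm/CodeGen/Register.h"

  static void makeOperands(llvm::Register Reg) {
    llvm::MachineOperand ImmOp = llvm::MachineOperand::CreateImm(42);
    llvm::MachineOperand UseOp = llvm::MachineOperand::CreateReg(
        Reg, /*isDef=*/false, /*isImp=*/false, /*isKill=*/true);
    assert(ImmOp.isImm() && UseOp.isReg() && "predicates mirror the factories");
  }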
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
A Module instance is used to store all the information related to an LLVM module.
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
void mulImm(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, uint32_t Amt, MachineInstr::MIFlag Flag) const
Generate code to multiply the value in DestReg by Amt - handles all the common optimizations for this idiom, and supports fallback for subtargets which don't support multiply instructions.
bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const override
RISCVInstrInfo(RISCVSubtarget &STI)
void copyPhysRegVector(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc, const TargetRegisterClass *RegClass) const
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const override
const MCInstrDesc & getBrCond(RISCVCC::CondCode CC, bool Imm=false) const
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc) const override
unsigned getTailDuplicateSize(CodeGenOptLevel OptLevel) const override
void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const override
const RISCVSubtarget & STI
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
std::optional< unsigned > getInverseOpcode(unsigned Opcode) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
virtual outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
MachineTraceStrategy getMachineCombinerTraceStrategy() const override
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MCInst getNop() const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
std::optional< outliner::OutlinedFunction > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs) const override
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
CombinerObjective getCombinerObjective(unsigned Pattern) const override
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
bool optimizeCondBranch(MachineInstr &MI) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const override
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-specific information for each MachineFunction.
int getBranchRelaxationScratchFrameIndex() const
bool hasStdExtCOrZca() const
unsigned getTailDupAggressiveThreshold() const
const RISCVRegisterInfo * getRegisterInfo() const override
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
void setRegUsed(Register Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Tell the scavenger a register is used.
Register scavengeRegisterBackwards(const TargetRegisterClass &RC, MachineBasicBlock::iterator To, bool RestoreAfter, int SPAdj, bool AllowSpill=true)
Make a register of the specific register class available from the current position backwards to the place before To.
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
SlotIndex - An opaque wrapper around machine indexes.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all SmallPtrSet instances.
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
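This pair of calls is what keeps a visited-set in sync with instruction deletion, e.g. the SeenMIs set threaded through optimizeSelect above. A sketch (visitOnce is an illustrative helper):

  #include "llvm/ADT/SmallPtrSet.h"
  #include "llvm/CodeGen/MachineInstr.h"

  static bool visitOnce(llvm::MachineInstr *MI,
                        llvm::SmallPtrSet<llvm::MachineInstr *, 8> &Seen) {
    // insert().second is true only the first time MI is seen; if MI is
    // later deleted, erase(MI) must run so the set holds no dangling
    // pointers.
    return Seen.insert(MI).second;
  }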
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
MI-level Statepoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given statepoint should emit.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when Inst has reassociable operands in the same MBB.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could replace the original code sequence.
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specific information in Flags.
virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const
The returned array encodes the operand index for each parameter because the operands may be commuted;...
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target specify whether the instruction is actually trivially rematerializable, taking into consideration its operands.
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when Inst has a reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
const uint8_t TSFlags
Configurable target specific flags.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Target - Wrapper for Target specific information.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
CondCode getOppositeBranchCondition(CondCode)
unsigned getBrCond(CondCode CC, bool Imm=false)
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
static bool usesMaskPolicy(uint64_t TSFlags)
static bool hasRoundModeOp(uint64_t TSFlags)
static unsigned getVLOpNum(const MCInstrDesc &Desc)
static bool hasVLOp(uint64_t TSFlags)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool isRVVWideningReduction(uint64_t TSFlags)
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
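This is the materialization helper behind movImm: it plans the LUI/ADDI/SLLI/... steps needed to synthesize an arbitrary constant on a given subtarget. A hedged sketch; the include path and the getOpcode()/getImm() accessors are as in recent LLVM, and countMatInstrs is an illustrative helper:

  #include "MCTargetDesc/RISCVMatInt.h"
  #include "llvm/MC/MCSubtargetInfo.h"

  static unsigned countMatInstrs(int64_t Val,
                                 const llvm::MCSubtargetInfo &STI) {
    llvm::RISCVMatInt::InstSeq Seq =
        llvm::RISCVMatInt::generateInstSeq(Val, STI);
    for (const llvm::RISCVMatInt::Inst &Inst : Seq) {
      // Inst.getOpcode() is e.g. RISCV::LUI, RISCV::ADDI or RISCV::SLLI;
      // Inst.getImm() is the immediate operand for that step.
      (void)Inst;
    }
    return Seq.size(); // instruction count, useful for size costing
  }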
@ OPERAND_UIMMLOG2XLEN_NONZERO
@ OPERAND_SIMM12_LSB00000
@ OPERAND_FIRST_RISCV_IMM
@ OPERAND_UIMM10_LSB00_NONZERO
@ OPERAND_SIMM10_LSB0000_NONZERO
static RISCVII::VLMUL getLMul(uint64_t TSFlags)
static unsigned getNF(uint64_t TSFlags)
static bool isTailAgnostic(unsigned VType)
static RISCVII::VLMUL getVLMUL(unsigned VType)
std::pair< unsigned, bool > decodeVLMUL(RISCVII::VLMUL VLMUL)
static bool isValidSEW(unsigned SEW)
void printVType(unsigned VType, raw_ostream &OS)
static unsigned getSEW(unsigned VType)
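Together these decode the fields packed into a vtype immediate, e.g. one read off a vsetvli operand. A minimal sketch, assuming the RISCVVType/RISCVII helpers from the target's MCTargetDesc RISCVBaseInfo.h header; decodeVType is an illustrative wrapper:

  static void decodeVType(unsigned VType) {
    unsigned SEW = llvm::RISCVVType::getSEW(VType);        // 8, 16, 32 or 64
    llvm::RISCVII::VLMUL VLMul = llvm::RISCVVType::getVLMUL(VType);
    // decodeVLMUL() splits the encoding into a magnitude and a
    // fractional-LMUL flag.
    auto [LMulVal, Fractional] = llvm::RISCVVType::decodeVLMUL(VLMul);
    bool TailAgnostic = llvm::RISCVVType::isTailAgnostic(VType);
    (void)SEW; (void)LMulVal; (void)Fractional; (void)TailAgnostic;
  }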
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2)
std::optional< unsigned > getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW)
int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex)
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
bool isSEXT_W(const MachineInstr &MI)
bool isFaultFirstLoad(const MachineInstr &MI)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
bool isZEXT_B(const MachineInstr &MI)
bool isRVVSpill(const MachineInstr &MI)
bool isZEXT_W(const MachineInstr &MI)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
@ TS_Local
Select the trace that contains only the current basic block.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static const MachineMemOperand::Flags MONontemporalBit1
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, ...), where A is a 0-based counter and the remaining elements are drawn in lockstep from the input ranges.
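A hedged sketch of the index-plus-value iteration this enables (structured bindings over enumerate work in recent LLVM; dumpOperandIndices is illustrative):

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/CodeGen/MachineInstr.h"

  static void dumpOperandIndices(const llvm::MachineInstr &MI) {
    // Idx is the 0-based position, MO the operand at that position - no
    // hand-maintained counter needed.
    for (auto [Idx, MO] : llvm::enumerate(MI.operands())) {
      (void)Idx;
      (void)MO;
    }
  }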
static const MachineMemOperand::Flags MONontemporalBit0
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal.address from the specified value, returning the original object being addressed.
unsigned getDeadRegState(bool B)
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
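These are the checks behind strength-reducing a multiply into a shift, as mulImm-style helpers do. A minimal sketch (getShiftForMul is an illustrative helper):

  #include <cstdint>
  #include "llvm/Support/MathExtras.h"

  static bool getShiftForMul(uint64_t Amt, unsigned &ShAmt) {
    if (!llvm::isPowerOf2_64(Amt))
      return false;               // needs a real multiply (or shift-add tricks)
    ShAmt = llvm::Log2_64(Amt);   // floor log2 is exact for powers of two
    return true;                  // caller can emit SLLI instead of MUL
  }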
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
CodeGenOptLevel
Code generation optimization level.
unsigned getKillRegState(bool B)
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
unsigned getRenamableRegState(bool B)
DWARFExpression::Operation Op
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent to: C.erase(remove_if(C, pred), C.end());
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
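Applied together, isIntN and SignExtend64 are the usual pair for validating and canonicalizing a RISC-V 12-bit immediate. A minimal sketch (fitsSImm12 is an illustrative helper):

  #include <cstdint>
  #include "llvm/Support/MathExtras.h"

  static bool fitsSImm12(int64_t Imm, int64_t &Canonical) {
    if (!llvm::isIntN(12, Imm))             // outside [-2048, 2047]
      return false;
    Canonical = llvm::SignExtend64<12>(Imm); // replicate bit 11 upwards
    return true;
  }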
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Description of the encoding of one expression Op.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Used to describe a register and immediate addition.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.