40#define GEN_CHECK_COMPRESS_INSTR
41#include "RISCVGenCompressInstEmitter.inc"
43#define GET_INSTRINFO_CTOR_DTOR
44#define GET_INSTRINFO_NAMED_OPS
45#include "RISCVGenInstrInfo.inc"
49 cl::desc(
"Prefer whole register move for vector registers."));
52 "riscv-force-machine-combiner-strategy",
cl::Hidden,
53 cl::desc(
"Force machine combiner to use a specific strategy for machine "
54 "trace metrics evaluation."),
55 cl::init(MachineTraceStrategy::TS_NumStrategies),
58 clEnumValN(MachineTraceStrategy::TS_MinInstrCount,
"min-instr",
59 "MinInstrCount strategy.")));
65#define GET_RISCVVPseudosTable_IMPL
66#include "RISCVGenSearchableTables.inc"
72#define GET_RISCVMaskedPseudosTable_IMPL
73#include "RISCVGenSearchableTables.inc"
91 int &FrameIndex)
const {
98 unsigned &MemBytes)
const {
99 switch (
MI.getOpcode()) {
122 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
123 MI.getOperand(2).getImm() == 0) {
124 FrameIndex =
MI.getOperand(1).getIndex();
125 return MI.getOperand(0).getReg();
132 int &FrameIndex)
const {
139 unsigned &MemBytes)
const {
140 switch (
MI.getOpcode()) {
160 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
161 MI.getOperand(2).getImm() == 0) {
162 FrameIndex =
MI.getOperand(1).getIndex();
163 return MI.getOperand(0).getReg();
171 return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
182 assert(
MBBI->getOpcode() == TargetOpcode::COPY &&
183 "Unexpected COPY instruction.");
187 bool FoundDef =
false;
188 bool FirstVSetVLI =
false;
189 unsigned FirstSEW = 0;
192 if (
MBBI->isMetaInstruction())
195 if (
MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
196 MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
197 MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
207 unsigned FirstVType =
MBBI->getOperand(2).getImm();
212 if (FirstLMul != LMul)
217 if (
MBBI->getOperand(0).getReg() != RISCV::X0)
219 if (
MBBI->getOperand(1).isImm())
221 if (
MBBI->getOperand(1).getReg() != RISCV::X0)
227 unsigned VType =
MBBI->getOperand(2).getImm();
245 }
else if (
MBBI->isInlineAsm() ||
MBBI->isCall()) {
247 }
else if (
MBBI->getNumDefs()) {
250 if (
MBBI->modifiesRegister(RISCV::VL,
nullptr))
256 if (!MO.isReg() || !MO.isDef())
258 if (!FoundDef &&
TRI->regsOverlap(MO.getReg(), SrcReg)) {
273 if (MO.getReg() != SrcReg)
314 uint16_t SrcEncoding =
TRI->getEncodingValue(SrcReg);
315 uint16_t DstEncoding =
TRI->getEncodingValue(DstReg);
317 assert(!Fractional &&
"It is impossible be fractional lmul here.");
318 unsigned NumRegs = NF * LMulVal;
324 SrcEncoding += NumRegs - 1;
325 DstEncoding += NumRegs - 1;
331 unsigned,
unsigned> {
339 uint16_t Diff = DstEncoding - SrcEncoding;
340 if (
I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
341 DstEncoding % 8 == 7)
343 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
344 if (
I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
345 DstEncoding % 4 == 3)
347 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
348 if (
I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
349 DstEncoding % 2 == 1)
351 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
354 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
359 if (
I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
361 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
362 if (
I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
364 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
365 if (
I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
367 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
370 RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
375 if (&RegClass == &RISCV::VRRegClass)
377 return TRI->getMatchingSuperReg(Reg, RISCV::sub_vrm1_0, &RegClass);
379 while (
I != NumRegs) {
384 auto [LMulCopied, RegClass, Opc, VVOpc, VIOpc] =
385 GetCopyInfo(SrcEncoding, DstEncoding);
389 if (LMul == LMulCopied &&
392 if (DefMBBI->getOpcode() == VIOpc)
398 MCRegister ActualSrcReg = FindRegWithEncoding(
399 RegClass, ReversedCopy ? (SrcEncoding - NumCopied + 1) : SrcEncoding);
400 MCRegister ActualDstReg = FindRegWithEncoding(
401 RegClass, ReversedCopy ? (DstEncoding - NumCopied + 1) : DstEncoding);
409 MIB = MIB.add(DefMBBI->getOperand(2));
422 SrcEncoding += (ReversedCopy ? -NumCopied : NumCopied);
423 DstEncoding += (ReversedCopy ? -NumCopied : NumCopied);
434 if (RISCV::GPRRegClass.
contains(DstReg, SrcReg)) {
441 if (RISCV::GPRPairRegClass.
contains(DstReg, SrcReg)) {
444 TRI->getSubReg(DstReg, RISCV::sub_gpr_even))
445 .
addReg(
TRI->getSubReg(SrcReg, RISCV::sub_gpr_even),
449 TRI->getSubReg(DstReg, RISCV::sub_gpr_odd))
450 .
addReg(
TRI->getSubReg(SrcReg, RISCV::sub_gpr_odd),
457 if (RISCV::VCSRRegClass.
contains(SrcReg) &&
458 RISCV::GPRRegClass.
contains(DstReg)) {
460 .
addImm(RISCVSysReg::lookupSysRegByName(
TRI->getName(SrcReg))->Encoding)
465 if (RISCV::FPR16RegClass.
contains(DstReg, SrcReg)) {
467 if (
STI.hasStdExtZfh()) {
468 Opc = RISCV::FSGNJ_H;
471 (
STI.hasStdExtZfhmin() ||
STI.hasStdExtZfbfmin()) &&
472 "Unexpected extensions");
474 DstReg =
TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
475 &RISCV::FPR32RegClass);
476 SrcReg =
TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
477 &RISCV::FPR32RegClass);
478 Opc = RISCV::FSGNJ_S;
486 if (RISCV::FPR32RegClass.
contains(DstReg, SrcReg)) {
493 if (RISCV::FPR64RegClass.
contains(DstReg, SrcReg)) {
500 if (RISCV::FPR32RegClass.
contains(DstReg) &&
501 RISCV::GPRRegClass.
contains(SrcReg)) {
507 if (RISCV::GPRRegClass.
contains(DstReg) &&
508 RISCV::FPR32RegClass.
contains(SrcReg)) {
514 if (RISCV::FPR64RegClass.
contains(DstReg) &&
515 RISCV::GPRRegClass.
contains(SrcReg)) {
522 if (RISCV::GPRRegClass.
contains(DstReg) &&
523 RISCV::FPR64RegClass.
contains(SrcReg)) {
532 &RISCV::VRRegClass, &RISCV::VRM2RegClass, &RISCV::VRM4RegClass,
533 &RISCV::VRM8RegClass, &RISCV::VRN2M1RegClass, &RISCV::VRN2M2RegClass,
534 &RISCV::VRN2M4RegClass, &RISCV::VRN3M1RegClass, &RISCV::VRN3M2RegClass,
535 &RISCV::VRN4M1RegClass, &RISCV::VRN4M2RegClass, &RISCV::VRN5M1RegClass,
536 &RISCV::VRN6M1RegClass, &RISCV::VRN7M1RegClass, &RISCV::VRN8M1RegClass};
537 for (
const auto &RegClass : RVVRegClasses) {
538 if (RegClass->contains(DstReg, SrcReg)) {
549 Register SrcReg,
bool IsKill,
int FI,
557 bool IsScalableVector =
true;
558 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
559 Opcode =
TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
560 RISCV::SW : RISCV::SD;
561 IsScalableVector =
false;
562 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
563 Opcode = RISCV::PseudoRV32ZdinxSD;
564 IsScalableVector =
false;
565 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
567 IsScalableVector =
false;
568 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
570 IsScalableVector =
false;
571 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
573 IsScalableVector =
false;
574 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
575 Opcode = RISCV::VS1R_V;
576 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
577 Opcode = RISCV::VS2R_V;
578 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
579 Opcode = RISCV::VS4R_V;
580 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
581 Opcode = RISCV::VS8R_V;
582 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
583 Opcode = RISCV::PseudoVSPILL2_M1;
584 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
585 Opcode = RISCV::PseudoVSPILL2_M2;
586 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
587 Opcode = RISCV::PseudoVSPILL2_M4;
588 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
589 Opcode = RISCV::PseudoVSPILL3_M1;
590 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
591 Opcode = RISCV::PseudoVSPILL3_M2;
592 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
593 Opcode = RISCV::PseudoVSPILL4_M1;
594 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
595 Opcode = RISCV::PseudoVSPILL4_M2;
596 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
597 Opcode = RISCV::PseudoVSPILL5_M1;
598 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
599 Opcode = RISCV::PseudoVSPILL6_M1;
600 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
601 Opcode = RISCV::PseudoVSPILL7_M1;
602 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
603 Opcode = RISCV::PseudoVSPILL8_M1;
607 if (IsScalableVector) {
640 bool IsScalableVector =
true;
641 if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
642 Opcode =
TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
643 RISCV::LW : RISCV::LD;
644 IsScalableVector =
false;
645 }
else if (RISCV::GPRPairRegClass.hasSubClassEq(RC)) {
646 Opcode = RISCV::PseudoRV32ZdinxLD;
647 IsScalableVector =
false;
648 }
else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
650 IsScalableVector =
false;
651 }
else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
653 IsScalableVector =
false;
654 }
else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
656 IsScalableVector =
false;
657 }
else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
658 Opcode = RISCV::VL1RE8_V;
659 }
else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
660 Opcode = RISCV::VL2RE8_V;
661 }
else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
662 Opcode = RISCV::VL4RE8_V;
663 }
else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
664 Opcode = RISCV::VL8RE8_V;
665 }
else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
666 Opcode = RISCV::PseudoVRELOAD2_M1;
667 else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
668 Opcode = RISCV::PseudoVRELOAD2_M2;
669 else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
670 Opcode = RISCV::PseudoVRELOAD2_M4;
671 else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
672 Opcode = RISCV::PseudoVRELOAD3_M1;
673 else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
674 Opcode = RISCV::PseudoVRELOAD3_M2;
675 else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
676 Opcode = RISCV::PseudoVRELOAD4_M1;
677 else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
678 Opcode = RISCV::PseudoVRELOAD4_M2;
679 else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
680 Opcode = RISCV::PseudoVRELOAD5_M1;
681 else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
682 Opcode = RISCV::PseudoVRELOAD6_M1;
683 else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
684 Opcode = RISCV::PseudoVRELOAD7_M1;
685 else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
686 Opcode = RISCV::PseudoVRELOAD8_M1;
690 if (IsScalableVector) {
724 if (Ops.
size() != 1 || Ops[0] != 1)
728 switch (
MI.getOpcode()) {
735 LoadOpc = RISCV::LWU;
739 LoadOpc = RISCV::LBU;
749 case RISCV::ZEXT_H_RV32:
750 case RISCV::ZEXT_H_RV64:
751 LoadOpc = RISCV::LHU;
761 return BuildMI(*
MI.getParent(), InsertPt,
MI.getDebugLoc(),
get(LoadOpc),
772 bool DstIsDead)
const {
778 if (!isUInt<32>(Val))
782 Val = SignExtend64<32>(Val);
788 bool SrcRenamable =
false;
792 bool LastItem = ++Num == Seq.
size();
797 switch (Inst.getOpndKind()) {
807 .
addReg(SrcReg, SrcRegState)
814 .
addReg(SrcReg, SrcRegState)
815 .
addReg(SrcReg, SrcRegState)
821 .
addReg(SrcReg, SrcRegState)
829 SrcRenamable = DstRenamable;
837 case RISCV::CV_BEQIMM:
839 case RISCV::CV_BNEIMM:
863 "Unknown conditional branch");
876 return Imm ? RISCV::CV_BEQIMM : RISCV::BEQ;
878 return Imm ? RISCV::CV_BNEIMM : RISCV::BNE;
918 bool AllowModify)
const {
924 if (
I ==
MBB.
end() || !isUnpredicatedTerminator(*
I))
930 int NumTerminators = 0;
931 for (
auto J =
I.getReverse(); J !=
MBB.
rend() && isUnpredicatedTerminator(*J);
934 if (J->getDesc().isUnconditionalBranch() ||
935 J->getDesc().isIndirectBranch()) {
942 if (AllowModify && FirstUncondOrIndirectBr !=
MBB.
end()) {
943 while (std::next(FirstUncondOrIndirectBr) !=
MBB.
end()) {
944 std::next(FirstUncondOrIndirectBr)->eraseFromParent();
947 I = FirstUncondOrIndirectBr;
951 if (
I->getDesc().isIndirectBranch())
955 if (
I->isPreISelOpcode())
959 if (NumTerminators > 2)
963 if (NumTerminators == 1 &&
I->getDesc().isUnconditionalBranch()) {
969 if (NumTerminators == 1 &&
I->getDesc().isConditionalBranch()) {
975 if (NumTerminators == 2 && std::prev(
I)->getDesc().isConditionalBranch() &&
976 I->getDesc().isUnconditionalBranch()) {
987 int *BytesRemoved)
const {
994 if (!
I->getDesc().isUnconditionalBranch() &&
995 !
I->getDesc().isConditionalBranch())
1001 I->eraseFromParent();
1008 if (!
I->getDesc().isConditionalBranch())
1014 I->eraseFromParent();
1027 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
1029 "RISC-V branch conditions have two components!");
1064 assert(RS &&
"RegScavenger required for long branching");
1066 "new block should be inserted for expanding unconditional branch");
1069 "restore block should be inserted for restoring clobbered registers");
1076 if (!isInt<32>(BrOffset))
1078 "Branch offsets outside of the signed 32-bit range not supported");
1083 Register ScratchReg =
MRI.createVirtualRegister(&RISCV::GPRJALRRegClass);
1095 if (TmpGPR != RISCV::NoRegister)
1101 TmpGPR = RISCV::X27;
1104 if (FrameIndex == -1)
1109 TRI->eliminateFrameIndex(std::prev(
MI.getIterator()),
1112 MI.getOperand(1).setMBB(&RestoreBB);
1116 TRI->eliminateFrameIndex(RestoreBB.
back(),
1120 MRI.replaceRegWith(ScratchReg, TmpGPR);
1121 MRI.clearVirtRegs();
1126 assert((
Cond.size() == 3) &&
"Invalid branch condition!");
1166 auto isLoadImm = [](
const MachineInstr *
MI, int64_t &Imm) ->
bool {
1167 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1168 MI->getOperand(1).getReg() == RISCV::X0) {
1169 Imm =
MI->getOperand(2).getImm();
1179 return Reg.isVirtual() && isLoadImm(
MRI.getVRegDef(Reg), Imm);
1186 auto searchConst = [&](int64_t C1) ->
Register {
1188 auto DefC1 = std::find_if(++
II, E, [&](
const MachineInstr &
I) ->
bool {
1190 return isLoadImm(&
I, Imm) && Imm == C1 &&
1191 I.getOperand(0).getReg().isVirtual();
1194 return DefC1->getOperand(0).getReg();
1199 bool Modify =
false;
1201 if (isFromLoadImm(
LHS, C0) &&
MRI.hasOneUse(
LHS.getReg())) {
1206 if (
Register RegZ = searchConst(C0 + 1)) {
1212 MRI.clearKillFlags(RegZ);
1215 }
else if (isFromLoadImm(
RHS, C0) &&
MRI.hasOneUse(
RHS.getReg())) {
1220 if (
Register RegZ = searchConst(C0 - 1)) {
1226 MRI.clearKillFlags(RegZ);
1240 MI.eraseFromParent();
1247 assert(
MI.getDesc().isBranch() &&
"Unexpected opcode!");
1249 int NumOp =
MI.getNumExplicitOperands();
1250 return MI.getOperand(NumOp - 1).getMBB();
1254 int64_t BrOffset)
const {
1268 case RISCV::CV_BEQIMM:
1269 case RISCV::CV_BNEIMM:
1270 return isIntN(13, BrOffset);
1272 case RISCV::PseudoBR:
1273 return isIntN(21, BrOffset);
1274 case RISCV::PseudoJump:
1284 case RISCV::ADD:
return RISCV::PseudoCCADD;
break;
1285 case RISCV::SUB:
return RISCV::PseudoCCSUB;
break;
1286 case RISCV::SLL:
return RISCV::PseudoCCSLL;
break;
1287 case RISCV::SRL:
return RISCV::PseudoCCSRL;
break;
1288 case RISCV::SRA:
return RISCV::PseudoCCSRA;
break;
1289 case RISCV::AND:
return RISCV::PseudoCCAND;
break;
1290 case RISCV::OR:
return RISCV::PseudoCCOR;
break;
1291 case RISCV::XOR:
return RISCV::PseudoCCXOR;
break;
1293 case RISCV::ADDI:
return RISCV::PseudoCCADDI;
break;
1294 case RISCV::SLLI:
return RISCV::PseudoCCSLLI;
break;
1295 case RISCV::SRLI:
return RISCV::PseudoCCSRLI;
break;
1296 case RISCV::SRAI:
return RISCV::PseudoCCSRAI;
break;
1297 case RISCV::ANDI:
return RISCV::PseudoCCANDI;
break;
1298 case RISCV::ORI:
return RISCV::PseudoCCORI;
break;
1299 case RISCV::XORI:
return RISCV::PseudoCCXORI;
break;
1301 case RISCV::ADDW:
return RISCV::PseudoCCADDW;
break;
1302 case RISCV::SUBW:
return RISCV::PseudoCCSUBW;
break;
1303 case RISCV::SLLW:
return RISCV::PseudoCCSLLW;
break;
1304 case RISCV::SRLW:
return RISCV::PseudoCCSRLW;
break;
1305 case RISCV::SRAW:
return RISCV::PseudoCCSRAW;
break;
1307 case RISCV::ADDIW:
return RISCV::PseudoCCADDIW;
break;
1308 case RISCV::SLLIW:
return RISCV::PseudoCCSLLIW;
break;
1309 case RISCV::SRLIW:
return RISCV::PseudoCCSRLIW;
break;
1310 case RISCV::SRAIW:
return RISCV::PseudoCCSRAIW;
break;
1312 case RISCV::ANDN:
return RISCV::PseudoCCANDN;
break;
1313 case RISCV::ORN:
return RISCV::PseudoCCORN;
break;
1314 case RISCV::XNOR:
return RISCV::PseudoCCXNOR;
break;
1317 return RISCV::INSTRUCTION_LIST_END;
1325 if (!Reg.isVirtual())
1327 if (!
MRI.hasOneNonDBGUse(Reg))
1336 if (
MI->getOpcode() == RISCV::ADDI &&
MI->getOperand(1).isReg() &&
1337 MI->getOperand(1).getReg() == RISCV::X0)
1342 if (MO.isFI() || MO.isCPI() || MO.isJTI())
1352 if (MO.getReg().isPhysical() && !
MRI.isConstantPhysReg(MO.getReg()))
1355 bool DontMoveAcrossStores =
true;
1356 if (!
MI->isSafeToMove(
nullptr, DontMoveAcrossStores))
1363 unsigned &TrueOp,
unsigned &FalseOp,
1364 bool &Optimizable)
const {
1365 assert(
MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1366 "Unknown select instruction");
1376 Cond.push_back(
MI.getOperand(1));
1377 Cond.push_back(
MI.getOperand(2));
1378 Cond.push_back(
MI.getOperand(3));
1380 Optimizable =
STI.hasShortForwardBranchOpt();
1387 bool PreferFalse)
const {
1388 assert(
MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
1389 "Unknown select instruction");
1390 if (!
STI.hasShortForwardBranchOpt())
1396 bool Invert = !
DefMI;
1404 Register DestReg =
MI.getOperand(0).getReg();
1406 if (!
MRI.constrainRegClass(DestReg, PreviousClass))
1410 assert(PredOpc != RISCV::INSTRUCTION_LIST_END &&
"Unexpected opcode!");
1417 NewMI.
add(
MI.getOperand(1));
1418 NewMI.
add(
MI.getOperand(2));
1427 NewMI.
add(FalseReg);
1451 if (
MI.isMetaInstruction())
1454 unsigned Opcode =
MI.getOpcode();
1456 if (Opcode == TargetOpcode::INLINEASM ||
1457 Opcode == TargetOpcode::INLINEASM_BR) {
1460 return getInlineAsmLength(
MI.getOperand(0).getSymbolName(),
1461 *
TM.getMCAsmInfo());
1464 if (!
MI.memoperands_empty()) {
1469 if (ST.hasStdExtCOrZca() && ST.enableRVCHintInstrs()) {
1470 if (isCompressibleInst(
MI,
STI))
1478 if (Opcode == TargetOpcode::BUNDLE)
1479 return getInstBundleLength(
MI);
1481 if (
MI.getParent() &&
MI.getParent()->getParent()) {
1482 if (isCompressibleInst(
MI,
STI))
1487 case TargetOpcode::STACKMAP:
1490 case TargetOpcode::PATCHPOINT:
1493 case TargetOpcode::STATEPOINT: {
1497 return std::max(NumBytes, 8U);
1500 return get(Opcode).getSize();
1504unsigned RISCVInstrInfo::getInstBundleLength(
const MachineInstr &
MI)
const {
1508 while (++
I != E &&
I->isInsideBundle()) {
1509 assert(!
I->isBundle() &&
"No nested bundle!");
1516 const unsigned Opcode =
MI.getOpcode();
1520 case RISCV::FSGNJ_D:
1521 case RISCV::FSGNJ_S:
1522 case RISCV::FSGNJ_H:
1523 case RISCV::FSGNJ_D_INX:
1524 case RISCV::FSGNJ_D_IN32X:
1525 case RISCV::FSGNJ_S_INX:
1526 case RISCV::FSGNJ_H_INX:
1528 return MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
1529 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg();
1533 return (
MI.getOperand(1).isReg() &&
1534 MI.getOperand(1).getReg() == RISCV::X0) ||
1535 (
MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0);
1537 return MI.isAsCheapAsAMove();
1540std::optional<DestSourcePair>
1544 switch (
MI.getOpcode()) {
1549 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isImm() &&
1550 MI.getOperand(2).getImm() == 0)
1553 case RISCV::FSGNJ_D:
1554 case RISCV::FSGNJ_S:
1555 case RISCV::FSGNJ_H:
1556 case RISCV::FSGNJ_D_INX:
1557 case RISCV::FSGNJ_D_IN32X:
1558 case RISCV::FSGNJ_S_INX:
1559 case RISCV::FSGNJ_H_INX:
1561 if (
MI.getOperand(1).isReg() &&
MI.getOperand(2).isReg() &&
1562 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
1566 return std::nullopt;
1574 const auto &SchedModel =
STI.getSchedModel();
1575 return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
1592 RISCV::OpName::frm) < 0;
1594 "New instructions require FRM whereas the old one does not have it");
1601 for (
auto *NewMI : InsInstrs) {
1604 NewMI->getOpcode(), RISCV::OpName::frm)) != NewMI->getNumOperands())
1646bool RISCVInstrInfo::isVectorAssociativeAndCommutative(
const MachineInstr &Inst,
1647 bool Invert)
const {
1648#define OPCODE_LMUL_CASE(OPC) \
1649 case RISCV::OPC##_M1: \
1650 case RISCV::OPC##_M2: \
1651 case RISCV::OPC##_M4: \
1652 case RISCV::OPC##_M8: \
1653 case RISCV::OPC##_MF2: \
1654 case RISCV::OPC##_MF4: \
1655 case RISCV::OPC##_MF8
1657#define OPCODE_LMUL_MASK_CASE(OPC) \
1658 case RISCV::OPC##_M1_MASK: \
1659 case RISCV::OPC##_M2_MASK: \
1660 case RISCV::OPC##_M4_MASK: \
1661 case RISCV::OPC##_M8_MASK: \
1662 case RISCV::OPC##_MF2_MASK: \
1663 case RISCV::OPC##_MF4_MASK: \
1664 case RISCV::OPC##_MF8_MASK
1669 Opcode = *InvOpcode;
1686#undef OPCODE_LMUL_MASK_CASE
1687#undef OPCODE_LMUL_CASE
1690bool RISCVInstrInfo::areRVVInstsReassociable(
const MachineInstr &Root,
1703 auto checkImmOperand = [&](
unsigned OpIdx) {
1707 auto checkRegOperand = [&](
unsigned OpIdx) {
1715 if (!checkRegOperand(1))
1730 bool SeenMI2 =
false;
1740 if (It->modifiesRegister(RISCV::V0,
TRI)) {
1741 Register SrcReg = It->getOperand(1).getReg();
1759 if (MI1VReg != SrcReg)
1768 assert(SeenMI2 &&
"Prev is expected to appear before Root");
1807bool RISCVInstrInfo::hasReassociableVectorSibling(
const MachineInstr &Inst,
1808 bool &Commuted)
const {
1812 "Expect the present of passthrough operand.");
1818 Commuted = !areRVVInstsReassociable(Inst, *MI1) &&
1819 areRVVInstsReassociable(Inst, *MI2);
1823 return areRVVInstsReassociable(Inst, *MI1) &&
1824 (isVectorAssociativeAndCommutative(*MI1) ||
1825 isVectorAssociativeAndCommutative(*MI1,
true)) &&
1832 if (!isVectorAssociativeAndCommutative(Inst) &&
1833 !isVectorAssociativeAndCommutative(Inst,
true))
1845 MI1 =
MRI.getUniqueVRegDef(Op1.
getReg());
1847 MI2 =
MRI.getUniqueVRegDef(Op2.
getReg());
1859 for (
unsigned I = 0;
I < 5; ++
I)
1865 bool &Commuted)
const {
1866 if (isVectorAssociativeAndCommutative(Inst) ||
1867 isVectorAssociativeAndCommutative(Inst,
true))
1868 return hasReassociableVectorSibling(Inst, Commuted);
1874 unsigned OperandIdx = Commuted ? 2 : 1;
1878 int16_t InstFrmOpIdx =
1880 int16_t SiblingFrmOpIdx =
1883 return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
1888 bool Invert)
const {
1889 if (isVectorAssociativeAndCommutative(Inst, Invert))
1897 Opc = *InverseOpcode;
1942std::optional<unsigned>
1944#define RVV_OPC_LMUL_CASE(OPC, INV) \
1945 case RISCV::OPC##_M1: \
1946 return RISCV::INV##_M1; \
1947 case RISCV::OPC##_M2: \
1948 return RISCV::INV##_M2; \
1949 case RISCV::OPC##_M4: \
1950 return RISCV::INV##_M4; \
1951 case RISCV::OPC##_M8: \
1952 return RISCV::INV##_M8; \
1953 case RISCV::OPC##_MF2: \
1954 return RISCV::INV##_MF2; \
1955 case RISCV::OPC##_MF4: \
1956 return RISCV::INV##_MF4; \
1957 case RISCV::OPC##_MF8: \
1958 return RISCV::INV##_MF8
1960#define RVV_OPC_LMUL_MASK_CASE(OPC, INV) \
1961 case RISCV::OPC##_M1_MASK: \
1962 return RISCV::INV##_M1_MASK; \
1963 case RISCV::OPC##_M2_MASK: \
1964 return RISCV::INV##_M2_MASK; \
1965 case RISCV::OPC##_M4_MASK: \
1966 return RISCV::INV##_M4_MASK; \
1967 case RISCV::OPC##_M8_MASK: \
1968 return RISCV::INV##_M8_MASK; \
1969 case RISCV::OPC##_MF2_MASK: \
1970 return RISCV::INV##_MF2_MASK; \
1971 case RISCV::OPC##_MF4_MASK: \
1972 return RISCV::INV##_MF4_MASK; \
1973 case RISCV::OPC##_MF8_MASK: \
1974 return RISCV::INV##_MF8_MASK
1978 return std::nullopt;
1980 return RISCV::FSUB_H;
1982 return RISCV::FSUB_S;
1984 return RISCV::FSUB_D;
1986 return RISCV::FADD_H;
1988 return RISCV::FADD_S;
1990 return RISCV::FADD_D;
2007#undef RVV_OPC_LMUL_MASK_CASE
2008#undef RVV_OPC_LMUL_CASE
2013 bool DoRegPressureReduce) {
2029 if (DoRegPressureReduce && !
MRI.hasOneNonDBGUse(
MI->getOperand(0).getReg()))
2040 bool DoRegPressureReduce) {
2042 bool IsFAdd =
isFADD(Opc);
2043 if (!IsFAdd && !
isFSUB(Opc))
2047 DoRegPressureReduce)) {
2053 DoRegPressureReduce)) {
2063 bool DoRegPressureReduce) {
2071 unsigned CombineOpc) {
2078 if (!
MI ||
MI->getParent() != &
MBB ||
MI->getOpcode() != CombineOpc)
2081 if (!
MRI.hasOneNonDBGUse(
MI->getOperand(0).getReg()))
2092 unsigned OuterShiftAmt) {
2098 if (InnerShiftAmt < OuterShiftAmt || (InnerShiftAmt - OuterShiftAmt) > 3)
2160 bool DoRegPressureReduce)
const {
2169 DoRegPressureReduce);
2177 return RISCV::FMADD_H;
2179 return RISCV::FMADD_S;
2181 return RISCV::FMADD_D;
2226 bool Mul1IsKill = Mul1.
isKill();
2227 bool Mul2IsKill = Mul2.
isKill();
2228 bool AddendIsKill = Addend.
isKill();
2237 BuildMI(*MF, MergedLoc,
TII->get(FusedOpc), DstReg)
2262 assert(OuterShiftAmt != 0 &&
"Unexpected opcode");
2269 assert(InnerShiftAmt >= OuterShiftAmt &&
"Unexpected shift amount");
2272 switch (InnerShiftAmt - OuterShiftAmt) {
2276 InnerOpc = RISCV::ADD;
2279 InnerOpc = RISCV::SH1ADD;
2282 InnerOpc = RISCV::SH2ADD;
2285 InnerOpc = RISCV::SH3ADD;
2293 Register NewVR =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
2303 InstrIdxForVirtReg.
insert(std::make_pair(NewVR, 0));
2320 DelInstrs, InstrIdxForVirtReg);
2348 unsigned OpType = Operand.OperandType;
2353 int64_t Imm = MO.
getImm();
2360#define CASE_OPERAND_UIMM(NUM) \
2361 case RISCVOp::OPERAND_UIMM##NUM: \
2362 Ok = isUInt<NUM>(Imm); \
2376 Ok = isShiftedUInt<1, 1>(Imm);
2379 Ok = isShiftedUInt<5, 2>(Imm);
2382 Ok = isShiftedUInt<6, 2>(Imm);
2385 Ok = isShiftedUInt<5, 3>(Imm);
2388 Ok = isUInt<8>(Imm) && Imm >= 32;
2391 Ok = isShiftedUInt<6, 3>(Imm);
2394 Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
2397 Ok = isShiftedUInt<8, 2>(Imm) && (Imm != 0);
2406 Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
2412 Ok = Imm != 0 && isInt<6>(Imm);
2415 Ok = isUInt<10>(Imm);
2418 Ok = isUInt<11>(Imm);
2421 Ok = isInt<12>(Imm);
2424 Ok = isShiftedInt<7, 5>(Imm);
2427 Ok =
STI.
is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
2430 Ok =
STI.
is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
2431 Ok = Ok && Imm != 0;
2434 Ok = (isUInt<5>(Imm) && Imm != 0) ||
2435 (Imm >= 0xfffe0 && Imm <= 0xfffff);
2438 Ok = Imm >= 0 && Imm <= 10;
2441 Ok = Imm >= 0 && Imm <= 7;
2444 Ok = Imm >= 1 && Imm <= 10;
2447 Ok = Imm >= 2 && Imm <= 14;
2450 Ok = (Imm & 0xf) == 0;
2454 ErrInfo =
"Invalid immediate";
2464 if (!
Op.isImm() && !
Op.isReg()) {
2465 ErrInfo =
"Invalid operand type for VL operand";
2468 if (
Op.isReg() &&
Op.getReg() != RISCV::NoRegister) {
2470 auto *RC =
MRI.getRegClass(
Op.getReg());
2471 if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
2472 ErrInfo =
"Invalid register class for VL operand";
2477 ErrInfo =
"VL operand w/o SEW operand?";
2483 if (!
MI.getOperand(OpIdx).isImm()) {
2484 ErrInfo =
"SEW value expected to be an immediate";
2487 uint64_t Log2SEW =
MI.getOperand(OpIdx).getImm();
2489 ErrInfo =
"Unexpected SEW value";
2492 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
2494 ErrInfo =
"Unexpected SEW value";
2500 if (!
MI.getOperand(OpIdx).isImm()) {
2501 ErrInfo =
"Policy operand expected to be an immediate";
2504 uint64_t Policy =
MI.getOperand(OpIdx).getImm();
2506 ErrInfo =
"Invalid Policy Value";
2510 ErrInfo =
"policy operand w/o VL operand?";
2518 if (!
MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
2519 ErrInfo =
"policy operand w/o tied operand?";
2562 int64_t NewOffset = OldOffset + Disp;
2564 NewOffset = SignExtend64<32>(NewOffset);
2566 if (!isInt<12>(NewOffset))
2584 "Addressing mode not supported for folding");
2626 OffsetIsScalable =
false;
2642 if (BaseOps1.
front()->isIdenticalTo(*BaseOps2.
front()))
2650 if (MO1->getAddrSpace() != MO2->getAddrSpace())
2653 auto Base1 = MO1->getValue();
2654 auto Base2 = MO2->getValue();
2655 if (!Base1 || !Base2)
2660 if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
2663 return Base1 == Base2;
2669 int64_t Offset2,
bool OffsetIsScalable2,
unsigned ClusterSize,
2670 unsigned NumBytes)
const {
2673 if (!BaseOps1.
empty() && !BaseOps2.
empty()) {
2678 }
else if (!BaseOps1.
empty() || !BaseOps2.
empty()) {
2684 BaseOps1.
front()->getParent()->getMF()->getSubtarget().getCacheLineSize();
2690 return ClusterSize <= 4 && std::abs(Offset1 - Offset2) <
CacheLineSize;
2740 int64_t OffsetA = 0, OffsetB = 0;
2745 int LowOffset = std::min(OffsetA, OffsetB);
2746 int HighOffset = std::max(OffsetA, OffsetB);
2747 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
2749 LowOffset + (int)LowWidth.
getValue() <= HighOffset)
2756std::pair<unsigned, unsigned>
2759 return std::make_pair(TF & Mask, TF & ~Mask);
2764 using namespace RISCVII;
2765 static const std::pair<unsigned, const char *> TargetFlags[] = {
2766 {MO_CALL,
"riscv-call"},
2767 {MO_LO,
"riscv-lo"},
2768 {MO_HI,
"riscv-hi"},
2769 {MO_PCREL_LO,
"riscv-pcrel-lo"},
2770 {MO_PCREL_HI,
"riscv-pcrel-hi"},
2771 {MO_GOT_HI,
"riscv-got-hi"},
2772 {MO_TPREL_LO,
"riscv-tprel-lo"},
2773 {MO_TPREL_HI,
"riscv-tprel-hi"},
2774 {MO_TPREL_ADD,
"riscv-tprel-add"},
2775 {MO_TLS_GOT_HI,
"riscv-tls-got-hi"},
2776 {MO_TLS_GD_HI,
"riscv-tls-gd-hi"},
2777 {MO_TLSDESC_HI,
"riscv-tlsdesc-hi"},
2778 {MO_TLSDESC_LOAD_LO,
"riscv-tlsdesc-load-lo"},
2779 {MO_TLSDESC_ADD_LO,
"riscv-tlsdesc-add-lo"},
2780 {MO_TLSDESC_CALL,
"riscv-tlsdesc-call"}};
2788 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
2801 unsigned &Flags)
const {
2816std::optional<outliner::OutlinedFunction>
2818 std::vector<outliner::Candidate> &RepeatedSequenceLocs)
const {
2824 return !
C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *
TRI);
2830 if (RepeatedSequenceLocs.size() < 2)
2831 return std::nullopt;
2833 unsigned SequenceSize = 0;
2835 for (
auto &
MI : RepeatedSequenceLocs[0])
2839 unsigned CallOverhead = 8;
2840 for (
auto &
C : RepeatedSequenceLocs)
2844 unsigned FrameOverhead = 4;
2845 if (RepeatedSequenceLocs[0]
2847 ->getSubtarget<RISCVSubtarget>()
2857 unsigned Flags)
const {
2862 const auto &
F =
MI.getMF()->getFunction();
2865 if (
MI.isCFIInstruction())
2879 if (
MI.modifiesRegister(RISCV::X5,
TRI) ||
2880 MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
2884 for (
const auto &MO :
MI.operands()) {
2889 (
MI.getMF()->getTarget().getFunctionSections() ||
F.hasComdat() ||
2902 bool Changed =
true;
2907 for (;
I != E; ++
I) {
2908 if (
I->isCFIInstruction()) {
2909 I->removeFromParent();
2932 .addGlobalAddress(M.getNamedValue(MF.
getName()), 0,
2943 return std::nullopt;
2947 if (
MI.getOpcode() == RISCV::ADDI &&
MI.getOperand(1).isReg() &&
2948 MI.getOperand(2).isImm())
2949 return RegImmPair{
MI.getOperand(1).getReg(),
MI.getOperand(2).getImm()};
2951 return std::nullopt;
2959 std::string GenericComment =
2961 if (!GenericComment.empty())
2962 return GenericComment;
2966 return std::string();
2968 std::string Comment;
2975 if ((
MI.getOpcode() == RISCV::VSETVLI ||
MI.getOpcode() == RISCV::VSETIVLI ||
2976 MI.getOpcode() == RISCV::PseudoVSETVLI ||
2977 MI.getOpcode() == RISCV::PseudoVSETIVLI ||
2978 MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
2980 unsigned Imm =
MI.getOperand(OpIdx).getImm();
2984 unsigned Log2SEW =
MI.getOperand(OpIdx).getImm();
2985 unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
2990 unsigned Policy =
MI.getOperand(OpIdx).getImm();
2992 "Invalid Policy Value");
3002#define CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL) \
3003 RISCV::Pseudo##OP##_##LMUL
3005#define CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL) \
3006 RISCV::Pseudo##OP##_##LMUL##_MASK
3008#define CASE_RVV_OPCODE_LMUL(OP, LMUL) \
3009 CASE_RVV_OPCODE_UNMASK_LMUL(OP, LMUL): \
3010 case CASE_RVV_OPCODE_MASK_LMUL(OP, LMUL)
3012#define CASE_RVV_OPCODE_UNMASK_WIDEN(OP) \
3013 CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF8): \
3014 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF4): \
3015 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, MF2): \
3016 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M1): \
3017 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M2): \
3018 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M4)
3020#define CASE_RVV_OPCODE_UNMASK(OP) \
3021 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
3022 case CASE_RVV_OPCODE_UNMASK_LMUL(OP, M8)
3024#define CASE_RVV_OPCODE_MASK_WIDEN(OP) \
3025 CASE_RVV_OPCODE_MASK_LMUL(OP, MF8): \
3026 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF4): \
3027 case CASE_RVV_OPCODE_MASK_LMUL(OP, MF2): \
3028 case CASE_RVV_OPCODE_MASK_LMUL(OP, M1): \
3029 case CASE_RVV_OPCODE_MASK_LMUL(OP, M2): \
3030 case CASE_RVV_OPCODE_MASK_LMUL(OP, M4)
3032#define CASE_RVV_OPCODE_MASK(OP) \
3033 CASE_RVV_OPCODE_MASK_WIDEN(OP): \
3034 case CASE_RVV_OPCODE_MASK_LMUL(OP, M8)
3036#define CASE_RVV_OPCODE_WIDEN(OP) \
3037 CASE_RVV_OPCODE_UNMASK_WIDEN(OP): \
3038 case CASE_RVV_OPCODE_MASK_WIDEN(OP)
3040#define CASE_RVV_OPCODE(OP) \
3041 CASE_RVV_OPCODE_UNMASK(OP): \
3042 case CASE_RVV_OPCODE_MASK(OP)
3046#define CASE_VMA_OPCODE_COMMON(OP, TYPE, LMUL) \
3047 RISCV::PseudoV##OP##_##TYPE##_##LMUL
3049#define CASE_VMA_OPCODE_LMULS_M1(OP, TYPE) \
3050 CASE_VMA_OPCODE_COMMON(OP, TYPE, M1): \
3051 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M2): \
3052 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M4): \
3053 case CASE_VMA_OPCODE_COMMON(OP, TYPE, M8)
3055#define CASE_VMA_OPCODE_LMULS_MF2(OP, TYPE) \
3056 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF2): \
3057 case CASE_VMA_OPCODE_LMULS_M1(OP, TYPE)
3059#define CASE_VMA_OPCODE_LMULS_MF4(OP, TYPE) \
3060 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF4): \
3061 case CASE_VMA_OPCODE_LMULS_MF2(OP, TYPE)
3063#define CASE_VMA_OPCODE_LMULS(OP, TYPE) \
3064 CASE_VMA_OPCODE_COMMON(OP, TYPE, MF8): \
3065 case CASE_VMA_OPCODE_LMULS_MF4(OP, TYPE)
3068#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL, SEW) \
3069 RISCV::PseudoV##OP##_##TYPE##_##LMUL##_##SEW
3071#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW) \
3072 CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1, SEW): \
3073 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2, SEW): \
3074 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4, SEW): \
3075 case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8, SEW)
3077#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW) \
3078 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2, SEW): \
3079 case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE, SEW)
3081#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE, SEW) \
3082 CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4, SEW): \
3083 case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE, SEW)
3085#define CASE_VFMA_OPCODE_VV(OP) \
3086 CASE_VFMA_OPCODE_LMULS_MF4(OP, VV, E16): \
3087 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VV, E32): \
3088 case CASE_VFMA_OPCODE_LMULS_M1(OP, VV, E64)
3090#define CASE_VFMA_SPLATS(OP) \
3091 CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16, E16): \
3092 case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32, E32): \
3093 case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64, E64)
3097 unsigned &SrcOpIdx1,
3098 unsigned &SrcOpIdx2)
const {
3100 if (!
Desc.isCommutable())
3103 switch (
MI.getOpcode()) {
3104 case RISCV::TH_MVEQZ:
3105 case RISCV::TH_MVNEZ:
3109 if (
MI.getOperand(2).getReg() == RISCV::X0)
3112 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
3113 case RISCV::TH_MULA:
3114 case RISCV::TH_MULAW:
3115 case RISCV::TH_MULAH:
3116 case RISCV::TH_MULS:
3117 case RISCV::TH_MULSW:
3118 case RISCV::TH_MULSH:
3120 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
3121 case RISCV::PseudoCCMOVGPRNoX0:
3122 case RISCV::PseudoCCMOVGPR:
3124 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
3151 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
3172 if ((
MI.getOperand(
MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
3177 unsigned CommutableOpIdx1 = 1;
3178 unsigned CommutableOpIdx2 = 3;
3179 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3192 if ((
MI.getOperand(
MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
3199 if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
3201 if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
3205 if (SrcOpIdx1 != CommuteAnyOperandIndex &&
3206 SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
3212 if (SrcOpIdx1 == CommuteAnyOperandIndex ||
3213 SrcOpIdx2 == CommuteAnyOperandIndex) {
3216 unsigned CommutableOpIdx1 = SrcOpIdx1;
3217 if (SrcOpIdx1 == SrcOpIdx2) {
3220 CommutableOpIdx1 = 1;
3221 }
else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
3223 CommutableOpIdx1 = SrcOpIdx2;
3228 unsigned CommutableOpIdx2;
3229 if (CommutableOpIdx1 != 1) {
3231 CommutableOpIdx2 = 1;
3233 Register Op1Reg =
MI.getOperand(CommutableOpIdx1).getReg();
3238 if (Op1Reg !=
MI.getOperand(2).getReg())
3239 CommutableOpIdx2 = 2;
3241 CommutableOpIdx2 = 3;
3246 if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
3259#define CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL) \
3260 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL: \
3261 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL; \
3264#define CASE_VMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE) \
3265 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1) \
3266 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2) \
3267 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4) \
3268 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)
3270#define CASE_VMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE) \
3271 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2) \
3272 CASE_VMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)
3274#define CASE_VMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE) \
3275 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4) \
3276 CASE_VMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)
3278#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE) \
3279 CASE_VMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8) \
3280 CASE_VMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
3282#define CASE_VMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
3283 CASE_VMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16) \
3284 CASE_VMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32) \
3285 CASE_VMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64)
3288#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL, SEW) \
3289 case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL##_##SEW: \
3290 Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL##_##SEW; \
3293#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW) \
3294 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1, SEW) \
3295 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2, SEW) \
3296 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4, SEW) \
3297 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8, SEW)
3299#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW) \
3300 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2, SEW) \
3301 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE, SEW)
3303#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP) \
3304 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VV, E16) \
3305 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VV, E32) \
3306 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VV, E64)
3308#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW) \
3309 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4, SEW) \
3310 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE, SEW)
3312#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE, SEW) \
3313 CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8, SEW) \
3314 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE, SEW)
3316#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP) \
3317 CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16, E16) \
3318 CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32, E32) \
3319 CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64, E64)
3324 unsigned OpIdx2)
const {
3327 return *
MI.getParent()->getParent()->CloneMachineInstr(&
MI);
3331 switch (
MI.getOpcode()) {
3332 case RISCV::TH_MVEQZ:
3333 case RISCV::TH_MVNEZ: {
3334 auto &WorkingMI = cloneIfNew(
MI);
3335 WorkingMI.setDesc(
get(
MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
3336 : RISCV::TH_MVEQZ));
3340 case RISCV::PseudoCCMOVGPRNoX0:
3341 case RISCV::PseudoCCMOVGPR: {
3345 auto &WorkingMI = cloneIfNew(
MI);
3346 WorkingMI.getOperand(3).setImm(
CC);
3370 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
3371 assert((OpIdx1 == 3 || OpIdx2 == 3) &&
"Unexpected opcode index");
3373 switch (
MI.getOpcode()) {
3396 auto &WorkingMI = cloneIfNew(
MI);
3397 WorkingMI.setDesc(
get(Opc));
3407 assert((OpIdx1 == 1 || OpIdx2 == 1) &&
"Unexpected opcode index");
3410 if (OpIdx1 == 3 || OpIdx2 == 3) {
3412 switch (
MI.getOpcode()) {
3423 auto &WorkingMI = cloneIfNew(
MI);
3424 WorkingMI.setDesc(
get(Opc));
3436#undef CASE_RVV_OPCODE_UNMASK_LMUL
3437#undef CASE_RVV_OPCODE_MASK_LMUL
3438#undef CASE_RVV_OPCODE_LMUL
3439#undef CASE_RVV_OPCODE_UNMASK_WIDEN
3440#undef CASE_RVV_OPCODE_UNMASK
3441#undef CASE_RVV_OPCODE_MASK_WIDEN
3442#undef CASE_RVV_OPCODE_MASK
3443#undef CASE_RVV_OPCODE_WIDEN
3444#undef CASE_RVV_OPCODE
3446#undef CASE_VMA_OPCODE_COMMON
3447#undef CASE_VMA_OPCODE_LMULS_M1
3448#undef CASE_VMA_OPCODE_LMULS_MF2
3449#undef CASE_VMA_OPCODE_LMULS_MF4
3450#undef CASE_VMA_OPCODE_LMULS
3451#undef CASE_VFMA_OPCODE_COMMON
3452#undef CASE_VFMA_OPCODE_LMULS_M1
3453#undef CASE_VFMA_OPCODE_LMULS_MF2
3454#undef CASE_VFMA_OPCODE_LMULS_MF4
3455#undef CASE_VFMA_OPCODE_VV
3456#undef CASE_VFMA_SPLATS
3459#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL) \
3460 RISCV::PseudoV##OP##_##LMUL##_TIED
3462#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP) \
3463 CASE_WIDEOP_OPCODE_COMMON(OP, MF4): \
3464 case CASE_WIDEOP_OPCODE_COMMON(OP, MF2): \
3465 case CASE_WIDEOP_OPCODE_COMMON(OP, M1): \
3466 case CASE_WIDEOP_OPCODE_COMMON(OP, M2): \
3467 case CASE_WIDEOP_OPCODE_COMMON(OP, M4)
3469#define CASE_WIDEOP_OPCODE_LMULS(OP) \
3470 CASE_WIDEOP_OPCODE_COMMON(OP, MF8): \
3471 case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
3473#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL) \
3474 case RISCV::PseudoV##OP##_##LMUL##_TIED: \
3475 NewOpc = RISCV::PseudoV##OP##_##LMUL; \
3478#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP) \
3479 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4) \
3480 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2) \
3481 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1) \
3482 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2) \
3483 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)
3485#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
3486 CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8) \
3487 CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
3490#define CASE_FP_WIDEOP_OPCODE_COMMON(OP, LMUL, SEW) \
3491 RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED
3493#define CASE_FP_WIDEOP_OPCODE_LMULS_MF4(OP) \
3494 CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF4, E16): \
3495 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E16): \
3496 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, MF2, E32): \
3497 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E16): \
3498 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M1, E32): \
3499 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E16): \
3500 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M2, E32): \
3501 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E16): \
3502 case CASE_FP_WIDEOP_OPCODE_COMMON(OP, M4, E32) \
3504#define CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL, SEW) \
3505 case RISCV::PseudoV##OP##_##LMUL##_##SEW##_TIED: \
3506 NewOpc = RISCV::PseudoV##OP##_##LMUL##_##SEW; \
3509#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP) \
3510 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4, E16) \
3511 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E16) \
3512 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2, E32) \
3513 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E16) \
3514 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1, E32) \
3515 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E16) \
3516 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2, E32) \
3517 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E16) \
3518 CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4, E32) \
3520#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS(OP) \
3521 CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
3528 switch (
MI.getOpcode()) {
3534 MI.getNumExplicitOperands() == 7 &&
3535 "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
3542 switch (
MI.getOpcode()) {
3552 .
add(
MI.getOperand(0))
3554 .
add(
MI.getOperand(1))
3555 .
add(
MI.getOperand(2))
3556 .
add(
MI.getOperand(3))
3557 .
add(
MI.getOperand(4))
3558 .
add(
MI.getOperand(5))
3559 .
add(
MI.getOperand(6));
3568 MI.getNumExplicitOperands() == 6);
3569 if ((
MI.getOperand(5).getImm() & 1) == 0)
3574 switch (
MI.getOpcode()) {
3586 .
add(
MI.getOperand(0))
3588 .
add(
MI.getOperand(1))
3589 .
add(
MI.getOperand(2))
3590 .
add(
MI.getOperand(3))
3591 .
add(
MI.getOperand(4))
3592 .
add(
MI.getOperand(5));
3599 unsigned NumOps =
MI.getNumOperands();
3600 for (
unsigned I = 1;
I < NumOps; ++
I) {
3602 if (
Op.isReg() &&
Op.isKill())
3610 if (
MI.getOperand(0).isEarlyClobber()) {
3616 if (S->
end ==
Idx.getRegSlot(
true))
3617 S->
end =
Idx.getRegSlot();
3624#undef CASE_WIDEOP_OPCODE_COMMON
3625#undef CASE_WIDEOP_OPCODE_LMULS_MF4
3626#undef CASE_WIDEOP_OPCODE_LMULS
3627#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
3628#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4
3629#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
3630#undef CASE_FP_WIDEOP_OPCODE_COMMON
3631#undef CASE_FP_WIDEOP_OPCODE_LMULS_MF4
3632#undef CASE_FP_WIDEOP_CHANGE_OPCODE_COMMON
3633#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4
3634#undef CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS
3641 if (llvm::has_single_bit<uint32_t>(Amount)) {
3643 if (ShiftAmount == 0)
3649 }
else if (
STI.hasStdExtZba() &&
3656 if (Amount % 9 == 0) {
3657 Opc = RISCV::SH3ADD;
3658 ShiftAmount =
Log2_64(Amount / 9);
3659 }
else if (Amount % 5 == 0) {
3660 Opc = RISCV::SH2ADD;
3661 ShiftAmount =
Log2_64(Amount / 5);
3662 }
else if (Amount % 3 == 0) {
3663 Opc = RISCV::SH1ADD;
3664 ShiftAmount =
Log2_64(Amount / 3);
3677 }
else if (llvm::has_single_bit<uint32_t>(Amount - 1)) {
3678 Register ScaledRegister =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
3688 }
else if (llvm::has_single_bit<uint32_t>(Amount + 1)) {
3689 Register ScaledRegister =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
3699 }
else if (
STI.hasStdExtZmmul()) {
3700 Register N =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
3709 for (
uint32_t ShiftAmount = 0; Amount >> ShiftAmount; ShiftAmount++) {
3710 if (Amount & (1U << ShiftAmount)) {
3714 .
addImm(ShiftAmount - PrevShiftAmount)
3716 if (Amount >> (ShiftAmount + 1)) {
3719 Acc =
MRI.createVirtualRegister(&RISCV::GPRRegClass);
3730 PrevShiftAmount = ShiftAmount;
3733 assert(Acc &&
"Expected valid accumulator");
3743 static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
3751 return MI.getOpcode() == RISCV::ADDIW &&
MI.getOperand(1).isReg() &&
3752 MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 0;
3757 return MI.getOpcode() == RISCV::ADD_UW &&
MI.getOperand(1).isReg() &&
3758 MI.getOperand(2).isReg() &&
MI.getOperand(2).getReg() == RISCV::X0;
3763 return MI.getOpcode() == RISCV::ANDI &&
MI.getOperand(1).isReg() &&
3764 MI.getOperand(2).isImm() &&
MI.getOperand(2).getImm() == 255;
3775 case RISCV::VL1RE8_V:
3776 case RISCV::VL2RE8_V:
3777 case RISCV::VL4RE8_V:
3778 case RISCV::VL8RE8_V:
3779 case RISCV::VL1RE16_V:
3780 case RISCV::VL2RE16_V:
3781 case RISCV::VL4RE16_V:
3782 case RISCV::VL8RE16_V:
3783 case RISCV::VL1RE32_V:
3784 case RISCV::VL2RE32_V:
3785 case RISCV::VL4RE32_V:
3786 case RISCV::VL8RE32_V:
3787 case RISCV::VL1RE64_V:
3788 case RISCV::VL2RE64_V:
3789 case RISCV::VL4RE64_V:
3790 case RISCV::VL8RE64_V:
3798 unsigned Opcode =
MI.getOpcode();
3799 if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
3805std::optional<std::pair<unsigned, unsigned>>
3809 return std::nullopt;
3810 case RISCV::PseudoVSPILL2_M1:
3811 case RISCV::PseudoVRELOAD2_M1:
3812 return std::make_pair(2u, 1u);
3813 case RISCV::PseudoVSPILL2_M2:
3814 case RISCV::PseudoVRELOAD2_M2:
3815 return std::make_pair(2u, 2u);
3816 case RISCV::PseudoVSPILL2_M4:
3817 case RISCV::PseudoVRELOAD2_M4:
3818 return std::make_pair(2u, 4u);
3819 case RISCV::PseudoVSPILL3_M1:
3820 case RISCV::PseudoVRELOAD3_M1:
3821 return std::make_pair(3u, 1u);
3822 case RISCV::PseudoVSPILL3_M2:
3823 case RISCV::PseudoVRELOAD3_M2:
3824 return std::make_pair(3u, 2u);
3825 case RISCV::PseudoVSPILL4_M1:
3826 case RISCV::PseudoVRELOAD4_M1:
3827 return std::make_pair(4u, 1u);
3828 case RISCV::PseudoVSPILL4_M2:
3829 case RISCV::PseudoVRELOAD4_M2:
3830 return std::make_pair(4u, 2u);
3831 case RISCV::PseudoVSPILL5_M1:
3832 case RISCV::PseudoVRELOAD5_M1:
3833 return std::make_pair(5u, 1u);
3834 case RISCV::PseudoVSPILL6_M1:
3835 case RISCV::PseudoVRELOAD6_M1:
3836 return std::make_pair(6u, 1u);
3837 case RISCV::PseudoVSPILL7_M1:
3838 case RISCV::PseudoVRELOAD7_M1:
3839 return std::make_pair(7u, 1u);
3840 case RISCV::PseudoVSPILL8_M1:
3841 case RISCV::PseudoVRELOAD8_M1:
3842 return std::make_pair(8u, 1u);
3847 return MI.getNumExplicitDefs() == 2 &&
3848 MI.modifiesRegister(RISCV::VL,
nullptr) && !
MI.isInlineAsm();
3852 int16_t MI1FrmOpIdx =
3854 int16_t MI2FrmOpIdx =
3856 if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
3863std::optional<unsigned>
3868 return std::nullopt;
3871 case RISCV::VSLL_VX:
3872 case RISCV::VSRL_VX:
3873 case RISCV::VSRA_VX:
3875 case RISCV::VSSRL_VX:
3876 case RISCV::VSSRA_VX:
3881 case RISCV::VNSRL_WX:
3882 case RISCV::VNSRA_WX:
3884 case RISCV::VNCLIPU_WX:
3885 case RISCV::VNCLIP_WX:
3890 case RISCV::VADD_VX:
3891 case RISCV::VSUB_VX:
3892 case RISCV::VRSUB_VX:
3894 case RISCV::VWADDU_VX:
3895 case RISCV::VWSUBU_VX:
3896 case RISCV::VWADD_VX:
3897 case RISCV::VWSUB_VX:
3898 case RISCV::VWADDU_WX:
3899 case RISCV::VWSUBU_WX:
3900 case RISCV::VWADD_WX:
3901 case RISCV::VWSUB_WX:
3903 case RISCV::VADC_VXM:
3904 case RISCV::VADC_VIM:
3905 case RISCV::VMADC_VXM:
3906 case RISCV::VMADC_VIM:
3907 case RISCV::VMADC_VX:
3908 case RISCV::VSBC_VXM:
3909 case RISCV::VMSBC_VXM:
3910 case RISCV::VMSBC_VX:
3912 case RISCV::VAND_VX:
3914 case RISCV::VXOR_VX:
3916 case RISCV::VMSEQ_VX:
3917 case RISCV::VMSNE_VX:
3918 case RISCV::VMSLTU_VX:
3919 case RISCV::VMSLT_VX:
3920 case RISCV::VMSLEU_VX:
3921 case RISCV::VMSLE_VX:
3922 case RISCV::VMSGTU_VX:
3923 case RISCV::VMSGT_VX:
3925 case RISCV::VMINU_VX:
3926 case RISCV::VMIN_VX:
3927 case RISCV::VMAXU_VX:
3928 case RISCV::VMAX_VX:
3930 case RISCV::VMUL_VX:
3931 case RISCV::VMULH_VX:
3932 case RISCV::VMULHU_VX:
3933 case RISCV::VMULHSU_VX:
3935 case RISCV::VDIVU_VX:
3936 case RISCV::VDIV_VX:
3937 case RISCV::VREMU_VX:
3938 case RISCV::VREM_VX:
3940 case RISCV::VWMUL_VX:
3941 case RISCV::VWMULU_VX:
3942 case RISCV::VWMULSU_VX:
3944 case RISCV::VMACC_VX:
3945 case RISCV::VNMSAC_VX:
3946 case RISCV::VMADD_VX:
3947 case RISCV::VNMSUB_VX:
3949 case RISCV::VWMACCU_VX:
3950 case RISCV::VWMACC_VX:
3951 case RISCV::VWMACCSU_VX:
3952 case RISCV::VWMACCUS_VX:
3954 case RISCV::VMERGE_VXM:
3956 case RISCV::VMV_V_X:
3958 case RISCV::VSADDU_VX:
3959 case RISCV::VSADD_VX:
3960 case RISCV::VSSUBU_VX:
3961 case RISCV::VSSUB_VX:
3963 case RISCV::VAADDU_VX:
3964 case RISCV::VAADD_VX:
3965 case RISCV::VASUBU_VX:
3966 case RISCV::VASUB_VX:
3968 case RISCV::VSMUL_VX:
3970 case RISCV::VMV_S_X:
3971 return 1U << Log2SEW;
3977 RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg, unsigned NumRegs)
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, unsigned CombineOpc, unsigned ZeroReg=0, bool CheckZeroReg=false)
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static ARCCC::CondCode getOppositeBranchCondition(ARCCC::CondCode CC)
Return the inverse of passed condition, i.e. turning COND_E to COND_NE.
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
static GCMetadataPrinterRegistry::Add< ErlangGCPrinter > X("erlang", "erlang-compatible garbage collector")
const HexagonInstrInfo * TII
static M68k::CondCode getCondFromBranchOpc(unsigned BrOpc)
unsigned const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
static GCMetadataPrinterRegistry::Add< OcamlGCMetadataPrinter > Y("ocaml", "ocaml 3.10-compatible collector")
const char LLVMTargetMachineRef TM
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
static bool isRVVWholeLoadStore(unsigned Opcode)
#define CASE_VFMA_CHANGE_OPCODE_VV(OLDOP, NEWOP)
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, unsigned Pattern)
#define RVV_OPC_LMUL_CASE(OPC, INV)
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs)
static unsigned getAddendOperandIdx(unsigned Pattern)
#define CASE_RVV_OPCODE_UNMASK(OP)
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
#define CASE_VFMA_SPLATS(OP)
unsigned getPredicatedOpcode(unsigned Opcode)
static void genShXAddAddShift(MachineInstr &Root, unsigned AddOpIdx, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg)
#define CASE_WIDEOP_OPCODE_LMULS(OP)
#define OPCODE_LMUL_MASK_CASE(OPC)
static bool isFSUB(unsigned Opc)
#define CASE_VMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
#define CASE_RVV_OPCODE(OP)
#define CASE_VFMA_OPCODE_VV(OP)
MachineOutlinerConstructionID
#define CASE_RVV_OPCODE_WIDEN(OP)
#define CASE_VMA_OPCODE_LMULS(OP, TYPE)
static bool isFMUL(unsigned Opc)
static bool getFPPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
#define OPCODE_LMUL_CASE(OPC)
#define CASE_OPERAND_UIMM(NUM)
static bool canCombineShiftIntoShXAdd(const MachineBasicBlock &MBB, const MachineOperand &MO, unsigned OuterShiftAmt)
Utility routine that checks if.
static bool isFADD(unsigned Opc)
#define CASE_FP_WIDEOP_OPCODE_LMULS_MF4(OP)
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVII::VLMUL LMul)
static MachineInstr * canFoldAsPredicatedOp(Register Reg, const MachineRegisterInfo &MRI, const TargetInstrInfo *TII)
Identify instructions that can be folded into a CCMOV instruction, and return the defining instructio...
static bool canCombineFPFusedMultiply(const MachineInstr &Root, const MachineOperand &MO, bool DoRegPressureReduce)
static bool getSHXADDPatterns(const MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns)
static bool getFPFusedMultiplyPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce)
static cl::opt< MachineTraceStrategy > ForceMachineCombinerStrategy("riscv-force-machine-combiner-strategy", cl::Hidden, cl::desc("Force machine combiner to use a specific strategy for machine " "trace metrics evaluation."), cl::init(MachineTraceStrategy::TS_NumStrategies), cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local", "Local strategy."), clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr", "MinInstrCount strategy.")))
static unsigned getSHXADDShiftAmount(unsigned Opc)
#define CASE_RVV_OPCODE_MASK(OP)
#define RVV_OPC_LMUL_MASK_CASE(OPC, INV)
#define CASE_FP_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool isImm(const MachineOperand &MO, MachineRegisterInfo *MRI)
This file defines the SmallVector class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static unsigned getSize(unsigned Kind)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
static DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)
When two instructions are combined into a single instruction we also need to combine the original loc...
This class represents an Operation in the Expression.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LiveInterval - This class represents the liveness of a register, or stack slot.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
static constexpr LocationSize beforeOrAfterPointer()
Any location before or after the base pointer (but still within the underlying object).
TypeSize getValue() const
MCInstBuilder & addReg(unsigned Reg)
Add a new register operand.
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Wrapper class representing physical registers. Should be passed by value.
unsigned pred_size() const
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
Instructions::const_iterator const_instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setStackID(int ObjectIdx, uint8_t ID)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
bool isNonTemporal() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
static MachineOperand CreateImm(int64_t Val)
MachineOperandType getType() const
getType - Returns the MachineOperandType for this operand.
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
@ MO_Immediate
Immediate operand.
@ MO_Register
Register operand.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
std::optional< outliner::OutlinedFunction > getOutliningCandidateInfo(std::vector< outliner::Candidate > &RepeatedSequenceLocs) const override
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
void mulImm(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, uint32_t Amt, MachineInstr::MIFlag Flag) const
Generate code to multiply the value in DestReg by Amt - handles all the common optimizations for this...
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const override
RISCVInstrInfo(RISCVSubtarget &STI)
void copyPhysRegVector(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc, const TargetRegisterClass *RegClass) const
void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const override
const MCInstrDesc & getBrCond(RISCVCC::CondCode CC, bool Imm=false) const
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc) const override
void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const override
const RISCVSubtarget & STI
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
std::optional< unsigned > getInverseOpcode(unsigned Opcode) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
MachineTraceStrategy getMachineCombinerTraceStrategy() const override
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
virtual outliner::InstrType getOutliningTypeImpl(MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MCInst getNop() const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
void finalizeInsInstrs(MachineInstr &Root, unsigned &Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const override
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
CombinerObjective getCombinerObjective(unsigned Pattern) const override
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const override
bool optimizeCondBranch(MachineInstr &MI) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const override
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-...
int getBranchRelaxationScratchFrameIndex() const
bool hasStdExtCOrZca() const
const RISCVRegisterInfo * getRegisterInfo() const override
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
void setRegUsed(Register Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Tell the scavenger a register is used.
Register scavengeRegisterBackwards(const TargetRegisterClass &RC, MachineBasicBlock::iterator To, bool RestoreAfter, int SPAdj, bool AllowSpill=true)
Make a register of the specific register class available from the current position backwards to the p...
Wrapper class representing virtual and physical registers.
constexpr bool isValid() const
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
SlotIndex - An opaque wrapper around machine indexes.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
MI-level Statepoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given statepoint should emit.
StringRef - Represent a constant reference to a string, i.e.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool hasReassociableOperands(const MachineInstr &Inst, const MachineBasicBlock *MBB) const
Return true when \P Inst has reassociable operands in the same \P MBB.
virtual void genAlternativeCodeSequence(MachineInstr &Root, unsigned Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< unsigned > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specifi...
virtual void getReassociateOperandIndices(const MachineInstr &Root, unsigned Pattern, std::array< unsigned, 5 > &OperandIndices) const
The returned array encodes the operand index for each parameter because the operands may be commuted;...
virtual CombinerObjective getCombinerObjective(unsigned Pattern) const
Return the objective of a combiner pattern.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when \P Inst has reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const uint8_t TSFlags
Configurable target specific flags.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Target - Wrapper for Target specific information.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
CondCode getOppositeBranchCondition(CondCode)
unsigned getBrCond(CondCode CC, bool Imm=false)
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
static bool usesMaskPolicy(uint64_t TSFlags)
static bool hasRoundModeOp(uint64_t TSFlags)
static unsigned getVLOpNum(const MCInstrDesc &Desc)
static bool hasVLOp(uint64_t TSFlags)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool isRVVWideningReduction(uint64_t TSFlags)
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
static bool hasSEWOp(uint64_t TSFlags)
static bool isFirstDefTiedToFirstUse(const MCInstrDesc &Desc)
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
@ OPERAND_UIMMLOG2XLEN_NONZERO
@ OPERAND_SIMM12_LSB00000
@ OPERAND_FIRST_RISCV_IMM
@ OPERAND_UIMM10_LSB00_NONZERO
@ OPERAND_SIMM10_LSB0000_NONZERO
static RISCVII::VLMUL getLMul(uint64_t TSFlags)
static unsigned getNF(uint64_t TSFlags)
static bool isTailAgnostic(unsigned VType)
static RISCVII::VLMUL getVLMUL(unsigned VType)
std::pair< unsigned, bool > decodeVLMUL(RISCVII::VLMUL VLMUL)
static bool isValidSEW(unsigned SEW)
void printVType(unsigned VType, raw_ostream &OS)
static unsigned getSEW(unsigned VType)
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2)
std::optional< unsigned > getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW)
int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex)
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
bool isSEXT_W(const MachineInstr &MI)
bool isFaultFirstLoad(const MachineInstr &MI)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
bool isZEXT_B(const MachineInstr &MI)
bool isRVVSpill(const MachineInstr &MI)
bool isZEXT_W(const MachineInstr &MI)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to ...
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
@ TS_Local
Select the trace that contains only the current basic block.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
static const MachineMemOperand::Flags MONontemporalBit1
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A,...
static const MachineMemOperand::Flags MONontemporalBit0
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
unsigned getDeadRegState(bool B)
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
CombinerObjective
The combiner's goal may differ based on which pattern it is attempting to optimize.
unsigned getKillRegState(bool B)
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
unsigned getRenamableRegState(bool B)
DWARFExpression::Operation Op
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Description of the encoding of one expression Op.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Used to describe a register and immediate addition.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.