#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"
static cl::opt<bool> PreferWholeRegisterMove(
    "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
    cl::desc("Prefer whole register move for vector registers."));

static cl::opt<MachineTraceStrategy> ForceMachineCombinerStrategy(
    "riscv-force-machine-combiner-strategy", cl::Hidden,
    cl::desc("Force machine combiner to use a specific strategy for machine "
             "trace metrics evaluation."),
    cl::init(MachineTraceStrategy::TS_NumStrategies),
    cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local",
                          "Local strategy."),
               clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr",
                          "MinInstrCount strategy.")));
#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
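// The .inc files included above are TableGen outputs: each GET_*/GEN_* macro
// enables exactly one #ifdef-guarded section of the generated file. A minimal
// sketch of the pattern, with hypothetical names (not real generated guards):
//
//   // Example.inc (generated)
//   #ifdef GET_EXAMPLE_IMPL
//   void example() { /* generated body */ }
//   #undef GET_EXAMPLE_IMPL
//   #endif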
unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
  // ...
}

unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex,
                                             unsigned &MemBytes) const {
  switch (MI.getOpcode()) {
  // ... (load opcodes set MemBytes; anything else returns 0)
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {
  // ...
}

unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex,
                                            unsigned &MemBytes) const {
  switch (MI.getOpcode()) {
  // ... (store opcodes set MemBytes; anything else returns 0)
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
}
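// Illustrative note: register encodings inside a tuple are consecutive, so a
// forward (ascending) copy clobbers its own source exactly when the
// destination range starts inside the source range, e.g.
//   forwardCopyWillClobberTuple(10, 8, 4) -> true  (v8..v11 copied up by two
//                                                   would overwrite v10/v11
//                                                   before reading them)
//   forwardCopyWillClobberTuple(8, 10, 4) -> false (forward order is safe)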
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
                                   const MachineBasicBlock &MBB,
                                   MachineBasicBlock::const_iterator MBBI,
                                   MachineBasicBlock::const_iterator &DefMBBI,
                                   RISCVII::VLMUL LMul) {
  // ...
  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");
  // ...
  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;
  // Walk backwards from the COPY looking for the vsetvli that covers it.
  // ...
  if (MBBI->isMetaInstruction())
    // ...
  if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
      MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
      MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
    // ...
    unsigned FirstVType = MBBI->getOperand(2).getImm();
    // ...
    if (FirstLMul != LMul)
      // ...
    if (MBBI->getOperand(0).getReg() != RISCV::X0)
      // ...
    if (MBBI->getOperand(1).isImm())
      // ...
    if (MBBI->getOperand(1).getReg() != RISCV::X0)
      // ...
    unsigned VType = MBBI->getOperand(2).getImm();
    // ...
  } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
    // ...
  } else if (MBBI->getNumDefs()) {
    // ...
    if (MBBI->modifiesRegister(RISCV::VL))
      // ...
    if (!MO.isReg() || !MO.isDef())
      // ...
    if (!FoundDef && TRI->regsOverlap(MO.getReg(), SrcReg)) {
      // ...
    }
    if (MO.getReg() != SrcReg)
      // ...
void RISCVInstrInfo::copyPhysRegVector(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MBBI,
                                       const DebugLoc &DL, MCRegister DstReg,
                                       MCRegister SrcReg, bool KillSrc,
                                       unsigned Opc, unsigned NF) const {
  // ...
  unsigned SubRegIdx;
  unsigned VVOpc, VIOpc;
  switch (LMul) {
  // ...
  case RISCVII::LMUL_1:
    SubRegIdx = RISCV::sub_vrm1_0;
    VVOpc = RISCV::PseudoVMV_V_V_M1;
    VIOpc = RISCV::PseudoVMV_V_I_M1;
    break;
  case RISCVII::LMUL_2:
    SubRegIdx = RISCV::sub_vrm2_0;
    VVOpc = RISCV::PseudoVMV_V_V_M2;
    VIOpc = RISCV::PseudoVMV_V_I_M2;
    break;
  case RISCVII::LMUL_4:
    SubRegIdx = RISCV::sub_vrm4_0;
    VVOpc = RISCV::PseudoVMV_V_V_M4;
    VIOpc = RISCV::PseudoVMV_V_I_M4;
    break;
  case RISCVII::LMUL_8:
    SubRegIdx = RISCV::sub_vrm1_0;
    VVOpc = RISCV::PseudoVMV_V_V_M8;
    VIOpc = RISCV::PseudoVMV_V_I_M8;
    break;
  }

  bool UseVMV_V_V = false;
  bool UseVMV_V_I = false;
  // ...
  if (DefMBBI->getOpcode() == VIOpc) {
    // ...
  }
  // ...
  MIB = MIB.add(DefMBBI->getOperand(2));
  // ...
  int I = 0, End = NF, Incr = 1;
  unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
  unsigned DstEncoding = TRI->getEncodingValue(DstReg);
  // ...
  assert(!Fractional && "Fractional LMUL is impossible here.");
  // ...
  for (; I != End; I += Incr) {
    // ...
    MIB = MIB.add(DefMBBI->getOperand(2));
    // ...
    MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
                     // ...
  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::GPRPF64RegClass.contains(DstReg, SrcReg)) {
    // Copy a register pair as two 32-bit moves through the halves.
    // ...
            TRI->getSubReg(DstReg, RISCV::sub_32))
    // ...
            TRI->getSubReg(DstReg, RISCV::sub_32_hi))
        .addReg(TRI->getSubReg(SrcReg, RISCV::sub_32_hi),
    // ...
  }

  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {
    // Read a vector CSR into a GPR.
    // ...
        .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)
    // ...
  }

  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    unsigned Opc;
    if (STI.hasStdExtZfh()) {
      Opc = RISCV::FSGNJ_H;
    } else {
      assert((STI.hasStdExtZfhmin() || STI.hasStdExtZfbfmin()) &&
             "Unexpected extensions");
      // Zfhmin/Zfbfmin have no FSGNJ_H, so copy through the FPR32
      // super-registers instead.
      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;
    }
    // ...
  }

  if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::FPR32RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    // ...
  }

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR32RegClass.contains(SrcReg)) {
    // ...
  }

  if (RISCV::FPR64RegClass.contains(DstReg) &&
      RISCV::GPRRegClass.contains(SrcReg)) {
    // ...
  }

  if (RISCV::GPRRegClass.contains(DstReg) &&
      RISCV::FPR64RegClass.contains(SrcReg)) {
    // ...
  }

  if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
    // ...
  }

  if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
    // ...
  }
void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator MBBI,
                                         Register SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI,
                                         Register VReg) const {
  // ...
  unsigned Opcode;
  bool IsScalableVector = true;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::SW
                                                             : RISCV::SD;
    IsScalableVector = false;
  } else if (RISCV::GPRPF64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxSD;
    IsScalableVector = false;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    // ...
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    // ...
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    // ...
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    // ...
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    // ...
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    // ...
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    // ...
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
  // ...

  if (IsScalableVector) {
void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator MBBI,
                                          Register DstReg, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI,
                                          Register VReg) const {
  // ...
  unsigned Opcode;
  bool IsScalableVector = true;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ? RISCV::LW
                                                             : RISCV::LD;
    IsScalableVector = false;
  } else if (RISCV::GPRPF64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxLD;
    IsScalableVector = false;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    // ...
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    // ...
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    // ...
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    // ...
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    // ...
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    // ...
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    // ...
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
  // ...
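// Illustrative note: scalable (vector) spills and reloads cannot use a fixed
// byte size because VLEN is unknown at compile time, so their frame objects
// use the scalable-vector stack ID. The segment-tuple classes (VRN<N>M<L>)
// select PseudoVSPILL<N>_M<L> / PseudoVRELOAD<N>_M<L>, which are later
// expanded into N whole-register-group accesses.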
  if (IsScalableVector) {
    // ...
  }
}

MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
    // ...
  if (Ops.size() != 1 || Ops[0] != 1)
    // ...

  unsigned LoadOpc;
  switch (MI.getOpcode()) {
  // ...
    LoadOpc = RISCV::LWU;
    // ...
    LoadOpc = RISCV::LBU;
    // ...
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
    LoadOpc = RISCV::LHU;
    // ...
  }

  // Fold the extension of a reloaded value into an extending load.
  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(LoadOpc),
                 // ...
void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register DstReg, uint64_t Val,
                            MachineInstr::MIFlag Flag, bool DstRenamable,
                            bool DstIsDead) const {
  // ...
  bool SrcRenamable = false;
  // ...
  bool LastItem = ++Num == Seq.size();
  // ...
  switch (Inst.getOpndKind()) {
  // ...
        .addReg(SrcReg, SrcRegState)
  // ...
        .addReg(SrcReg, SrcRegState)
        .addReg(SrcReg, SrcRegState)
  // ...
        .addReg(SrcReg, SrcRegState)
  // ...
  }
  // ...
  SrcRenamable = DstRenamable;
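// Illustrative sketch (standalone; SignExtend64 comes from
// llvm/Support/MathExtras.h): the materialization sequences built above rest
// on the LUI+ADDI split of a 32-bit value. Because the ADDI immediate is
// sign-extended, the LUI part must absorb a carry whenever bit 11 of the
// constant is set.
static std::pair<int64_t, int64_t> exampleSplitImm32(int64_t Imm) {
  int64_t Lo12 = SignExtend64<12>(Imm);          // ADDI imm, in [-2048, 2047]
  int64_t Hi20 = ((Imm - Lo12) >> 12) & 0xFFFFF; // LUI imm (upper 20 bits)
  return {Hi20, Lo12};
}
// e.g. Imm = 0x12345FFF -> Hi20 = 0x12346, Lo12 = -1.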
900 "Unknown conditional branch");
bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  // ...
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    // ...

  // Count the number of terminators and find the first unconditional or
  // indirect branch.
  int NumTerminators = 0;
  for (auto J = I.getReverse();
       J != MBB.rend() && isUnpredicatedTerminator(*J);
       // ...
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      // ...
    }

  // If AllowModify is true, erase any terminators after the first
  // unconditional or indirect branch.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      // ...
    }
    I = FirstUncondOrIndirectBr;
  }

  // Blocks that end in an indirect branch cannot be analyzed.
  if (I->getDesc().isIndirectBranch())
    // ...
  if (I->isPreISelOpcode())
    // ...
  if (NumTerminators > 2)
    // ...

  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    // ...
  }
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    // ...
  }
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
    // ...
unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  // ...
  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    // ...
  I->eraseFromParent();
  // ...
  if (!I->getDesc().isConditionalBranch())
    // ...
  I->eraseFromParent();
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  // ...
         "RISC-V branch conditions have two components!");
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  // ...
  assert(RestoreBB.empty() &&
         "restore block should be inserted for restoring clobbered registers");
  // ...
  if (!isInt<32>(BrOffset))
    report_fatal_error(
        "Branch offsets outside of the signed 32-bit range not supported");
  // ...
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // ...
  if (TmpGPR != RISCV::NoRegister)
    // ...
    // No register could be scavenged: fall back to s11 (X27) and spill it
    // around the branch.
    TmpGPR = RISCV::X27;
    // ...
    if (FrameIndex == -1)
      // ...
    TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
                             // ...
    MI.getOperand(1).setMBB(&RestoreBB);
    // ...
    TRI->eliminateFrameIndex(RestoreBB.back(),
                             // ...
  MRI.replaceRegWith(ScratchReg, TmpGPR);
  MRI.clearVirtRegs();
  assert((Cond.size() == 3) && "Invalid branch condition!");
  auto isLoadImm = [](const MachineInstr *MI, int64_t &Imm) -> bool {
    if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
        MI->getOperand(1).getReg() == RISCV::X0) {
      Imm = MI->getOperand(2).getImm();
      return true;
    }
    return false;
  };
  // Either a load-immediate instruction or X0.
  auto isFromLoadImm = [&](const MachineOperand &Op, int64_t &Imm) -> bool {
    // ...
    if (Reg == RISCV::X0) {
      // ...
    }
    if (!Reg.isVirtual())
      return false;
    return isLoadImm(MRI.getVRegDef(Op.getReg()), Imm);
  };
  // ...
  auto searchConst = [&](int64_t C1) -> Register {
    // ...
    auto DefC1 = std::find_if(++II, E, [&](const MachineInstr &I) -> bool {
      int64_t Imm;
      return isLoadImm(&I, Imm) && Imm == C1;
    });
    // ...
      return DefC1->getOperand(0).getReg();
    // ...
  };

  bool Modify = false;
  // ...
  if (isFromLoadImm(LHS, C0) && MRI.hasOneUse(LHS.getReg())) {
    // ...
    if (Register RegZ = searchConst(C0 + 1)) {
      // ...
      MRI.clearKillFlags(RegZ);
      // ...
    }
  } else if (isFromLoadImm(RHS, C0) && MRI.hasOneUse(RHS.getReg())) {
    // ...
    if (Register RegZ = searchConst(C0 - 1)) {
      // ...
      MRI.clearKillFlags(RegZ);
      // ...
    }
  }
  // ...
  MI.eraseFromParent();
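// Illustrative note: the constant search above exploits integer ordering
// identities; for any integer x and constant C,
//   x >= C  <=>  x > C - 1      and      x < C  <=>  x <= C - 1,
// so a branch comparing against C can be rewritten to reuse a register that
// already holds C + 1 or C - 1, switching between strict and non-strict
// comparison forms instead of materializing a new constant.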
MachineBasicBlock *
RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  // The branch target is always the last operand.
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
}
bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  switch (BranchOp) {
  // ... (conditional branches)
    return isIntN(13, BrOffset);
  case RISCV::JAL:
  case RISCV::PseudoBR:
    return isIntN(21, BrOffset);
  case RISCV::PseudoJump:
    // ...
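// Illustrative sketch, standalone: conditional branches (BEQ etc.) encode a
// 13-bit signed byte offset and JAL/PseudoBR a 21-bit one, so isIntN reduces
// to a signed range test:
static bool exampleFitsSignedBits(int64_t Off, unsigned N) {
  return Off >= -(int64_t(1) << (N - 1)) && Off < (int64_t(1) << (N - 1));
}
// exampleFitsSignedBits(4094, 13) -> true; exampleFitsSignedBits(4096, 13) -> false.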
unsigned getPredicatedOpcode(unsigned Opcode) {
  switch (Opcode) {
  case RISCV::ADD:
    return RISCV::PseudoCCADD;
  case RISCV::SUB:
    return RISCV::PseudoCCSUB;
  case RISCV::SLL:
    return RISCV::PseudoCCSLL;
  case RISCV::SRL:
    return RISCV::PseudoCCSRL;
  case RISCV::SRA:
    return RISCV::PseudoCCSRA;
  case RISCV::AND:
    return RISCV::PseudoCCAND;
  case RISCV::OR:
    return RISCV::PseudoCCOR;
  case RISCV::XOR:
    return RISCV::PseudoCCXOR;

  case RISCV::ADDI:
    return RISCV::PseudoCCADDI;
  case RISCV::SLLI:
    return RISCV::PseudoCCSLLI;
  case RISCV::SRLI:
    return RISCV::PseudoCCSRLI;
  case RISCV::SRAI:
    return RISCV::PseudoCCSRAI;
  case RISCV::ANDI:
    return RISCV::PseudoCCANDI;
  case RISCV::ORI:
    return RISCV::PseudoCCORI;
  case RISCV::XORI:
    return RISCV::PseudoCCXORI;

  case RISCV::ADDW:
    return RISCV::PseudoCCADDW;
  case RISCV::SUBW:
    return RISCV::PseudoCCSUBW;
  case RISCV::SLLW:
    return RISCV::PseudoCCSLLW;
  case RISCV::SRLW:
    return RISCV::PseudoCCSRLW;
  case RISCV::SRAW:
    return RISCV::PseudoCCSRAW;

  case RISCV::ADDIW:
    return RISCV::PseudoCCADDIW;
  case RISCV::SLLIW:
    return RISCV::PseudoCCSLLIW;
  case RISCV::SRLIW:
    return RISCV::PseudoCCSRLIW;
  case RISCV::SRAIW:
    return RISCV::PseudoCCSRAIW;
  }

  return RISCV::INSTRUCTION_LIST_END;
}
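// Illustrative note: the PseudoCC* opcodes model a conditionally executed ALU
// op for cores with the short-forward-branch optimization: conceptually
// "rd = falseval; if (cond) rd = OP(a, b);", later emitted as a conditional
// branch over a single instruction.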
static MachineInstr *canFoldAsPredicatedOp(Register Reg,
                                           const MachineRegisterInfo &MRI,
                                           const TargetInstrInfo *TII) {
  if (!Reg.isVirtual())
    return nullptr;
  if (!MRI.hasOneNonDBGUse(Reg))
    return nullptr;
  // ...
  if (MI->getOpcode() == RISCV::ADDI && MI->getOperand(1).isReg() &&
      MI->getOperand(1).getReg() == RISCV::X0)
    // ...
  // Check that the operands are safe to move across the CCMOV.
  for (const MachineOperand &MO : MI->operands()) {
    // ...
    if (MO.isFI() || MO.isCPI() || MO.isJTI())
      return nullptr;
    // ...
    if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))
      return nullptr;
    // ...
  }

  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(nullptr, DontMoveAcrossStores))
    return nullptr;
bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   unsigned &TrueOp, unsigned &FalseOp,
                                   bool &Optimizable) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  // ...
  Cond.push_back(MI.getOperand(1));
  Cond.push_back(MI.getOperand(2));
  Cond.push_back(MI.getOperand(3));
  // ...
  Optimizable = STI.hasShortForwardBranchOpt();
MachineInstr *
RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
                               SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                               bool PreferFalse) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  if (!STI.hasShortForwardBranchOpt())
    // ...
  bool Invert = !DefMI;
  // ...
  Register DestReg = MI.getOperand(0).getReg();
  // ...
  if (!MRI.constrainRegClass(DestReg, PreviousClass))
    // ...
  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");
  // ...
  // Copy the condition operands onto the predicated instruction.
  NewMI.add(MI.getOperand(1));
  NewMI.add(MI.getOperand(2));
  // ...
  NewMI.add(FalseReg);
unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  if (MI.isMetaInstruction())
    return 0;

  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::INLINEASM ||
      Opcode == TargetOpcode::INLINEASM_BR) {
    // ...
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                              *TM.getMCAsmInfo());
  }

  if (!MI.memoperands_empty()) {
    // ...
    if (ST.hasStdExtCOrZca() && ST.enableRVCHintInstrs()) {
      if (isCompressibleInst(MI, STI))
        // ...
    }
  }

  if (Opcode == TargetOpcode::BUNDLE)
    return getInstBundleLength(MI);
  // ...
  if (MI.getParent() && MI.getParent()->getParent()) {
    if (isCompressibleInst(MI, STI))
      // ...
  }

  switch (Opcode) {
  case TargetOpcode::STACKMAP:
    // ...
  case TargetOpcode::PATCHPOINT:
    // ...
  case TargetOpcode::STATEPOINT:
    // ...
unsigned RISCVInstrInfo::getInstBundleLength(const MachineInstr &MI) const {
  unsigned Size = 0;
  // ...
  while (++I != E && I->isInsideBundle()) {
    assert(!I->isBundle() && "No nested bundle!");
    Size += getInstSizeInBytes(*I);
  }
  return Size;
}
bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  const unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
  case RISCV::ADDI:
  case RISCV::ORI:
  case RISCV::XORI:
    return (MI.getOperand(1).isReg() &&
            MI.getOperand(1).getReg() == RISCV::X0) ||
           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
  }
  return MI.isAsCheapAsAMove();
}
std::optional<DestSourcePair>
RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  // ...
  switch (MI.getOpcode()) {
  default:
    break;
  case RISCV::ADDI:
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0)
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  }
  return std::nullopt;
}
MachineTraceStrategy RISCVInstrInfo::getMachineCombinerTraceStrategy() const {
  // ...
  const auto &SchedModel = STI.getSchedModel();
  return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
             ? MachineTraceStrategy::TS_MinInstrCount
             : MachineTraceStrategy::TS_Local;
  // ...
}

// finalizeInsInstrs: propagate the rounding-mode (frm) operand to newly
// inserted instructions.
  // ...
               RISCV::OpName::frm) < 0;
  // ...
         "New instructions require FRM whereas the old one does not have it");
  // ...
  for (auto *NewMI : InsInstrs) {
    assert(static_cast<unsigned>(RISCV::getNamedOperandIdx(
               NewMI->getOpcode(), RISCV::OpName::frm)) ==
               NewMI->getNumOperands() &&
           "Instruction has unexpected number of operands");
bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                            bool &Commuted) const {
  // ...
  unsigned OperandIdx = Commuted ? 2 : 1;
  // ...
  int16_t InstFrmOpIdx =
      RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
  int16_t SiblingFrmOpIdx =
      RISCV::getNamedOperandIdx(Sibling->getOpcode(), RISCV::OpName::frm);
  // ...
  return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
         // ...
}

bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
                                                 bool Invert) const {
  // ...
    Opc = *InverseOpcode;
std::optional<unsigned>
RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::FADD_H:
    return RISCV::FSUB_H;
  case RISCV::FADD_S:
    return RISCV::FSUB_S;
  case RISCV::FADD_D:
    return RISCV::FSUB_D;
  case RISCV::FSUB_H:
    return RISCV::FADD_H;
  case RISCV::FSUB_S:
    return RISCV::FADD_S;
  case RISCV::FSUB_D:
    return RISCV::FADD_D;
  // ...
  }
}
static bool canCombineFPFusedMultiply(const MachineInstr &Root,
                                      const MachineOperand &MO,
                                      bool DoRegPressureReduce) {
  // ...
  if (DoRegPressureReduce &&
      !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;
  // ...
}

static bool getFPFusedMultiplyPatterns(
    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
    bool DoRegPressureReduce) {
  unsigned Opc = Root.getOpcode();
  bool IsFAdd = isFADD(Opc);
  if (!IsFAdd && !isFSUB(Opc))
    return false;
  if (canCombineFPFusedMultiply(Root, Root.getOperand(1),
                                DoRegPressureReduce)) {
    // ...
  }
  if (canCombineFPFusedMultiply(Root, Root.getOperand(2),
                                DoRegPressureReduce)) {
    // ...
  }
  // ...
}

static bool getFPPatterns(MachineInstr &Root,
                          SmallVectorImpl<MachineCombinerPattern> &Patterns,
                          bool DoRegPressureReduce) {
  // ...
}

bool RISCVInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
    bool DoRegPressureReduce) const {
  // ...
                       DoRegPressureReduce);
}

static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc,
                                         MachineCombinerPattern Pattern) {
  // ...
    return RISCV::FMADD_H;
  // ...
    return RISCV::FMADD_S;
  // ...
    return RISCV::FMADD_D;
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev,
                                   MachineCombinerPattern Pattern,
                                   SmallVectorImpl<MachineInstr *> &InsInstrs,
                                   SmallVectorImpl<MachineInstr *> &DelInstrs) {
  // ...
  bool Mul1IsKill = Mul1.isKill();
  bool Mul2IsKill = Mul2.isKill();
  bool AddendIsKill = Addend.isKill();
  // ...
  BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
  // ...
}

void RISCVInstrInfo::genAlternativeCodeSequence(
    // ...
                          DelInstrs, InstrIdxForVirtReg);
bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
                                       StringRef &ErrInfo) const {
  // ...
    unsigned OpType = Operand.OperandType;
    // ...
      int64_t Imm = MO.getImm();
      bool Ok = true;
      switch (OpType) {
      // ...
#define CASE_OPERAND_UIMM(NUM)                                                 \
  case RISCVOp::OPERAND_UIMM##NUM:                                             \
    Ok = isUInt<NUM>(Imm);                                                     \
    break;
      // ...
        Ok = isShiftedUInt<1, 1>(Imm);
      // ...
        Ok = isShiftedUInt<5, 2>(Imm);
      // ...
        Ok = isShiftedUInt<6, 2>(Imm);
      // ...
        Ok = isShiftedUInt<5, 3>(Imm);
      // ...
        Ok = isUInt<8>(Imm) && Imm >= 32;
      // ...
        Ok = isShiftedUInt<6, 3>(Imm);
      // ...
        Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
      // ...
        Ok = isShiftedUInt<8, 2>(Imm) && (Imm != 0);
      // ...
        Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
      // ...
        Ok = Imm != 0 && isInt<6>(Imm);
      // ...
        Ok = isUInt<10>(Imm);
      // ...
        Ok = isUInt<11>(Imm);
      // ...
        Ok = isInt<12>(Imm);
      // ...
        Ok = isShiftedInt<7, 5>(Imm);
      // ...
        Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
      // ...
        Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
        Ok = Ok && Imm != 0;
      // ...
        Ok = (isUInt<5>(Imm) && Imm != 0) ||
             (Imm >= 0xfffe0 && Imm <= 0xfffff);
      // ...
        Ok = Imm >= 0 && Imm <= 10;
      // ...
        Ok = Imm >= 0 && Imm <= 7;
      // ...
        Ok = Imm >= 1 && Imm <= 10;
      // ...
        Ok = Imm >= 2 && Imm <= 14;
      // ...
      }
      if (!Ok) {
        ErrInfo = "Invalid immediate";
      if (!Op.isImm() && !Op.isReg()) {
        ErrInfo = "Invalid operand type for VL operand";
        return false;
      }
      if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
        // ...
        auto *RC = MRI.getRegClass(Op.getReg());
        if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
          ErrInfo = "Invalid register class for VL operand";
          return false;
        }
      }
      // ...
        ErrInfo = "VL operand w/o SEW operand?";
        return false;
      // ...
      if (!MI.getOperand(OpIdx).isImm()) {
        ErrInfo = "SEW value expected to be an immediate";
        return false;
      }
      uint64_t Log2SEW = MI.getOperand(OpIdx).getImm();
      // ...
        ErrInfo = "Unexpected SEW value";
        return false;
      // ...
      unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
      // ...
        ErrInfo = "Unexpected SEW value";
        return false;
      // ...
      if (!MI.getOperand(OpIdx).isImm()) {
        ErrInfo = "Policy operand expected to be an immediate";
        return false;
      }
      uint64_t Policy = MI.getOperand(OpIdx).getImm();
      // ...
        ErrInfo = "Invalid Policy Value";
        return false;
      // ...
        ErrInfo = "policy operand w/o VL operand?";
        return false;
      // ...
      if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
        ErrInfo = "policy operand w/o tied operand?";
        return false;
      }
  int64_t NewOffset = OldOffset + Disp;
  if (!STI.is64Bit())
    NewOffset = SignExtend64<32>(NewOffset);
  // The combined offset must still fit the 12-bit signed immediate.
  if (!isInt<12>(NewOffset))
    return false;
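// Illustrative sketch, standalone: a displacement can only be folded into a
// RISC-V load/store when the combined value still fits the 12-bit signed
// immediate field:
static bool exampleOffsetFoldable(int64_t OldOffset, int64_t Disp) {
  int64_t NewOffset = OldOffset + Disp;
  return NewOffset >= -2048 && NewOffset <= 2047; // isInt<12>(NewOffset)
}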
2186 "Addressing mode not supported for folding");
bool RISCVInstrInfo::getMemOperandsWithOffsetWidth(
    const MachineInstr &LdSt, SmallVectorImpl<const MachineOperand *> &BaseOps,
    int64_t &Offset, bool &OffsetIsScalable, unsigned &Width,
    const TargetRegisterInfo *TRI) const {
  // ...
  OffsetIsScalable = false;
  // ...
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1,
                                  ArrayRef<const MachineOperand *> BaseOps1,
                                  const MachineInstr &MI2,
                                  ArrayRef<const MachineOperand *> BaseOps2) {
  // ...
  if (BaseOps1.front()->isIdenticalTo(*BaseOps2.front()))
    return true;
  // ...
  if (MO1->getAddrSpace() != MO2->getAddrSpace())
    return false;

  auto Base1 = MO1->getValue();
  auto Base2 = MO2->getValue();
  if (!Base1 || !Base2)
    return false;
  // ...
  if (isa<UndefValue>(Base1) || isa<UndefValue>(Base2))
    return false;

  return Base1 == Base2;
}
bool RISCVInstrInfo::shouldClusterMemOps(
    ArrayRef<const MachineOperand *> BaseOps1, int64_t Offset1,
    bool OffsetIsScalable1, ArrayRef<const MachineOperand *> BaseOps2,
    int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize,
    unsigned NumBytes) const {
  // Memory operations without the same base pointer must not be clustered.
  if (!BaseOps1.empty() && !BaseOps2.empty()) {
    // ...
  } else if (!BaseOps1.empty() || !BaseOps2.empty()) {
    // If only one has a base operand, the bases cannot match.
    return false;
  }
  // ...
  return ClusterSize <= 4;
}
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned int WidthA = 0, WidthB = 0;
  // ...
    int LowOffset = std::min(OffsetA, OffsetB);
    int HighOffset = std::max(OffsetA, OffsetB);
    int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
    if (LowOffset + LowWidth <= HighOffset)
      return true;
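// Illustrative sketch, standalone: two fixed-width accesses off the same base
// are disjoint when the lower one ends at or before the higher one begins:
static bool exampleAccessesDisjoint(int64_t OffA, unsigned WidthA,
                                    int64_t OffB, unsigned WidthB) {
  int64_t Low = std::min(OffA, OffB);
  unsigned LowWidth = (Low == OffA) ? WidthA : WidthB;
  return Low + LowWidth <= std::max(OffA, OffB);
}
// e.g. (OffA=0, WidthA=4) and (OffB=4, WidthB=4) do not overlap.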
std::pair<unsigned, unsigned>
RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
  return std::make_pair(TF & Mask, TF & ~Mask);
}
ArrayRef<std::pair<unsigned, const char *>>
RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace RISCVII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_PLT, "riscv-plt"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
  // Functions the linker may deduplicate must not be outlined from.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;
  // ...

bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                            unsigned &Flags) const {
std::optional<outliner::OutlinedFunction>
RISCVInstrInfo::getOutliningCandidateInfo(
    std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {
  // Drop candidates where X5 (t0), the register holding the outlined call's
  // return address, is live across the sequence.
  // ...
    return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
  // ...
  if (RepeatedSequenceLocs.size() < 2)
    return std::nullopt;

  unsigned SequenceSize = 0;

  auto I = RepeatedSequenceLocs[0].front();
  auto E = std::next(RepeatedSequenceLocs[0].back());
  // ...
  // call t0, <function> is 8 bytes per call site.
  unsigned CallOverhead = 8;
  for (auto &C : RepeatedSequenceLocs)
    // ...

  // jr t0 adds one 4-byte instruction to the outlined frame.
  unsigned FrameOverhead = 4;
  if (RepeatedSequenceLocs[0]
          // ...
          ->getSubtarget<RISCVSubtarget>()
outliner::InstrType
RISCVInstrInfo::getOutliningTypeImpl(MachineBasicBlock::iterator &MBBI,
                                     unsigned Flags) const {
  // ...
  const auto &F = MI.getMF()->getFunction();
  // ...
  if (MI.isCFIInstruction())
    // ...

  // Anything that writes X5 (t0) clobbers the outlined return address.
  if (MI.modifiesRegister(RISCV::X5, TRI) ||
      MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
    // ...

  // Check the operands for anything position-dependent.
  for (const auto &MO : MI.operands()) {
    // ...
    if (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
        // ...
  // Strip any CFI instructions out of the outlined body.
  bool Changed = true;
  // ...
  for (; I != E; ++I) {
    if (I->isCFIInstruction()) {
      I->removeFromParent();
      // ...
    }
  }
  // ...
          .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
                            // ...
std::optional<RegImmPair>
RISCVInstrInfo::isAddImmediate(const MachineInstr &MI, Register Reg) const {
  // ...
    return std::nullopt;

  if (MI.getOpcode() == RISCV::ADDI && MI.getOperand(1).isReg() &&
      MI.getOperand(2).isImm())
    return RegImmPair{MI.getOperand(1).getReg(), MI.getOperand(2).getImm()};

  return std::nullopt;
}
std::string RISCVInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {
  // Print a generic comment for this operand if there is one.
  std::string GenericComment =
      TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
  if (!GenericComment.empty())
    return GenericComment;
  // ...
    return std::string();

  std::string Comment;
  // ...
  if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
       MI.getOpcode() == RISCV::PseudoVSETVLI ||
       MI.getOpcode() == RISCV::PseudoVSETIVLI ||
       MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
      // ...
    unsigned Imm = MI.getOperand(OpIdx).getImm();
    // ...
    unsigned Log2SEW = MI.getOperand(OpIdx).getImm();
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
    // ...
    unsigned Policy = MI.getOperand(OpIdx).getImm();
    // ...
           "Invalid Policy Value");
#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL)                                \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)                                    \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1):                                       \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)                                   \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2):                                      \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)                                   \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4):                                      \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)

#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)                                       \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8):                                      \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)

#define CASE_VFMA_SPLATS(OP)                                                   \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VFPR16):                                      \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VFPR32):                                 \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VFPR64)
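// Illustrative expansion of the macros above: CASE_VFMA_SPLATS(FMADD) pastes
// OP/TYPE/LMUL together and chains case labels, yielding
//
//   case RISCV::PseudoVFMADD_VFPR16_MF4:
//   case RISCV::PseudoVFMADD_VFPR16_MF2:
//   ...
//   case RISCV::PseudoVFMADD_VFPR64_M8
//
// so a single macro covers every (element type, LMUL) combination.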
bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                           unsigned &SrcOpIdx1,
                                           unsigned &SrcOpIdx2) const {
  const MCInstrDesc &Desc = MI.getDesc();
  if (!Desc.isCommutable())
    return false;

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ:
    // Operand 2 (rs1) must not be X0, since X0 is not valid as the tied
    // in/out operand after commuting.
    if (MI.getOperand(2).getReg() == RISCV::X0)
      return false;
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  case RISCV::TH_MULA:
  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULS:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:
    // Operands 2 and 3 are commutable.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
  case RISCV::PseudoCCMOVGPR:
    // Operands 4 and 5 are commutable.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
  // ...
    // If the tail policy is undisturbed we can't commute.
    if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
      return false;
    // ...
    unsigned CommutableOpIdx1 = 1;
    unsigned CommutableOpIdx2 = 3;
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                              CommutableOpIdx2))
      return false;
    // ...
    // If the tail policy is undisturbed we can't commute.
    if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
      return false;
    // ...
    if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
      return false;
    if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
      return false;
    // ...
    if (SrcOpIdx1 != CommuteAnyOperandIndex &&
        SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 &&
        SrcOpIdx2 != 1)
      return false;
    // ...
    if (SrcOpIdx1 == CommuteAnyOperandIndex ||
        SrcOpIdx2 == CommuteAnyOperandIndex) {
      // ...
      unsigned CommutableOpIdx1 = SrcOpIdx1;
      if (SrcOpIdx1 == SrcOpIdx2) {
        // Neither operand is fixed: use the tied source as one of them.
        CommutableOpIdx1 = 1;
      } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
        // Only one of the operands is not fixed.
        CommutableOpIdx1 = SrcOpIdx2;
      }
      // ...
      unsigned CommutableOpIdx2;
      if (CommutableOpIdx1 != 1) {
        // If we haven't already used the tied source, we must use it now.
        CommutableOpIdx2 = 1;
      } else {
        Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();
        // ...
        if (Op1Reg != MI.getOperand(2).getReg())
          CommutableOpIdx2 = 2;
        else
          CommutableOpIdx2 = 3;
      }
      // ...
      if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                                CommutableOpIdx2))
        return false;
#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL)               \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL:                                \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL;                             \
    break;

#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)                   \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)                      \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                           \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VFPR16)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VFPR32)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VFPR64)
MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                     bool NewMI,
                                                     unsigned OpIdx1,
                                                     unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
                                                            : RISCV::TH_MVEQZ));
    // ...
  }
  case RISCV::PseudoCCMOVGPR: {
    // CCMOV is commuted by inverting the condition (CC computed above).
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(CC);
    // ...
  }
  // ...
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
    // ...
    unsigned Opc;
    switch (MI.getOpcode()) {
    // ...
    }
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    // ...
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    // If one of the commuted operands is the addend, the opcode must change.
    if (OpIdx1 == 3 || OpIdx2 == 3) {
      // ...
      unsigned Opc;
      switch (MI.getOpcode()) {
      // ...
      }
      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
#undef CASE_VFMA_CHANGE_OPCODE_LMULS
#undef CASE_VFMA_CHANGE_OPCODE_COMMON
#undef CASE_VFMA_SPLATS
#undef CASE_VFMA_OPCODE_LMULS
#undef CASE_VFMA_OPCODE_COMMON
#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL)                                    \
  RISCV::PseudoV##OP##_##LMUL##_TIED

#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP)                                       \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF4):                                          \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2):                                     \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M1):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M2):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_OPCODE_LMULS(OP)                                           \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF8):                                          \
  case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)

#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL)                             \
  case RISCV::PseudoV##OP##_##LMUL##_TIED:                                     \
    NewOpc = RISCV::PseudoV##OP##_##LMUL;                                      \
    break;

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)                                \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
                                                    LiveVariables *LV,
                                                    LiveIntervals *LIS) const {
  MachineInstrBuilder MIB;
  switch (MI.getOpcode()) {
  // ... (widening FP _TIED pseudos)
    assert(MI.getNumExplicitOperands() == 7 &&
           "Expect 7 explicit operands rd, rs2, rs1, rm, vl, sew, policy");
    // ...
    unsigned NewOpc;
    switch (MI.getOpcode()) {
    // ...
    }
    // ...
              .add(MI.getOperand(0))
              // ...
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4))
              .add(MI.getOperand(5))
              .add(MI.getOperand(6));
    // ...

  // ... (widening integer _TIED pseudos)
    assert(MI.getNumExplicitOperands() == 6);
    if ((MI.getOperand(5).getImm() & 1) == 0)
      // ...
    unsigned NewOpc;
    switch (MI.getOpcode()) {
    // ...
    }
    // ...
              .add(MI.getOperand(0))
              // ...
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4))
              .add(MI.getOperand(5));
    // ...
  }

  // Transfer operand kill info to the new instruction.
  unsigned NumOps = MI.getNumOperands();
  for (unsigned I = 1; I < NumOps; ++I) {
    MachineOperand &Op = MI.getOperand(I);
    if (Op.isReg() && Op.isKill())
      // ...
  }
  // ...
  if (MI.getOperand(0).isEarlyClobber()) {
    // ...
    if (S->end == Idx.getRegSlot(true))
      S->end = Idx.getRegSlot();
  }
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_WIDEOP_OPCODE_LMULS
#undef CASE_WIDEOP_OPCODE_COMMON
void RISCVInstrInfo::getVLENFactoredAmount(MachineFunction &MF,
                                           MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator II,
                                           const DebugLoc &DL, Register DestReg,
                                           int64_t Amount,
                                           MachineInstr::MIFlag Flag) const {
  assert(Amount > 0 && "There is no need to get a VLEN-scaled value of 0.");
  assert(Amount % 8 == 0 &&
         "The stack must be reserved in multiples of one vector register.");
  // ...
  int64_t NumOfVReg = Amount / 8;
  // ...
  assert(isInt<32>(NumOfVReg) &&
         "Expect the number of vector registers to fit in 32 bits.");
  if (llvm::has_single_bit<uint32_t>(NumOfVReg)) {
    uint32_t ShiftAmount = Log2_32(NumOfVReg);
    if (ShiftAmount == 0)
      return;
    // ...
  } else if (STI.hasStdExtZba() &&
             // ...
    // With Zba, SHnADD + SLLI multiplies by 2^n * {3, 5, 9} without MUL.
    unsigned Opc;
    uint32_t ShiftAmount;
    if (NumOfVReg % 9 == 0) {
      Opc = RISCV::SH3ADD;
      ShiftAmount = Log2_64(NumOfVReg / 9);
    } else if (NumOfVReg % 5 == 0) {
      Opc = RISCV::SH2ADD;
      ShiftAmount = Log2_64(NumOfVReg / 5);
    } else if (NumOfVReg % 3 == 0) {
      Opc = RISCV::SH1ADD;
      ShiftAmount = Log2_64(NumOfVReg / 3);
    }
    // ...
  } else if (llvm::has_single_bit<uint32_t>(NumOfVReg - 1)) {
    // NumOfVReg = 2^k + 1: shift then add.
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ...
  } else if (llvm::has_single_bit<uint32_t>(NumOfVReg + 1)) {
    // NumOfVReg = 2^k - 1: shift then subtract.
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ...
  } else {
    // Fall back to a real multiply.
    Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ...
    if (!STI.hasStdExtM() && !STI.hasStdExtZmmul())
      MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
          MF.getFunction(),
          "M- or Zmmul-extension must be enabled to calculate the vscaled size/"
          // ...
  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
      // ...
// Matches the canonical sign-extension idiom: addiw rd, rs, 0 (sext.w).
bool RISCV::isSEXT_W(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
}

// Matches add.uw rd, rs, x0 (zext.w, Zba).
bool RISCV::isZEXT_W(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ADD_UW && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0;
}

// Matches andi rd, rs, 255 (zext.b).
bool RISCV::isZEXT_B(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ANDI && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
}
static bool isRVVWholeLoadStore(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL1RE64_V:
  case RISCV::VL2RE64_V:
  case RISCV::VL4RE64_V:
  case RISCV::VL8RE64_V:
    // ...
    return true;
  }
}

  // ...
  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
      // ...
std::optional<std::pair<unsigned, unsigned>>
RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVRELOAD2_M1:
    return std::make_pair(2u, 1u);
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVRELOAD2_M2:
    return std::make_pair(2u, 2u);
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVRELOAD2_M4:
    return std::make_pair(2u, 4u);
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVRELOAD3_M1:
    return std::make_pair(3u, 1u);
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVRELOAD3_M2:
    return std::make_pair(3u, 2u);
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVRELOAD4_M1:
    return std::make_pair(4u, 1u);
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVRELOAD4_M2:
    return std::make_pair(4u, 2u);
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVRELOAD5_M1:
    return std::make_pair(5u, 1u);
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVRELOAD6_M1:
    return std::make_pair(6u, 1u);
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVRELOAD7_M1:
    return std::make_pair(7u, 1u);
  case RISCV::PseudoVSPILL8_M1:
  case RISCV::PseudoVRELOAD8_M1:
    return std::make_pair(8u, 1u);
  }
}
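// Illustrative note: the returned pair is (fields, LMUL); e.g.
// PseudoVSPILL4_M2 -> (4, 2) spills a 4-field segment tuple of LMUL=2 groups,
// i.e. 8 architectural vector registers written as 4 whole-group stores.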
bool RISCV::isFaultFirstLoad(const MachineInstr &MI) {
  return MI.getNumExplicitDefs() == 2 && MI.modifiesRegister(RISCV::VL) &&
         // ...
}

bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
  int16_t MI1FrmOpIdx =
      RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
  int16_t MI2FrmOpIdx =
      RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
  if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
    return false;
std::optional<unsigned>
RISCV::getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW) {
  switch (Opcode) {
  default:
    return std::nullopt;

  // Single-width and scaling shifts: only the low log2(SEW) bits of the
  // scalar shift amount are used.
  case RISCV::VSLL_VX:
  case RISCV::VSRL_VX:
  case RISCV::VSRA_VX:
  // ...
  case RISCV::VSSRL_VX:
  case RISCV::VSSRA_VX:
    return Log2SEW;

  // Narrowing shifts and clips: low log2(2 * SEW) bits.
  case RISCV::VNSRL_WX:
  case RISCV::VNSRA_WX:
  // ...
  case RISCV::VNCLIPU_WX:
  case RISCV::VNCLIP_WX:
    return Log2SEW + 1;

  // The remaining integer ops demand the full SEW-bit scalar.
  case RISCV::VADD_VX:
  case RISCV::VSUB_VX:
  case RISCV::VRSUB_VX:
  // ...
  case RISCV::VWADDU_VX:
  case RISCV::VWSUBU_VX:
  case RISCV::VWADD_VX:
  case RISCV::VWSUB_VX:
  case RISCV::VWADDU_WX:
  case RISCV::VWSUBU_WX:
  case RISCV::VWADD_WX:
  case RISCV::VWSUB_WX:
  // ...
  case RISCV::VADC_VXM:
  case RISCV::VADC_VIM:
  case RISCV::VMADC_VXM:
  case RISCV::VMADC_VIM:
  case RISCV::VMADC_VX:
  case RISCV::VSBC_VXM:
  case RISCV::VMSBC_VXM:
  case RISCV::VMSBC_VX:
  // ...
  case RISCV::VAND_VX:
  // ...
  case RISCV::VXOR_VX:
  // ...
  case RISCV::VMSEQ_VX:
  case RISCV::VMSNE_VX:
  case RISCV::VMSLTU_VX:
  case RISCV::VMSLT_VX:
  case RISCV::VMSLEU_VX:
  case RISCV::VMSLE_VX:
  case RISCV::VMSGTU_VX:
  case RISCV::VMSGT_VX:
  // ...
  case RISCV::VMINU_VX:
  case RISCV::VMIN_VX:
  case RISCV::VMAXU_VX:
  case RISCV::VMAX_VX:
  // ...
  case RISCV::VMUL_VX:
  case RISCV::VMULH_VX:
  case RISCV::VMULHU_VX:
  case RISCV::VMULHSU_VX:
  // ...
  case RISCV::VDIVU_VX:
  case RISCV::VDIV_VX:
  case RISCV::VREMU_VX:
  case RISCV::VREM_VX:
  // ...
  case RISCV::VWMUL_VX:
  case RISCV::VWMULU_VX:
  case RISCV::VWMULSU_VX:
  // ...
  case RISCV::VMACC_VX:
  case RISCV::VNMSAC_VX:
  case RISCV::VMADD_VX:
  case RISCV::VNMSUB_VX:
  // ...
  case RISCV::VWMACCU_VX:
  case RISCV::VWMACC_VX:
  case RISCV::VWMACCSU_VX:
  case RISCV::VWMACCUS_VX:
  // ...
  case RISCV::VMERGE_VXM:
  // ...
  case RISCV::VMV_V_X:
  // ...
  case RISCV::VSADDU_VX:
  case RISCV::VSADD_VX:
  case RISCV::VSSUBU_VX:
  case RISCV::VSSUB_VX:
  // ...
  case RISCV::VAADDU_VX:
  case RISCV::VAADD_VX:
  case RISCV::VASUBU_VX:
  case RISCV::VASUB_VX:
  // ...
  case RISCV::VSMUL_VX:
  // ...
  case RISCV::VMV_S_X:
    return 1U << Log2SEW;
  }
}
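// Illustrative note: for shifts by a scalar, only the low log2(SEW) bits of
// the scalar are demanded (log2(2*SEW) for the narrowing forms), while the
// arithmetic ops above consume all SEW bits; e.g. with SEW = 32 (Log2SEW = 5)
// VSLL_VX demands 5 bits of rs1 but VADD_VX demands 32.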
unsigned RISCV::getRVVMCOpcode(unsigned RVVPseudoOpcode) {
  const RISCVVPseudosTable::PseudoInfo *RVV =
      RISCVVPseudosTable::getPseudoInfo(RVVPseudoOpcode);
  // ...
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder MachineInstrBuilder & DefMI
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg, unsigned NumRegs)
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static ARCCC::CondCode getOppositeBranchCondition(ARCCC::CondCode CC)
Return the inverse of passed condition, i.e. turning COND_E to COND_NE.
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
#define clEnumValN(ENUMVAL, FLAGNAME, DESC)
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
const HexagonInstrInfo * TII
static M68k::CondCode getCondFromBranchOpc(unsigned BrOpc)
unsigned const TargetRegisterInfo * TRI
This file provides utility analysis objects describing memory locations.
const char LLVMTargetMachineRef TM
#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)
static bool isRVVWholeLoadStore(unsigned Opcode)
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs)
static bool getFPFusedMultiplyPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns, bool DoRegPressureReduce)
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)
static cl::opt< bool > PreferWholeRegisterMove("riscv-prefer-whole-register-move", cl::init(false), cl::Hidden, cl::desc("Prefer whole register move for vector registers."))
#define CASE_VFMA_SPLATS(OP)
unsigned getPredicatedOpcode(unsigned Opcode)
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc, MachineCombinerPattern Pattern)
#define CASE_WIDEOP_OPCODE_LMULS(OP)
static bool isFSUB(unsigned Opc)
MachineOutlinerConstructionID
static bool isFMUL(unsigned Opc)
#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
#define CASE_OPERAND_UIMM(NUM)
#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)
static bool isFADD(unsigned Opc)
#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)
#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP)
static unsigned getAddendOperandIdx(MachineCombinerPattern Pattern)
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI, const MachineBasicBlock &MBB, MachineBasicBlock::const_iterator MBBI, MachineBasicBlock::const_iterator &DefMBBI, RISCVII::VLMUL LMul)
#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)
static MachineInstr * canFoldAsPredicatedOp(Register Reg, const MachineRegisterInfo &MRI, const TargetInstrInfo *TII)
Identify instructions that can be folded into a CCMOV instruction, and return the defining instructio...
static bool getFPPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns, bool DoRegPressureReduce)
static bool canCombineFPFusedMultiply(const MachineInstr &Root, const MachineOperand &MO, bool DoRegPressureReduce)
static cl::opt< MachineTraceStrategy > ForceMachineCombinerStrategy("riscv-force-machine-combiner-strategy", cl::Hidden, cl::desc("Force machine combiner to use a specific strategy for machine " "trace metrics evaluation."), cl::init(MachineTraceStrategy::TS_NumStrategies), cl::values(clEnumValN(MachineTraceStrategy::TS_Local, "local", "Local strategy."), clEnumValN(MachineTraceStrategy::TS_MinInstrCount, "min-instr", "MinInstrCount strategy.")))
#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
This file declares the machine register scavenger class.
static bool memOpsHaveSameBasePtr(const MachineInstr &MI1, ArrayRef< const MachineOperand * > BaseOps1, const MachineInstr &MI2, ArrayRef< const MachineOperand * > BaseOps2)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallVector class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static unsigned getSize(unsigned Kind)
static constexpr uint32_t Opcode
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
size_t size() const
size - Get the array size.
bool empty() const
empty - Check if the array is empty.
static DILocation * getMergedLocation(DILocation *LocA, DILocation *LocB)
When two instructions are combined into a single instruction we also need to combine the original loc...
This class represents an Operation in the Expression.
Diagnostic information for unsupported feature in backend.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
void diagnose(const DiagnosticInfo &DI)
Report a message to the currently installed diagnostic handler.
LiveInterval - This class represents the liveness of a register, or stack slot.
LiveInterval & getInterval(Register Reg)
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
const Segment * getSegmentContaining(SlotIndex Idx) const
Return the segment that contains the specified index, or null if there is none.
void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
MCInstBuilder & addReg(unsigned Reg)
Add a new register operand.
MCInstBuilder & addImm(int64_t Val)
Add a new integer immediate operand.
Instances of this class represent a single low-level machine instruction.
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer contro...
Wrapper class representing physical registers. Should be passed by value.
unsigned pred_size() const
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
Instructions::const_iterator const_instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
void setStackID(int ObjectIdx, uint8_t ID)
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool mayLoad(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read memory.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
bool isNonTemporal() const
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
A Module instance is used to store all the information related to an LLVM module.
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
std::optional< outliner::OutlinedFunction > getOutliningCandidateInfo(std::vector< outliner::Candidate > &RepeatedSequenceLocs) const override
void movImm(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register DstReg, uint64_t Val, MachineInstr::MIFlag Flag=MachineInstr::NoFlags, bool DstRenamable=false, bool DstIsDead=false) const
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
MachineInstr * emitLdStWithAddr(MachineInstr &MemI, const ExtAddrMode &AM) const override
const MCInstrDesc & getBrCond(RISCVCC::CondCode CC) const
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &dl, int *BytesAdded=nullptr) const override
bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const override
RISCVInstrInfo(RISCVSubtarget &STI)
void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const override
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool canFoldIntoAddrMode(const MachineInstr &MemI, Register Reg, const MachineInstr &AddrI, ExtAddrMode &AM) const override
void insertIndirectBranch(MachineBasicBlock &MBB, MachineBasicBlock &NewDestBB, MachineBasicBlock &RestoreBB, const DebugLoc &DL, int64_t BrOffset, RegScavenger *RS) const override
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc) const override
const RISCVSubtarget & STI
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns, bool DoRegPressureReduce) const override
std::optional< unsigned > getInverseOpcode(unsigned Opcode) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
MachineTraceStrategy getMachineCombinerTraceStrategy() const override
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
virtual outliner::InstrType getOutliningTypeImpl(MachineBasicBlock::iterator &MBBI, unsigned Flags) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ArrayRef< std::pair< MachineMemOperand::Flags, const char * > > getSerializableMachineMemOperandTargetFlags() const override
MCInst getNop() const override
void finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &MI, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, unsigned &Width, const TargetRegisterInfo *TRI) const override
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
void getVLENFactoredAmount(MachineFunction &MF, MachineBasicBlock &MBB, MachineBasicBlock::iterator II, const DebugLoc &DL, Register DestReg, int64_t Amount, MachineInstr::MIFlag Flag=MachineInstr::NoFlags) const
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, unsigned &Width, const TargetRegisterInfo *TRI) const
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DstReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
void copyPhysRegVector(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc, unsigned Opc, unsigned NF=1) const
bool optimizeCondBranch(MachineInstr &MI) const override
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const override
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool IsKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
RISCVMachineFunctionInfo - This class is derived from MachineFunctionInfo and contains private RISCV-specific information for each MachineFunction.
int getBranchRelaxationScratchFrameIndex() const
bool hasStdExtCOrZca() const
const RISCVRegisterInfo * getRegisterInfo() const override
void enterBasicBlockEnd(MachineBasicBlock &MBB)
Start tracking liveness from the end of basic block MBB.
void setRegUsed(Register Reg, LaneBitmask LaneMask=LaneBitmask::getAll())
Tell the scavenger a register is used.
Register scavengeRegisterBackwards(const TargetRegisterClass &RC, MachineBasicBlock::iterator To, bool RestoreAfter, int SPAdj, bool AllowSpill=true)
Make a register of the specific register class available from the current position backwards to the place before To.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
SlotIndex - An opaque wrapper around machine indexes.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all SmallPtrSet instances.
bool erase(PtrType Ptr)
erase - If the set contains the specified pointer, remove it and return true, otherwise return false.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
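For illustration, a minimal standalone sketch (not code from this file) of the visited-set idiom these two members support:

#include "llvm/ADT/SmallPtrSet.h"

// Returns true only the first time P is seen: insert().second reports
// whether the pointer was newly added, and erase() undoes a visit.
static bool visitOnce(llvm::SmallPtrSet<const void *, 8> &Visited,
                      const void *P) {
  return Visited.insert(P).second;
}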
This class consists of common code factored out of the SmallVector class to reduce code duplication based on the SmallVector 'N' template parameter.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
MI-level Statepoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given statepoint should emit.
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const
Optional target hook that returns true if MBB is safe to outline from, and returns any target-specific information in Flags.
virtual void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could replace the original code sequence.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual bool hasReassociableSibling(const MachineInstr &Inst, bool &Commuted) const
Return true when Inst has a reassociable sibling.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
Target - Wrapper for Target specific information.
A raw_ostream that writes to an std::string.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ C
The default llvm calling convention, compatible with C.
CondCode getOppositeBranchCondition(CondCode)
unsigned getBrCond(CondCode CC)
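These two helpers compose naturally when a branch must be inverted; a minimal sketch, assuming only the RISCVCC declarations above:

// Opcode of the branch that tests the negated condition, the core of
// reverseBranchCondition()-style rewrites.
static unsigned invertedBrOpcode(RISCVCC::CondCode CC) {
  return RISCVCC::getBrCond(RISCVCC::getOppositeBranchCondition(CC));
}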
static unsigned getVecPolicyOpNum(const MCInstrDesc &Desc)
static unsigned getVLOpNum(const MCInstrDesc &Desc)
static bool hasVLOp(uint64_t TSFlags)
static bool hasVecPolicyOp(uint64_t TSFlags)
static bool isRVVWideningReduction(uint64_t TSFlags)
static unsigned getSEWOpNum(const MCInstrDesc &Desc)
static bool hasSEWOp(uint64_t TSFlags)
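A hedged sketch of how the TSFlags predicates and operand-index helpers above pair up when inspecting an RVV pseudo (illustrative only; names match the declarations listed here):

#include "llvm/CodeGen/MachineInstr.h"

// Guard each operand access with the matching TSFlags predicate before
// using the index helper to locate VL and log2(SEW).
static void readVLAndSEW(const llvm::MachineInstr &MI) {
  const llvm::MCInstrDesc &Desc = MI.getDesc();
  if (RISCVII::hasVLOp(Desc.TSFlags) && RISCVII::hasSEWOp(Desc.TSFlags)) {
    const llvm::MachineOperand &VLOp =
        MI.getOperand(RISCVII::getVLOpNum(Desc));
    int64_t Log2SEW = MI.getOperand(RISCVII::getSEWOpNum(Desc)).getImm();
    (void)VLOp;
    (void)Log2SEW;
  }
}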
InstSeq generateInstSeq(int64_t Val, const MCSubtargetInfo &STI)
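A sketch of the usual cost query built on this helper, assuming RISCVMatInt::InstSeq is a small vector of materialization steps:

// Number of instructions needed to materialize Val as an immediate.
static unsigned materializationCost(int64_t Val,
                                    const llvm::MCSubtargetInfo &STI) {
  return RISCVMatInt::generateInstSeq(Val, STI).size();
}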
@ OPERAND_UIMMLOG2XLEN_NONZERO
@ OPERAND_SIMM12_LSB00000
@ OPERAND_FIRST_RISCV_IMM
@ OPERAND_UIMM10_LSB00_NONZERO
@ OPERAND_SIMM10_LSB0000_NONZERO
static bool isTailAgnostic(unsigned VType)
static RISCVII::VLMUL getVLMUL(unsigned VType)
std::pair< unsigned, bool > decodeVLMUL(RISCVII::VLMUL VLMUL)
static bool isValidSEW(unsigned SEW)
void printVType(unsigned VType, raw_ostream &OS)
static unsigned getSEW(unsigned VType)
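Taken together, these decode an encoded vtype immediate; a minimal sketch using only the declarations listed above:

// Unpack SEW, LMUL, and tail policy; decodeVLMUL() splits the LMUL
// encoding into a magnitude and a fractional flag (e.g. mf2 -> {2, true}).
static void inspectVType(unsigned VType) {
  unsigned SEW = RISCVVType::getSEW(VType); // 8, 16, 32, or 64
  RISCVII::VLMUL LMul = RISCVVType::getVLMUL(VType);
  auto [Mul, Fractional] = RISCVVType::decodeVLMUL(LMul);
  bool TailAgnostic = RISCVVType::isTailAgnostic(VType);
  (void)SEW; (void)Mul; (void)Fractional; (void)TailAgnostic;
}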
bool hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2)
std::optional< unsigned > getVectorLowDemandedScalarBits(uint16_t Opcode, unsigned Log2SEW)
int16_t getNamedOperandIdx(uint16_t Opcode, uint16_t NamedIndex)
unsigned getRVVMCOpcode(unsigned RVVPseudoOpcode)
bool isSEXT_W(const MachineInstr &MI)
bool isFaultFirstLoad(const MachineInstr &MI)
std::optional< std::pair< unsigned, unsigned > > isRVVSpillForZvlsseg(unsigned Opcode)
bool isZEXT_B(const MachineInstr &MI)
bool isRVVSpill(const MachineInstr &MI)
bool isZEXT_W(const MachineInstr &MI)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
ValuesClass values(OptsTy... Options)
Helper to build a ValuesClass by forwarding a variable number of arguments as an initializer list to the ValuesClass constructor.
initializer< Ty > init(const Ty &Val)
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
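For example, a standalone sketch (not code from this file):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"

// Sum every element except the first, with no explicit iterator math.
static int sumTail(llvm::ArrayRef<int> Vals) {
  int Sum = 0;
  for (int V : llvm::drop_begin(Vals)) // skips Vals[0]
    Sum += V;
  return Sum;
}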
MachineTraceStrategy
Strategies for selecting traces.
@ TS_MinInstrCount
Select the trace through a block that has the fewest instructions.
@ TS_Local
Select the trace that contains only the current basic block.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
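A one-line sketch of the range form:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"

// True when every element satisfies the predicate; begin()/end() implied.
static bool allNonZero(llvm::ArrayRef<int> Vals) {
  return llvm::all_of(Vals, [](int V) { return V != 0; });
}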
static const MachineMemOperand::Flags MONontemporalBit1
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
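A minimal sketch of the builder idiom, here using the MBB/iterator overload together with the bool-to-flag helper getKillRegState() listed further below:

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

// Emit "Desc Dst, Src"; the kill flag on Src is derived from a boolean
// rather than hard-coded into the operand.
static void emitUnary(llvm::MachineBasicBlock &MBB,
                      llvm::MachineBasicBlock::iterator I,
                      const llvm::DebugLoc &DL, const llvm::MCInstrDesc &Desc,
                      llvm::Register Dst, llvm::Register Src, bool KillSrc) {
  llvm::BuildMI(MBB, I, DL, Desc, Dst)
      .addReg(Src, llvm::getKillRegState(KillSrc));
}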
auto enumerate(FirstRange &&First, RestRanges &&...Rest)
Given two or more input ranges, returns a new range whose values are tuples (A, B, C, ...), such that A is the 0-based index of the item in the sequence, and B, C, ..., are the values from the original input ranges.
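For example, a sketch that pairs each value with its index (structured bindings over the result assume a recent LLVM):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"

// Print "index: value" for each element.
static void dumpIndexed(llvm::ArrayRef<int> Vals) {
  for (auto [Idx, Val] : llvm::enumerate(Vals))
    llvm::errs() << Idx << ": " << Val << "\n";
}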
static const MachineMemOperand::Flags MONontemporalBit0
const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=6)
This method strips off any GEP address adjustments and pointer casts from the specified value, returning the original object being addressed.
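A sketch of the cheap may-alias prefilter commonly built on it:

#include "llvm/Analysis/ValueTracking.h"

// Strip GEPs and casts from both pointers and compare the base objects,
// a quick first check before heavier alias queries.
static bool sameRoot(const llvm::Value *A, const llvm::Value *B) {
  return llvm::getUnderlyingObject(A) == llvm::getUnderlyingObject(B);
}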
unsigned getDeadRegState(bool B)
constexpr bool isPowerOf2_64(uint64_t Value)
Return true if the argument is a power of two > 0 (64 bit edition.)
unsigned Log2_64(uint64_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
unsigned Log2_32(uint32_t Value)
Return the floor log base 2 of the specified value, -1 if the value is zero.
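These pair naturally when converting a power-of-two scale to a shift amount; a small sketch:

#include "llvm/Support/MathExtras.h"
#include <cassert>

// e.g. Scale == 8 yields 3; the assert documents the precondition.
static unsigned shiftForScale(uint64_t Scale) {
  assert(llvm::isPowerOf2_64(Scale) && "expected a power of two");
  return llvm::Log2_64(Scale);
}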
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
MachineCombinerPattern
These are instruction patterns matched by the machine combiner pass.
unsigned getKillRegState(bool B)
bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
unsigned getRenamableRegState(bool B)
DWARFExpression::Operation Op
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent to: C.erase(remove_if(C.begin(), C.end(), pred), C.end());
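For example, a standalone sketch:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

// Remove all negative values in one pass, the safe spelling of the
// erase(remove_if(...)) idiom quoted in the brief above.
static void dropNegatives(llvm::SmallVectorImpl<int> &V) {
  llvm::erase_if(V, [](int X) { return X < 0; });
}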
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
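A sketch of the immediate-legality pattern these two support; the 12-bit width is illustrative (it matches RISC-V I-type immediates) rather than taken from this page:

#include "llvm/Support/MathExtras.h"

// Does Imm fit a signed 12-bit field, and what signed value does a raw
// 12-bit field denote? (Uses the runtime two-argument SignExtend64.)
static bool fitsSimm12(int64_t Imm) { return llvm::isIntN(12, Imm); }
static int64_t decodeSimm12(uint64_t Field) {
  return llvm::SignExtend64(Field, 12);
}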
Description of the encoding of one expression Op.
Used to describe addressing mode similar to ExtAddrMode in CodeGenPrepare.
This represents a simple continuous liveness interval for a value.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
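A hedged sketch of how a spill's memory operand is typically assembled from it:

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineMemOperand.h"

// Describe a store to frame index FI so the scheduler and alias analysis
// can reason about the spill.
static llvm::MachineMemOperand *spillMMO(llvm::MachineFunction &MF, int FI,
                                         uint64_t Size, llvm::Align Alignment) {
  return MF.getMachineMemOperand(
      llvm::MachinePointerInfo::getFixedStack(MF, FI),
      llvm::MachineMemOperand::MOStore, Size, Alignment);
}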
Used to describe a register and immediate addition.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.