#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"
static cl::opt<bool> PreferWholeRegisterMove(
    "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
    cl::desc("Prefer whole register move for vector registers."));
49 "riscv-force-machine-combiner-strategy",
cl::Hidden,
50 cl::desc(
"Force machine combiner to use a specific strategy for machine "
51 "trace metrics evaluation."),
52 cl::init(MachineTraceStrategy::TS_NumStrategies),
55 clEnumValN(MachineTraceStrategy::TS_MinInstrCount,
"min-instr",
56 "MinInstrCount strategy.")));
#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
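// isLoadFromStackSlot: if MI is a direct load from a stack slot, return the
// destination register and set FrameIndex (and the access size in MemBytes).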
81 int &FrameIndex)
const {
88 unsigned &MemBytes)
const {
89 switch (
MI.getOpcode()) {
112 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
113 MI.getOperand(2).getImm() == 0) {
114 FrameIndex =
MI.getOperand(1).getIndex();
115 return MI.getOperand(0).getReg();
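// isStoreToStackSlot: the store-direction counterpart, returning the stored
// source register when MI writes directly to a stack slot.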
122 int &FrameIndex)
const {
129 unsigned &MemBytes)
const {
130 switch (
MI.getOpcode()) {
150 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
151 MI.getOperand(2).getImm() == 0) {
152 FrameIndex =
MI.getOperand(1).getIndex();
153 return MI.getOperand(0).getReg();
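// forwardCopyWillClobberTuple: true when copying a register tuple in forward
// order would overwrite not-yet-copied source registers.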
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
}
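// isConvertibleToVMV_V_V: decide whether a whole-register vector COPY can be
// rewritten as vmv.v.v, by scanning backwards for the defining instruction
// and any intervening vsetvli.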
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
                                   const MachineBasicBlock &MBB,
                                   MachineBasicBlock::const_iterator MBBI,
                                   MachineBasicBlock::const_iterator &DefMBBI,
                                   RISCVII::VLMUL LMul) {
  if (PreferWholeRegisterMove)
    return false;

  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");
  Register SrcReg = MBBI->getOperand(1).getReg();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;
  while (MBBI != MBB.begin()) {
    --MBBI;
    if (MBBI->isMetaInstruction())
      continue;
    if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
        MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
        MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
      if (!FoundDef) {
        if (!FirstVSetVLI) {
          FirstVSetVLI = true;
          unsigned FirstVType = MBBI->getOperand(2).getImm();
          RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
          FirstSEW = RISCVVType::getSEW(FirstVType);
          // The first vsetvli must have the same LMUL as the copied class.
          if (FirstLMul != LMul)
            return false;
        }
        // Only permit `vsetvli x0, x0, vtype`: it preserves the current VL.
        if (MBBI->getOperand(0).getReg() != RISCV::X0)
          return false;
        if (MBBI->getOperand(1).isImm())
          return false;
        if (MBBI->getOperand(1).getReg() != RISCV::X0)
          return false;
        continue;
      }
      unsigned VType = MBBI->getOperand(2).getImm();
      // ... compare SEW/LMUL of the defining vsetvli with the copy ...
    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
      return false;
    } else if (MBBI->getNumDefs()) {
      // Writing VL, or clobbering the source, kills the conversion.
      if (MBBI->modifiesRegister(RISCV::VL))
        return false;
      for (const MachineOperand &MO : MBBI->operands()) {
        if (!MO.isReg() || !MO.isDef())
          continue;
        if (!FoundDef && TRI->isSubRegisterEq(MO.getReg(), SrcReg)) {
          // ...
          if (MO.getReg() != SrcReg)
            return false;
          // ...
        }
      }
    }
  }
  return false;
}
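// copyPhysReg: emit a register-to-register copy DstReg <- SrcReg, choosing
// the opcode by register class.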
  if (RISCV::GPRPF64RegClass.contains(DstReg))
    DstReg = TRI->getSubReg(DstReg, RISCV::sub_32);
  if (RISCV::GPRPF64RegClass.contains(SrcReg))
    SrcReg = TRI->getSubReg(SrcReg, RISCV::sub_32);

  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
    return;
  }

  // Copies of vector CSRs go through a GPR via csrrs.
  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
        .addImm(RISCVSysReg::lookupSysRegByName(TRI->getName(SrcReg))->Encoding)
        .addReg(RISCV::X0);
    return;
  }
  unsigned Opc;
  bool IsScalableVector = true;
  unsigned NF = 1;
  RISCVII::VLMUL LMul = RISCVII::LMUL_1;
  unsigned SubRegIdx = RISCV::sub_vrm1_0;
  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    if (!STI.hasStdExtZfh() && STI.hasStdExtZfhmin()) {
      // Zfhmin has no fsgnj.h: copy through the FPR32 super-register instead.
      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;
    } else {
      Opc = RISCV::FSGNJ_H;
    }
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::FSGNJ_S;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::FSGNJ_D;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV2R_V;
    LMul = RISCVII::LMUL_2;
  } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV4R_V;
    LMul = RISCVII::LMUL_4;
  } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV8R_V;
    LMul = RISCVII::LMUL_8;
  } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 2;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    NF = 2;
    LMul = RISCVII::LMUL_2;
  } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV4R_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    NF = 2;
    LMul = RISCVII::LMUL_4;
  } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 3;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    NF = 3;
    LMul = RISCVII::LMUL_2;
  } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 4;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    NF = 4;
    LMul = RISCVII::LMUL_2;
  } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 5;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 6;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 7;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 8;
    LMul = RISCVII::LMUL_1;
  } else {
    llvm_unreachable("Impossible reg-to-reg copy");
  }
  if (IsScalableVector) {
    bool UseVMV_V_V = false;
    MachineBasicBlock::const_iterator DefMBBI;
    unsigned VIOpc;
    if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
      UseVMV_V_V = true;
      // Only LMUL 1/2/4/8 can reach here; those are the only vector register
      // classes we define.
      switch (LMul) {
      default:
        llvm_unreachable("Impossible LMUL for vector register copy.");
      case RISCVII::LMUL_1:
        Opc = RISCV::PseudoVMV_V_V_M1;
        VIOpc = RISCV::PseudoVMV_V_I_M1;
        break;
      case RISCVII::LMUL_2:
        Opc = RISCV::PseudoVMV_V_V_M2;
        VIOpc = RISCV::PseudoVMV_V_I_M2;
        break;
      case RISCVII::LMUL_4:
        Opc = RISCV::PseudoVMV_V_V_M4;
        VIOpc = RISCV::PseudoVMV_V_I_M4;
        break;
      case RISCVII::LMUL_8:
        Opc = RISCV::PseudoVMV_V_V_M8;
        VIOpc = RISCV::PseudoVMV_V_I_M8;
        break;
      }
    }

    bool UseVMV_V_I = false;
    if (UseVMV_V_V && (DefMBBI->getOpcode() == VIOpc)) {
      UseVMV_V_I = true;
      Opc = VIOpc;
    }

    // ... NF == 1 copies emit a single instruction; tuples loop below ...

    int I = 0, End = NF, Incr = 1;
    unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
    unsigned DstEncoding = TRI->getEncodingValue(DstReg);
    auto [LMulVal, Fractional] = RISCVVType::decodeVLMUL(LMul);
    assert(!Fractional && "It is impossible be fractional lmul here.");
    // If a forward copy would clobber later tuple registers, copy backwards.
    if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) {
      I = NF - 1;
      End = -1;
      Incr = -1;
    }

    for (; I != End; I += Incr) {
      auto MIB = BuildMI(MBB, MBBI, DL, get(Opc),
                         TRI->getSubReg(DstReg, SubRegIdx + I));
      if (UseVMV_V_I)
        MIB = MIB.add(DefMBBI->getOperand(1));
      else
        MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
                         getKillRegState(KillSrc));
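// storeRegToStackSlot: spill SrcReg to the stack slot FI, selecting a store
// opcode (or RVV spill pseudo) by register class.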
void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI,
                                         Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  unsigned Opcode;
  bool IsScalableVector = true;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::SW : RISCV::SD;
    IsScalableVector = false;
  } else if (RISCV::GPRPF64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxSD;
    IsScalableVector = false;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSH;
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSW;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSD;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS1R_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS2R_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS4R_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS8R_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
  else
    llvm_unreachable("Can't store this register to stack slot");

  if (IsScalableVector) {
    // ... emit the spill through a scalable-vector stack object ...
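// loadRegFromStackSlot: the reload counterpart, selecting a load opcode (or
// RVV reload pseudo) by register class.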
void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          Register DstReg, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI,
                                          Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  unsigned Opcode;
  bool IsScalableVector = true;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::LW : RISCV::LD;
    IsScalableVector = false;
  } else if (RISCV::GPRPF64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::PseudoRV32ZdinxLD;
    IsScalableVector = false;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLH;
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLW;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLD;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL1RE8_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL2RE8_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL4RE8_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL8RE8_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
  else
    llvm_unreachable("Can't load this register from stack slot");

  if (IsScalableVector) {
    // ... emit the reload through a scalable-vector stack object ...
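// foldMemoryOperandImpl: fold a sign/zero-extend of a reloaded value into the
// reload itself by choosing a widening or narrowing load opcode.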
  if (Ops.size() != 1 || Ops[0] != 1)
    return nullptr;

  unsigned LoadOpc;
  switch (MI.getOpcode()) {
  default:
    if (RISCV::isZEXT_W(MI)) {
      LoadOpc = RISCV::LWU;
      break;
    }
    if (RISCV::isZEXT_B(MI)) {
      LoadOpc = RISCV::LBU;
      break;
    }
    return nullptr;
  // ... SEXT_H/SEXT_B cases fold to LH/LB ...
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
    LoadOpc = RISCV::LHU;
    break;
  }

  // ... build a MachineMemOperand (MMO) for the fixed stack slot ...
  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(LoadOpc),
                 MI.getOperand(0).getReg())
      .addFrameIndex(FrameIndex)
      .addImm(0)
      .addMemOperand(MMO);
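// movImm: materialize an immediate via a RISCVMatInt instruction sequence,
// emitting each step according to its operand kind.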
  switch (Inst.getOpndKind()) {
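// Branch-analysis helpers: parseCondBranch extracts the condition and target
// of a conditional branch; getBrCond maps a condition code back to the
// corresponding branch instruction description.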
806 "Unknown conditional branch");
819 return get(RISCV::BEQ);
821 return get(RISCV::BNE);
823 return get(RISCV::BLT);
825 return get(RISCV::BGE);
827 return get(RISCV::BLTU);
829 return get(RISCV::BGEU);
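// analyzeBranch: classify a block's terminators into the TBB/FBB/Cond form
// the generic branch-folding code expects.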
bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  Cond.clear();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;

  // Count the number of terminators and find the first unconditional or
  // indirect branch.
  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    NumTerminators++;
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      FirstUncondOrIndirectBr = J.getReverse();
    }
  }

  // If AllowModify is true, we can erase any terminators following the first
  // unconditional or indirect branch.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      NumTerminators--;
    }
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
    return true;

  // We can't handle blocks with more than 2 terminators.
  if (NumTerminators > 2)
    return true;

  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    // ...
    return false;
  }

  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    // ...
    return false;
  }

  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
    // ...
    return false;
  }

  // Otherwise, we can't handle this.
  return true;
}
unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  // ...
  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    return 0;

  // Remove the branch.
  I->eraseFromParent();
  // ...
  if (!I->getDesc().isConditionalBranch())
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "RISC-V branch conditions have two components!");
  // ...

void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                          MachineBasicBlock &DestBB,
                                          MachineBasicBlock &RestoreBB,
                                          const DebugLoc &DL, int64_t BrOffset,
                                          RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(RestoreBB.empty() &&
         "restore block should be inserted for restoring clobbered registers");

  if (!isInt<32>(BrOffset))
    report_fatal_error(
        "Branch offsets outside of the signed 32-bit range not supported");

  // A virtual register must be used initially, as the register scavenger
  // won't work with empty blocks.
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  // ... emit PseudoJump through ScratchReg, then scavenge a real GPR ...
  if (TmpGPR != RISCV::NoRegister)
    RS->setRegUsed(TmpGPR);
  else {
    // No register was scavenged: spill s11 (x27) around the branch instead.
    TmpGPR = RISCV::X27;

    int FrameIndex = RVFI->getBranchRelaxationScratchFrameIndex();
    if (FrameIndex == -1)
      report_fatal_error("underestimated function size");

    // ... spill TmpGPR before the branch ...
    TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
                             /*SPAdj=*/0, /*FIOperandNum=*/1);

    MI.getOperand(1).setMBB(&RestoreBB);

    // ... reload TmpGPR in the restore block ...
    TRI->eliminateFrameIndex(RestoreBB.back(),
                             /*SPAdj=*/0, /*FIOperandNum=*/1);
  }

  MRI.replaceRegWith(ScratchReg, TmpGPR);
  MRI.clearVirtRegs();
bool RISCVInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert((Cond.size() == 3) && "Invalid branch condition!");
  Cond[0].setImm(getOppositeBranchCondition(
      static_cast<RISCVCC::CondCode>(Cond[0].getImm())));
  return false;
}

MachineBasicBlock *
RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  // The branch target is always the last operand.
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
}
bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  switch (BranchOp) {
  case RISCV::BEQ:
  case RISCV::BNE:
  case RISCV::BLT:
  case RISCV::BGE:
  case RISCV::BLTU:
  case RISCV::BGEU:
    return isIntN(13, BrOffset);
  case RISCV::JAL:
  case RISCV::PseudoBR:
    return isIntN(21, BrOffset);
  case RISCV::PseudoJump:
    return isIntN(32, SignExtend64(BrOffset + 0x800, STI.getXLen()));
  }
}
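// getPredicatedOpcode: map an ALU opcode to its PseudoCC* predicated form,
// used when folding instructions into a conditional move.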
unsigned getPredicatedOpcode(unsigned Opcode) {
  switch (Opcode) {
  case RISCV::ADD:   return RISCV::PseudoCCADD;   break;
  case RISCV::SUB:   return RISCV::PseudoCCSUB;   break;
  case RISCV::AND:   return RISCV::PseudoCCAND;   break;
  case RISCV::OR:    return RISCV::PseudoCCOR;    break;
  case RISCV::XOR:   return RISCV::PseudoCCXOR;   break;

  case RISCV::ADDW:  return RISCV::PseudoCCADDW;  break;
  case RISCV::SUBW:  return RISCV::PseudoCCSUBW;  break;
  }

  return RISCV::INSTRUCTION_LIST_END;
}
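// canFoldAsPredicatedOp: identify an instruction that can be folded into a
// CCMOV, and return the defining instruction if so.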
static MachineInstr *canFoldAsPredicatedOp(Register Reg,
                                           const MachineRegisterInfo &MRI,
                                           const TargetInstrInfo *TII) {
  if (!Reg.isVirtual())
    return nullptr;
  if (!MRI.hasOneNonDBGUse(Reg))
    return nullptr;
  MachineInstr *MI = MRI.getVRegDef(Reg);
  // ...
  for (const MachineOperand &MO : llvm::drop_begin(MI->operands())) {
    // Reject frame index operands.
    if (MO.isFI() || MO.isCPI() || MO.isJTI())
      return nullptr;
    // ...
    if (MO.getReg().isPhysical() && !MRI.isConstantPhysReg(MO.getReg()))
      return nullptr;
  }
  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(/* AliasAnalysis = */ nullptr, DontMoveAcrossStores))
    return nullptr;
  return MI;
}
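// analyzeSelect: describe a PseudoCCMOVGPR to the generic select-optimization
// framework.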
bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   unsigned &TrueOp, unsigned &FalseOp,
                                   bool &Optimizable) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  // Push the LHS, RHS and condition code of the compare.
  Cond.push_back(MI.getOperand(1));
  Cond.push_back(MI.getOperand(2));
  Cond.push_back(MI.getOperand(3));
  // We can only fold when we support short forward branch opt.
  Optimizable = STI.hasShortForwardBranchOpt();
  return false;
}
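// optimizeSelect: replace a CCMOV by predicating the single definition of its
// true (or, inverted, false) operand.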
MachineInstr *
RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
                               SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                               bool PreferFalse) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  if (!STI.hasShortForwardBranchOpt())
    return nullptr;

  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  MachineInstr *DefMI =
      canFoldAsPredicatedOp(MI.getOperand(5).getReg(), MRI, this);
  bool Invert = !DefMI;
  if (Invert)
    DefMI = canFoldAsPredicatedOp(MI.getOperand(4).getReg(), MRI, this);
  if (!DefMI)
    return nullptr;

  // Find the new register class to use.
  Register DestReg = MI.getOperand(0).getReg();
  // ...
  if (!MRI.constrainRegClass(DestReg, PreviousClass))
    return nullptr;

  unsigned PredOpc = getPredicatedOpcode(DefMI->getOpcode());
  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");

  // Create a new predicated version of DefMI.
  MachineInstrBuilder NewMI =
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(PredOpc), DestReg);

  // Copy the condition portion.
  NewMI.add(MI.getOperand(1));
  NewMI.add(MI.getOperand(2));
  // ... add the (possibly inverted) condition code, then the false use ...
  NewMI.add(FalseReg);
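// getInstSizeInBytes: compute the encoded size of MI, accounting for inline
// asm, nontemporal-hint prefixes, and compressible instructions.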
unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  if (MI.isMetaInstruction())
    return 0;

  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::INLINEASM ||
      Opcode == TargetOpcode::INLINEASM_BR) {
    const MachineFunction &MF = *MI.getParent()->getParent();
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                              *TM.getMCAsmInfo());
  }

  if (!MI.memoperands_empty()) {
    // Nontemporal accesses carry an ntl.all hint instruction in front.
    const MachineFunction &MF = *MI.getParent()->getParent();
    const auto &ST = MF.getSubtarget<RISCVSubtarget>();
    // ...
    if (ST.hasStdExtCOrZca() && ST.enableRVCHintInstrs()) {
      if (isCompressibleInst(MI, STI))
        return 4; // c.ntl.all + c.load/c.store
      return 6;   // c.ntl.all + load/store
    }
    // ...
  }

  if (MI.getParent() && MI.getParent()->getParent()) {
    if (isCompressibleInst(MI, STI))
      return 2;
  }
  return get(Opcode).getSize();
}
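// isAsCheapAsAMove: register-move idioms (fsgnj rd, rs, rs and ADDI/ORI/XORI
// with x0 or zero immediate) cost no more than a move.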
bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  const unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
  case RISCV::ADDI:
  case RISCV::ORI:
  case RISCV::XORI:
    return (MI.getOperand(1).isReg() &&
            MI.getOperand(1).getReg() == RISCV::X0) ||
           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
  }
  return MI.isAsCheapAsAMove();
}
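// isCopyInstrImpl: recognize the same idioms as full register copies for the
// generic copy-propagation machinery.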
std::optional<DestSourcePair>
RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  if (MI.isMoveReg())
    return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
  switch (MI.getOpcode()) {
  default:
    break;
  case RISCV::ADDI:
    // Operand 1 can be a frameindex but callers expect registers.
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0)
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
  case RISCV::FSGNJ_D_INX:
  case RISCV::FSGNJ_D_IN32X:
  case RISCV::FSGNJ_S_INX:
  case RISCV::FSGNJ_H_INX:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  }
  return std::nullopt;
}
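// Machine-combiner support: pick a trace strategy, then make sure any newly
// created FP instructions carry a rounding-mode (frm) operand.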
MachineTraceStrategy RISCVInstrInfo::getMachineCombinerTraceStrategy() const {
  if (ForceMachineCombinerStrategy.getNumOccurrences() == 0) {
    // Prefer the local strategy only for in-order cores with a sched model.
    const auto &SchedModel = STI.getSchedModel();
    return (!SchedModel.hasInstrSchedModel() || SchedModel.isOutOfOrder())
               ? MachineTraceStrategy::TS_MinInstrCount
               : MachineTraceStrategy::TS_Local;
  }
  return ForceMachineCombinerStrategy;
}

void RISCVInstrInfo::finalizeInsInstrs(
    MachineInstr &Root, MachineCombinerPattern &P,
    SmallVectorImpl<MachineInstr *> &InsInstrs) const {
  int16_t FrmOpIdx =
      RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);
  if (FrmOpIdx < 0) {
    assert(all_of(InsInstrs,
                  [](MachineInstr *MI) {
                    return RISCV::getNamedOperandIdx(MI->getOpcode(),
                                                     RISCV::OpName::frm) < 0;
                  }) &&
           "New instructions require FRM whereas the old one does not have it");
    return;
  }
  // ...
  for (auto *NewMI : InsInstrs) {
    assert(static_cast<unsigned>(RISCV::getNamedOperandIdx(
               NewMI->getOpcode(), RISCV::OpName::frm)) ==
               NewMI->getNumOperands() &&
           "Instruction has unexpected number of operands");
    // ... append the root's frm operand to NewMI ...
  }
}
bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                            bool &Commuted) const {
  if (!TargetInstrInfo::hasReassociableSibling(Inst, Commuted))
    return false;

  const MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();
  unsigned OperandIdx = Commuted ? 2 : 1;
  const MachineInstr &Sibling =
      *MRI.getVRegDef(Inst.getOperand(OperandIdx).getReg());

  int16_t InstFrmOpIdx =
      RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
  int16_t SiblingFrmOpIdx =
      RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);

  return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
         RISCV::hasEqualFRM(Inst, Sibling);
}

bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
                                                 bool Invert) const {
  unsigned Opc = Inst.getOpcode();
  if (Invert) {
    auto InverseOpcode = getInverseOpcode(Opc);
    if (!InverseOpcode)
      return false;
    Opc = *InverseOpcode;
  }
std::optional<unsigned>
RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::FADD_H:
    return RISCV::FSUB_H;
  case RISCV::FADD_S:
    return RISCV::FSUB_S;
  case RISCV::FADD_D:
    return RISCV::FSUB_D;
  case RISCV::FSUB_H:
    return RISCV::FADD_H;
  case RISCV::FSUB_S:
    return RISCV::FADD_S;
  case RISCV::FSUB_D:
    return RISCV::FADD_D;
  }
}
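// fmul + fadd/fsub fusion: recognize the pattern for the machine combiner
// and rewrite it as fmadd/fmsub when profitable.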
static bool canCombineFPFusedMultiply(const MachineInstr &Root,
                                      const MachineOperand &MO,
                                      bool DoRegPressureReduce) {
  // ...
  if (DoRegPressureReduce && !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;
  // ...
}

static bool getFPFusedMultiplyPatterns(MachineInstr &Root,
                                       SmallVectorImpl<MachineCombinerPattern> &Patterns,
                                       bool DoRegPressureReduce) {
  unsigned Opc = Root.getOpcode();
  bool IsFAdd = isFADD(Opc);
  if (!IsFAdd && !isFSUB(Opc))
    return false;
  if (canCombineFPFusedMultiply(Root, Root.getOperand(1),
                                DoRegPressureReduce)) {
    // ... push the FMADD/FMSUB pattern for operand 1 ...
  }
  if (canCombineFPFusedMultiply(Root, Root.getOperand(2),
                                DoRegPressureReduce)) {
    // ... push the FMADD/FMSUB pattern for operand 2 ...
  }
  // ...
}

static bool getFPPatterns(MachineInstr &Root,
                          SmallVectorImpl<MachineCombinerPattern> &Patterns,
                          bool DoRegPressureReduce) {
  return getFPFusedMultiplyPatterns(Root, Patterns, DoRegPressureReduce);
}

bool RISCVInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
    bool DoRegPressureReduce) const {
  if (getFPPatterns(Root, Patterns, DoRegPressureReduce))
    return true;
  return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns,
                                                     DoRegPressureReduce);
}
static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc,
                                         MachineCombinerPattern Pattern) {
  switch (RootOpc) {
  default:
    llvm_unreachable("Unexpected opcode");
  case RISCV::FADD_H:
    return RISCV::FMADD_H;
  case RISCV::FADD_S:
    return RISCV::FMADD_S;
  case RISCV::FADD_D:
    return RISCV::FMADD_D;
  // ... FSUB_* roots map to FMSUB_*/FNMSUB_* depending on the pattern ...
  }
}

// combineFPFusedMultiply builds the fused instruction:
//   BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
//       ... multiplicands, addend, and the merged rounding mode ...

// genAlternativeCodeSequence falls back to the target-independent handling
// for everything except the FP fusion patterns:
//     TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
                                                DelInstrs, InstrIdxForVirtReg);
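// verifyInstruction: machine-verifier hook. Immediate operands are checked
// against their declared ranges first, then the RVV-specific merge/VL/SEW/
// policy operands.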
    unsigned OpType = Operand.OperandType;
    if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
        OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
      const MachineOperand &MO = MI.getOperand(Index);
      if (MO.isImm()) {
        int64_t Imm = MO.getImm();
        bool Ok;
        switch (OpType) {
        default:
          llvm_unreachable("Unexpected operand type");

          // clang-format off
#define CASE_OPERAND_UIMM(NUM)                                                 \
  case RISCVOp::OPERAND_UIMM##NUM:                                             \
    Ok = isUInt<NUM>(Imm);                                                     \
    break;
        // ... CASE_OPERAND_UIMM instantiations ...
        case RISCVOp::OPERAND_UIMM2_LSB0:
          Ok = isShiftedUInt<1, 1>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM7_LSB00:
          Ok = isShiftedUInt<5, 2>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM8_LSB00:
          Ok = isShiftedUInt<6, 2>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM8_LSB000:
          Ok = isShiftedUInt<5, 3>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM8_GE32:
          Ok = isUInt<8>(Imm) && Imm >= 32;
          break;
        case RISCVOp::OPERAND_UIMM9_LSB000:
          Ok = isShiftedUInt<6, 3>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM10_LSB0000_NONZERO:
          Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
          break;
        case RISCVOp::OPERAND_UIMM10_LSB00_NONZERO:
          Ok = isShiftedUInt<8, 2>(Imm) && (Imm != 0);
          break;
        // ...
        case RISCVOp::OPERAND_SIMM5_PLUS1:
          Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
          break;
        case RISCVOp::OPERAND_SIMM6_NONZERO:
          Ok = Imm != 0 && isInt<6>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM10:
          Ok = isUInt<10>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM11:
          Ok = isUInt<11>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM12:
          Ok = isInt<12>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM12_LSB00000:
          Ok = isShiftedInt<7, 5>(Imm);
          break;
        case RISCVOp::OPERAND_UIMMLOG2XLEN:
          Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
          break;
        case RISCVOp::OPERAND_UIMMLOG2XLEN_NONZERO:
          Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
          Ok = Ok && Imm != 0;
          break;
        case RISCVOp::OPERAND_CLUI_IMM:
          Ok = (isUInt<5>(Imm) && Imm != 0) ||
               (Imm >= 0xfffe0 && Imm <= 0xfffff);
          break;
        // ...
          Ok = Imm >= 0 && Imm <= 10;
          break;
        // ...
          Ok = Imm >= 0 && Imm <= 7;
          break;
        // ...
          Ok = Imm >= 1 && Imm <= 10;
          break;
        // ...
          Ok = Imm >= 2 && Imm <= 14;
          break;
          // clang-format on
        }
        if (!Ok) {
          ErrInfo = "Invalid immediate";
          return false;
        }
      }
    }
      if (MI.findTiedOperandIdx(0) != OpIdx) {
        ErrInfo = "Merge op improperly tied";
        return false;
      }
    // ...
      if (!Op.isImm() && !Op.isReg()) {
        ErrInfo = "Invalid operand type for VL operand";
        return false;
      }
      if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
        const MachineRegisterInfo &MRI = MI.getMF()->getRegInfo();
        auto *RC = MRI.getRegClass(Op.getReg());
        if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
          ErrInfo = "Invalid register class for VL operand";
          return false;
        }
      }
      if (!RISCVII::hasSEWOp(TSFlags)) {
        ErrInfo = "VL operand w/o SEW operand?";
        return false;
      }
    // ... Log2SEW must be in range ...
        ErrInfo = "Unexpected SEW value";
        return false;
    // ... and decode to a valid SEW ...
        ErrInfo = "Unexpected SEW value";
        return false;
    // ...
      uint64_t Policy = MI.getOperand(OpIdx).getImm();
      if (Policy > (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) {
        ErrInfo = "Invalid Policy Value";
        return false;
      }
      if (!RISCVII::hasVLOp(TSFlags)) {
        ErrInfo = "policy operand w/o VL operand?";
        return false;
      }
      // ...
      unsigned UseOpIdx;
      if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
        ErrInfo = "policy operand w/o tied operand?";
        return false;
      }
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned int WidthA = 0, WidthB = 0;
  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
      getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
    if (BaseOpA->isIdenticalTo(*BaseOpB)) {
      int LowOffset = std::min(OffsetA, OffsetB);
      int HighOffset = std::max(OffsetA, OffsetB);
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
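// Target-flag serialization for MIR: split a flag word into its direct part
// and map each direct flag to a printable name.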
std::pair<unsigned, unsigned>
RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
  return std::make_pair(TF & Mask, TF & ~Mask);
}

ArrayRef<std::pair<unsigned, const char *>>
RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace RISCVII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_PLT, "riscv-plt"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
  return ArrayRef(TargetFlags);
}
bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
  const Function &F = MF.getFunction();

  // Can F be deduplicated by the linker? If it can, don't outline from it.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;
  // ...
}

bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                            unsigned &Flags) const {
  return TargetInstrInfo::isMBBSafeToOutlineFrom(MBB, Flags);
}

std::optional<outliner::OutlinedFunction>
RISCVInstrInfo::getOutliningCandidateInfo(
    std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {

  // First we need to filter out candidates where the X5 register (t0) can't
  // be used to set up the function call.
  auto CannotInsertCall = [](outliner::Candidate &C) {
    const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
    return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
  };

  llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);

  // If the sequence doesn't have enough candidates left, then we're done.
  if (RepeatedSequenceLocs.size() < 2)
    return std::nullopt;

  unsigned SequenceSize = 0;

  auto I = RepeatedSequenceLocs[0].front();
  auto E = std::next(RepeatedSequenceLocs[0].back());
  // ... sum getInstSizeInBytes over [I, E) ...

  // call t0, function = 8 bytes.
  unsigned CallOverhead = 8;
  for (auto &C : RepeatedSequenceLocs)
    C.setCallInfo(MachineOutlinerDefault, CallOverhead);

  // jr t0 = 4 bytes, 2 bytes if compressed instructions are enabled.
  unsigned FrameOverhead = 4;
  if (RepeatedSequenceLocs[0]
          .getMF()
          ->getSubtarget<RISCVSubtarget>()
          .hasStdExtCOrZca())
    FrameOverhead = 2;
outliner::InstrType
RISCVInstrInfo::getOutliningTypeImpl(MachineBasicBlock::iterator &MBBI,
                                     unsigned Flags) const {
  MachineInstr &MI = *MBBI;
  const auto &F = MI.getMF()->getFunction();

  // Positions generally can't safely be outlined.
  if (MI.isPosition()) {
    // We can manually strip out CFI instructions later.
    if (MI.isCFIInstruction())
      // If the function has exception-handling code we can't outline and
      // strip these CFI instructions, since that would break .eh_frame.
      return F.needsUnwindTableEntry() ? outliner::InstrType::Illegal
                                       : outliner::InstrType::Invisible;
    return outliner::InstrType::Illegal;
  }

  // ...
  // Anything that touches x5 (t0) cannot be outlined: it holds the return
  // address of the outlined call.
  if (MI.modifiesRegister(RISCV::X5, TRI) ||
      MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
    return outliner::InstrType::Illegal;

  // Make sure the operands don't reference something unsafe.
  for (const auto &MO : MI.operands()) {
    // ... pcrel-lo operands can't be moved into a different section ...
    if (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
        F.hasSection())
      return outliner::InstrType::Illegal;
  }

  return outliner::InstrType::Legal;
}
  // Strip out any CFI instructions.
  bool Changed = true;
  while (Changed) {
    Changed = false;
    auto I = MBB.begin();
    auto E = MBB.end();
    for (; I != E; ++I) {
      if (I->isCFIInstruction()) {
        I->removeFromParent();
        Changed = true;
        break;
      }
    }
  }

  // ... insertOutlinedCall: add the call to the outlined function ...
  It = MBB.insert(It,
                  BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
                      .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
                                        RISCVII::MO_CALL));
  return It;
  std::string GenericComment =
      TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
  if (!GenericComment.empty())
    return GenericComment;

  // If not, we must have an immediate operand.
  if (!Op.isImm())
    return std::string();

  std::string Comment;
  raw_string_ostream OS(Comment);
  // ...
  if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
       MI.getOpcode() == RISCV::PseudoVSETVLI ||
       MI.getOpcode() == RISCV::PseudoVSETIVLI ||
       MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
      OpIdx == 2) {
    unsigned Imm = MI.getOperand(OpIdx).getImm();
    RISCVVType::printVType(Imm, OS);
  } else if (RISCVII::hasSEWOp(TSFlags) &&
             OpIdx == RISCVII::getSEWOpNum(MI.getDesc())) {
    unsigned Log2SEW = MI.getOperand(OpIdx).getImm();
    // ... print the decoded SEW ...
  } else if (RISCVII::hasVecPolicyOp(TSFlags) &&
             OpIdx == RISCVII::getVecPolicyOpNum(MI.getDesc())) {
    unsigned Policy = MI.getOperand(OpIdx).getImm();
    assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
           "Invalid Policy Value");
    // ... print ta/tu and ma/mu ...
  }

  OS.flush();
  return Comment;
#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL)                                \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)                                    \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1):                                       \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)                                   \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2):                                      \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)                                   \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4):                                      \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)

#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)                                       \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8):                                      \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)

#define CASE_VFMA_SPLATS(OP)                                                   \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VF16):                                        \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VF32):                                   \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VF64)
bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                           unsigned &SrcOpIdx1,
                                           unsigned &SrcOpIdx2) const {
  const MCInstrDesc &Desc = MI.getDesc();
  if (!Desc.isCommutable())
    return false;

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ:
    // We can't commute operands if operand 2 (i.e., rs1 in mveqz/mvnez
    // rd,rs1,rs2) is the zero register.
    if (MI.getOperand(2).getReg() == RISCV::X0)
      return false;
    // Operands 1 and 2 are commutable, if we switch the opcode.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 1, 2);
  case RISCV::TH_MULA:
  case RISCV::TH_MULAW:
  case RISCV::TH_MULAH:
  case RISCV::TH_MULS:
  case RISCV::TH_MULSW:
  case RISCV::TH_MULSH:
    // Operands 2 and 3 are commutable.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
  case RISCV::PseudoCCMOVGPR:
    // Operands 4 and 5 are commutable.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
  // The first group of vfma pseudos can only swap operand 1 with operand 3,
  // which also requires changing the opcode.
  // ... {
    // If the tail policy is undisturbed we can't commute.
    if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
      return false;

    unsigned CommutableOpIdx1 = 1;
    unsigned CommutableOpIdx2 = 3;
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                              CommutableOpIdx2))
      return false;
    return true;
  // The second group has more freedom: either multiplicand, or the addend,
  // may be swapped.
  // ... {
    // If the tail policy is undisturbed we can't commute.
    if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
      return false;

    // Any fixed operand must be from source 1, 2 or 3.
    if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
      return false;
    if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
      return false;

    // If both operands are fixed, one of them must be the tied source.
    if (SrcOpIdx1 != CommuteAnyOperandIndex &&
        SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
      return false;

    if (SrcOpIdx1 == CommuteAnyOperandIndex ||
        SrcOpIdx2 == CommuteAnyOperandIndex) {
      // At least one of the operands to be commuted is not specified, so this
      // method is free to choose appropriate commutable operands.
      unsigned CommutableOpIdx1 = SrcOpIdx1;
      if (SrcOpIdx1 == SrcOpIdx2) {
        // Both operands are unfixed: start from the tied source.
        CommutableOpIdx1 = 1;
      } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
        // Only one of the operands is unfixed.
        CommutableOpIdx1 = SrcOpIdx2;
      }

      // CommutableOpIdx1 is well defined now. Choose another commutable
      // operand and assign its index to CommutableOpIdx2.
      unsigned CommutableOpIdx2;
      if (CommutableOpIdx1 != 1) {
        // If we haven't already used the tied source, we must use it now.
        CommutableOpIdx2 = 1;
      } else {
        Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();

        // The commuted operands should have different registers, otherwise
        // the commute is useless; use this as a hint.
        if (Op1Reg != MI.getOperand(2).getReg())
          CommutableOpIdx2 = 2;
        else
          CommutableOpIdx2 = 3;
      }

      if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                                CommutableOpIdx2))
        return false;
    }
    return true;
  }

  return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
}
#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL)               \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL:                                \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL;                             \
    break;

#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)                   \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)                      \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                           \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VF16)                        \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VF32)                        \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VF64)
MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                     bool NewMI,
                                                     unsigned OpIdx1,
                                                     unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case RISCV::TH_MVEQZ:
  case RISCV::TH_MVNEZ: {
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(MI.getOpcode() == RISCV::TH_MVEQZ ? RISCV::TH_MVNEZ
                                                            : RISCV::TH_MVEQZ));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case RISCV::PseudoCCMOVGPR: {
    // CCMOV can be commuted by inverting the condition.
    auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
    CC = RISCVCC::getOppositeBranchCondition(CC);
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(CC);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  // vfmacc-style pseudos: swapping a multiplicand with the addend needs a new
  // opcode.
  // ... {
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
    unsigned Opc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
      CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
      // ... the remaining CASE_VFMA_CHANGE_OPCODE_* expansions ...
    }

    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  // vfmadd-style pseudos: operand 1 always commutes freely; only swapping
  // with the addend (operand 3) changes the opcode.
  // ... {
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    if (OpIdx1 == 3 || OpIdx2 == 3) {
      unsigned Opc;
      switch (MI.getOpcode()) {
      default:
        llvm_unreachable("Unexpected opcode");
        // ...
      }

      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
      return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                     OpIdx1, OpIdx2);
    }
    break;
  }

  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
#undef CASE_VFMA_CHANGE_OPCODE_LMULS
#undef CASE_VFMA_CHANGE_OPCODE_COMMON
#undef CASE_VFMA_SPLATS
#undef CASE_VFMA_OPCODE_LMULS
#undef CASE_VFMA_OPCODE_COMMON
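// The CASE_WIDEOP_* macros play the same role for the widening-op _TIED
// pseudos handled by convertToThreeAddress below.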
#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL)                                    \
  RISCV::PseudoV##OP##_##LMUL##_TIED

#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP)                                       \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF4):                                          \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2):                                     \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M1):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M2):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_OPCODE_LMULS(OP)                                           \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF8):                                          \
  case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)

#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL)                             \
  case RISCV::PseudoV##OP##_##LMUL##_TIED:                                     \
    NewOpc = RISCV::PseudoV##OP##_##LMUL;                                      \
    break;

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)                                \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
  switch (MI.getOpcode()) {
  default:
    return nullptr;
  // ... widening-op _TIED cases ...
  {
    // If the tail policy is undisturbed we can't convert.
    assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
           MI.getNumExplicitOperands() == 6);
    if ((MI.getOperand(5).getImm() & 1) == 0)
      return nullptr;

    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
      // ... CASE_WIDEOP_CHANGE_OPCODE_* expansions set NewOpc ...
    }

    MachineBasicBlock &MBB = *MI.getParent();
    MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
              .add(MI.getOperand(0))
              .add(MI.getOperand(1))
              .add(MI.getOperand(2))
              .add(MI.getOperand(3))
              .add(MI.getOperand(4));
    break;
  }
  }
  MIB.copyImplicitOps(MI);

  if (LV) {
    unsigned NumOps = MI.getNumOperands();
    for (unsigned I = 1; I < NumOps; ++I) {
      MachineOperand &Op = MI.getOperand(I);
      if (Op.isReg() && Op.isKill())
        LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
    }
  }

  if (LIS) {
    SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);

    if (MI.getOperand(0).isEarlyClobber()) {
      // Use operand 1 was tied to the early-clobber def, so its live interval
      // may end at an early-clobber slot; move it to the normal slot.
      LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
      LiveRange::Segment *S = LI.getSegmentContaining(Idx);
      if (S->end == Idx.getRegSlot(true))
        S->end = Idx.getRegSlot();
    }
  }
#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_WIDEOP_OPCODE_LMULS
#undef CASE_WIDEOP_OPCODE_COMMON
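// getVLENFactoredAmount: multiply the VLENB value by Amount / 8 using shifts,
// Zba sh[123]add, add/sub of a shifted copy, or a full multiply as available.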
  assert(Amount > 0 && "There is no need to get VLEN scaled value.");
  assert(Amount % 8 == 0 &&
         "Reserve the stack by the multiple of one vector size.");

  MachineRegisterInfo &MRI = MF.getRegInfo();
  int64_t NumOfVReg = Amount / 8;

  BuildMI(MBB, II, DL, get(RISCV::PseudoReadVLENB), DestReg).setMIFlag(Flag);
  assert(isInt<32>(NumOfVReg) &&
         "Expect the number of vector registers within 32-bits.");
  if (llvm::has_single_bit<uint32_t>(NumOfVReg)) {
    uint32_t ShiftAmount = Log2_32(NumOfVReg);
    if (ShiftAmount == 0)
      return;
    // ... shift DestReg left by ShiftAmount ...
  } else if (STI.hasStdExtZba() &&
             ((NumOfVReg % 3 == 0 && isPowerOf2_64(NumOfVReg / 3)) ||
              (NumOfVReg % 5 == 0 && isPowerOf2_64(NumOfVReg / 5)) ||
              (NumOfVReg % 9 == 0 && isPowerOf2_64(NumOfVReg / 9)))) {
    // We can use Zba SHXADD + SLLI instructions for the multiply.
    unsigned Opc;
    uint32_t ShiftAmount;
    if (NumOfVReg % 9 == 0) {
      Opc = RISCV::SH3ADD;
      ShiftAmount = Log2_64(NumOfVReg / 9);
    } else if (NumOfVReg % 5 == 0) {
      Opc = RISCV::SH2ADD;
      ShiftAmount = Log2_64(NumOfVReg / 5);
    } else if (NumOfVReg % 3 == 0) {
      Opc = RISCV::SH1ADD;
      ShiftAmount = Log2_64(NumOfVReg / 3);
    } else {
      llvm_unreachable("Unexpected number of vregs");
    }
    // ...
  } else if (llvm::has_single_bit<uint32_t>(NumOfVReg - 1)) {
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ... shift into ScaledRegister, then add DestReg ...
  } else if (llvm::has_single_bit<uint32_t>(NumOfVReg + 1)) {
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ... shift into ScaledRegister, then subtract DestReg ...
  } else {
    Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    movImm(MBB, II, DL, N, NumOfVReg, Flag);
    if (!STI.hasStdExtM() && !STI.hasStdExtZmmul())
      MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
          MF.getFunction(),
          "M- or Zmmul-extension must be enabled to calculate the vscaled size/"
          "offset."});
    // ... emit the MUL ...
  }
ArrayRef<std::pair<MachineMemOperand::Flags, const char *>>
RISCVInstrInfo::getSerializableMachineMemOperandTargetFlags() const {
  static const std::pair<MachineMemOperand::Flags, const char *> TargetFlags[] =
      {{MONontemporalBit0, "riscv-nontemporal-domain-bit-0"},
       {MONontemporalBit1, "riscv-nontemporal-domain-bit-1"}};
  return ArrayRef(TargetFlags);
}
// Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
bool RISCV::isSEXT_W(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
}

// Returns true if this is the zext.w pattern, add.uw rd, rs1, x0.
bool RISCV::isZEXT_W(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ADD_UW && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0;
}

// Returns true if this is the zext.b pattern, andi rd, rs1, 255.
bool RISCV::isZEXT_B(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ANDI && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
}
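// RVV spill helpers: classify whole-register loads/stores and the Zvlsseg
// spill/reload pseudos (which return their NF and LMUL).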
static bool isRVVWholeLoadStore(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
  case RISCV::VS1R_V:
  case RISCV::VS2R_V:
  case RISCV::VS4R_V:
  case RISCV::VS8R_V:
  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL1RE64_V:
  case RISCV::VL2RE64_V:
  case RISCV::VL4RE64_V:
  case RISCV::VL8RE64_V:
    return true;
  }
}

bool RISCV::isRVVSpill(const MachineInstr &MI) {
  // RVV lacks any support for immediate addressing for stack addresses, so be
  // conservative.
  unsigned Opcode = MI.getOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
      !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
    return false;
  return true;
}
std::optional<std::pair<unsigned, unsigned>>
RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVRELOAD2_M1:
    return std::make_pair(2u, 1u);
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVRELOAD2_M2:
    return std::make_pair(2u, 2u);
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVRELOAD2_M4:
    return std::make_pair(2u, 4u);
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVRELOAD3_M1:
    return std::make_pair(3u, 1u);
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVRELOAD3_M2:
    return std::make_pair(3u, 2u);
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVRELOAD4_M1:
    return std::make_pair(4u, 1u);
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVRELOAD4_M2:
    return std::make_pair(4u, 2u);
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVRELOAD5_M1:
    return std::make_pair(5u, 1u);
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVRELOAD6_M1:
    return std::make_pair(6u, 1u);
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVRELOAD7_M1:
    return std::make_pair(7u, 1u);
  case RISCV::PseudoVSPILL8_M1:
  case RISCV::PseudoVRELOAD8_M1:
    return std::make_pair(8u, 1u);
  }
}
bool RISCV::isFaultFirstLoad(const MachineInstr &MI) {
  return MI.getNumExplicitDefs() == 2 && MI.modifiesRegister(RISCV::VL) &&
         !MI.isInlineAsm();
}

bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
  int16_t MI1FrmOpIdx =
      RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
  int16_t MI2FrmOpIdx =
      RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
  if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
    return false;
  // ... compare the two frm immediates ...
}