#define GEN_CHECK_COMPRESS_INSTR
#include "RISCVGenCompressInstEmitter.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRINFO_NAMED_OPS
#include "RISCVGenInstrInfo.inc"

static cl::opt<bool> PreferWholeRegisterMove(
    "riscv-prefer-whole-register-move", cl::init(false), cl::Hidden,
    cl::desc("Prefer whole register move for vector registers."));

#define GET_RISCVVPseudosTable_IMPL
#include "RISCVGenSearchableTables.inc"
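// Stack-slot load/store recognition: both hooks below match a frame-index
// base operand with a zero immediate offset and report the frame index plus
// the transferred register, so generic passes can identify spill code.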
unsigned RISCVInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                             int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    return 0;
  // ... (the scalar load opcodes)
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }

  return 0;
}
unsigned RISCVInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                            int &FrameIndex) const {
  switch (MI.getOpcode()) {
  default:
    return 0;
  // ... (the scalar store opcodes)
  }

  if (MI.getOperand(1).isFI() && MI.getOperand(2).isImm() &&
      MI.getOperand(2).getImm() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }

  return 0;
}
static bool forwardCopyWillClobberTuple(unsigned DstReg, unsigned SrcReg,
                                        unsigned NumRegs) {
  return DstReg > SrcReg && (DstReg - SrcReg) < NumRegs;
}
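// Scan backwards from a whole-register vector COPY looking for the
// instruction that defines the source. The COPY may be rewritten as
// vmv.v.v only if nothing in between changes VL/VTYPE in a way that would
// alter the copied lanes; a "vsetvli x0, x0, vtype" that preserves VL and
// only adjusts vtype is tolerated.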
static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
                                   const MachineBasicBlock &MBB,
                                   MachineBasicBlock::const_iterator MBBI,
                                   MachineBasicBlock::const_iterator &DefMBBI,
                                   RISCVII::VLMUL LMul) {
  if (PreferWholeRegisterMove)
    return false;

  assert(MBBI->getOpcode() == TargetOpcode::COPY &&
         "Unexpected COPY instruction.");
  Register SrcReg = MBBI->getOperand(1).getReg();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  bool FoundDef = false;
  bool FirstVSetVLI = false;
  unsigned FirstSEW = 0;
  while (MBBI != MBB.begin()) {
    --MBBI;
    if (MBBI->isMetaInstruction())
      continue;
    if (MBBI->getOpcode() == RISCV::PseudoVSETVLI ||
        MBBI->getOpcode() == RISCV::PseudoVSETVLIX0 ||
        MBBI->getOpcode() == RISCV::PseudoVSETIVLI) {
      if (!FoundDef) {
        if (!FirstVSetVLI) {
          FirstVSetVLI = true;
          unsigned FirstVType = MBBI->getOperand(2).getImm();
          RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
          FirstSEW = RISCVVType::getSEW(FirstVType);
          // The first vsetvli encountered must carry the same LMUL as the
          // register class of the COPY.
          if (FirstLMul != LMul)
            return false;
        }
        // Only permit "vsetvli x0, x0, vtype" between the COPY and the
        // defining instruction; it changes vtype but preserves VL.
        if (MBBI->getOperand(0).getReg() != RISCV::X0)
          return false;
        if (MBBI->getOperand(1).isImm())
          return false;
        if (MBBI->getOperand(1).getReg() != RISCV::X0)
          return false;
        continue;
      }

      // This is the vsetvli that configured the defining instruction.
      unsigned VType = MBBI->getOperand(2).getImm();
      // ... (check SEW/LMUL compatibility and tail policy, then decide
      //      whether the COPY may become vmv.v.v)
    } else if (MBBI->isInlineAsm() || MBBI->isCall()) {
      return false;
    } else if (MBBI->getNumDefs()) {
      // Bail on anything that changes VL (e.g. vleff implicitly defines VL).
      if (MBBI->modifiesRegister(RISCV::VL))
        return false;
      for (const MachineOperand &MO : MBBI->operands()) {
        if (!MO.isReg() || !MO.isDef())
          continue;
        if (!FoundDef && TRI->isSubRegisterEq(MO.getReg(), SrcReg)) {
          // Require an exact whole-register def and remember the producer.
          if (MO.getReg() != SrcReg)
            return false;
          FoundDef = true;
          DefMBBI = MBBI;
          // ...
        }
      }
    }
  }
  return false;
}
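// copyPhysReg: GPR copies become "addi rd, rs, 0", FPR copies become
// "fsgnj.{h,s,d} rd, rs, rs", and vector copies pick a whole-register
// vmv<N>r.v (or a forwarded vmv.v.v, see below) based on the register class
// of the operands.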
void RISCVInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 const DebugLoc &DL, MCRegister DstReg,
                                 MCRegister SrcReg, bool KillSrc) const {
  if (RISCV::GPRRegClass.contains(DstReg, SrcReg)) {
    BuildMI(MBB, MBBI, DL, get(RISCV::ADDI), DstReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(0);
    return;
  }

  // Handle copy from csr
  if (RISCV::VCSRRegClass.contains(SrcReg) &&
      RISCV::GPRRegClass.contains(DstReg)) {
    const TargetRegisterInfo &TRI = *STI.getRegisterInfo();
    BuildMI(MBB, MBBI, DL, get(RISCV::CSRRS), DstReg)
        .addImm(RISCVSysReg::lookupSysRegByName(TRI.getName(SrcReg))->Encoding)
        .addReg(RISCV::X0);
    return;
  }

  // FPR->FPR copies and VR->VR copies.
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  unsigned Opc;
  bool IsScalableVector = true;
  unsigned NF = 1;
  RISCVII::VLMUL LMul = RISCVII::LMUL_1;
  unsigned SubRegIdx = RISCV::sub_vrm1_0;
  if (RISCV::FPR16RegClass.contains(DstReg, SrcReg)) {
    if (!STI.hasStdExtZfh() && STI.hasStdExtZfhmin()) {
      // Zfhmin lacks fsgnj.h: copy via fsgnj.s on the FPR32 super-registers.
      DstReg = TRI->getMatchingSuperReg(DstReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      SrcReg = TRI->getMatchingSuperReg(SrcReg, RISCV::sub_16,
                                        &RISCV::FPR32RegClass);
      Opc = RISCV::FSGNJ_S;
    } else {
      Opc = RISCV::FSGNJ_H;
    }
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::FSGNJ_S;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::FSGNJ_D;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRM2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV2R_V;
    LMul = RISCVII::LMUL_2;
  } else if (RISCV::VRM4RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV4R_V;
    LMul = RISCVII::LMUL_4;
  } else if (RISCV::VRM8RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV8R_V;
    LMul = RISCVII::LMUL_8;
  } else if (RISCV::VRN2M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 2;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN2M2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    NF = 2;
    LMul = RISCVII::LMUL_2;
  } else if (RISCV::VRN2M4RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV4R_V;
    SubRegIdx = RISCV::sub_vrm4_0;
    NF = 2;
    LMul = RISCVII::LMUL_4;
  } else if (RISCV::VRN3M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 3;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN3M2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    NF = 3;
    LMul = RISCVII::LMUL_2;
  } else if (RISCV::VRN4M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 4;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN4M2RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV2R_V;
    SubRegIdx = RISCV::sub_vrm2_0;
    NF = 4;
    LMul = RISCVII::LMUL_2;
  } else if (RISCV::VRN5M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 5;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN6M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 6;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN7M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 7;
    LMul = RISCVII::LMUL_1;
  } else if (RISCV::VRN8M1RegClass.contains(DstReg, SrcReg)) {
    Opc = RISCV::VMV1R_V;
    SubRegIdx = RISCV::sub_vrm1_0;
    NF = 8;
    LMul = RISCVII::LMUL_1;
  } else {
    llvm_unreachable("Impossible reg-to-reg copy");
  }
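  // For scalable vector copies, prefer vmv.v.v (or vmv.v.i when the producer
  // was an immediate splat) over the whole-register vmv<N>r.v when the scan
  // above proved it safe: it copies only the lanes that are live.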
  if (IsScalableVector) {
    bool UseVMV_V_V = false;
    MachineBasicBlock::const_iterator DefMBBI;
    unsigned VIOpc;
    if (isConvertibleToVMV_V_V(STI, MBB, MBBI, DefMBBI, LMul)) {
      UseVMV_V_V = true;
      // Only LMUL = 1/2/4/8 can reach here; no register classes exist for
      // fractional LMUL.
      switch (LMul) {
      default:
        llvm_unreachable("Impossible LMUL for vector register copy.");
      case RISCVII::LMUL_1:
        Opc = RISCV::PseudoVMV_V_V_M1;
        VIOpc = RISCV::PseudoVMV_V_I_M1;
        break;
      case RISCVII::LMUL_2:
        Opc = RISCV::PseudoVMV_V_V_M2;
        VIOpc = RISCV::PseudoVMV_V_I_M2;
        break;
      case RISCVII::LMUL_4:
        Opc = RISCV::PseudoVMV_V_V_M4;
        VIOpc = RISCV::PseudoVMV_V_I_M4;
        break;
      case RISCVII::LMUL_8:
        Opc = RISCV::PseudoVMV_V_V_M8;
        VIOpc = RISCV::PseudoVMV_V_I_M8;
        break;
      }
    }

    bool UseVMV_V_I = false;
    if (UseVMV_V_V && (DefMBBI->getOpcode() == VIOpc)) {
      UseVMV_V_I = true;
      Opc = VIOpc;
    }

    if (NF == 1) {
      auto MIB = BuildMI(MBB, MBBI, DL, get(Opc), DstReg);
      if (UseVMV_V_I)
        MIB = MIB.add(DefMBBI->getOperand(1));
      else
        MIB = MIB.addReg(SrcReg, getKillRegState(KillSrc));
      // ... (when forwarding from DefMBBI, also append its AVL and SEW
      //      operands plus implicit VL/VTYPE uses)
    } else {
      // Copy the tuple member-by-member, walking backwards when a forward
      // copy would clobber not-yet-copied members.
      int I = 0, End = NF, Incr = 1;
      unsigned SrcEncoding = TRI->getEncodingValue(SrcReg);
      unsigned DstEncoding = TRI->getEncodingValue(DstReg);
      unsigned LMulVal;
      bool Fractional;
      std::tie(LMulVal, Fractional) = RISCVVType::decodeVLMUL(LMul);
      assert(!Fractional && "It is impossible be fractional lmul here.");
      if (forwardCopyWillClobberTuple(DstEncoding, SrcEncoding, NF * LMulVal)) {
        I = NF - 1;
        End = -1;
        Incr = -1;
      }

      for (; I != End; I += Incr) {
        auto MIB = BuildMI(MBB, MBBI, DL, get(Opc),
                           TRI->getSubReg(DstReg, SubRegIdx + I));
        if (UseVMV_V_I)
          MIB = MIB.add(DefMBBI->getOperand(1));
        else
          MIB = MIB.addReg(TRI->getSubReg(SrcReg, SubRegIdx + I),
                           getKillRegState(KillSrc));
        // ... (AVL/SEW operands as above)
      }
    }
  } else {
    BuildMI(MBB, MBBI, DL, get(Opc), DstReg)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(SrcReg, getKillRegState(KillSrc));
  }
}
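// Spill/reload lowering: scalars use plain loads/stores, single vector
// registers use whole-register VS<N>R/VL<N>RE8 instructions, and Zvlsseg
// register tuples need PseudoVSPILL<NF>/PseudoVRELOAD<NF> pseudos that are
// expanded later, because the per-member stack offset depends on VLEN.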
void RISCVInstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                         MachineBasicBlock::iterator I,
                                         Register SrcReg, bool IsKill, int FI,
                                         const TargetRegisterClass *RC,
                                         const TargetRegisterInfo *TRI,
                                         Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  MachineFrameInfo &MFI = MF->getFrameInfo();

  unsigned Opcode;
  bool IsScalableVector = true;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::SW : RISCV::SD;
    IsScalableVector = false;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSH;
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSW;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FSD;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS1R_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS2R_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS4R_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VS8R_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVSPILL8_M1;
  else
    llvm_unreachable("Can't store this register to stack slot");

  if (IsScalableVector) {
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
        MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));

    MFI.setStackID(FI, TargetStackID::ScalableVector);
    BuildMI(MBB, I, DL, get(Opcode))
        .addReg(SrcReg, getKillRegState(IsKill))
        .addFrameIndex(FI)
        .addMemOperand(MMO);
  } else {
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOStore,
        MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

    BuildMI(MBB, I, DL, get(Opcode))
        .addReg(SrcReg, getKillRegState(IsKill))
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO);
  }
}
void RISCVInstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                          MachineBasicBlock::iterator I,
                                          Register DstReg, int FI,
                                          const TargetRegisterClass *RC,
                                          const TargetRegisterInfo *TRI,
                                          Register VReg) const {
  DebugLoc DL;
  if (I != MBB.end())
    DL = I->getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  MachineFrameInfo &MFI = MF->getFrameInfo();

  unsigned Opcode;
  bool IsScalableVector = true;
  if (RISCV::GPRRegClass.hasSubClassEq(RC)) {
    Opcode = TRI->getRegSizeInBits(RISCV::GPRRegClass) == 32 ?
             RISCV::LW : RISCV::LD;
    IsScalableVector = false;
  } else if (RISCV::FPR16RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLH;
    IsScalableVector = false;
  } else if (RISCV::FPR32RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLW;
    IsScalableVector = false;
  } else if (RISCV::FPR64RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::FLD;
    IsScalableVector = false;
  } else if (RISCV::VRRegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL1RE8_V;
  } else if (RISCV::VRM2RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL2RE8_V;
  } else if (RISCV::VRM4RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL4RE8_V;
  } else if (RISCV::VRM8RegClass.hasSubClassEq(RC)) {
    Opcode = RISCV::VL8RE8_V;
  } else if (RISCV::VRN2M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M1;
  else if (RISCV::VRN2M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M2;
  else if (RISCV::VRN2M4RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD2_M4;
  else if (RISCV::VRN3M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M1;
  else if (RISCV::VRN3M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD3_M2;
  else if (RISCV::VRN4M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M1;
  else if (RISCV::VRN4M2RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD4_M2;
  else if (RISCV::VRN5M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD5_M1;
  else if (RISCV::VRN6M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD6_M1;
  else if (RISCV::VRN7M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD7_M1;
  else if (RISCV::VRN8M1RegClass.hasSubClassEq(RC))
    Opcode = RISCV::PseudoVRELOAD8_M1;
  else
    llvm_unreachable("Can't load this register from stack slot");

  if (IsScalableVector) {
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
        MemoryLocation::UnknownSize, MFI.getObjectAlign(FI));

    MFI.setStackID(FI, TargetStackID::ScalableVector);
    BuildMI(MBB, I, DL, get(Opcode), DstReg)
        .addFrameIndex(FI)
        .addMemOperand(MMO);
  } else {
    MachineMemOperand *MMO = MF->getMachineMemOperand(
        MachinePointerInfo::getFixedStack(*MF, FI), MachineMemOperand::MOLoad,
        MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

    BuildMI(MBB, I, DL, get(Opcode), DstReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO);
  }
}
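// foldMemoryOperandImpl: a stack reload feeding a sign/zero extend can be
// narrowed into the extending load itself (e.g. reload + sext.w -> lw),
// which is only valid on little-endian targets.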
MachineInstr *RISCVInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS,
    VirtRegMap *VRM) const {
  // Fold a load from the stack slot followed by an extend into the matching
  // extending load. Only operand 1 (the extended value) can be folded.
  if (Ops.size() != 1 || Ops[0] != 1)
    return nullptr;

  unsigned LoadOpc;
  switch (MI.getOpcode()) {
  default:
    if (RISCV::isSEXT_W(MI)) {
      LoadOpc = RISCV::LW;
      break;
    }
    if (RISCV::isZEXT_W(MI)) {
      LoadOpc = RISCV::LWU;
      break;
    }
    if (RISCV::isZEXT_B(MI)) {
      LoadOpc = RISCV::LBU;
      break;
    }
    return nullptr;
  case RISCV::SEXT_H:
    LoadOpc = RISCV::LH;
    break;
  case RISCV::SEXT_B:
    LoadOpc = RISCV::LB;
    break;
  case RISCV::ZEXT_H_RV32:
  case RISCV::ZEXT_H_RV64:
    LoadOpc = RISCV::LHU;
    break;
  }

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FrameIndex),
      MachineMemOperand::MOLoad, MFI.getObjectSize(FrameIndex),
      MFI.getObjectAlign(FrameIndex));

  Register DstReg = MI.getOperand(0).getReg();
  return BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(LoadOpc),
                 DstReg)
      .addFrameIndex(FrameIndex)
      .addImm(0)
      .addMemOperand(MMO);
}
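// movImm materializes a constant through the RISCVMatInt instruction
// sequence; the helpers below translate between RISCVCC condition codes
// and the Bcc branch opcodes.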
void RISCVInstrInfo::movImm(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            const DebugLoc &DL, Register DstReg, uint64_t Val,
                            MachineInstr::MIFlag Flag) const {
  RISCVMatInt::InstSeq Seq =
      RISCVMatInt::generateInstSeq(Val, STI.getFeatureBits());
  assert(!Seq.empty());

  for (const RISCVMatInt::Inst &Inst : Seq) {
    switch (Inst.getOpndKind()) {
    // ... (emit the LUI/ADDI/SLLI/... step, feeding each result into the
    //      next instruction of the sequence)
    }
  }
}

static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target,
                            SmallVectorImpl<MachineOperand> &Cond) {
  // Block ends with fall-through condbranch.
  assert(LastInst->getDesc().isConditionalBranch() &&
         "Unknown conditional branch");
  Target = LastInst->getOperand(2).getMBB();
  // ... (push the condition code and both register operands onto Cond)
}

const MCInstrDesc &RISCVInstrInfo::getBrCond(RISCVCC::CondCode CC) const {
  switch (CC) {
  default:
    llvm_unreachable("Unknown condition code!");
  case RISCVCC::COND_EQ:
    return get(RISCV::BEQ);
  case RISCVCC::COND_NE:
    return get(RISCV::BNE);
  case RISCVCC::COND_LT:
    return get(RISCV::BLT);
  case RISCVCC::COND_GE:
    return get(RISCV::BGE);
  case RISCVCC::COND_LTU:
    return get(RISCV::BLTU);
  case RISCVCC::COND_GEU:
    return get(RISCV::BGEU);
  }
}
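// analyzeBranch follows the generic contract: walk the terminators from the
// end of the block, optionally erase dead ones past the first unconditional
// or indirect branch (AllowModify), and give up on anything other than at
// most one conditional plus one unconditional branch.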
bool RISCVInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                   MachineBasicBlock *&TBB,
                                   MachineBasicBlock *&FBB,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   bool AllowModify) const {
  TBB = FBB = nullptr;
  Cond.clear();

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
    return false;

  // Count the number of terminators and find the first unconditional or
  // indirect branch.
  MachineBasicBlock::iterator FirstUncondOrIndirectBr = MBB.end();
  int NumTerminators = 0;
  for (auto J = I.getReverse(); J != MBB.rend() && isUnpredicatedTerminator(*J);
       J++) {
    NumTerminators++;
    if (J->getDesc().isUnconditionalBranch() ||
        J->getDesc().isIndirectBranch()) {
      FirstUncondOrIndirectBr = J.getReverse();
    }
  }

  // If AllowModify is true, we can erase any terminators past the first
  // unconditional or indirect branch.
  if (AllowModify && FirstUncondOrIndirectBr != MBB.end()) {
    while (std::next(FirstUncondOrIndirectBr) != MBB.end()) {
      std::next(FirstUncondOrIndirectBr)->eraseFromParent();
      NumTerminators--;
    }
    I = FirstUncondOrIndirectBr;
  }

  // We can't handle blocks that end in an indirect branch.
  if (I->getDesc().isIndirectBranch())
    return true;

  // We can't handle blocks with more than 2 terminators.
  if (NumTerminators > 2)
    return true;

  // Handle a single unconditional branch.
  if (NumTerminators == 1 && I->getDesc().isUnconditionalBranch()) {
    TBB = getBranchDestBlock(*I);
    return false;
  }

  // Handle a single conditional branch.
  if (NumTerminators == 1 && I->getDesc().isConditionalBranch()) {
    parseCondBranch(&*I, TBB, Cond);
    return false;
  }

  // Handle a conditional branch followed by an unconditional branch.
  if (NumTerminators == 2 && std::prev(I)->getDesc().isConditionalBranch() &&
      I->getDesc().isUnconditionalBranch()) {
    parseCondBranch(&*std::prev(I), TBB, Cond);
    FBB = getBranchDestBlock(*I);
    return false;
  }

  // Otherwise, we can't handle this.
  return true;
}
unsigned RISCVInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                      int *BytesRemoved) const {
  if (BytesRemoved)
    *BytesRemoved = 0;
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return 0;

  if (!I->getDesc().isUnconditionalBranch() &&
      !I->getDesc().isConditionalBranch())
    return 0;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();

  I = MBB.end();
  if (I == MBB.begin())
    return 1;
  --I;
  if (!I->getDesc().isConditionalBranch())
    return 1;

  // Remove the branch.
  if (BytesRemoved)
    *BytesRemoved += getInstSizeInBytes(*I);
  I->eraseFromParent();
  return 2;
}
unsigned RISCVInstrInfo::insertBranch(
    MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    ArrayRef<MachineOperand> Cond, const DebugLoc &DL, int *BytesAdded) const {
  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 3 || Cond.size() == 0) &&
         "RISCV branch conditions have two components!");
  // ... (emit PseudoBR for the unconditional case, otherwise the conditional
  //      branch built from Cond followed by an optional PseudoBR to FBB)
}
void RISCVInstrInfo::insertIndirectBranch(MachineBasicBlock &MBB,
                                          MachineBasicBlock &DestBB,
                                          MachineBasicBlock &RestoreBB,
                                          const DebugLoc &DL, int64_t BrOffset,
                                          RegScavenger *RS) const {
  assert(RS && "RegScavenger required for long branching");
  assert(MBB.empty() &&
         "new block should be inserted for expanding unconditional branch");
  assert(RestoreBB.empty() &&
         "restore block should be inserted for restoring clobbered registers");

  MachineFunction *MF = MBB.getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  RISCVMachineFunctionInfo *RVFI = MF->getInfo<RISCVMachineFunctionInfo>();
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();

  if (!isInt<32>(BrOffset))
    report_fatal_error(
        "Branch offsets outside of the signed 32-bit range not supported");

  // A virtual register is used first; the scavenger replaces it below.
  Register ScratchReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
  MachineInstr &MI = *BuildMI(MBB, MBB.end(), DL, get(RISCV::PseudoJump))
                          .addReg(ScratchReg, RegState::Define | RegState::Dead)
                          .addMBB(&DestBB, RISCVII::MO_CALL);

  RS->enterBasicBlockEnd(MBB);
  Register TmpGPR =
      RS->scavengeRegisterBackwards(RISCV::GPRRegClass, MI.getIterator(),
                                    /*RestoreAfter=*/false, /*SPAdj=*/0,
                                    /*AllowSpill=*/false);
  if (TmpGPR != RISCV::NoRegister)
    RS->setRegUsed(TmpGPR);
  else {
    // No register was free: spill one around the jump through the dedicated
    // branch-relaxation scratch frame index.
    TmpGPR = RISCV::X27;
    int FrameIndex = RVFI->getBranchRelaxationScratchFrameIndex();
    if (FrameIndex == -1)
      report_fatal_error("underestimated function size");

    storeRegToStackSlot(MBB, MI, TmpGPR, /*IsKill=*/true, FrameIndex,
                        &RISCV::GPRRegClass, TRI, Register());
    TRI->eliminateFrameIndex(std::prev(MI.getIterator()),
                             /*SPAdj=*/0, /*FIOperandNum=*/1);

    MI.getOperand(1).setMBB(&RestoreBB);

    loadRegFromStackSlot(RestoreBB, RestoreBB.end(), TmpGPR, FrameIndex,
                         &RISCV::GPRRegClass, TRI, Register());
    TRI->eliminateFrameIndex(RestoreBB.back(),
                             /*SPAdj=*/0, /*FIOperandNum=*/1);
  }

  MRI.replaceRegWith(ScratchReg, TmpGPR);
  MRI.clearVirtRegs();
}
bool RISCVInstrInfo::reverseBranchCondition(
    SmallVectorImpl<MachineOperand> &Cond) const {
  assert((Cond.size() == 3) && "Invalid branch condition!");
  auto CC = static_cast<RISCVCC::CondCode>(Cond[0].getImm());
  Cond[0].setImm(getOppositeBranchCondition(CC));
  return false;
}
MachineBasicBlock *
RISCVInstrInfo::getBranchDestBlock(const MachineInstr &MI) const {
  assert(MI.getDesc().isBranch() && "Unexpected opcode!");
  // The branch target is always the last operand.
  int NumOp = MI.getNumExplicitOperands();
  return MI.getOperand(NumOp - 1).getMBB();
}
bool RISCVInstrInfo::isBranchOffsetInRange(unsigned BranchOp,
                                           int64_t BrOffset) const {
  unsigned XLen = STI.getXLen();
  switch (BranchOp) {
  default:
    llvm_unreachable("Unexpected opcode!");
  case RISCV::BEQ:
  case RISCV::BNE:
  case RISCV::BLT:
  case RISCV::BGE:
  case RISCV::BLTU:
  case RISCV::BGEU:
    return isIntN(13, BrOffset);
  case RISCV::JAL:
  case RISCV::PseudoBR:
    return isIntN(21, BrOffset);
  case RISCV::PseudoJump:
    return isIntN(32, SignExtend64(BrOffset + 0x800, XLen));
  }
}
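// Short-forward-branch optimization: getPredicatedOpcode maps a plain ALU
// opcode to its PseudoCC* predicated form; canFoldAsPredicatedOp checks
// that a single-use defining instruction can legally be predicated.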
unsigned getPredicatedOpcode(unsigned Opcode) {
  switch (Opcode) {
  case RISCV::ADD:   return RISCV::PseudoCCADD;   break;
  case RISCV::SUB:   return RISCV::PseudoCCSUB;   break;
  case RISCV::AND:   return RISCV::PseudoCCAND;   break;
  case RISCV::OR:    return RISCV::PseudoCCOR;    break;
  case RISCV::XOR:   return RISCV::PseudoCCXOR;   break;

  case RISCV::ADDW:  return RISCV::PseudoCCADDW;  break;
  case RISCV::SUBW:  return RISCV::PseudoCCSUBW;  break;
  }

  return RISCV::INSTRUCTION_LIST_END;
}
/// Identify instructions that can be folded into a CCMOV instruction, and
/// return the defining instruction.
static MachineInstr *canFoldAsPredicatedOp(Register Reg,
                                           const MachineRegisterInfo &MRI,
                                           const TargetInstrInfo *TII) {
  if (!Reg.isVirtual())
    return nullptr;
  if (!MRI.hasOneNonDBGUse(Reg))
    return nullptr;
  MachineInstr *MI = MRI.getVRegDef(Reg);
  if (!MI)
    return nullptr;
  // Check if MI can be predicated and folded into the CCMOV.
  if (getPredicatedOpcode(MI->getOpcode()) == RISCV::INSTRUCTION_LIST_END)
    return nullptr;
  // Check that MI has no other defs and no unsafe operands.
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    // ... (reject frame indices, tied operands, extra defs, and uses of
    //      non-constant physical registers)
  }
  bool DontMoveAcrossStores = true;
  if (!MI->isSafeToMove(/*AliasAnalysis=*/nullptr, DontMoveAcrossStores))
    return nullptr;
  return MI;
}
bool RISCVInstrInfo::analyzeSelect(const MachineInstr &MI,
                                   SmallVectorImpl<MachineOperand> &Cond,
                                   unsigned &TrueOp, unsigned &FalseOp,
                                   bool &Optimizable) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  // CCMOV operands:
  //  0: Def.
  //  1: LHS of the compare.
  //  2: RHS of the compare.
  //  3: Condition code.
  //  4: False use.
  //  5: True use.
  TrueOp = 5;
  FalseOp = 4;
  Cond.push_back(MI.getOperand(1));
  Cond.push_back(MI.getOperand(2));
  Cond.push_back(MI.getOperand(3));
  // We can only fold when we support short forward branch opt.
  Optimizable = STI.hasShortForwardBranchOpt();
  return false;
}
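// optimizeSelect folds the defining instruction of one CCMOV input into a
// predicated PseudoCC* instruction, inverting the condition when it is the
// false input that gets folded.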
MachineInstr *
RISCVInstrInfo::optimizeSelect(MachineInstr &MI,
                               SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                               bool PreferFalse) const {
  assert(MI.getOpcode() == RISCV::PseudoCCMOVGPR &&
         "Unknown select instruction");
  if (!STI.hasShortForwardBranchOpt())
    return nullptr;

  MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
  MachineInstr *DefMI =
      canFoldAsPredicatedOp(MI.getOperand(5).getReg(), MRI, this);
  bool Invert = !DefMI;
  if (!DefMI)
    DefMI = canFoldAsPredicatedOp(MI.getOperand(4).getReg(), MRI, this);
  if (!DefMI)
    return nullptr;

  // Find the new register class to use.
  MachineOperand FalseReg = MI.getOperand(Invert ? 5 : 4);
  Register DestReg = MI.getOperand(0).getReg();
  const TargetRegisterClass *PreviousClass = MRI.getRegClass(FalseReg.getReg());
  if (!MRI.constrainRegClass(DestReg, PreviousClass))
    return nullptr;

  unsigned PredOpc = getPredicatedOpcode(DefMI->getOpcode());
  assert(PredOpc != RISCV::INSTRUCTION_LIST_END && "Unexpected opcode!");

  // Create a new predicated version of DefMI.
  MachineInstrBuilder NewMI =
      BuildMI(*MI.getParent(), MI, MI.getDebugLoc(), get(PredOpc), DestReg);

  // Copy the condition portion.
  NewMI.add(MI.getOperand(1));
  NewMI.add(MI.getOperand(2));

  // Add the condition code, inverting it if the folded operand was the
  // false input.
  auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
  if (Invert)
    CC = RISCVCC::getOppositeBranchCondition(CC);
  NewMI.addImm(CC);

  // Copy the false register.
  NewMI.add(FalseReg);

  // ... (copy DefMI's operands, update the SeenMIs set for remat, and
  //      delete DefMI and the original select)

  return NewMI;
}
unsigned RISCVInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  if (MI.isMetaInstruction())
    return 0;

  unsigned Opcode = MI.getOpcode();

  if (Opcode == TargetOpcode::INLINEASM ||
      Opcode == TargetOpcode::INLINEASM_BR) {
    const MachineFunction &MF = *MI.getParent()->getParent();
    const auto &TM = static_cast<const RISCVTargetMachine &>(MF.getTarget());
    return getInlineAsmLength(MI.getOperand(0).getSymbolName(),
                              *TM.getMCAsmInfo());
  }

  if (MI.getParent() && MI.getParent()->getParent()) {
    if (isCompressibleInst(MI, STI))
      return 2;
  }
  return get(Opcode).getSize();
}
bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
  const unsigned Opcode = MI.getOpcode();
  switch (Opcode) {
  default:
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    return MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
           MI.getOperand(1).getReg() == MI.getOperand(2).getReg();
  case RISCV::ADDI:
  case RISCV::ORI:
  case RISCV::XORI:
    return (MI.getOperand(1).isReg() &&
            MI.getOperand(1).getReg() == RISCV::X0) ||
           (MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
  }
  return MI.isAsCheapAsAMove();
}
std::optional<DestSourcePair>
RISCVInstrInfo::isCopyInstrImpl(const MachineInstr &MI) const {
  if (MI.isMoveReg())
    return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
  switch (MI.getOpcode()) {
  default:
    break;
  case RISCV::ADDI:
    // Operand 1 can be a frameindex but callers expect registers.
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isImm() &&
        MI.getOperand(2).getImm() == 0)
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  case RISCV::FSGNJ_D:
  case RISCV::FSGNJ_S:
  case RISCV::FSGNJ_H:
    // The canonical floating-point move is fsgnj rd, rs, rs.
    if (MI.getOperand(1).isReg() && MI.getOperand(2).isReg() &&
        MI.getOperand(1).getReg() == MI.getOperand(2).getReg())
      return DestSourcePair{MI.getOperand(0), MI.getOperand(1)};
    break;
  }
  return std::nullopt;
}
void RISCVInstrInfo::finalizeInsInstrs(
    MachineInstr &Root, MachineCombinerPattern &P,
    SmallVectorImpl<MachineInstr *> &InsInstrs) const {
  int16_t FrmOpIdx =
      RISCV::getNamedOperandIdx(Root.getOpcode(), RISCV::OpName::frm);
  if (FrmOpIdx < 0) {
    assert(all_of(InsInstrs,
                  [](MachineInstr *MI) {
                    return RISCV::getNamedOperandIdx(MI->getOpcode(),
                                                     RISCV::OpName::frm) < 0;
                  }) &&
           "New instructions require FRM whereas the old one does not have it");
    return;
  }

  const MachineOperand &FRM = Root.getOperand(FrmOpIdx);
  MachineFunction &MF = *Root.getMF();

  for (auto *NewMI : InsInstrs) {
    assert(static_cast<unsigned>(RISCV::getNamedOperandIdx(
               NewMI->getOpcode(), RISCV::OpName::frm)) ==
               NewMI->getNumOperands() &&
           "Instruction has unexpected number of operands");
    MachineInstrBuilder MIB(MF, NewMI);
    MIB.add(FRM);
  }
}
bool RISCVInstrInfo::hasReassociableSibling(const MachineInstr &Inst,
                                            bool &Commuted) const {
  if (!TargetInstrInfo::hasReassociableSibling(Inst, Commuted))
    return false;

  const MachineRegisterInfo &MRI = Inst.getMF()->getRegInfo();
  unsigned OperandIdx = Commuted ? 2 : 1;
  const MachineInstr &Sibling =
      *MRI.getVRegDef(Inst.getOperand(OperandIdx).getReg());

  int16_t InstFrmOpIdx =
      RISCV::getNamedOperandIdx(Inst.getOpcode(), RISCV::OpName::frm);
  int16_t SiblingFrmOpIdx =
      RISCV::getNamedOperandIdx(Sibling.getOpcode(), RISCV::OpName::frm);

  // Either both instructions lack an FRM operand, or they must agree on it.
  return (InstFrmOpIdx < 0 && SiblingFrmOpIdx < 0) ||
         RISCV::hasEqualFRM(Inst, Sibling);
}
bool RISCVInstrInfo::isAssociativeAndCommutative(const MachineInstr &Inst,
                                                 bool Invert) const {
  unsigned Opc = Inst.getOpcode();
  if (Invert) {
    auto InverseOpcode = getInverseOpcode(Opc);
    if (!InverseOpcode)
      return false;
    Opc = *InverseOpcode;
  }
  switch (Opc) {
  default:
    return false;
  case RISCV::FADD_H:
  case RISCV::FADD_S:
  case RISCV::FADD_D:
  case RISCV::FMUL_H:
  case RISCV::FMUL_S:
  case RISCV::FMUL_D:
    // FP adds and multiplies are associative only with reassoc and nsz.
    return Inst.getFlag(MachineInstr::MIFlag::FmReassoc) &&
           Inst.getFlag(MachineInstr::MIFlag::FmNsz);
  }
}
std::optional<unsigned>
RISCVInstrInfo::getInverseOpcode(unsigned Opcode) const {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::FADD_H:
    return RISCV::FSUB_H;
  case RISCV::FADD_S:
    return RISCV::FSUB_S;
  case RISCV::FADD_D:
    return RISCV::FSUB_D;
  case RISCV::FSUB_H:
    return RISCV::FADD_H;
  case RISCV::FSUB_S:
    return RISCV::FADD_S;
  case RISCV::FSUB_D:
    return RISCV::FADD_D;
  }
}
static bool canCombineFPFusedMultiply(const MachineInstr &Root,
                                      const MachineOperand &MO,
                                      bool DoRegPressureReduce) {
  if (!MO.isReg() || !MO.getReg().isVirtual())
    return false;
  const MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
  MachineInstr *MI = MRI.getVRegDef(MO.getReg());
  if (!MI || !isFMUL(MI->getOpcode()))
    return false;

  // ... (both instructions must carry the contract fast-math flag)

  // Try combining even if the fmul has more than one use: it removes the
  // dependency between the fadd/fsub and the fmul. However, this can extend
  // live ranges, so avoid it when reducing register pressure.
  if (DoRegPressureReduce &&
      !MRI.hasOneNonDBGUse(MI->getOperand(0).getReg()))
    return false;

  return RISCV::hasEqualFRM(Root, *MI);
}

static bool getFPFusedMultiplyPatterns(
    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
    bool DoRegPressureReduce) {
  unsigned Opc = Root.getOpcode();
  bool IsFAdd = isFADD(Opc);
  if (!IsFAdd && !isFSUB(Opc))
    return false;
  bool Added = false;
  if (canCombineFPFusedMultiply(Root, Root.getOperand(1),
                                DoRegPressureReduce)) {
    Patterns.push_back(IsFAdd ? MachineCombinerPattern::FMADD_AX
                              : MachineCombinerPattern::FMSUB);
    Added = true;
  }
  if (canCombineFPFusedMultiply(Root, Root.getOperand(2),
                                DoRegPressureReduce)) {
    Patterns.push_back(IsFAdd ? MachineCombinerPattern::FMADD_XA
                              : MachineCombinerPattern::FNMSUB);
    Added = true;
  }
  return Added;
}

static bool getFPPatterns(MachineInstr &Root,
                          SmallVectorImpl<MachineCombinerPattern> &Patterns,
                          bool DoRegPressureReduce) {
  return getFPFusedMultiplyPatterns(Root, Patterns, DoRegPressureReduce);
}

bool RISCVInstrInfo::getMachineCombinerPatterns(
    MachineInstr &Root, SmallVectorImpl<MachineCombinerPattern> &Patterns,
    bool DoRegPressureReduce) const {
  if (getFPPatterns(Root, Patterns, DoRegPressureReduce))
    return true;
  return TargetInstrInfo::getMachineCombinerPatterns(Root, Patterns,
                                                     DoRegPressureReduce);
}

static unsigned getFPFusedMultiplyOpcode(unsigned RootOpc,
                                         MachineCombinerPattern Pattern) {
  switch (RootOpc) {
  default:
    llvm_unreachable("Unexpected opcode");
  case RISCV::FADD_H:
    return RISCV::FMADD_H;
  case RISCV::FADD_S:
    return RISCV::FMADD_S;
  case RISCV::FADD_D:
    return RISCV::FMADD_D;
  // ... (FSUB_H/S/D map to FMSUB_* or FNMSUB_* depending on Pattern)
  }
}
static void combineFPFusedMultiply(MachineInstr &Root, MachineInstr &Prev,
                                   MachineCombinerPattern Pattern,
                                   SmallVectorImpl<MachineInstr *> &InsInstrs,
                                   SmallVectorImpl<MachineInstr *> &DelInstrs) {
  // ... (collect the multiplicands from Prev and the addend from Root, pick
  //      the fused opcode via getFPFusedMultiplyOpcode, merge the two debug
  //      locations with DILocation::getMergedLocation, and emit
  //        BuildMI(*MF, MergedLoc, TII->get(FusedOpc), DstReg)
  //      recording it in InsInstrs; Prev and Root go to DelInstrs)
}

void RISCVInstrInfo::genAlternativeCodeSequence(
    MachineInstr &Root, MachineCombinerPattern Pattern,
    SmallVectorImpl<MachineInstr *> &InsInstrs,
    SmallVectorImpl<MachineInstr *> &DelInstrs,
    DenseMap<unsigned, unsigned> &InstrIdxForVirtReg) const {
  MachineRegisterInfo &MRI = Root.getMF()->getRegInfo();
  switch (Pattern) {
  default:
    TargetInstrInfo::genAlternativeCodeSequence(Root, Pattern, InsInstrs,
                                                DelInstrs, InstrIdxForVirtReg);
    return;
  case MachineCombinerPattern::FMADD_AX:
  case MachineCombinerPattern::FMSUB: {
    MachineInstr &Prev = *MRI.getVRegDef(Root.getOperand(1).getReg());
    combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
    return;
  }
  case MachineCombinerPattern::FMADD_XA:
  case MachineCombinerPattern::FNMSUB: {
    MachineInstr &Prev = *MRI.getVRegDef(Root.getOperand(2).getReg());
    combineFPFusedMultiply(Root, Prev, Pattern, InsInstrs, DelInstrs);
    return;
  }
  }
}
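// verifyInstruction checks the target-specific immediate ranges declared in
// the operand descriptions, then the structural invariants of RVV pseudos
// (tied merge operand, VL/SEW/policy operand consistency).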
bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
                                       StringRef &ErrInfo) const {
  MCInstrDesc const &Desc = MI.getDesc();

  for (auto &OI : enumerate(Desc.operands())) {
    unsigned OpType = OI.value().OperandType;
    if (OpType >= RISCVOp::OPERAND_FIRST_RISCV_IMM &&
        OpType <= RISCVOp::OPERAND_LAST_RISCV_IMM) {
      const MachineOperand &MO = MI.getOperand(OI.index());
      if (MO.isImm()) {
        int64_t Imm = MO.getImm();
        bool Ok;
        switch (OpType) {
        default:
          llvm_unreachable("Unexpected operand type");

          // clang-format off
#define CASE_OPERAND_UIMM(NUM)                                                 \
  case RISCVOp::OPERAND_UIMM##NUM:                                             \
    Ok = isUInt<NUM>(Imm);                                                     \
    break;
        // ... (CASE_OPERAND_UIMM expansions for the plain unsigned widths)
        case RISCVOp::OPERAND_UIMM2_LSB0:
          Ok = isShiftedUInt<1, 1>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM7_LSB00:
          Ok = isShiftedUInt<5, 2>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM8_LSB00:
          Ok = isShiftedUInt<6, 2>(Imm);
          break;
        case RISCVOp::OPERAND_UIMM8_LSB000:
          Ok = isShiftedUInt<5, 3>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM10_LSB0000_NONZERO:
          Ok = isShiftedInt<6, 4>(Imm) && (Imm != 0);
          break;
        case RISCVOp::OPERAND_SIMM5_PLUS1:
          Ok = (isInt<5>(Imm) && Imm != -16) || Imm == 16;
          break;
        case RISCVOp::OPERAND_SIMM6_NONZERO:
          Ok = Imm != 0 && isInt<6>(Imm);
          break;
        case RISCVOp::OPERAND_VTYPEI10:
          Ok = isUInt<10>(Imm);
          break;
        case RISCVOp::OPERAND_VTYPEI11:
          Ok = isUInt<11>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM12:
          Ok = isInt<12>(Imm);
          break;
        case RISCVOp::OPERAND_SIMM12_LSB00000:
          Ok = isShiftedInt<7, 5>(Imm);
          break;
        case RISCVOp::OPERAND_UIMMLOG2XLEN:
          Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
          break;
        case RISCVOp::OPERAND_UIMMLOG2XLEN_NONZERO:
          Ok = STI.is64Bit() ? isUInt<6>(Imm) : isUInt<5>(Imm);
          Ok = Ok && Imm != 0;
          break;
        // ... (a half-XLEN variant checks
        //      STI.is64Bit() ? isUInt<5>(Imm) : isUInt<4>(Imm))
        case RISCVOp::OPERAND_RVKRNUM:
          Ok = Imm >= 0 && Imm <= 10;
          break;
          // clang-format on
        }
        if (!Ok) {
          ErrInfo = "Invalid immediate";
          return false;
        }
      }
    }
  }

  const uint64_t TSFlags = Desc.TSFlags;
  if (RISCVII::hasMergeOp(TSFlags)) {
    unsigned OpIdx = RISCVII::getMergeOpNum(Desc);
    if (MI.findTiedOperandIdx(0) != OpIdx) {
      ErrInfo = "Merge op improperly tied";
      return false;
    }
  }
  if (RISCVII::hasVLOp(TSFlags)) {
    const MachineOperand &Op = MI.getOperand(RISCVII::getVLOpNum(Desc));
    if (!Op.isImm() && !Op.isReg()) {
      ErrInfo = "Invalid operand type for VL operand";
      return false;
    }
    if (Op.isReg() && Op.getReg() != RISCV::NoRegister) {
      const MachineRegisterInfo &MRI = MI.getParent()->getParent()->getRegInfo();
      auto *RC = MRI.getRegClass(Op.getReg());
      if (!RISCV::GPRRegClass.hasSubClassEq(RC)) {
        ErrInfo = "Invalid register class for VL operand";
        return false;
      }
    }
    if (!RISCVII::hasSEWOp(TSFlags)) {
      ErrInfo = "VL operand w/o SEW operand?";
      return false;
    }
  }
  if (RISCVII::hasSEWOp(TSFlags)) {
    uint64_t Log2SEW = MI.getOperand(RISCVII::getSEWOpNum(Desc)).getImm();
    if (Log2SEW > 31) {
      ErrInfo = "Unexpected SEW value";
      return false;
    }
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
    if (!RISCVVType::isValidSEW(SEW)) {
      ErrInfo = "Unexpected SEW value";
      return false;
    }
  }
  if (RISCVII::hasVecPolicyOp(TSFlags)) {
    unsigned OpIdx = RISCVII::getVecPolicyOpNum(Desc);
    uint64_t Policy = MI.getOperand(OpIdx).getImm();
    if (Policy > (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) {
      ErrInfo = "Invalid Policy Value";
      return false;
    }
    if (!RISCVII::hasVLOp(TSFlags)) {
      ErrInfo = "policy operand w/o VL operand?";
      return false;
    }

    // A policy operand can only exist on instructions with a tied
    // merge/passthru operand.
    unsigned UseOpIdx;
    if (!MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
      ErrInfo = "policy operand w/o tied operand?";
      return false;
    }
  }

  return true;
}
bool RISCVInstrInfo::areMemAccessesTriviallyDisjoint(
    const MachineInstr &MIa, const MachineInstr &MIb) const {
  assert(MIa.mayLoadOrStore() && "MIa must be a load or store.");
  assert(MIb.mayLoadOrStore() && "MIb must be a load or store.");

  if (MIa.hasUnmodeledSideEffects() || MIb.hasUnmodeledSideEffects() ||
      MIa.hasOrderedMemoryRef() || MIb.hasOrderedMemoryRef())
    return false;

  // If both accesses use the same base register and the lower access plus
  // its width doesn't reach the higher access, they cannot overlap.
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();
  const MachineOperand *BaseOpA = nullptr, *BaseOpB = nullptr;
  int64_t OffsetA = 0, OffsetB = 0;
  unsigned int WidthA = 0, WidthB = 0;
  if (getMemOperandWithOffsetWidth(MIa, BaseOpA, OffsetA, WidthA, TRI) &&
      getMemOperandWithOffsetWidth(MIb, BaseOpB, OffsetB, WidthB, TRI)) {
    if (BaseOpA->isIdenticalTo(*BaseOpB)) {
      int LowOffset = std::min(OffsetA, OffsetB);
      int HighOffset = std::max(OffsetA, OffsetB);
      int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
      if (LowOffset + LowWidth <= HighOffset)
        return true;
    }
  }
  return false;
}
std::pair<unsigned, unsigned>
RISCVInstrInfo::decomposeMachineOperandsTargetFlags(unsigned TF) const {
  const unsigned Mask = RISCVII::MO_DIRECT_FLAG_MASK;
  return std::make_pair(TF & Mask, TF & ~Mask);
}

ArrayRef<std::pair<unsigned, const char *>>
RISCVInstrInfo::getSerializableDirectMachineOperandTargetFlags() const {
  using namespace RISCVII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_CALL, "riscv-call"},
      {MO_PLT, "riscv-plt"},
      {MO_LO, "riscv-lo"},
      {MO_HI, "riscv-hi"},
      {MO_PCREL_LO, "riscv-pcrel-lo"},
      {MO_PCREL_HI, "riscv-pcrel-hi"},
      {MO_GOT_HI, "riscv-got-hi"},
      {MO_TPREL_LO, "riscv-tprel-lo"},
      {MO_TPREL_HI, "riscv-tprel-hi"},
      {MO_TPREL_ADD, "riscv-tprel-add"},
      {MO_TLS_GOT_HI, "riscv-tls-got-hi"},
      {MO_TLS_GD_HI, "riscv-tls-gd-hi"}};
  return makeArrayRef(TargetFlags);
}
bool RISCVInstrInfo::isFunctionSafeToOutlineFrom(
    MachineFunction &MF, bool OutlineFromLinkOnceODRs) const {
  const Function &F = MF.getFunction();

  // Can F be deduplicated by the linker? If it can, don't outline from it.
  if (!OutlineFromLinkOnceODRs && F.hasLinkOnceODRLinkage())
    return false;

  // ... (also reject functions with explicit section assignments)
  return true;
}

bool RISCVInstrInfo::isMBBSafeToOutlineFrom(MachineBasicBlock &MBB,
                                            unsigned &Flags) const {
  // More accurate safety checking is done in getOutliningCandidateInfo.
  return TargetInstrInfo::isMBBSafeToOutlineFrom(MBB, Flags);
}

outliner::OutlinedFunction RISCVInstrInfo::getOutliningCandidateInfo(
    std::vector<outliner::Candidate> &RepeatedSequenceLocs) const {

  // First we need to filter out candidates where the X5 register (t0) can't
  // be used to set up the function call.
  auto CannotInsertCall = [this](outliner::Candidate &C) {
    const TargetRegisterInfo *TRI = C.getMF()->getSubtarget().getRegisterInfo();
    return !C.isAvailableAcrossAndOutOfSeq(RISCV::X5, *TRI);
  };

  llvm::erase_if(RepeatedSequenceLocs, CannotInsertCall);

  // If the sequence doesn't have enough candidates left, then we're done.
  if (RepeatedSequenceLocs.size() < 2)
    return outliner::OutlinedFunction();

  unsigned SequenceSize = 0;

  auto I = RepeatedSequenceLocs[0].front();
  auto E = std::next(RepeatedSequenceLocs[0].back());
  for (; I != E; ++I)
    SequenceSize += getInstSizeInBytes(*I);

  // call t0, function = 8 bytes.
  unsigned CallOverhead = 8;
  for (auto &C : RepeatedSequenceLocs)
    C.setCallInfo(MachineOutlinerDefault, CallOverhead);

  // jr t0 = 4 bytes, or 2 bytes with compressed instructions.
  unsigned FrameOverhead = 4;
  if (RepeatedSequenceLocs[0]
          .getMF()
          ->getSubtarget<RISCVSubtarget>()
          .hasStdExtCOrZca())
    FrameOverhead = 2;

  return outliner::OutlinedFunction(RepeatedSequenceLocs, SequenceSize,
                                    FrameOverhead, MachineOutlinerDefault);
}
outliner::InstrType
RISCVInstrInfo::getOutliningType(MachineBasicBlock::iterator &MBBI,
                                 unsigned Flags) const {
  MachineInstr &MI = *MBBI;
  MachineBasicBlock *MBB = MI.getParent();
  const TargetRegisterInfo *TRI =
      MBB->getParent()->getSubtarget().getRegisterInfo();
  const auto &F = MI.getMF()->getFunction();

  // Positions generally can't safely be outlined.
  if (MI.isPosition()) {
    // We can manually strip out CFI instructions later.
    if (MI.isCFIInstruction())
      // If the function has exception handling, outlining and stripping CFI
      // instructions could break the .eh_frame section needed for unwinding.
      return F.needsUnwindTableEntry() ? outliner::InstrType::Illegal
                                       : outliner::InstrType::Invisible;

    return outliner::InstrType::Illegal;
  }

  // Don't trust the user to write safe inline assembly.
  if (MI.isInlineAsm())
    return outliner::InstrType::Illegal;

  // We can't outline branches to other basic blocks.
  if (MI.isTerminator() && !MBB->succ_empty())
    return outliner::InstrType::Illegal;

  // Don't allow modifying the X5 register which we use for return addresses
  // for these outlined functions.
  if (MI.modifiesRegister(RISCV::X5, TRI) ||
      MI.getDesc().hasImplicitDefOfPhysReg(RISCV::X5))
    return outliner::InstrType::Illegal;

  // Make sure the operands don't reference something unsafe.
  for (const auto &MO : MI.operands()) {
    if (MO.isMBB() || MO.isBlockAddress() || MO.isCPI() || MO.isJTI())
      return outliner::InstrType::Illegal;

    // pcrel-hi and pcrel-lo can't be put in separate sections.
    if (MO.getTargetFlags() == RISCVII::MO_PCREL_LO &&
        (MI.getMF()->getTarget().getFunctionSections() || F.hasComdat() ||
         F.hasSection()))
      return outliner::InstrType::Illegal;
  }

  // Don't allow instructions which won't be materialized to impact the
  // outlining analysis.
  if (MI.isMetaInstruction())
    return outliner::InstrType::Invisible;

  return outliner::InstrType::Legal;
}
void RISCVInstrInfo::buildOutlinedFrame(
    MachineBasicBlock &MBB, MachineFunction &MF,
    const outliner::OutlinedFunction &OF) const {
  // Strip out any CFI instructions.
  bool Changed = true;
  while (Changed) {
    Changed = false;
    auto I = MBB.begin();
    auto E = MBB.end();
    for (; I != E; ++I) {
      if (I->isCFIInstruction()) {
        I->removeFromParent();
        Changed = true;
        break;
      }
    }
  }

  MBB.addLiveIn(RISCV::X5);

  // Add in a return instruction to the end of the outlined frame.
  MBB.insert(MBB.end(), BuildMI(MF, DebugLoc(), get(RISCV::JALR))
                            .addReg(RISCV::X0, RegState::Define)
                            .addReg(RISCV::X5)
                            .addImm(0));
}

MachineBasicBlock::iterator RISCVInstrInfo::insertOutlinedCall(
    Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It,
    MachineFunction &MF, outliner::Candidate &C) const {
  // Add a call instruction to the outlined function at the given location.
  It = MBB.insert(It,
                  BuildMI(MF, DebugLoc(), get(RISCV::PseudoCALLReg), RISCV::X5)
                      .addGlobalAddress(M.getNamedValue(MF.getName()), 0,
                                        RISCVII::MO_CALL));
  return It;
}
std::string RISCVInstrInfo::createMIROperandComment(
    const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx,
    const TargetRegisterInfo *TRI) const {
  // Print a generic comment for this operand if there is one.
  std::string GenericComment =
      TargetInstrInfo::createMIROperandComment(MI, Op, OpIdx, TRI);
  if (!GenericComment.empty())
    return GenericComment;

  // If not, we must have an immediate operand.
  if (!Op.isImm())
    return std::string();

  std::string Comment;
  raw_string_ostream OS(Comment);

  uint64_t TSFlags = MI.getDesc().TSFlags;

  // Print the full VType operand of vsetvli/vsetivli and their pseudos, and
  // the SEW/policy operands of vector codegen pseudos.
  if ((MI.getOpcode() == RISCV::VSETVLI || MI.getOpcode() == RISCV::VSETIVLI ||
       MI.getOpcode() == RISCV::PseudoVSETVLI ||
       MI.getOpcode() == RISCV::PseudoVSETIVLI ||
       MI.getOpcode() == RISCV::PseudoVSETVLIX0) &&
      OpIdx == 2) {
    unsigned Imm = MI.getOperand(OpIdx).getImm();
    RISCVVType::printVType(Imm, OS);
  } else if (RISCVII::hasSEWOp(TSFlags) &&
             OpIdx == RISCVII::getSEWOpNum(MI.getDesc())) {
    unsigned Log2SEW = MI.getOperand(OpIdx).getImm();
    unsigned SEW = Log2SEW ? 1 << Log2SEW : 8;
    assert(RISCVVType::isValidSEW(SEW) && "Unexpected SEW");
    OS << "e" << SEW;
  } else if (RISCVII::hasVecPolicyOp(TSFlags) &&
             OpIdx == RISCVII::getVecPolicyOpNum(MI.getDesc())) {
    unsigned Policy = MI.getOperand(OpIdx).getImm();
    assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
           "Invalid Policy Value");
    OS << (Policy & RISCVII::TAIL_AGNOSTIC ? "ta" : "tu") << ", "
       << (Policy & RISCVII::MASK_AGNOSTIC ? "ma" : "mu");
  }

  OS.flush();
  return Comment;
}
// clang-format off
#define CASE_VFMA_OPCODE_COMMON(OP, TYPE, LMUL)                                \
  RISCV::PseudoV##OP##_##TYPE##_##LMUL

#define CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)                                    \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, M1):                                       \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M2):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M4):                                  \
  case CASE_VFMA_OPCODE_COMMON(OP, TYPE, M8)

#define CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)                                   \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF2):                                      \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, TYPE)

#define CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)                                   \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF4):                                      \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, TYPE)

#define CASE_VFMA_OPCODE_LMULS(OP, TYPE)                                       \
  CASE_VFMA_OPCODE_COMMON(OP, TYPE, MF8):                                      \
  case CASE_VFMA_OPCODE_LMULS_MF4(OP, TYPE)

#define CASE_VFMA_SPLATS(OP)                                                   \
  CASE_VFMA_OPCODE_LMULS_MF4(OP, VF16):                                        \
  case CASE_VFMA_OPCODE_LMULS_MF2(OP, VF32):                                   \
  case CASE_VFMA_OPCODE_LMULS_M1(OP, VF64)
// clang-format on
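// For VFMA pseudos the commutable operands depend on the form: the splat
// and .VV multiply-add variants tie operand 1 to the destination, so
// commuting operand 1 with operand 3 also requires toggling the opcode
// (e.g. FMACC <-> FMADD); see commuteInstructionImpl below.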
bool RISCVInstrInfo::findCommutedOpIndices(const MachineInstr &MI,
                                           unsigned &SrcOpIdx1,
                                           unsigned &SrcOpIdx2) const {
  const MCInstrDesc &Desc = MI.getDesc();
  if (!Desc.isCommutable())
    return false;

  switch (MI.getOpcode()) {
  case RISCV::PseudoCCMOVGPR:
    // Operands 4 and 5 are commutable.
    return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 4, 5);
  case CASE_VFMA_SPLATS(FMADD):
  // ... (the remaining FMA splat forms)
  {
    // If the tail policy is undisturbed we can't commute.
    assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
    if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
      return false;

    // For these instructions we can only swap operand 1 and operand 3 by
    // changing the opcode.
    unsigned CommutableOpIdx1 = 1;
    unsigned CommutableOpIdx2 = 3;
    if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                              CommutableOpIdx2))
      return false;
    return true;
  }
  case CASE_VFMA_OPCODE_LMULS(FMADD, VV):
  // ... (the remaining .VV multiply-add forms)
  {
    // If the tail policy is undisturbed we can't commute.
    assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags));
    if ((MI.getOperand(MI.getNumExplicitOperands() - 1).getImm() & 1) == 0)
      return false;

    // For these instructions we have more freedom: we can commute with the
    // other multiplicand or with the addend/subtrahend/minuend.

    // Any fixed operand must be from source 1, 2 or 3.
    if (SrcOpIdx1 != CommuteAnyOperandIndex && SrcOpIdx1 > 3)
      return false;
    if (SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx2 > 3)
      return false;

    // If both ops are fixed, one must be the tied source.
    if (SrcOpIdx1 != CommuteAnyOperandIndex &&
        SrcOpIdx2 != CommuteAnyOperandIndex && SrcOpIdx1 != 1 && SrcOpIdx2 != 1)
      return false;

    // Look for two different register operands assumed to be commutable
    // regardless of the FMA opcode; the opcode is adjusted later if needed.
    if (SrcOpIdx1 == CommuteAnyOperandIndex ||
        SrcOpIdx2 == CommuteAnyOperandIndex) {
      // At least one of the operands to be commuted is not specified, so this
      // method is free to pick suitable commutable operands.
      unsigned CommutableOpIdx1 = SrcOpIdx1;
      if (SrcOpIdx1 == SrcOpIdx2) {
        // Neither operand is fixed: use the tied source as one of them.
        CommutableOpIdx1 = 1;
      } else if (SrcOpIdx1 == CommuteAnyOperandIndex) {
        // Only one of the operands is not fixed.
        CommutableOpIdx1 = SrcOpIdx2;
      }

      // CommutableOpIdx1 is well defined now. Choose another commutable
      // operand and assign its index to CommutableOpIdx2.
      unsigned CommutableOpIdx2;
      if (CommutableOpIdx1 != 1) {
        // If we haven't already used the tied source, we must use it now.
        CommutableOpIdx2 = 1;
      } else {
        Register Op1Reg = MI.getOperand(CommutableOpIdx1).getReg();

        // The commuted operands should have different registers, otherwise
        // the commute is pointless; use that as a hint.
        if (Op1Reg != MI.getOperand(2).getReg())
          CommutableOpIdx2 = 2;
        else
          CommutableOpIdx2 = 3;
      }

      // Assign the found pair of commutable indices to SrcOpIdx1 and
      // SrcOpIdx2 to return those values.
      if (!fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, CommutableOpIdx1,
                                CommutableOpIdx2))
        return false;
    }
    return true;
  }
  }

  return TargetInstrInfo::findCommutedOpIndices(MI, SrcOpIdx1, SrcOpIdx2);
}
// clang-format off
#define CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, LMUL)               \
  case RISCV::PseudoV##OLDOP##_##TYPE##_##LMUL:                                \
    Opc = RISCV::PseudoV##NEWOP##_##TYPE##_##LMUL;                             \
    break;

#define CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)                   \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M1)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M2)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M4)                       \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, M8)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF2)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)                  \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF4)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_LMULS(OLDOP, NEWOP, TYPE)                      \
  CASE_VFMA_CHANGE_OPCODE_COMMON(OLDOP, NEWOP, TYPE, MF8)                      \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, TYPE)

#define CASE_VFMA_CHANGE_OPCODE_SPLATS(OLDOP, NEWOP)                           \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF4(OLDOP, NEWOP, VF16)                        \
  CASE_VFMA_CHANGE_OPCODE_LMULS_MF2(OLDOP, NEWOP, VF32)                        \
  CASE_VFMA_CHANGE_OPCODE_LMULS_M1(OLDOP, NEWOP, VF64)
// clang-format on
MachineInstr *RISCVInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                     bool NewMI,
                                                     unsigned OpIdx1,
                                                     unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case RISCV::PseudoCCMOVGPR: {
    // CCMOV can be commuted by inverting the condition.
    auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
    CC = RISCVCC::getOppositeBranchCondition(CC);
    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.getOperand(3).setImm(CC);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case CASE_VFMA_SPLATS(FMACC):
  // ... (the remaining FMA splat forms)
  {
    // It only makes sense to toggle these between clobbering the
    // addend/subtrahend/minuend and one of the multiplicands.
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    assert((OpIdx1 == 3 || OpIdx2 == 3) && "Unexpected opcode index");
    unsigned Opc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
      CASE_VFMA_CHANGE_OPCODE_SPLATS(FMACC, FMADD)
      // ... (each *MACC form maps to the matching *MADD form and back)
    }

    auto &WorkingMI = cloneIfNew(MI);
    WorkingMI.setDesc(get(Opc));
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  case CASE_VFMA_OPCODE_LMULS(FMADD, VV):
  // ... (the remaining .VV forms)
  {
    assert((OpIdx1 == 1 || OpIdx2 == 1) && "Unexpected opcode index");
    // If one of the operands is the addend we need to change the opcode;
    // otherwise we're just swapping two multiplicands.
    if (OpIdx1 == 3 || OpIdx2 == 3) {
      unsigned Opc;
      switch (MI.getOpcode()) {
      default:
        llvm_unreachable("Unexpected opcode");
        CASE_VFMA_CHANGE_OPCODE_LMULS(FMADD, FMACC, VV)
        // ...
      }

      auto &WorkingMI = cloneIfNew(MI);
      WorkingMI.setDesc(get(Opc));
      return TargetInstrInfo::commuteInstructionImpl(WorkingMI,
                                                     /*NewMI=*/false, OpIdx1,
                                                     OpIdx2);
    }
    // Let the default code handle it.
    break;
  }
  }

  return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}
#undef CASE_VFMA_CHANGE_OPCODE_SPLATS
#undef CASE_VFMA_CHANGE_OPCODE_LMULS
#undef CASE_VFMA_CHANGE_OPCODE_COMMON
#undef CASE_VFMA_SPLATS
#undef CASE_VFMA_OPCODE_LMULS
#undef CASE_VFMA_OPCODE_COMMON
// clang-format off
#define CASE_WIDEOP_OPCODE_COMMON(OP, LMUL)                                    \
  RISCV::PseudoV##OP##_##LMUL##_TIED

#define CASE_WIDEOP_OPCODE_LMULS_MF4(OP)                                       \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF4):                                          \
  case CASE_WIDEOP_OPCODE_COMMON(OP, MF2):                                     \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M1):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M2):                                      \
  case CASE_WIDEOP_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_OPCODE_LMULS(OP)                                           \
  CASE_WIDEOP_OPCODE_COMMON(OP, MF8):                                          \
  case CASE_WIDEOP_OPCODE_LMULS_MF4(OP)

#define CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, LMUL)                             \
  case RISCV::PseudoV##OP##_##LMUL##_TIED:                                     \
    NewOpc = RISCV::PseudoV##OP##_##LMUL;                                      \
    break;

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)                                \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF4)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF2)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M1)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M2)                                     \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, M4)

#define CASE_WIDEOP_CHANGE_OPCODE_LMULS(OP)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_COMMON(OP, MF8)                                    \
  CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(OP)
// clang-format on
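// convertToThreeAddress rewrites a tied widening op (e.g. vwadd.wv with the
// destination tied to operand 1) into the untied form so the register
// allocator gains freedom; it is only legal when the tail policy is
// agnostic.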
MachineInstr *RISCVInstrInfo::convertToThreeAddress(MachineInstr &MI,
                                                    LiveVariables *LV,
                                                    LiveIntervals *LIS) const {
  switch (MI.getOpcode()) {
  default:
    return nullptr;
  case CASE_WIDEOP_OPCODE_LMULS_MF4(FWADD_WV):
  case CASE_WIDEOP_OPCODE_LMULS_MF4(FWSUB_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WADD_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WADDU_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WSUB_WV):
  case CASE_WIDEOP_OPCODE_LMULS(WSUBU_WV): {
    // If the tail policy is undisturbed we can't convert.
    assert(RISCVII::hasVecPolicyOp(MI.getDesc().TSFlags) &&
           MI.getNumExplicitOperands() == 6);
    if ((MI.getOperand(5).getImm() & 1) == 0)
      return nullptr;

    // clang-format off
    unsigned NewOpc;
    switch (MI.getOpcode()) {
    default:
      llvm_unreachable("Unexpected opcode");
    CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWADD_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS_MF4(FWSUB_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADD_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WADDU_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUB_WV)
    CASE_WIDEOP_CHANGE_OPCODE_LMULS(WSUBU_WV)
    }
    // clang-format on

    MachineBasicBlock &MBB = *MI.getParent();
    MachineInstrBuilder MIB = BuildMI(MBB, MI, MI.getDebugLoc(), get(NewOpc))
                                  .add(MI.getOperand(0))
                                  .add(MI.getOperand(1))
                                  .add(MI.getOperand(2))
                                  .add(MI.getOperand(3))
                                  .add(MI.getOperand(4));
    MIB.copyImplicitOps(MI);

    if (LV) {
      unsigned NumOps = MI.getNumOperands();
      for (unsigned I = 1; I < NumOps; ++I) {
        MachineOperand &Op = MI.getOperand(I);
        if (Op.isReg() && Op.isKill())
          LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
      }
    }

    if (LIS) {
      SlotIndex Idx = LIS->ReplaceMachineInstrInMaps(MI, *MIB);

      if (MI.getOperand(0).isEarlyClobber()) {
        // Use operand 1 was tied to the early-clobber def, so its live range
        // could have ended at an early-clobber slot; extend it to the normal
        // register slot now that the operands are no longer tied.
        LiveInterval &LI = LIS->getInterval(MI.getOperand(1).getReg());
        LiveRange::Segment *S = LI.getSegmentContaining(Idx);
        if (S->end == Idx.getRegSlot(true))
          S->end = Idx.getRegSlot();
      }
    }

    return MIB;
  }
  }
}

#undef CASE_WIDEOP_CHANGE_OPCODE_LMULS
#undef CASE_WIDEOP_CHANGE_OPCODE_COMMON
#undef CASE_WIDEOP_OPCODE_LMULS
#undef CASE_WIDEOP_OPCODE_COMMON
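// getVLENFactoredAmount computes Amount/8 * VLENB into DestReg. It reads
// VLENB and then strength-reduces the multiply: a single shift for powers
// of two, Zba sh1add/sh2add/sh3add for 3/5/9 times a power of two, a
// shift plus add/sub for 2^N +/- 1, and a full MUL otherwise (which
// requires M or Zmmul).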
void RISCVInstrInfo::getVLENFactoredAmount(MachineFunction &MF,
                                           MachineBasicBlock &MBB,
                                           MachineBasicBlock::iterator II,
                                           const DebugLoc &DL, Register DestReg,
                                           int64_t Amount,
                                           MachineInstr::MIFlag Flag) const {
  assert(Amount > 0 && "There is no need to get VLEN scaled value.");
  assert(Amount % 8 == 0 &&
         "Reserve the stack by the multiple of one vector size.");

  MachineRegisterInfo &MRI = MF.getRegInfo();
  int64_t NumOfVReg = Amount / 8;

  BuildMI(MBB, II, DL, get(RISCV::PseudoReadVLENB), DestReg).setMIFlag(Flag);
  assert(isInt<32>(NumOfVReg) &&
         "Expect the number of vector registers within 32-bits.");
  if (llvm::isPowerOf2_32(NumOfVReg)) {
    uint32_t ShiftAmount = Log2_32(NumOfVReg);
    if (ShiftAmount == 0)
      return;
    BuildMI(MBB, II, DL, get(RISCV::SLLI), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addImm(ShiftAmount)
        .setMIFlag(Flag);
  } else if (STI.hasStdExtZba() &&
             ((NumOfVReg % 3 == 0 && isPowerOf2_64(NumOfVReg / 3)) ||
              (NumOfVReg % 5 == 0 && isPowerOf2_64(NumOfVReg / 5)) ||
              (NumOfVReg % 9 == 0 && isPowerOf2_64(NumOfVReg / 9)))) {
    // Use Zba SHXADD+SLLI instructions for the multiply in these cases.
    unsigned Opc;
    uint32_t ShiftAmount;
    if (NumOfVReg % 9 == 0) {
      Opc = RISCV::SH3ADD;
      ShiftAmount = Log2_64(NumOfVReg / 9);
    } else if (NumOfVReg % 5 == 0) {
      Opc = RISCV::SH2ADD;
      ShiftAmount = Log2_64(NumOfVReg / 5);
    } else if (NumOfVReg % 3 == 0) {
      Opc = RISCV::SH1ADD;
      ShiftAmount = Log2_64(NumOfVReg / 3);
    } else {
      llvm_unreachable("Unexpected number of vregs");
    }
    // ... (emit the optional SLLI followed by the SHXADD)
  } else if (llvm::isPowerOf2_32(NumOfVReg - 1)) {
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ... (SLLI ScaledRegister, DestReg, log2(N-1); ADD DestReg,
    //      ScaledRegister, DestReg)
  } else if (llvm::isPowerOf2_32(NumOfVReg + 1)) {
    Register ScaledRegister = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    // ... (SLLI ScaledRegister, DestReg, log2(N+1); SUB DestReg,
    //      ScaledRegister, DestReg)
  } else {
    Register N = MRI.createVirtualRegister(&RISCV::GPRRegClass);
    movImm(MBB, II, DL, N, NumOfVReg, Flag);
    if (!STI.hasStdExtM() && !STI.hasStdExtZmmul())
      MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
          MF.getFunction(),
          "M- or Zmmul-extension must be enabled to calculate the vscaled size/"
          "offset."});
    BuildMI(MBB, II, DL, get(RISCV::MUL), DestReg)
        .addReg(DestReg, RegState::Kill)
        .addReg(N, RegState::Kill)
        .setMIFlag(Flag);
  }
}
bool RISCVInstrInfo::hasAllNBitUsers(const MachineInstr &OrigMI,
                                     const MachineRegisterInfo &MRI,
                                     unsigned OrigBits) const {

  SmallSet<std::pair<const MachineInstr *, unsigned>, 4> Visited;
  SmallVector<std::pair<const MachineInstr *, unsigned>, 4> Worklist;

  Worklist.push_back(std::make_pair(&OrigMI, OrigBits));

  while (!Worklist.empty()) {
    auto P = Worklist.pop_back_val();
    const MachineInstr *MI = P.first;
    unsigned Bits = P.second;

    if (!Visited.insert(P).second)
      continue;

    // Only handle instructions with one def.
    if (MI->getNumExplicitDefs() != 1)
      return false;

    for (auto &UserOp : MRI.use_operands(MI->getOperand(0).getReg())) {
      const MachineInstr *UserMI = UserOp.getParent();
      unsigned OpIdx = UserOp.getOperandNo();

      switch (UserMI->getOpcode()) {
      default:
        return false;

      // W-form ops and FP conversions from i32 only read the low 32 bits.
      case RISCV::ADDIW:
      case RISCV::ADDW:
      case RISCV::DIVUW:
      case RISCV::DIVW:
      case RISCV::MULW:
      case RISCV::REMUW:
      case RISCV::REMW:
      case RISCV::SLLIW:
      case RISCV::SLLW:
      case RISCV::SRAIW:
      case RISCV::SRAW:
      case RISCV::SRLIW:
      case RISCV::SRLW:
      case RISCV::SLLI_UW:
      case RISCV::FMV_W_X:
      case RISCV::FCVT_H_W:
      case RISCV::FCVT_H_WU:
      case RISCV::FCVT_S_W:
      case RISCV::FCVT_S_WU:
      case RISCV::FCVT_D_W:
      case RISCV::FCVT_D_WU:
        if (Bits >= 32)
          break;
        return false;

      case RISCV::FMV_H_X:
      case RISCV::ZEXT_H_RV32:
      case RISCV::ZEXT_H_RV64:
        if (Bits >= 16)
          break;
        return false;

      // ... (left shifts reduce the demanded bits of the source:
      //        Worklist.push_back(std::make_pair(UserMI, Bits - ShAmt));
      //      ANDI succeeds when the mask clears only known-zero bits, i.e.
      //        Bits >= (unsigned)llvm::bit_width<uint64_t>(~Imm),
      //      and otherwise propagates the demand)

      // Ops whose low result bits depend only on the low input bits
      // propagate the same demand to their own users.
      case RISCV::AND:
      case RISCV::OR:
      case RISCV::XOR:
      case RISCV::ADD:
      case RISCV::SUB:
      case RISCV::MUL:
        Worklist.push_back(std::make_pair(UserMI, Bits));
        break;

      case RISCV::SH1ADD_UW:
      case RISCV::SH2ADD_UW:
      case RISCV::SH3ADD_UW:
        // The shifted operand only contributes its low 32 bits.
        if (OpIdx == 1 && Bits >= 32)
          break;
        Worklist.push_back(std::make_pair(UserMI, Bits));
        break;

      // Stores only read the stored width from the data operand.
      case RISCV::SB:
        if (OpIdx == 0 && Bits >= 8)
          break;
        return false;
      case RISCV::SH:
        if (OpIdx == 0 && Bits >= 16)
          break;
        return false;
      case RISCV::SW:
        if (OpIdx == 0 && Bits >= 32)
          break;
        return false;

      case RISCV::PseudoCCMOVGPR:
        // Only the two data inputs propagate the demand; the comparison
        // operands are read in full.
        if (OpIdx != 4 && OpIdx != 5)
          return false;
        Worklist.push_back(std::make_pair(UserMI, Bits));
        break;

      case RISCV::VT_MASKC:
      case RISCV::VT_MASKCN:
        if (OpIdx != 1)
          return false;
        Worklist.push_back(std::make_pair(UserMI, Bits));
        break;
      }
    }
  }

  return true;
}
// Returns true if this is the sext.w pattern, addiw rd, rs1, 0.
bool RISCV::isSEXT_W(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ADDIW && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0;
}

// Returns true if this is the zext.w pattern, add.uw rd, rs1, x0.
bool RISCV::isZEXT_W(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ADD_UW && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isReg() && MI.getOperand(2).getReg() == RISCV::X0;
}

// Returns true if this is the zext.b pattern, andi rd, rs1, 255.
bool RISCV::isZEXT_B(const MachineInstr &MI) {
  return MI.getOpcode() == RISCV::ANDI && MI.getOperand(1).isReg() &&
         MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 255;
}
static bool isRVVWholeLoadStore(unsigned Opcode) {
  switch (Opcode) {
  default:
    return false;
  case RISCV::VS1R_V:
  case RISCV::VS2R_V:
  case RISCV::VS4R_V:
  case RISCV::VS8R_V:
  case RISCV::VL1RE8_V:
  case RISCV::VL2RE8_V:
  case RISCV::VL4RE8_V:
  case RISCV::VL8RE8_V:
  case RISCV::VL1RE16_V:
  case RISCV::VL2RE16_V:
  case RISCV::VL4RE16_V:
  case RISCV::VL8RE16_V:
  case RISCV::VL1RE32_V:
  case RISCV::VL2RE32_V:
  case RISCV::VL4RE32_V:
  case RISCV::VL8RE32_V:
  case RISCV::VL1RE64_V:
  case RISCV::VL2RE64_V:
  case RISCV::VL4RE64_V:
  case RISCV::VL8RE64_V:
    return true;
  }
}

bool RISCV::isRVVSpill(const MachineInstr &MI) {
  // RVV lacks any support for immediate addressing for stack addresses, so
  // be conservative.
  unsigned Opcode = MI.getOpcode();
  if (!RISCVVPseudosTable::getPseudoInfo(Opcode) &&
      !isRVVWholeLoadStore(Opcode) && !isRVVSpillForZvlsseg(Opcode))
    return false;
  return true;
}
std::optional<std::pair<unsigned, unsigned>>
RISCV::isRVVSpillForZvlsseg(unsigned Opcode) {
  switch (Opcode) {
  default:
    return std::nullopt;
  case RISCV::PseudoVSPILL2_M1:
  case RISCV::PseudoVRELOAD2_M1:
    return std::make_pair(2u, 1u);
  case RISCV::PseudoVSPILL2_M2:
  case RISCV::PseudoVRELOAD2_M2:
    return std::make_pair(2u, 2u);
  case RISCV::PseudoVSPILL2_M4:
  case RISCV::PseudoVRELOAD2_M4:
    return std::make_pair(2u, 4u);
  case RISCV::PseudoVSPILL3_M1:
  case RISCV::PseudoVRELOAD3_M1:
    return std::make_pair(3u, 1u);
  case RISCV::PseudoVSPILL3_M2:
  case RISCV::PseudoVRELOAD3_M2:
    return std::make_pair(3u, 2u);
  case RISCV::PseudoVSPILL4_M1:
  case RISCV::PseudoVRELOAD4_M1:
    return std::make_pair(4u, 1u);
  case RISCV::PseudoVSPILL4_M2:
  case RISCV::PseudoVRELOAD4_M2:
    return std::make_pair(4u, 2u);
  case RISCV::PseudoVSPILL5_M1:
  case RISCV::PseudoVRELOAD5_M1:
    return std::make_pair(5u, 1u);
  case RISCV::PseudoVSPILL6_M1:
  case RISCV::PseudoVRELOAD6_M1:
    return std::make_pair(6u, 1u);
  case RISCV::PseudoVSPILL7_M1:
  case RISCV::PseudoVRELOAD7_M1:
    return std::make_pair(7u, 1u);
  case RISCV::PseudoVSPILL8_M1:
  case RISCV::PseudoVRELOAD8_M1:
    return std::make_pair(8u, 1u);
  }
}
bool RISCV::isFaultFirstLoad(const MachineInstr &MI) {
  return MI.getNumExplicitDefs() == 2 && MI.modifiesRegister(RISCV::VL) &&
         !MI.isInlineAsm();
}

bool RISCV::hasEqualFRM(const MachineInstr &MI1, const MachineInstr &MI2) {
  int16_t MI1FrmOpIdx =
      RISCV::getNamedOperandIdx(MI1.getOpcode(), RISCV::OpName::frm);
  int16_t MI2FrmOpIdx =
      RISCV::getNamedOperandIdx(MI2.getOpcode(), RISCV::OpName::frm);
  if (MI1FrmOpIdx < 0 || MI2FrmOpIdx < 0)
    return false;
  MachineOperand FrmOp1 = MI1.getOperand(MI1FrmOpIdx);
  MachineOperand FrmOp2 = MI2.getOperand(MI2FrmOpIdx);
  return FrmOp1.getImm() == FrmOp2.getImm();
}