68 #define DEBUG_TYPE "hexagon-instrinfo"
70 #define GET_INSTRINFO_CTOR_DTOR
71 #define GET_INSTRMAP_INFO
73 #include "HexagonGenDFAPacketizer.inc"
74 #include "HexagonGenInstrInfo.inc"
78 "packetization boundary."));
85 cl::desc("Disable schedule adjustment for new value stores."));
89 cl::desc("Enable timing class latency"));
93 cl::desc("Enable vec alu forwarding"));
97 cl::desc("Enable vec acc forwarding"));
105 cl::desc("Use the DFA based hazard recognizer."));
120 void HexagonInstrInfo::anchor() {}
127 namespace HexagonFUnits {
133 return (Reg >= Hexagon::R0 && Reg <= Hexagon::R7) ||
134 (Reg >= Hexagon::R16 && Reg <= Hexagon::R23);
146 for (; MIB != MIE; ++MIB) {
147 if (!MIB->isDebugInstr())
158 if (!(MI.getMF()->getFunction().hasOptSize()))
159 return MI.isAsCheapAsAMove();
161 if (MI.getOpcode() == Hexagon::A2_tfrsi) {
162 auto Op = MI.getOperand(1);
170 int64_t Imm = Op.getImm();
175 return MI.isAsCheapAsAMove();
190 if (isFloat(MI) && MI.hasRegisterImplicitUseOperand(Hexagon::USR))
204 if (EndLoopOp == Hexagon::ENDLOOP0) {
205 LOOPi = Hexagon::J2_loop0i;
206 LOOPr = Hexagon::J2_loop0r;
208 LOOPi = Hexagon::J2_loop1i;
209 LOOPr = Hexagon::J2_loop1r;
220 unsigned Opc = I.getOpcode();
221 if (Opc == LOOPi || Opc == LOOPr)
225 if (Opc == EndLoopOp && I.getOperand(0).getMBB() != TargetBB)
252 Uses.push_back(MO.getReg());
291 int &FrameIndex) const {
292 switch (MI.getOpcode()) {
295 case Hexagon::L2_loadri_io:
296 case Hexagon::L2_loadrd_io:
297 case Hexagon::V6_vL32b_ai:
298 case Hexagon::V6_vL32b_nt_ai:
299 case Hexagon::V6_vL32Ub_ai:
300 case Hexagon::LDriw_pred:
301 case Hexagon::LDriw_ctr:
302 case Hexagon::PS_vloadrq_ai:
303 case Hexagon::PS_vloadrw_ai:
304 case Hexagon::PS_vloadrw_nt_ai: {
312 return MI.getOperand(0).getReg();
315 case Hexagon::L2_ploadrit_io:
316 case Hexagon::L2_ploadrif_io:
317 case Hexagon::L2_ploadrdt_io:
318 case Hexagon::L2_ploadrdf_io: {
326 return MI.getOperand(0).getReg();
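// Usage sketch (illustrative only; the helper below is hypothetical and not
// part of the original file): callers typically pair isLoadFromStackSlot above
// with the isStoreToStackSlot hook that follows to recognize a spill/reload
// through the same frame index.
//
//   static bool reloadsSameSlot(const TargetInstrInfo &TII,
//                               const MachineInstr &Spill,
//                               const MachineInstr &Reload) {
//     int SpillFI = 0, ReloadFI = 0;
//     Register Stored = TII.isStoreToStackSlot(Spill, SpillFI);
//     Register Loaded = TII.isLoadFromStackSlot(Reload, ReloadFI);
//     return Stored && Loaded && SpillFI == ReloadFI;
//   }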
339 int &FrameIndex) const {
340 switch (MI.getOpcode()) {
343 case Hexagon::S2_storerb_io:
344 case Hexagon::S2_storerh_io:
345 case Hexagon::S2_storeri_io:
346 case Hexagon::S2_storerd_io:
347 case Hexagon::V6_vS32b_ai:
348 case Hexagon::V6_vS32Ub_ai:
349 case Hexagon::STriw_pred:
350 case Hexagon::STriw_ctr:
351 case Hexagon::PS_vstorerq_ai:
352 case Hexagon::PS_vstorerw_ai: {
360 return MI.getOperand(2).getReg();
363 case Hexagon::S2_pstorerbt_io:
364 case Hexagon::S2_pstorerbf_io:
365 case Hexagon::S2_pstorerht_io:
366 case Hexagon::S2_pstorerhf_io:
367 case Hexagon::S2_pstorerit_io:
368 case Hexagon::S2_pstorerif_io:
369 case Hexagon::S2_pstorerdt_io:
370 case Hexagon::S2_pstorerdf_io: {
378 return MI.getOperand(3).getReg();
394 for (++MII; MII != MBB->instr_end() && MII->isInsideBundle(); ++MII)
412 for (++MII; MII != MBB->instr_end() && MII->isInsideBundle(); ++MII)
440 bool AllowModify) const {
472 while (I->isDebugInstr()) {
478 bool JumpToBlock = I->getOpcode() == Hexagon::J2_jump &&
479 I->getOperand(0).isMBB();
481 if (AllowModify && JumpToBlock &&
484 I->eraseFromParent();
490 if (!isUnpredicatedTerminator(*I))
498 if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
500 SecondLastInst = &*I;
511 int SecLastOpcode = SecondLastInst ? SecondLastInst->getOpcode() : 0;
514 if (LastOpcode == Hexagon::J2_jump && !LastInst->getOperand(0).isMBB())
516 if (SecLastOpcode == Hexagon::J2_jump &&
527 if (LastInst && !SecondLastInst) {
528 if (LastOpcode == Hexagon::J2_jump) {
538 if (LastOpcodeHasJMP_c) {
553 << " with one jump\n";);
560 if (SecLastOpcodeHasJMP_c && (LastOpcode == Hexagon::J2_jump)) {
571 if (SecLastOpcodeHasNVJump &&
573 (LastOpcode == Hexagon::J2_jump)) {
584 if (SecLastOpcode == Hexagon::J2_jump && LastOpcode == Hexagon::J2_jump) {
588 I->eraseFromParent();
593 if (isEndLoopN(SecLastOpcode) && LastOpcode == Hexagon::J2_jump) {
601 << " with two jumps";);
607 int *BytesRemoved) const {
608 assert(!BytesRemoved && "code size not handled");
615 if (I->isDebugInstr())
620 if (Count && (I->getOpcode() == Hexagon::J2_jump))
634 int *BytesAdded) const {
635 unsigned BOpc = Hexagon::J2_jump;
636 unsigned BccOpc = Hexagon::J2_jumpt;
638 assert(TBB && "insertBranch must not be told to insert a fallthrough");
639 assert(!BytesAdded && "code size not handled");
644 if (!Cond.empty() && Cond[0].isImm())
645 BccOpc = Cond[0].getImm();
665 int EndLoopOp = Cond[0].getImm();
672 assert(Loop != nullptr && "Inserting an ENDLOOP without a LOOP");
673 Loop->getOperand(0).setMBB(TBB);
677 assert((Cond.size() == 3) && "Only supporting rr/ri version of nvjump");
694 assert((Cond.size() == 2) && "Malformed cond vector");
702 "Cond. cannot be empty when multiple branchings are required");
704 "NV-jump cannot be inserted with another branch");
707 int EndLoopOp = Cond[0].getImm();
714 assert(Loop != nullptr && "Inserting an ENDLOOP without a LOOP");
715 Loop->getOperand(0).setMBB(TBB);
744 TripCount = Loop->getOpcode() == Hexagon::J2_loop0r
745 ? -1
746 : Loop->getOperand(1).getImm();
748 LoopCount = Loop->getOperand(1).getReg();
751 bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
753 return MI == EndLoop;
756 std::optional<bool> createTripCountGreaterCondition(
759 if (TripCount == -1) {
763 TII->get(Hexagon::C2_cmpgtui), Done)
771 return TripCount > TC;
779 void adjustTripCount(int TripCountAdjust) override {
782 if (Loop->getOpcode() == Hexagon::J2_loop0i ||
783 Loop->getOpcode() == Hexagon::J2_loop1i) {
784 int64_t TripCount = Loop->getOperand(1).getImm() + TripCountAdjust;
785 assert(TripCount > 0 && "Can't create an empty or negative loop!");
786 Loop->getOperand(1).setImm(TripCount);
795 TII->get(Hexagon::A2_addi), NewLoopCount)
798 Loop->getOperand(1).setReg(NewLoopCount);
801 void disposed() override { Loop->eraseFromParent(); }
805 std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
813 LoopBB, I->getOpcode(), I->getOperand(0).getMBB(), VisitedBBs);
815 return std::make_unique<HexagonPipelinerLoopInfo>(LoopInst, &*I);
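// Reading the pipeliner hooks above: a register-form loop (J2_loop0r) is
// recorded with TripCount == -1, so createTripCountGreaterCondition must emit
// a runtime C2_cmpgtui compare against the saved loop-count register, while an
// immediate-form loop (J2_loop0i / J2_loop1i) can be answered at compile time
// and has its count operand rewritten in place by adjustTripCount (with
// A2_addi handling the register-form adjustment).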
821 unsigned NumCycles, unsigned ExtraPredCycles,
835 return NumInstrs <= 4;
843 for (auto I = B.begin(); I != E; ++I) {
853 for (auto I = B.rbegin(); I != E; ++I)
864 if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg)) {
866 .addReg(SrcReg, KillFlag);
869 if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg)) {
871 .addReg(SrcReg, KillFlag);
874 if (Hexagon::PredRegsRegClass.contains(SrcReg, DestReg)) {
880 if (Hexagon::CtrRegsRegClass.contains(DestReg) &&
881 Hexagon::IntRegsRegClass.contains(SrcReg)) {
883 .addReg(SrcReg, KillFlag);
886 if (Hexagon::IntRegsRegClass.contains(DestReg) &&
887 Hexagon::CtrRegsRegClass.contains(SrcReg)) {
889 .addReg(SrcReg, KillFlag);
892 if (Hexagon::ModRegsRegClass.contains(DestReg) &&
893 Hexagon::IntRegsRegClass.contains(SrcReg)) {
895 .addReg(SrcReg, KillFlag);
898 if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
899 Hexagon::IntRegsRegClass.contains(DestReg)) {
901 .addReg(SrcReg, KillFlag);
904 if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
905 Hexagon::PredRegsRegClass.contains(DestReg)) {
907 .addReg(SrcReg, KillFlag);
910 if (Hexagon::PredRegsRegClass.contains(SrcReg) &&
911 Hexagon::IntRegsRegClass.contains(DestReg)) {
913 .addReg(SrcReg, KillFlag);
916 if (Hexagon::HvxVRRegClass.contains(SrcReg, DestReg)) {
918 addReg(SrcReg, KillFlag);
921 if (Hexagon::HvxWRRegClass.contains(SrcReg, DestReg)) {
924 Register SrcLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
925 Register SrcHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
929 .addReg(SrcHi, KillFlag | UndefHi)
930 .addReg(SrcLo, KillFlag | UndefLo);
933 if (Hexagon::HvxQRRegClass.contains(SrcReg, DestReg)) {
936 .addReg(SrcReg, KillFlag);
939 if (Hexagon::HvxQRRegClass.contains(SrcReg) &&
940 Hexagon::HvxVRRegClass.contains(DestReg)) {
944 if (Hexagon::HvxQRRegClass.contains(DestReg) &&
945 Hexagon::HvxVRRegClass.contains(SrcReg)) {
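// Simplified sketch of the dispatch above (illustrative only, not the complete
// implementation): for the scalar classes, copyPhysReg amounts to choosing one
// transfer opcode per (source, destination) register-class pair.
//
//   static unsigned scalarCopyOpcode(Register DestReg, Register SrcReg) {
//     if (Hexagon::IntRegsRegClass.contains(SrcReg, DestReg))
//       return Hexagon::A2_tfr;      // Rd = Rs
//     if (Hexagon::DoubleRegsRegClass.contains(SrcReg, DestReg))
//       return Hexagon::A2_tfrp;     // Rdd = Rss
//     if (Hexagon::CtrRegsRegClass.contains(DestReg) &&
//         Hexagon::IntRegsRegClass.contains(SrcReg))
//       return Hexagon::A2_tfrrcr;   // Cd = Rs
//     return 0;                      // remaining pairs need the cases above
//   }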
960 Register SrcReg, bool isKill, int FI,
973 if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
977 } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
981 } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
985 } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
989 } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
993 } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
997 } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
1020 if (Hexagon::IntRegsRegClass.hasSubClassEq(RC)) {
1023 } else if (Hexagon::DoubleRegsRegClass.hasSubClassEq(RC)) {
1026 } else if (Hexagon::PredRegsRegClass.hasSubClassEq(RC)) {
1029 } else if (Hexagon::ModRegsRegClass.hasSubClassEq(RC)) {
1032 } else if (Hexagon::HvxQRRegClass.hasSubClassEq(RC)) {
1035 } else if (Hexagon::HvxVRRegClass.hasSubClassEq(RC)) {
1038 } else if (Hexagon::HvxWRRegClass.hasSubClassEq(RC)) {
1059 unsigned Opc = MI.getOpcode();
1061 auto RealCirc = [&](unsigned Opc, bool HasImm, unsigned MxOp) {
1063 Register CSx = (Mx == Hexagon::M0 ? Hexagon::CS0 : Hexagon::CS1);
1065 .add(MI.getOperand((HasImm ? 5 : 4)));
1069 MIB.add(MI.getOperand(4));
1076 if (MI.memoperands().empty())
1079 return MMO->getAlign() >= NeedAlign;
1084 case Hexagon::PS_call_instrprof_custom: {
1085 auto Op0 = MI.getOperand(0);
1087 "First operand must be a global containing handler name.");
1091 StringRef NameStr = Arr->isCString() ? Arr->getAsCString() : Arr->getAsString();
1117 MIB.addExternalSymbol(cstr);
1121 case TargetOpcode::COPY: {
1132 case Hexagon::PS_aligna:
1135 .addImm(-MI.getOperand(1).getImm());
1138 case Hexagon::V6_vassignp: {
1141 Register SrcLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
1142 Register SrcHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
1149 .addReg(SrcLo, Kill | UndefLo);
1153 case Hexagon::V6_lo: {
1156 Register SrcSubLo = HRI.getSubReg(SrcReg, Hexagon::vsub_lo);
1159 MRI.clearKillFlags(SrcSubLo);
1162 case Hexagon::V6_hi: {
1165 Register SrcSubHi = HRI.getSubReg(SrcReg, Hexagon::vsub_hi);
1168 MRI.clearKillFlags(SrcSubHi);
1171 case Hexagon::PS_vloadrv_ai: {
1175 int Offset = MI.getOperand(2).getImm();
1176 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1177 unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vL32b_ai
1178 : Hexagon::V6_vL32Ub_ai;
1186 case Hexagon::PS_vloadrw_ai: {
1190 int Offset = MI.getOperand(2).getImm();
1191 unsigned VecOffset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1192 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1193 unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vL32b_ai
1194 : Hexagon::V6_vL32Ub_ai;
1196 HRI.getSubReg(DstReg, Hexagon::vsub_lo))
1201 HRI.getSubReg(DstReg, Hexagon::vsub_hi))
1208 case Hexagon::PS_vstorerv_ai: {
1213 int Offset = MI.getOperand(1).getImm();
1214 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1215 unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vS32b_ai
1216 : Hexagon::V6_vS32Ub_ai;
1225 case Hexagon::PS_vstorerw_ai: {
1229 int Offset = MI.getOperand(1).getImm();
1230 unsigned VecOffset = HRI.getSpillSize(Hexagon::HvxVRRegClass);
1231 Align NeedAlign = HRI.getSpillAlign(Hexagon::HvxVRRegClass);
1232 unsigned NewOpc = UseAligned(MI, NeedAlign) ? Hexagon::V6_vS32b_ai
1233 : Hexagon::V6_vS32Ub_ai;
1237 .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_lo))
1242 .addReg(HRI.getSubReg(SrcReg, Hexagon::vsub_hi))
1247 case Hexagon::PS_true: {
1255 case Hexagon::PS_false: {
1263 case Hexagon::PS_qtrue: {
1270 case Hexagon::PS_qfalse: {
1277 case Hexagon::PS_vdd0: {
1285 case Hexagon::PS_vmulw: {
1288 Register Src1Reg = MI.getOperand(1).getReg();
1289 Register Src2Reg = MI.getOperand(2).getReg();
1290 Register Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
1291 Register Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
1292 Register Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
1293 Register Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
1295 HRI.getSubReg(DstReg, Hexagon::isub_hi))
1299 HRI.getSubReg(DstReg, Hexagon::isub_lo))
1303 MRI.clearKillFlags(Src1SubHi);
1304 MRI.clearKillFlags(Src1SubLo);
1305 MRI.clearKillFlags(Src2SubHi);
1306 MRI.clearKillFlags(Src2SubLo);
1309 case Hexagon::PS_vmulw_acc: {
1312 Register Src1Reg = MI.getOperand(1).getReg();
1313 Register Src2Reg = MI.getOperand(2).getReg();
1314 Register Src3Reg = MI.getOperand(3).getReg();
1315 Register Src1SubHi = HRI.getSubReg(Src1Reg, Hexagon::isub_hi);
1316 Register Src1SubLo = HRI.getSubReg(Src1Reg, Hexagon::isub_lo);
1317 Register Src2SubHi = HRI.getSubReg(Src2Reg, Hexagon::isub_hi);
1318 Register Src2SubLo = HRI.getSubReg(Src2Reg, Hexagon::isub_lo);
1319 Register Src3SubHi = HRI.getSubReg(Src3Reg, Hexagon::isub_hi);
1320 Register Src3SubLo = HRI.getSubReg(Src3Reg, Hexagon::isub_lo);
1322 HRI.getSubReg(DstReg, Hexagon::isub_hi))
1327 HRI.getSubReg(DstReg, Hexagon::isub_lo))
1332 MRI.clearKillFlags(Src1SubHi);
1333 MRI.clearKillFlags(Src1SubLo);
1334 MRI.clearKillFlags(Src2SubHi);
1335 MRI.clearKillFlags(Src2SubLo);
1336 MRI.clearKillFlags(Src3SubHi);
1337 MRI.clearKillFlags(Src3SubLo);
1340 case Hexagon::PS_pselect: {
1355 .addReg(Pu, (Rd == Rt) ? K1 : 0)
1364 case Hexagon::PS_vselect: {
1376 unsigned S = Op0.getReg() != Op3.getReg() ? PState & ~RegState::Kill
1397 case Hexagon::PS_wselect: {
1409 unsigned S = Op0.getReg() != Op3.getReg() ? PState & ~RegState::Kill
1437 case Hexagon::PS_crash: {
1454 OS << "MisalignedCrash";
1458 static const CrashPseudoSourceValue CrashPSV(MF.getTarget());
1470 case Hexagon::PS_tailcall_i:
1471 MI.setDesc(get(Hexagon::J2_jump));
1473 case Hexagon::PS_tailcall_r:
1474 case Hexagon::PS_jmpret:
1475 MI.setDesc(get(Hexagon::J2_jumpr));
1477 case Hexagon::PS_jmprett:
1478 MI.setDesc(get(Hexagon::J2_jumprt));
1480 case Hexagon::PS_jmpretf:
1481 MI.setDesc(get(Hexagon::J2_jumprf));
1483 case Hexagon::PS_jmprettnewpt:
1484 MI.setDesc(get(Hexagon::J2_jumprtnewpt));
1486 case Hexagon::PS_jmpretfnewpt:
1487 MI.setDesc(get(Hexagon::J2_jumprfnewpt));
1489 case Hexagon::PS_jmprettnew:
1490 MI.setDesc(get(Hexagon::J2_jumprtnew));
1492 case Hexagon::PS_jmpretfnew:
1493 MI.setDesc(get(Hexagon::J2_jumprfnew));
1496 case Hexagon::PS_loadrub_pci:
1497 return RealCirc(Hexagon::L2_loadrub_pci, true, 4);
1498 case Hexagon::PS_loadrb_pci:
1499 return RealCirc(Hexagon::L2_loadrb_pci, true, 4);
1500 case Hexagon::PS_loadruh_pci:
1501 return RealCirc(Hexagon::L2_loadruh_pci, true, 4);
1502 case Hexagon::PS_loadrh_pci:
1503 return RealCirc(Hexagon::L2_loadrh_pci, true, 4);
1504 case Hexagon::PS_loadri_pci:
1505 return RealCirc(Hexagon::L2_loadri_pci, true, 4);
1506 case Hexagon::PS_loadrd_pci:
1507 return RealCirc(Hexagon::L2_loadrd_pci, true, 4);
1508 case Hexagon::PS_loadrub_pcr:
1509 return RealCirc(Hexagon::L2_loadrub_pcr, false, 3);
1510 case Hexagon::PS_loadrb_pcr:
1511 return RealCirc(Hexagon::L2_loadrb_pcr, false, 3);
1512 case Hexagon::PS_loadruh_pcr:
1513 return RealCirc(Hexagon::L2_loadruh_pcr, false, 3);
1514 case Hexagon::PS_loadrh_pcr:
1515 return RealCirc(Hexagon::L2_loadrh_pcr, false, 3);
1516 case Hexagon::PS_loadri_pcr:
1517 return RealCirc(Hexagon::L2_loadri_pcr, false, 3);
1518 case Hexagon::PS_loadrd_pcr:
1519 return RealCirc(Hexagon::L2_loadrd_pcr, false, 3);
1520 case Hexagon::PS_storerb_pci:
1521 return RealCirc(Hexagon::S2_storerb_pci, true, 3);
1522 case Hexagon::PS_storerh_pci:
1523 return RealCirc(Hexagon::S2_storerh_pci, true, 3);
1524 case Hexagon::PS_storerf_pci:
1525 return RealCirc(Hexagon::S2_storerf_pci, true, 3);
1526 case Hexagon::PS_storeri_pci:
1527 return RealCirc(Hexagon::S2_storeri_pci, true, 3);
1528 case Hexagon::PS_storerd_pci:
1529 return RealCirc(Hexagon::S2_storerd_pci, true, 3);
1530 case Hexagon::PS_storerb_pcr:
1531 return RealCirc(Hexagon::S2_storerb_pcr, false, 2);
1532 case Hexagon::PS_storerh_pcr:
1533 return RealCirc(Hexagon::S2_storerh_pcr, false, 2);
1534 case Hexagon::PS_storerf_pcr:
1535 return RealCirc(Hexagon::S2_storerf_pcr, false, 2);
1536 case Hexagon::PS_storeri_pcr:
1537 return RealCirc(Hexagon::S2_storeri_pcr, false, 2);
1538 case Hexagon::PS_storerd_pcr:
1539 return RealCirc(Hexagon::S2_storerd_pcr, false, 2);
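// The table above is mechanical: every PS_*_pci pseudo expands to the real
// *_pci circular-addressing form with an immediate increment (HasImm = true),
// and every PS_*_pcr pseudo expands to the register-increment *_pcr form
// (HasImm = false); the last argument is the operand index holding the Mx
// modifier register consumed by the RealCirc lambda. For example, the circular
// word load is simply RealCirc(Hexagon::L2_loadri_pci, true, 4).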
1549 unsigned Opc = MI.getOpcode();
1553 case Hexagon::V6_vgathermh_pseudo:
1555 .add(MI.getOperand(2))
1556 .add(MI.getOperand(3))
1557 .add(MI.getOperand(4));
1559 .add(MI.getOperand(0))
1563 return First.getInstrIterator();
1565 case Hexagon::V6_vgathermw_pseudo:
1567 .add(MI.getOperand(2))
1568 .add(MI.getOperand(3))
1569 .add(MI.getOperand(4));
1571 .add(MI.getOperand(0))
1575 return First.getInstrIterator();
1577 case Hexagon::V6_vgathermhw_pseudo:
1579 .add(MI.getOperand(2))
1580 .add(MI.getOperand(3))
1581 .add(MI.getOperand(4));
1583 .add(MI.getOperand(0))
1587 return First.getInstrIterator();
1589 case Hexagon::V6_vgathermhq_pseudo:
1591 .add(MI.getOperand(2))
1592 .add(MI.getOperand(3))
1593 .add(MI.getOperand(4))
1594 .add(MI.getOperand(5));
1596 .add(MI.getOperand(0))
1600 return First.getInstrIterator();
1602 case Hexagon::V6_vgathermwq_pseudo:
1604 .add(MI.getOperand(2))
1605 .add(MI.getOperand(3))
1606 .add(MI.getOperand(4))
1607 .add(MI.getOperand(5));
1609 .add(MI.getOperand(0))
1613 return First.getInstrIterator();
1615 case Hexagon::V6_vgathermhwq_pseudo:
1617 .add(MI.getOperand(2))
1618 .add(MI.getOperand(3))
1619 .add(MI.getOperand(4))
1620 .add(MI.getOperand(5));
1622 .add(MI.getOperand(0))
1626 return First.getInstrIterator();
1629 return MI.getIterator();
1638 assert(Cond[0].isImm() && "First entry in the cond vector not imm-val");
1639 unsigned opcode = Cond[0].getImm();
1645 Cond[0].setImm(NewOpcode);
1679 int Opc = MI.getOpcode();
1692 unsigned NOp = 0, NumOps = MI.getNumOperands();
1693 while (NOp < NumOps) {
1695 if (!Op.isReg() || !Op.isDef() || Op.isImplicit())
1702 unsigned PredRegPos, PredRegFlags;
1703 bool GotPredReg = getPredReg(Cond, PredReg, PredRegPos, PredRegFlags);
1706 T.addReg(PredReg, PredRegFlags);
1707 while (NOp < NumOps)
1708 T.add(MI.getOperand(NOp++));
1710 MI.setDesc(get(PredOpc));
1711 while (unsigned n = MI.getNumOperands())
1712 MI.removeOperand(n-1);
1713 for (unsigned i = 0, n = T->getNumOperands(); i < n; ++i)
1714 MI.addOperand(T->getOperand(i));
1720 MRI.clearKillFlags(PredReg);
1731 std::vector<MachineOperand> &Pred,
1732 bool SkipDead) const {
1740 if (RC == &Hexagon::PredRegsRegClass) {
1745 } else if (MO.isRegMask()) {
1746 for (Register PR : Hexagon::PredRegsRegClass) {
1747 if (!MI.modifiesRegister(PR, &HRI))
1758 if (!MI.getDesc().isPredicable())
1768 switch (MI.getOpcode()) {
1769 case Hexagon::V6_vL32b_ai:
1770 case Hexagon::V6_vL32b_pi:
1771 case Hexagon::V6_vL32b_ppu:
1772 case Hexagon::V6_vL32b_cur_ai:
1773 case Hexagon::V6_vL32b_cur_pi:
1774 case Hexagon::V6_vL32b_cur_ppu:
1775 case Hexagon::V6_vL32b_nt_ai:
1776 case Hexagon::V6_vL32b_nt_pi:
1777 case Hexagon::V6_vL32b_nt_ppu:
1778 case Hexagon::V6_vL32b_tmp_ai:
1779 case Hexagon::V6_vL32b_tmp_pi:
1780 case Hexagon::V6_vL32b_tmp_ppu:
1781 case Hexagon::V6_vL32b_nt_cur_ai:
1782 case Hexagon::V6_vL32b_nt_cur_pi:
1783 case Hexagon::V6_vL32b_nt_cur_ppu:
1784 case Hexagon::V6_vL32b_nt_tmp_ai:
1785 case Hexagon::V6_vL32b_nt_tmp_pi:
1786 case Hexagon::V6_vL32b_nt_tmp_ppu:
1802 if (MI.isDebugInstr())
1818 if (MI.getDesc().isTerminator() || MI.isPosition())
1822 if (MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1846 bool atInsnStart = true;
1849 for (; *Str; ++Str) {
1853 if (atInsnStart && !isSpace(static_cast<unsigned char>(*Str))) {
1855 atInsnStart = false;
1859 atInsnStart = false;
1882 int64_t &Value) const {
1883 unsigned Opc = MI.getOpcode();
1887 case Hexagon::C2_cmpeq:
1888 case Hexagon::C2_cmpeqp:
1889 case Hexagon::C2_cmpgt:
1890 case Hexagon::C2_cmpgtp:
1891 case Hexagon::C2_cmpgtu:
1892 case Hexagon::C2_cmpgtup:
1893 case Hexagon::C4_cmpneq:
1894 case Hexagon::C4_cmplte:
1895 case Hexagon::C4_cmplteu:
1896 case Hexagon::C2_cmpeqi:
1897 case Hexagon::C2_cmpgti:
1898 case Hexagon::C2_cmpgtui:
1899 case Hexagon::C4_cmpneqi:
1900 case Hexagon::C4_cmplteui:
1901 case Hexagon::C4_cmpltei:
1902 SrcReg = MI.getOperand(1).getReg();
1905 case Hexagon::A4_cmpbeq:
1906 case Hexagon::A4_cmpbgt:
1907 case Hexagon::A4_cmpbgtu:
1908 case Hexagon::A4_cmpbeqi:
1909 case Hexagon::A4_cmpbgti:
1910 case Hexagon::A4_cmpbgtui:
1911 SrcReg = MI.getOperand(1).getReg();
1914 case Hexagon::A4_cmpheq:
1915 case Hexagon::A4_cmphgt:
1916 case Hexagon::A4_cmphgtu:
1917 case Hexagon::A4_cmpheqi:
1918 case Hexagon::A4_cmphgti:
1919 case Hexagon::A4_cmphgtui:
1920 SrcReg = MI.getOperand(1).getReg();
1927 case Hexagon::C2_cmpeq:
1928 case Hexagon::C2_cmpeqp:
1929 case Hexagon::C2_cmpgt:
1930 case Hexagon::C2_cmpgtp:
1931 case Hexagon::C2_cmpgtu:
1932 case Hexagon::C2_cmpgtup:
1933 case Hexagon::A4_cmpbeq:
1934 case Hexagon::A4_cmpbgt:
1935 case Hexagon::A4_cmpbgtu:
1936 case Hexagon::A4_cmpheq:
1937 case Hexagon::A4_cmphgt:
1938 case Hexagon::A4_cmphgtu:
1939 case Hexagon::C4_cmpneq:
1940 case Hexagon::C4_cmplte:
1941 case Hexagon::C4_cmplteu:
1942 SrcReg2 = MI.getOperand(2).getReg();
1946 case Hexagon::C2_cmpeqi:
1947 case Hexagon::C2_cmpgtui:
1948 case Hexagon::C2_cmpgti:
1949 case Hexagon::C4_cmpneqi:
1950 case Hexagon::C4_cmplteui:
1951 case Hexagon::C4_cmpltei:
1952 case Hexagon::A4_cmpbeqi:
1953 case Hexagon::A4_cmpbgti:
1954 case Hexagon::A4_cmpbgtui:
1955 case Hexagon::A4_cmpheqi:
1956 case Hexagon::A4_cmphgti:
1957 case Hexagon::A4_cmphgtui: {
1962 Value = MI.getOperand(2).getImm();
1972 unsigned *PredCost) const {
1998 unsigned BasePosA, OffsetPosA;
2006 unsigned BasePosB, OffsetPosB;
2013 if (BaseRegA != BaseRegB || BaseSubA != BaseSubB)
2031 if (OffsetA > OffsetB) {
2033 return SizeB <= OffDiff;
2035 if (OffsetA < OffsetB) {
2037 return SizeA <= OffDiff;
2047 unsigned BasePos = 0, OffsetPos = 0;
2051 if (OffsetOp.isImm()) {
2055 } else if (MI.getOpcode() == Hexagon::A2_addi) {
2057 if (AddOp.isImm()) {
2066std::pair<unsigned, unsigned>
2074 using namespace HexagonII;
2076 static const std::pair<unsigned, const char*> Flags[] = {
2077 {MO_PCREL, "hexagon-pcrel"},
2078 {MO_GOT, "hexagon-got"},
2079 {MO_LO16, "hexagon-lo16"},
2080 {MO_HI16, "hexagon-hi16"},
2081 {MO_GPREL, "hexagon-gprel"},
2082 {MO_GDGOT, "hexagon-gdgot"},
2083 {MO_GDPLT, "hexagon-gdplt"},
2084 {MO_IE, "hexagon-ie"},
2085 {MO_IEGOT, "hexagon-iegot"},
2086 {MO_TPREL, "hexagon-tprel"}
2093 using namespace HexagonII;
2095 static const std::pair<unsigned, const char*> Flags[] = {
2096 {HMOTF_ConstExtended, "hexagon-ext"}
2104 if (VT == MVT::i1) {
2105 TRC = &Hexagon::PredRegsRegClass;
2106 } else if (VT == MVT::i32 || VT == MVT::f32) {
2107 TRC = &Hexagon::IntRegsRegClass;
2108 } else if (VT == MVT::i64 || VT == MVT::f64) {
2109 TRC = &Hexagon::DoubleRegsRegClass;
2133 !MI.getDesc().mayStore() &&
2134 MI.getDesc().getOpcode() != Hexagon::S2_allocframe &&
2135 MI.getDesc().getOpcode() != Hexagon::L2_deallocframe &&
2180 assert(MO.isImm() && "Extendable operand must be Immediate type");
2184 int32_t SValue = Value;
2187 return SValue < MinValue || SValue > MaxValue;
2192 return UValue < MinValue || UValue > MaxValue;
2196 switch (MI.getOpcode()) {
2197 case Hexagon::L4_return:
2198 case Hexagon::L4_return_t:
2199 case Hexagon::L4_return_f:
2200 case Hexagon::L4_return_tnew_pnt:
2201 case Hexagon::L4_return_fnew_pnt:
2202 case Hexagon::L4_return_tnew_pt:
2203 case Hexagon::L4_return_fnew_pt:
2224 for (auto &RegA : DefsA)
2225 for (auto &RegB : UsesB) {
2242 switch (MI.getOpcode()) {
2243 case Hexagon::V6_vL32b_cur_pi:
2244 case Hexagon::V6_vL32b_cur_ai:
2268 return (Opcode == Hexagon::ENDLOOP0 ||
2269 Opcode == Hexagon::ENDLOOP1);
2294 switch (MI.getOpcode()) {
2296 case Hexagon::PS_fi:
2297 case Hexagon::PS_fia:
2322 unsigned Opcode = MI.getOpcode();
2332 if (!I.mayLoad() && !I.mayStore())
2338 switch (MI.getOpcode()) {
2339 case Hexagon::J2_callr:
2340 case Hexagon::J2_callrf:
2341 case Hexagon::J2_callrt:
2342 case Hexagon::PS_call_nr:
2349 switch (MI.getOpcode()) {
2350 case Hexagon::L4_return:
2351 case Hexagon::L4_return_t:
2352 case Hexagon::L4_return_f:
2353 case Hexagon::L4_return_fnew_pnt:
2354 case Hexagon::L4_return_fnew_pt:
2355 case Hexagon::L4_return_tnew_pnt:
2356 case Hexagon::L4_return_tnew_pt:
2363 switch (MI.getOpcode()) {
2364 case Hexagon::J2_jumpr:
2365 case Hexagon::J2_jumprt:
2366 case Hexagon::J2_jumprf:
2367 case Hexagon::J2_jumprtnewpt:
2368 case Hexagon::J2_jumprfnewpt:
2369 case Hexagon::J2_jumprtnew:
2370 case Hexagon::J2_jumprfnew:
2381 unsigned offset) const {
2385 return isInt<11>(offset);
2387 switch (MI.getOpcode()) {
2391 case Hexagon::J2_jump:
2392 case Hexagon::J2_call:
2393 case Hexagon::PS_call_nr:
2394 return isInt<24>(offset);
2395 case Hexagon::J2_jumpt:
2396 case Hexagon::J2_jumpf:
2397 case Hexagon::J2_jumptnew:
2398 case Hexagon::J2_jumptnewpt:
2399 case Hexagon::J2_jumpfnew:
2400 case Hexagon::J2_jumpfnewpt:
2401 case Hexagon::J2_callt:
2402 case Hexagon::J2_callf:
2403 return isInt<17>(offset);
2404 case Hexagon::J2_loop0i:
2405 case Hexagon::J2_loop0iext:
2406 case Hexagon::J2_loop0r:
2407 case Hexagon::J2_loop0rext:
2408 case Hexagon::J2_loop1i:
2409 case Hexagon::J2_loop1iext:
2410 case Hexagon::J2_loop1r:
2411 case Hexagon::J2_loop1rext:
2412 return isInt<9>(offset);
2414 case Hexagon::J4_cmpeqi_tp0_jump_nt:
2415 case Hexagon::J4_cmpeqi_tp1_jump_nt:
2416 case Hexagon::J4_cmpeqn1_tp0_jump_nt:
2417 case Hexagon::J4_cmpeqn1_tp1_jump_nt:
2418 return isInt<11>(offset);
2429 unsigned Opcode = MI.getOpcode();
2430 return Opcode == Hexagon::J2_loop0i ||
2431 Opcode == Hexagon::J2_loop0r ||
2432 Opcode == Hexagon::J2_loop0iext ||
2433 Opcode == Hexagon::J2_loop0rext ||
2434 Opcode == Hexagon::J2_loop1i ||
2435 Opcode == Hexagon::J2_loop1r ||
2436 Opcode == Hexagon::J2_loop1iext ||
2437 Opcode == Hexagon::J2_loop1rext;
2441 switch (MI.getOpcode()) {
2442 default: return false;
2443 case Hexagon::L4_iadd_memopw_io:
2444 case Hexagon::L4_isub_memopw_io:
2445 case Hexagon::L4_add_memopw_io:
2446 case Hexagon::L4_sub_memopw_io:
2447 case Hexagon::L4_and_memopw_io:
2448 case Hexagon::L4_or_memopw_io:
2449 case Hexagon::L4_iadd_memoph_io:
2450 case Hexagon::L4_isub_memoph_io:
2451 case Hexagon::L4_add_memoph_io:
2452 case Hexagon::L4_sub_memoph_io:
2453 case Hexagon::L4_and_memoph_io:
2454 case Hexagon::L4_or_memoph_io:
2455 case Hexagon::L4_iadd_memopb_io:
2456 case Hexagon::L4_isub_memopb_io:
2457 case Hexagon::L4_add_memopb_io:
2458 case Hexagon::L4_sub_memopb_io:
2459 case Hexagon::L4_and_memopb_io:
2460 case Hexagon::L4_or_memopb_io:
2461 case Hexagon::L4_ior_memopb_io:
2462 case Hexagon::L4_ior_memoph_io:
2463 case Hexagon::L4_ior_memopw_io:
2464 case Hexagon::L4_iand_memopb_io:
2465 case Hexagon::L4_iand_memoph_io:
2466 case Hexagon::L4_iand_memopw_io:
2506 unsigned OperandNum) const {
2556 return MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4 ||
2557 MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT ||
2558 MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_PIC ||
2559 MI.getOpcode() == Hexagon::SAVE_REGISTERS_CALL_V4_EXT_PIC;
2563 switch (MI.getOpcode()) {
2565 case Hexagon::L2_loadrb_io:
2566 case Hexagon::L4_loadrb_ur:
2567 case Hexagon::L4_loadrb_ap:
2568 case Hexagon::L2_loadrb_pr:
2569 case Hexagon::L2_loadrb_pbr:
2570 case Hexagon::L2_loadrb_pi:
2571 case Hexagon::L2_loadrb_pci:
2572 case Hexagon::L2_loadrb_pcr:
2573 case Hexagon::L2_loadbsw2_io:
2574 case Hexagon::L4_loadbsw2_ur:
2575 case Hexagon::L4_loadbsw2_ap:
2576 case Hexagon::L2_loadbsw2_pr:
2577 case Hexagon::L2_loadbsw2_pbr:
2578 case Hexagon::L2_loadbsw2_pi:
2579 case Hexagon::L2_loadbsw2_pci:
2580 case Hexagon::L2_loadbsw2_pcr:
2581 case Hexagon::L2_loadbsw4_io:
2582 case Hexagon::L4_loadbsw4_ur:
2583 case Hexagon::L4_loadbsw4_ap:
2584 case Hexagon::L2_loadbsw4_pr:
2585 case Hexagon::L2_loadbsw4_pbr:
2586 case Hexagon::L2_loadbsw4_pi:
2587 case Hexagon::L2_loadbsw4_pci:
2588 case Hexagon::L2_loadbsw4_pcr:
2589 case Hexagon::L4_loadrb_rr:
2590 case Hexagon::L2_ploadrbt_io:
2591 case Hexagon::L2_ploadrbt_pi:
2592 case Hexagon::L2_ploadrbf_io:
2593 case Hexagon::L2_ploadrbf_pi:
2594 case Hexagon::L2_ploadrbtnew_io:
2595 case Hexagon::L2_ploadrbfnew_io:
2596 case Hexagon::L4_ploadrbt_rr:
2597 case Hexagon::L4_ploadrbf_rr:
2598 case Hexagon::L4_ploadrbtnew_rr:
2599 case Hexagon::L4_ploadrbfnew_rr:
2600 case Hexagon::L2_ploadrbtnew_pi:
2601 case Hexagon::L2_ploadrbfnew_pi:
2602 case Hexagon::L4_ploadrbt_abs:
2603 case Hexagon::L4_ploadrbf_abs:
2604 case Hexagon::L4_ploadrbtnew_abs:
2605 case Hexagon::L4_ploadrbfnew_abs:
2606 case Hexagon::L2_loadrbgp:
2608 case Hexagon::L2_loadrh_io:
2609 case Hexagon::L4_loadrh_ur:
2610 case Hexagon::L4_loadrh_ap:
2611 case Hexagon::L2_loadrh_pr:
2612 case Hexagon::L2_loadrh_pbr:
2613 case Hexagon::L2_loadrh_pi:
2614 case Hexagon::L2_loadrh_pci:
2615 case Hexagon::L2_loadrh_pcr:
2616 case Hexagon::L4_loadrh_rr:
2617 case Hexagon::L2_ploadrht_io:
2618 case Hexagon::L2_ploadrht_pi:
2619 case Hexagon::L2_ploadrhf_io:
2620 case Hexagon::L2_ploadrhf_pi:
2621 case Hexagon::L2_ploadrhtnew_io:
2622 case Hexagon::L2_ploadrhfnew_io:
2623 case Hexagon::L4_ploadrht_rr:
2624 case Hexagon::L4_ploadrhf_rr:
2625 case Hexagon::L4_ploadrhtnew_rr:
2626 case Hexagon::L4_ploadrhfnew_rr:
2627 case Hexagon::L2_ploadrhtnew_pi:
2628 case Hexagon::L2_ploadrhfnew_pi:
2629 case Hexagon::L4_ploadrht_abs:
2630 case Hexagon::L4_ploadrhf_abs:
2631 case Hexagon::L4_ploadrhtnew_abs:
2632 case Hexagon::L4_ploadrhfnew_abs:
2633 case Hexagon::L2_loadrhgp:
2646 switch (MI.getOpcode()) {
2647 case Hexagon::STriw_pred:
2648 case Hexagon::LDriw_pred:
2659 for (auto &Op : MI.operands())
2660 if (Op.isGlobal() || Op.isSymbol())
2667 unsigned SchedClass = MI.getDesc().getSchedClass();
2668 return is_TC1(SchedClass);
2672 unsigned SchedClass = MI.getDesc().getSchedClass();
2673 return is_TC2(SchedClass);
2677 unsigned SchedClass = MI.getDesc().getSchedClass();
2682 unsigned SchedClass = MI.getDesc().getSchedClass();
2693 for (int I = 0; I < N; I++)
2698 if (MI2.getOpcode() == Hexagon::V6_vS32b_pi)
2730 return isInt<4>(Count);
2740 return isInt<3>(Count);
2759 case Hexagon::PS_vstorerq_ai:
2760 case Hexagon::PS_vstorerv_ai:
2761 case Hexagon::PS_vstorerw_ai:
2762 case Hexagon::PS_vstorerw_nt_ai:
2763 case Hexagon::PS_vloadrq_ai:
2764 case Hexagon::PS_vloadrv_ai:
2765 case Hexagon::PS_vloadrw_ai:
2766 case Hexagon::PS_vloadrw_nt_ai:
2767 case Hexagon::V6_vL32b_ai:
2768 case Hexagon::V6_vS32b_ai:
2769 case Hexagon::V6_vS32b_pred_ai:
2770 case Hexagon::V6_vS32b_npred_ai:
2771 case Hexagon::V6_vS32b_qpred_ai:
2772 case Hexagon::V6_vS32b_nqpred_ai:
2773 case Hexagon::V6_vS32b_new_ai:
2774 case Hexagon::V6_vS32b_new_pred_ai:
2775 case Hexagon::V6_vS32b_new_npred_ai:
2776 case Hexagon::V6_vS32b_nt_pred_ai:
2777 case Hexagon::V6_vS32b_nt_npred_ai:
2778 case Hexagon::V6_vS32b_nt_new_ai:
2779 case Hexagon::V6_vS32b_nt_new_pred_ai:
2780 case Hexagon::V6_vS32b_nt_new_npred_ai:
2781 case Hexagon::V6_vS32b_nt_qpred_ai:
2782 case Hexagon::V6_vS32b_nt_nqpred_ai:
2783 case Hexagon::V6_vL32b_nt_ai:
2784 case Hexagon::V6_vS32b_nt_ai:
2785 case Hexagon::V6_vL32Ub_ai:
2786 case Hexagon::V6_vS32Ub_ai:
2787 case Hexagon::V6_vL32b_cur_ai:
2788 case Hexagon::V6_vL32b_tmp_ai:
2789 case Hexagon::V6_vL32b_pred_ai:
2790 case Hexagon::V6_vL32b_npred_ai:
2791 case Hexagon::V6_vL32b_cur_pred_ai:
2792 case Hexagon::V6_vL32b_cur_npred_ai:
2793 case Hexagon::V6_vL32b_tmp_pred_ai:
2794 case Hexagon::V6_vL32b_tmp_npred_ai:
2795 case Hexagon::V6_vL32b_nt_cur_ai:
2796 case Hexagon::V6_vL32b_nt_tmp_ai:
2797 case Hexagon::V6_vL32b_nt_pred_ai:
2798 case Hexagon::V6_vL32b_nt_npred_ai:
2799 case Hexagon::V6_vL32b_nt_cur_pred_ai:
2800 case Hexagon::V6_vL32b_nt_cur_npred_ai:
2801 case Hexagon::V6_vL32b_nt_tmp_pred_ai:
2802 case Hexagon::V6_vL32b_nt_tmp_npred_ai:
2803 case Hexagon::V6_vgathermh_pseudo:
2804 case Hexagon::V6_vgathermw_pseudo:
2805 case Hexagon::V6_vgathermhw_pseudo:
2806 case Hexagon::V6_vgathermhq_pseudo:
2807 case Hexagon::V6_vgathermwq_pseudo:
2808 case Hexagon::V6_vgathermhwq_pseudo: {
2809 unsigned VectorSize = TRI->getSpillSize(Hexagon::HvxVRRegClass);
2811 if (Offset & (VectorSize-1))
2816 case Hexagon::J2_loop0i:
2817 case Hexagon::J2_loop1i:
2818 return isUInt<10>(Offset);
2820 case Hexagon::S4_storeirb_io:
2821 case Hexagon::S4_storeirbt_io:
2822 case Hexagon::S4_storeirbf_io:
2823 return isUInt<6>(Offset);
2825 case Hexagon::S4_storeirh_io:
2826 case Hexagon::S4_storeirht_io:
2827 case Hexagon::S4_storeirhf_io:
2828 return isShiftedUInt<6,1>(Offset);
2830 case Hexagon::S4_storeiri_io:
2831 case Hexagon::S4_storeirit_io:
2832 case Hexagon::S4_storeirif_io:
2833 return isShiftedUInt<6,2>(Offset);
2835 case Hexagon::A4_cmpbeqi:
2836 return isUInt<8>(Offset);
2837 case Hexagon::A4_cmpbgti:
2845 case Hexagon::L2_loadri_io:
2846 case Hexagon::S2_storeri_io:
2850 case Hexagon::L2_loadrd_io:
2851 case Hexagon::S2_storerd_io:
2855 case Hexagon::L2_loadrh_io:
2856 case Hexagon::L2_loadruh_io:
2857 case Hexagon::S2_storerh_io:
2858 case Hexagon::S2_storerf_io:
2862 case Hexagon::L2_loadrb_io:
2863 case Hexagon::L2_loadrub_io:
2864 case Hexagon::S2_storerb_io:
2868 case Hexagon::A2_addi:
2872 case Hexagon::L4_iadd_memopw_io:
2873 case Hexagon::L4_isub_memopw_io:
2874 case Hexagon::L4_add_memopw_io:
2875 case Hexagon::L4_sub_memopw_io:
2876 case Hexagon::L4_iand_memopw_io:
2877 case Hexagon::L4_ior_memopw_io:
2878 case Hexagon::L4_and_memopw_io:
2879 case Hexagon::L4_or_memopw_io:
2882 case Hexagon::L4_iadd_memoph_io:
2883 case Hexagon::L4_isub_memoph_io:
2884 case Hexagon::L4_add_memoph_io:
2885 case Hexagon::L4_sub_memoph_io:
2886 case Hexagon::L4_iand_memoph_io:
2887 case Hexagon::L4_ior_memoph_io:
2888 case Hexagon::L4_and_memoph_io:
2889 case Hexagon::L4_or_memoph_io:
2892 case Hexagon::L4_iadd_memopb_io:
2893 case Hexagon::L4_isub_memopb_io:
2894 case Hexagon::L4_add_memopb_io:
2895 case Hexagon::L4_sub_memopb_io:
2896 case Hexagon::L4_iand_memopb_io:
2897 case Hexagon::L4_ior_memopb_io:
2898 case Hexagon::L4_and_memopb_io:
2899 case Hexagon::L4_or_memopb_io:
2904 case Hexagon::STriw_pred:
2905 case Hexagon::LDriw_pred:
2906 case Hexagon::STriw_ctr:
2907 case Hexagon::LDriw_ctr:
2910 case Hexagon::PS_fi:
2911 case Hexagon::PS_fia:
2912 case Hexagon::INLINEASM:
2915 case Hexagon::L2_ploadrbt_io:
2916 case Hexagon::L2_ploadrbf_io:
2917 case Hexagon::L2_ploadrubt_io:
2918 case Hexagon::L2_ploadrubf_io:
2919 case Hexagon::S2_pstorerbt_io:
2920 case Hexagon::S2_pstorerbf_io:
2921 return isUInt<6>(Offset);
2923 case Hexagon::L2_ploadrht_io:
2924 case Hexagon::L2_ploadrhf_io:
2925 case Hexagon::L2_ploadruht_io:
2926 case Hexagon::L2_ploadruhf_io:
2927 case Hexagon::S2_pstorerht_io:
2928 case Hexagon::S2_pstorerhf_io:
2929 return isShiftedUInt<6,1>(Offset);
2931 case Hexagon::L2_ploadrit_io:
2932 case Hexagon::L2_ploadrif_io:
2933 case Hexagon::S2_pstorerit_io:
2934 case Hexagon::S2_pstorerif_io:
2935 return isShiftedUInt<6,2>(Offset);
2937 case Hexagon::L2_ploadrdt_io:
2938 case Hexagon::L2_ploadrdf_io:
2939 case Hexagon::S2_pstorerdt_io:
2940 case Hexagon::S2_pstorerdf_io:
2941 return isShiftedUInt<6,3>(Offset);
2943 case Hexagon::L2_loadbsw2_io:
2944 case Hexagon::L2_loadbzw2_io:
2945 return isShiftedInt<11,1>(Offset);
2947 case Hexagon::L2_loadbsw4_io:
2948 case Hexagon::L2_loadbzw4_io:
2949 return isShiftedInt<11,2>(Offset);
2952 dbgs() << "Failed Opcode is : " << Opcode << " (" << getName(Opcode)
2955 "Please define it in the above switch statement!");
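// The range predicates used throughout this switch come from
// llvm/Support/MathExtras.h: isShiftedUInt<N, S>(x) accepts exactly the
// multiples of 1<<S in [0, ((1<<N) - 1) << S], i.e. an unsigned N-bit field
// scaled by 2^S. A quick illustration (hypothetical values, exposition only):
//
//   isShiftedUInt<6, 2>(252);   // true:  252 == 63 << 2, largest #u6:2 offset
//   isShiftedUInt<6, 2>(253);   // false: not a multiple of 4
//   isShiftedUInt<6, 2>(256);   // false: 64 << 2 needs a 7-bit field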
2985 switch (MI.getOpcode()) {
2987 case Hexagon::L2_loadrub_io:
2988 case Hexagon::L4_loadrub_ur:
2989 case Hexagon::L4_loadrub_ap:
2990 case Hexagon::L2_loadrub_pr:
2991 case Hexagon::L2_loadrub_pbr:
2992 case Hexagon::L2_loadrub_pi:
2993 case Hexagon::L2_loadrub_pci:
2994 case Hexagon::L2_loadrub_pcr:
2995 case Hexagon::L2_loadbzw2_io:
2996 case Hexagon::L4_loadbzw2_ur:
2997 case Hexagon::L4_loadbzw2_ap:
2998 case Hexagon::L2_loadbzw2_pr:
2999 case Hexagon::L2_loadbzw2_pbr:
3000 case Hexagon::L2_loadbzw2_pi:
3001 case Hexagon::L2_loadbzw2_pci:
3002 case Hexagon::L2_loadbzw2_pcr:
3003 case Hexagon::L2_loadbzw4_io:
3004 case Hexagon::L4_loadbzw4_ur:
3005 case Hexagon::L4_loadbzw4_ap:
3006 case Hexagon::L2_loadbzw4_pr:
3007 case Hexagon::L2_loadbzw4_pbr:
3008 case Hexagon::L2_loadbzw4_pi:
3009 case Hexagon::L2_loadbzw4_pci:
3010 case Hexagon::L2_loadbzw4_pcr:
3011 case Hexagon::L4_loadrub_rr:
3012 case Hexagon::L2_ploadrubt_io:
3013 case Hexagon::L2_ploadrubt_pi:
3014 case Hexagon::L2_ploadrubf_io:
3015 case Hexagon::L2_ploadrubf_pi:
3016 case Hexagon::L2_ploadrubtnew_io:
3017 case Hexagon::L2_ploadrubfnew_io:
3018 case Hexagon::L4_ploadrubt_rr:
3019 case Hexagon::L4_ploadrubf_rr:
3020 case Hexagon::L4_ploadrubtnew_rr:
3021 case Hexagon::L4_ploadrubfnew_rr:
3022 case Hexagon::L2_ploadrubtnew_pi:
3023 case Hexagon::L2_ploadrubfnew_pi:
3024 case Hexagon::L4_ploadrubt_abs:
3025 case Hexagon::L4_ploadrubf_abs:
3026 case Hexagon::L4_ploadrubtnew_abs:
3027 case Hexagon::L4_ploadrubfnew_abs:
3028 case Hexagon::L2_loadrubgp:
3030 case Hexagon::L2_loadruh_io:
3031 case Hexagon::L4_loadruh_ur:
3032 case Hexagon::L4_loadruh_ap:
3033 case Hexagon::L2_loadruh_pr:
3034 case Hexagon::L2_loadruh_pbr:
3035 case Hexagon::L2_loadruh_pi:
3036 case Hexagon::L2_loadruh_pci:
3037 case Hexagon::L2_loadruh_pcr:
3038 case Hexagon::L4_loadruh_rr:
3039 case Hexagon::L2_ploadruht_io:
3040 case Hexagon::L2_ploadruht_pi:
3041 case Hexagon::L2_ploadruhf_io:
3042 case Hexagon::L2_ploadruhf_pi:
3043 case Hexagon::L2_ploadruhtnew_io:
3044 case Hexagon::L2_ploadruhfnew_io:
3045 case Hexagon::L4_ploadruht_rr:
3046 case Hexagon::L4_ploadruhf_rr:
3047 case Hexagon::L4_ploadruhtnew_rr:
3048 case Hexagon::L4_ploadruhfnew_rr:
3049 case Hexagon::L2_ploadruhtnew_pi:
3050 case Hexagon::L2_ploadruhfnew_pi:
3051 case Hexagon::L4_ploadruht_abs:
3052 case Hexagon::L4_ploadruhf_abs:
3053 case Hexagon::L4_ploadruhtnew_abs:
3054 case Hexagon::L4_ploadruhfnew_abs:
3055 case Hexagon::L2_loadruhgp:
3076 OffsetIsScalable = false;
3078 if (!BaseOp || !BaseOp->isReg())
3087 if (Second.mayStore() && First.getOpcode() == Hexagon::S2_allocframe) {
3089 if (Op.isReg() && Op.isUse() && Op.getReg() == Hexagon::R29)
3099 if (!Stored.isReg())
3101 for (unsigned i = 0, e = First.getNumOperands(); i < e; ++i) {
3103 if (Op.isReg() && Op.isDef() && Op.getReg() == Stored.getReg())
3112 return Opc == Hexagon::PS_call_nr || Opc == Hexagon::PS_callr_nr;
3128 if (Hexagon::getRegForm(MI.getOpcode()) >= 0)
3131 if (MI.getDesc().mayLoad() || MI.getDesc().mayStore()) {
3138 NonExtOpcode = Hexagon::changeAddrMode_abs_io(MI.getOpcode());
3144 NonExtOpcode = Hexagon::changeAddrMode_io_rr(MI.getOpcode());
3147 NonExtOpcode = Hexagon::changeAddrMode_ur_rr(MI.getOpcode());
3152 if (NonExtOpcode < 0)
3160 return Hexagon::getRealHWInstr(MI.getOpcode(),
3161 Hexagon::InstrType_Pseudo) >= 0;
3218 if (!MII->isBundle())
3221 for (++MII; MII != MIE && MII->isInsideBundle(); ++MII) {
3233 if (MO.isRegMask() && MO.clobbersPhysReg(PredReg))
3235 if (MO.isReg() && MO.isDef() && MO.isImplicit() && (MO.getReg() == PredReg))
3241 switch (MI.getOpcode()) {
3242 case Hexagon::A4_addp_c:
3243 case Hexagon::A4_subp_c:
3244 case Hexagon::A4_tlbmatch:
3245 case Hexagon::A5_ACS:
3246 case Hexagon::F2_sfinvsqrta:
3247 case Hexagon::F2_sfrecipa:
3248 case Hexagon::J2_endloop0:
3249 case Hexagon::J2_endloop01:
3250 case Hexagon::J2_ploop1si:
3251 case Hexagon::J2_ploop1sr:
3252 case Hexagon::J2_ploop2si:
3253 case Hexagon::J2_ploop2sr:
3254 case Hexagon::J2_ploop3si:
3255 case Hexagon::J2_ploop3sr:
3256 case Hexagon::S2_cabacdecbin:
3257 case Hexagon::S2_storew_locked:
3258 case Hexagon::S4_stored_locked:
3265 return Opcode == Hexagon::J2_jumpt ||
3266 Opcode == Hexagon::J2_jumptpt ||
3267 Opcode == Hexagon::J2_jumpf ||
3268 Opcode == Hexagon::J2_jumpfpt ||
3269 Opcode == Hexagon::J2_jumptnew ||
3270 Opcode == Hexagon::J2_jumpfnew ||
3271 Opcode == Hexagon::J2_jumptnewpt ||
3272 Opcode == Hexagon::J2_jumpfnewpt;
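// The opcodes enumerated above form the conditional-jump family: J2_jumpt and
// J2_jumpf are the base predicated forms, the *new variants consume a
// predicate produced in the same packet (.new), and the pt-suffixed forms
// additionally carry a static taken-branch hint.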
3301 unsigned BasePos = 0, OffsetPos = 0;
3311 if (!OffsetOp.isImm())
3324 unsigned &BasePos, unsigned &OffsetPos) const {
3332 } else if (MI.mayStore()) {
3335 } else if (MI.mayLoad()) {
3350 if (!MI.getOperand(BasePos).isReg() || !MI.getOperand(OffsetPos).isImm())
3389 while (I->isDebugInstr()) {
3394 if (!isUnpredicatedTerminator(*I))
3403 if (&*I != LastInst && !I->isBundle() && isUnpredicatedTerminator(*I)) {
3404 if (!SecondLastInst) {
3405 SecondLastInst = &*I;
3427 Register DstReg, SrcReg, Src1Reg, Src2Reg;
3429 switch (MI.getOpcode()) {
3438 case Hexagon::C2_cmpeq:
3439 case Hexagon::C2_cmpgt:
3440 case Hexagon::C2_cmpgtu:
3441 DstReg = MI.getOperand(0).getReg();
3442 Src1Reg = MI.getOperand(1).getReg();
3443 Src2Reg = MI.getOperand(2).getReg();
3444 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3445 (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3449 case Hexagon::C2_cmpeqi:
3450 case Hexagon::C2_cmpgti:
3451 case Hexagon::C2_cmpgtui:
3453 DstReg = MI.getOperand(0).getReg();
3454 SrcReg = MI.getOperand(1).getReg();
3455 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3456 (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3458 ((isUInt<5>(MI.getOperand(2).getImm())) ||
3459 (MI.getOperand(2).getImm() == -1)))
3462 case Hexagon::A2_tfr:
3464 DstReg = MI.getOperand(0).getReg();
3465 SrcReg = MI.getOperand(1).getReg();
3469 case Hexagon::A2_tfrsi:
3473 DstReg = MI.getOperand(0).getReg();
3477 case Hexagon::S2_tstbit_i:
3478 DstReg = MI.getOperand(0).getReg();
3479 Src1Reg = MI.getOperand(1).getReg();
3480 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
3481 (Hexagon::P0 == DstReg || Hexagon::P1 == DstReg) &&
3482 MI.getOperand(2).isImm() &&
3490 case Hexagon::J2_jumptnew:
3491 case Hexagon::J2_jumpfnew:
3492 case Hexagon::J2_jumptnewpt:
3493 case Hexagon::J2_jumpfnewpt:
3494 Src1Reg = MI.getOperand(0).getReg();
3495 if (Hexagon::PredRegsRegClass.contains(Src1Reg) &&
3496 (Hexagon::P0 == Src1Reg || Hexagon::P1 == Src1Reg))
3503 case Hexagon::J2_jump:
3504 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
3505 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
3517 if ((GA.getOpcode() != Hexagon::C2_cmpeqi) ||
3518 (GB.getOpcode() != Hexagon::J2_jumptnew))
3523 if (DestReg != Hexagon::P0 && DestReg != Hexagon::P1)
3531 return DestReg == Hexagon::P0 ? Hexagon::J4_cmpeqn1_tp0_jump_nt
3532 : Hexagon::J4_cmpeqn1_tp1_jump_nt;
3535 return DestReg == Hexagon::P0 ? Hexagon::J4_cmpeqi_tp0_jump_nt
3536 : Hexagon::J4_cmpeqi_tp1_jump_nt;
3541 bool ForBigCore) const {
3549 static const std::map<unsigned, unsigned> DupMap = {
3550 {Hexagon::A2_add, Hexagon::dup_A2_add},
3551 {Hexagon::A2_addi, Hexagon::dup_A2_addi},
3552 {Hexagon::A2_andir, Hexagon::dup_A2_andir},
3553 {Hexagon::A2_combineii, Hexagon::dup_A2_combineii},
3554 {Hexagon::A2_sxtb, Hexagon::dup_A2_sxtb},
3555 {Hexagon::A2_sxth, Hexagon::dup_A2_sxth},
3556 {Hexagon::A2_tfr, Hexagon::dup_A2_tfr},
3557 {Hexagon::A2_tfrsi, Hexagon::dup_A2_tfrsi},
3558 {Hexagon::A2_zxtb, Hexagon::dup_A2_zxtb},
3559 {Hexagon::A2_zxth, Hexagon::dup_A2_zxth},
3560 {Hexagon::A4_combineii, Hexagon::dup_A4_combineii},
3561 {Hexagon::A4_combineir, Hexagon::dup_A4_combineir},
3562 {Hexagon::A4_combineri, Hexagon::dup_A4_combineri},
3563 {Hexagon::C2_cmoveif, Hexagon::dup_C2_cmoveif},
3564 {Hexagon::C2_cmoveit, Hexagon::dup_C2_cmoveit},
3565 {Hexagon::C2_cmovenewif, Hexagon::dup_C2_cmovenewif},
3566 {Hexagon::C2_cmovenewit, Hexagon::dup_C2_cmovenewit},
3567 {Hexagon::C2_cmpeqi, Hexagon::dup_C2_cmpeqi},
3568 {Hexagon::L2_deallocframe, Hexagon::dup_L2_deallocframe},
3569 {Hexagon::L2_loadrb_io, Hexagon::dup_L2_loadrb_io},
3570 {Hexagon::L2_loadrd_io, Hexagon::dup_L2_loadrd_io},
3571 {Hexagon::L2_loadrh_io, Hexagon::dup_L2_loadrh_io},
3572 {Hexagon::L2_loadri_io, Hexagon::dup_L2_loadri_io},
3573 {Hexagon::L2_loadrub_io, Hexagon::dup_L2_loadrub_io},
3574 {Hexagon::L2_loadruh_io, Hexagon::dup_L2_loadruh_io},
3575 {Hexagon::S2_allocframe, Hexagon::dup_S2_allocframe},
3576 {Hexagon::S2_storerb_io, Hexagon::dup_S2_storerb_io},
3577 {Hexagon::S2_storerd_io, Hexagon::dup_S2_storerd_io},
3578 {Hexagon::S2_storerh_io, Hexagon::dup_S2_storerh_io},
3579 {Hexagon::S2_storeri_io, Hexagon::dup_S2_storeri_io},
3580 {Hexagon::S4_storeirb_io, Hexagon::dup_S4_storeirb_io},
3581 {Hexagon::S4_storeiri_io, Hexagon::dup_S4_storeiri_io},
3583 unsigned OpNum = MI.getOpcode();
3586 auto Iter = DupMap.find(OpNum);
3587 if (Iter != DupMap.end())
3588 return Iter->second;
3590 for (const auto &Iter : DupMap)
3591 if (Iter.second == OpNum)
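// Duplex lookup sketch (illustrative; names are hypothetical): the map above
// is consulted in both directions, forward to find the dup_* twin of a regular
// opcode and backward to recover the regular opcode from a dup_* one.
//
//   unsigned Opc = MI.getOpcode();
//   if (auto It = DupMap.find(Opc); It != DupMap.end())
//     return It->second;            // regular opcode -> dup_* form
//   for (const auto &Entry : DupMap)
//     if (Entry.second == Opc)
//       return Entry.first;         // dup_* form -> regular opcode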
3598 enum Hexagon::PredSense inPredSense;
3599 inPredSense = invertPredicate ? Hexagon::PredSense_false :
3600 Hexagon::PredSense_true;
3601 int CondOpcode = Hexagon::getPredOpcode(Opc, inPredSense);
3602 if (CondOpcode >= 0)
3610 switch (MI.getOpcode()) {
3612 case Hexagon::V6_vL32b_pi:
3613 return Hexagon::V6_vL32b_cur_pi;
3614 case Hexagon::V6_vL32b_ai:
3615 return Hexagon::V6_vL32b_cur_ai;
3616 case Hexagon::V6_vL32b_nt_pi:
3617 return Hexagon::V6_vL32b_nt_cur_pi;
3618 case Hexagon::V6_vL32b_nt_ai:
3619 return Hexagon::V6_vL32b_nt_cur_ai;
3620 case Hexagon::V6_vL32b_ppu:
3621 return Hexagon::V6_vL32b_cur_ppu;
3622 case Hexagon::V6_vL32b_nt_ppu:
3623 return Hexagon::V6_vL32b_nt_cur_ppu;
3630 switch (MI.getOpcode()) {
3632 case Hexagon::V6_vL32b_cur_pi:
3633 return Hexagon::V6_vL32b_pi;
3634 case Hexagon::V6_vL32b_cur_ai:
3635 return Hexagon::V6_vL32b_ai;
3636 case Hexagon::V6_vL32b_nt_cur_pi:
3637 return Hexagon::V6_vL32b_nt_pi;
3638 case Hexagon::V6_vL32b_nt_cur_ai:
3639 return Hexagon::V6_vL32b_nt_ai;
3640 case Hexagon::V6_vL32b_cur_ppu:
3641 return Hexagon::V6_vL32b_ppu;
3642 case Hexagon::V6_vL32b_nt_cur_ppu:
3643 return Hexagon::V6_vL32b_nt_ppu;
3731 int NVOpcode = Hexagon::getNewValueOpcode(MI.getOpcode());
3735 switch (MI.getOpcode()) {
3738 std::to_string(MI.getOpcode()));
3739 case Hexagon::S4_storerb_ur:
3740 return Hexagon::S4_storerbnew_ur;
3742 case Hexagon::S2_storerb_pci:
3743 return Hexagon::S2_storerb_pci;
3745 case Hexagon::S2_storeri_pci:
3746 return Hexagon::S2_storeri_pci;
3748 case Hexagon::S2_storerh_pci:
3749 return Hexagon::S2_storerh_pci;
3751 case Hexagon::S2_storerd_pci:
3752 return Hexagon::S2_storerd_pci;
3754 case Hexagon::S2_storerf_pci:
3755 return Hexagon::S2_storerf_pci;
3757 case Hexagon::V6_vS32b_ai:
3758 return Hexagon::V6_vS32b_new_ai;
3760 case Hexagon::V6_vS32b_pi:
3761 return Hexagon::V6_vS32b_new_pi;
3786 if (BrTarget.isMBB()) {
3788 Taken = getEdgeProbability(Src, Dst) >= OneHalf;
3801 bool SawCond = false, Bad = false;
3805 if (I.isConditionalBranch()) {
3812 if (I.isUnconditionalBranch() && !SawCond) {
3820 if (NextIt == B.instr_end()) {
3823 if (!B.isLayoutSuccessor(SB))
3825 Taken = getEdgeProbability(Src, SB) < OneHalf;
3829 assert(NextIt->isUnconditionalBranch());
3838 Taken = BT && getEdgeProbability(Src, BT) < OneHalf;
3845 switch (MI.getOpcode()) {
3846 case Hexagon::J2_jumpt:
3847 return Taken ? Hexagon::J2_jumptnewpt : Hexagon::J2_jumptnew;
3848 case Hexagon::J2_jumpf:
3849 return Taken ? Hexagon::J2_jumpfnewpt : Hexagon::J2_jumpfnew;
3859 switch (MI.getOpcode()) {
3861 case Hexagon::J2_jumpt:
3862 case Hexagon::J2_jumpf:
3866 int NewOpcode = Hexagon::getPredNewOpcode(MI.getOpcode());
3873 int NewOp = MI.getOpcode();
3875 NewOp = Hexagon::getPredOldOpcode(NewOp);
3879 if (!Subtarget.hasFeature(Hexagon::ArchV60)) {
3881 case Hexagon::J2_jumptpt:
3882 NewOp = Hexagon::J2_jumpt;
3884 case Hexagon::J2_jumpfpt:
3885 NewOp = Hexagon::J2_jumpf;
3887 case Hexagon::J2_jumprtpt:
3888 NewOp = Hexagon::J2_jumprt;
3890 case Hexagon::J2_jumprfpt:
3891 NewOp = Hexagon::J2_jumprf;
3896 "Couldn't change predicate new instruction to its old form.");
3900 NewOp = Hexagon::getNonNVStore(NewOp);
3901 assert(NewOp >= 0 && "Couldn't change new-value store to its old form.");
3909 case Hexagon::J2_jumpfpt:
3910 return Hexagon::J2_jumpf;
3911 case Hexagon::J2_jumptpt:
3912 return Hexagon::J2_jumpt;
3913 case Hexagon::J2_jumprfpt:
3914 return Hexagon::J2_jumprf;
3915 case Hexagon::J2_jumprtpt:
3916 return Hexagon::J2_jumprt;
3925 Register DstReg, SrcReg, Src1Reg, Src2Reg;
3928 switch (MI.getOpcode()) {
3936 case Hexagon::L2_loadri_io:
3937 case Hexagon::dup_L2_loadri_io:
3938 DstReg = MI.getOperand(0).getReg();
3939 SrcReg = MI.getOperand(1).getReg();
3943 if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
3945 MI.getOperand(2).isImm() &&
3946 isShiftedUInt<5,2>(MI.getOperand(2).getImm()))
3950 (MI.getOperand(2).isImm() &&
3951 isShiftedUInt<4,2>(MI.getOperand(2).getImm())))
3955 case Hexagon::L2_loadrub_io:
3956 case Hexagon::dup_L2_loadrub_io:
3958 DstReg = MI.getOperand(0).getReg();
3959 SrcReg = MI.getOperand(1).getReg();
3961 MI.getOperand(2).isImm() && isUInt<4>(MI.getOperand(2).getImm()))
3974 case Hexagon::L2_loadrh_io:
3975 case Hexagon::L2_loadruh_io:
3976 case Hexagon::dup_L2_loadrh_io:
3977 case Hexagon::dup_L2_loadruh_io:
3979 DstReg = MI.getOperand(0).getReg();
3980 SrcReg = MI.getOperand(1).getReg();
3982 MI.getOperand(2).isImm() &&
3983 isShiftedUInt<3,1>(MI.getOperand(2).getImm()))
3986 case Hexagon::L2_loadrb_io:
3987 case Hexagon::dup_L2_loadrb_io:
3989 DstReg = MI.getOperand(0).getReg();
3990 SrcReg = MI.getOperand(1).getReg();
3992 MI.getOperand(2).isImm() &&
3993 isUInt<3>(MI.getOperand(2).getImm()))
3996 case Hexagon::L2_loadrd_io:
3997 case Hexagon::dup_L2_loadrd_io:
3999 DstReg = MI.getOperand(0).getReg();
4000 SrcReg = MI.getOperand(1).getReg();
4002 Hexagon::IntRegsRegClass.contains(SrcReg) &&
4004 MI.getOperand(2).isImm() &&
4005 isShiftedUInt<5,3>(MI.getOperand(2).getImm()))
4010 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4:
4011 case Hexagon::RESTORE_DEALLOC_RET_JMP_V4_PIC:
4012 case Hexagon::L4_return:
4013 case Hexagon::L2_deallocframe:
4014 case Hexagon::dup_L2_deallocframe:
4016 case Hexagon::EH_RETURN_JMPR:
4017 case Hexagon::PS_jmpret:
4018 case Hexagon::SL2_jumpr31:
4021 DstReg = MI.getOperand(0).getReg();
4022 if (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg))
4025 case Hexagon::PS_jmprett:
4026 case Hexagon::PS_jmpretf:
4027 case Hexagon::PS_jmprettnewpt:
4028 case Hexagon::PS_jmpretfnewpt:
4029 case Hexagon::PS_jmprettnew:
4030 case Hexagon::PS_jmpretfnew:
4031 case Hexagon::SL2_jumpr31_t:
4032 case Hexagon::SL2_jumpr31_f:
4033 case Hexagon::SL2_jumpr31_tnew:
4034 case Hexagon::SL2_jumpr31_fnew:
4035 DstReg = MI.getOperand(1).getReg();
4036 SrcReg = MI.getOperand(0).getReg();
4038 if ((Hexagon::PredRegsRegClass.contains(SrcReg) &&
4039 (Hexagon::P0 == SrcReg)) &&
4040 (Hexagon::IntRegsRegClass.contains(DstReg) && (Hexagon::R31 == DstReg)))
4043 case Hexagon::L4_return_t:
4044 case Hexagon::L4_return_f:
4045 case Hexagon::L4_return_tnew_pnt:
4046 case Hexagon::L4_return_fnew_pnt:
4047 case Hexagon::L4_return_tnew_pt:
4048 case Hexagon::L4_return_fnew_pt:
4050 SrcReg = MI.getOperand(0).getReg();
4051 if (Hexagon::PredRegsRegClass.contains(SrcReg) && (Hexagon::P0 == SrcReg))
4059 case Hexagon::S2_storeri_io:
4060 case Hexagon::dup_S2_storeri_io:
4063 Src1Reg = MI.getOperand(0).getReg();
4064 Src2Reg = MI.getOperand(2).getReg();
4065 if (Hexagon::IntRegsRegClass.contains(Src1Reg) &&
4068 isShiftedUInt<5,2>(MI.getOperand(1).getImm()))
4072 MI.getOperand(1).isImm() &&
4073 isShiftedUInt<4,2>(MI.getOperand(1).getImm()))
4076 case Hexagon::S2_storerb_io:
4077 case Hexagon::dup_S2_storerb_io:
4079 Src1Reg = MI.getOperand(0).getReg();
4080 Src2Reg = MI.getOperand(2).getReg();
4082 MI.getOperand(1).isImm() && isUInt<4>(MI.getOperand(1).getImm()))
4094 case Hexagon::S2_storerh_io:
4095 case Hexagon::dup_S2_storerh_io:
4097 Src1Reg = MI.getOperand(0).getReg();
4098 Src2Reg = MI.getOperand(2).getReg();
4100 MI.getOperand(1).isImm() &&
4101 isShiftedUInt<3,1>(MI.getOperand(1).getImm()))
4104 case Hexagon::S2_storerd_io:
4105 case Hexagon::dup_S2_storerd_io:
4107 Src1Reg = MI.getOperand(0).getReg();
4108 Src2Reg = MI.getOperand(2).getReg();
4110 Hexagon::IntRegsRegClass.contains(Src1Reg) &&
4112 isShiftedInt<6,3>(MI.getOperand(1).getImm()))
4115 case Hexagon::S4_storeiri_io:
4116 case Hexagon::dup_S4_storeiri_io:
4118 Src1Reg = MI.getOperand(0).getReg();
4120 isShiftedUInt<4,2>(MI.getOperand(1).getImm()) &&
4121 MI.getOperand(2).isImm() && isUInt<1>(MI.getOperand(2).getImm()))
4124 case Hexagon::S4_storeirb_io:
4125 case Hexagon::dup_S4_storeirb_io:
4127 Src1Reg = MI.getOperand(0).getReg();
4129 MI.getOperand(1).isImm() && isUInt<4>(MI.getOperand(1).getImm()) &&
4130 MI.getOperand(2).isImm() && isUInt<1>(MI.getOperand(2).getImm()))
4133 case Hexagon::S2_allocframe:
4134 case Hexagon::dup_S2_allocframe:
4135 if (MI.getOperand(2).isImm() &&
4136 isShiftedUInt<5,3>(MI.getOperand(2).getImm()))
4157 case Hexagon::A2_addi:
4158 case Hexagon::dup_A2_addi:
4159 DstReg = MI.getOperand(0).getReg();
4160 SrcReg = MI.getOperand(1).getReg();
4163 if (Hexagon::IntRegsRegClass.contains(SrcReg) &&
4165 isShiftedUInt<6,2>(MI.getOperand(2).getImm()))
4168 if ((DstReg == SrcReg) && MI.getOperand(2).isImm() &&
4169 isInt<7>(MI.getOperand(2).getImm()))
4174 ((MI.getOperand(2).getImm() == 1) ||
4175 (MI.getOperand(2).getImm() == -1)))
4179 case Hexagon::A2_add:
4180 case Hexagon::dup_A2_add:
4182 DstReg = MI.getOperand(0).getReg();
4183 Src1Reg = MI.getOperand(1).getReg();
4184 Src2Reg = MI.getOperand(2).getReg();
4189 case Hexagon::A2_andir:
4190 case Hexagon::dup_A2_andir:
4194 DstReg = MI.getOperand(0).getReg();
4195 SrcReg = MI.getOperand(1).getReg();
4197 MI.getOperand(2).isImm() &&
4198 ((MI.getOperand(2).getImm() == 1) ||
4199 (MI.getOperand(2).getImm() == 255)))
4202 case Hexagon::A2_tfr:
4203 case Hexagon::dup_A2_tfr:
4205 DstReg = MI.getOperand(0).getReg();
4206 SrcReg = MI.getOperand(1).getReg();
4210 case Hexagon::A2_tfrsi:
4211 case Hexagon::dup_A2_tfrsi:
4216 DstReg = MI.getOperand(0).getReg();
4220 case Hexagon::C2_cmoveit:
4221 case Hexagon::C2_cmovenewit:
4222 case Hexagon::C2_cmoveif:
4223 case Hexagon::C2_cmovenewif:
4224 case Hexagon::dup_C2_cmoveit:
4225 case Hexagon::dup_C2_cmovenewit:
4226 case Hexagon::dup_C2_cmoveif:
4227 case Hexagon::dup_C2_cmovenewif:
4231 DstReg = MI.getOperand(0).getReg();
4232 SrcReg = MI.getOperand(1).getReg();
4234 Hexagon::PredRegsRegClass.contains(SrcReg) && Hexagon::P0 == SrcReg &&
4235 MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0)
4238 case Hexagon::C2_cmpeqi:
4239 case Hexagon::dup_C2_cmpeqi:
4241 DstReg = MI.getOperand(0).getReg();
4242 SrcReg = MI.getOperand(1).getReg();
4243 if (Hexagon::PredRegsRegClass.contains(DstReg) &&
4245 MI.getOperand(2).isImm() && isUInt<2>(MI.getOperand(2).getImm()))
4248 case Hexagon::A2_combineii:
4249 case Hexagon::A4_combineii:
4250 case Hexagon::dup_A2_combineii:
4251 case Hexagon::dup_A4_combineii:
4253 DstReg = MI.getOperand(0).getReg();
4255 ((MI.getOperand(1).isImm() && isUInt<2>(MI.getOperand(1).getImm())) ||
4256 (MI.getOperand(1).isGlobal() &&
4257 isUInt<2>(MI.getOperand(1).getOffset()))) &&
4258 ((MI.getOperand(2).isImm() && isUInt<2>(MI.getOperand(2).getImm())) ||
4259 (MI.getOperand(2).isGlobal() &&
4260 isUInt<2>(MI.getOperand(2).getOffset()))))
4263 case Hexagon::A4_combineri:
4264 case Hexagon::dup_A4_combineri:
4267 DstReg = MI.getOperand(0).getReg();
4268 SrcReg = MI.getOperand(1).getReg();
4270 ((MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0) ||
4271 (MI.getOperand(2).isGlobal() && MI.getOperand(2).getOffset() == 0)))
4274 case Hexagon::A4_combineir:
4275 case Hexagon::dup_A4_combineir:
4277 DstReg = MI.getOperand(0).getReg();
4278 SrcReg = MI.getOperand(2).getReg();
4280 ((MI.getOperand(1).isImm() && MI.getOperand(1).getImm() == 0) ||
4281 (MI.getOperand(1).isGlobal() && MI.getOperand(1).getOffset() == 0)))
4284 case Hexagon::A2_sxtb:
4285 case Hexagon::A2_sxth:
4286 case Hexagon::A2_zxtb:
4287 case Hexagon::A2_zxth:
4288 case Hexagon::dup_A2_sxtb:
4289 case Hexagon::dup_A2_sxth:
4290 case Hexagon::dup_A2_zxtb:
4291 case Hexagon::dup_A2_zxth:
4293 DstReg = MI.getOperand(0).getReg();
4294 SrcReg = MI.getOperand(1).getReg();
4304 return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Real);
4314 if (MI.isTransient())
4338 int Idx = DefMI.findRegisterDefOperandIdx(SR, &HRI, false, false);
4349 int Idx = UseMI.findRegisterUseOperandIdx(SR, &HRI, false);
4376 Cond[0].setImm(Opc);
4383 : Hexagon::getTruePredOpcode(Opc);
4384 if (InvPredOpcode >= 0)
4385 return InvPredOpcode;
4399 return ~(-1U << (bits - 1));
4401 return ~(-1U << bits);
4406 switch (MI.getOpcode()) {
4407 case Hexagon::L2_loadrbgp:
4408 case Hexagon::L2_loadrdgp:
4409 case Hexagon::L2_loadrhgp:
4410 case Hexagon::L2_loadrigp:
4411 case Hexagon::L2_loadrubgp:
4412 case Hexagon::L2_loadruhgp:
4413 case Hexagon::S2_storerbgp:
4414 case Hexagon::S2_storerbnewgp:
4415 case Hexagon::S2_storerhgp:
4416 case Hexagon::S2_storerhnewgp:
4417 case Hexagon::S2_storerigp:
4418 case Hexagon::S2_storerinewgp:
4419 case Hexagon::S2_storerdgp:
4420 case Hexagon::S2_storerfgp:
4438 if (
MI.getOpcode() == Hexagon::A4_ext)
      bool ToBigInstrs) const {

  MII->setDesc(get(Opcode));

      bool ToBigInstrs) const {

       End = MB.instr_end(); Instr != End; ++Instr)

  while ((MII != MBB->instr_end()) && MII->isInsideBundle()) {
  // getMemAccessSize: decode the access size encoded in the TSFlags.
  using namespace HexagonII;

  const uint64_t F = MI.getDesc().TSFlags;
  unsigned S = (F >> MemAccessSizePos) & MemAccesSizeMask;
  unsigned Size = getMemAccessSizeInBytes(MemAccessSize(S));
  if (Size != 0)
    return Size;

  // Y2_dcfetchbo is special.
  if (MI.getOpcode() == Hexagon::Y2_dcfetchbo)

  return HRI.getSpillSize(Hexagon::HvxVRRegClass);
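  // When the TSFlags size field does not describe the access (HVX vector
  // loads/stores), the size is taken from the HVX vector register spill size,
  // which tracks the configured vector length (typically 64 or 128 bytes).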
  // getMinValue: smallest value encodable in a signed extent field.
  return -1U << (bits - 1);

  // getNonExtOpcode: prefer the register form; otherwise pick the equivalent
  // non-extended addressing mode for loads and stores.
  short NonExtOpcode = Hexagon::getRegForm(MI.getOpcode());
  if (NonExtOpcode >= 0)
    return NonExtOpcode;

  if (MI.getDesc().mayLoad() || MI.getDesc().mayStore()) {
    switch (getAddrMode(MI)) {
    case HexagonII::Absolute:
      return Hexagon::changeAddrMode_abs_io(MI.getOpcode());
    case HexagonII::BaseImmOffset:
      return Hexagon::changeAddrMode_io_rr(MI.getOpcode());
    case HexagonII::BaseLongOffset:
      return Hexagon::changeAddrMode_ur_rr(MI.getOpcode());
    default:
      return -1;
    }
  }
  return -1;
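  // By the same arithmetic, a signed 7-bit field bottoms out at
  // (int)(-1U << 6) == -64; unsigned fields have a minimum of 0.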
bool HexagonInstrInfo::getPredReg(ArrayRef<MachineOperand> Cond,
                                  Register &PredReg, unsigned &PredRegPos,
                                  unsigned &PredRegFlags) const {
  // Extract the predicate register (and its flags) from Cond.
  PredReg = Cond[1].getReg();
  PredRegPos = 1;
  PredRegFlags = 0;
  if (Cond[1].isImplicit())
    PredRegFlags = RegState::Implicit;

  // getPseudoInstrPair: map an instruction to its pseudo counterpart.
  return Hexagon::getRealHWInstr(MI.getOpcode(), Hexagon::InstrType_Pseudo);

  // getRegForm
  return Hexagon::getRegForm(MI.getOpcode());
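  // Both lookups above are thin wrappers over TableGen-generated instruction
  // mapping tables (Hexagon::getRealHWInstr / Hexagon::getRegForm); they
  // return a negative value when no mapping exists for the opcode.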
  // Debug values and position labels occupy no encoding space.
  if (MI.isDebugInstr() || MI.isPosition())
    return 0;

  unsigned Size = MI.getDesc().getSize();

  // Inline asm: skip past the leading register definitions to reach the
  // asm string operand, then measure the string.
  unsigned NumDefs = 0;
  for (; MI.getOperand(NumDefs).isReg() && MI.getOperand(NumDefs).isDef();
       ++NumDefs)
    assert(NumDefs != MI.getNumOperands()-2 && "No asm string?");

  assert(MI.getOperand(NumDefs).isSymbol() && "No asm string?");

  const char *AsmStr = MI.getOperand(NumDefs).getSymbolName();

  // getUnits: the functional units come from the first itinerary stage of the
  // instruction's scheduling class.
  const InstrStage &IS = *II.beginStage(MI.getDesc().getSchedClass());
  assert(BundleHead->isBundle() && "Not a bundle header");
4644 "Instruction must be extendable");
4650 "Branch with unknown extendable field type");
  // invertAndChangeJumpTarget: the branch target is normally the last operand,
  // but implicit operands appended at the end may shift it, so scan backwards.
  int TargetPos = MI.getNumOperands() - 1;
  while ((TargetPos > -1) && !MI.getOperand(TargetPos).isMBB())
    --TargetPos;
  assert((TargetPos >= 0) && MI.getOperand(TargetPos).isMBB());
  MI.getOperand(TargetPos).setMBB(NewTarget);

  MI.setDesc(get(NewOpcode));
  // genAllInsnTimingClasses: visit every target opcode.
  for (unsigned insn = TargetOpcode::GENERIC_OP_END + 1;
       insn < Hexagon::INSTRUCTION_LIST_END; ++insn) {

  // reversePrediction: flip the taken/not-taken hint encoded in the opcode.
  int PredRevOpcode = -1;
  if (isPredictedTaken(Opcode))
    PredRevOpcode = Hexagon::notTakenBranchPrediction(Opcode);
  else
    PredRevOpcode = Hexagon::takenBranchPrediction(Opcode);
  assert(PredRevOpcode > 0);
  return PredRevOpcode;
  // validateBranchCond: Cond is valid if it is empty, or if its first element
  // is an immediate (the branch opcode) accompanied by at least one more
  // operand.
  return Cond.empty() || (Cond[0].isImm() && (Cond.size() != 1));

  // setBundleNoShuf: record the no-shuffle marker on the bundle header.
  if (Operand.isImm())
    Operand.setImm(Operand.getImm() | memShufDisabledMask);

  // getBundleNoShuf: test the same marker bit.
  return (Operand.isImm() && (Operand.getImm() & memShufDisabledMask) != 0);
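  // The marker is kept in an immediate operand on the bundle header, so
  // setBundleNoShuf and getBundleNoShuf must agree on memShufDisabledMask:
  // one ORs the bit in, the other tests it.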
  // Thin wrappers over the TableGen-generated addressing-mode remappings.
  return Opc >= 0 ? Hexagon::changeAddrMode_abs_io(Opc) : Opc;
  return Opc >= 0 ? Hexagon::changeAddrMode_io_abs(Opc) : Opc;
  return Opc >= 0 ? Hexagon::changeAddrMode_io_pi(Opc) : Opc;
  return Opc >= 0 ? Hexagon::changeAddrMode_io_rr(Opc) : Opc;
  return Opc >= 0 ? Hexagon::changeAddrMode_pi_io(Opc) : Opc;
  return Opc >= 0 ? Hexagon::changeAddrMode_rr_io(Opc) : Opc;
  return Opc >= 0 ? Hexagon::changeAddrMode_rr_ur(Opc) : Opc;
  return Opc >= 0 ? Hexagon::changeAddrMode_ur_rr(Opc) : Opc;
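  // The "Opc >= 0" guard keeps an invalid (negative) opcode from being looked
  // up in the generated tables; such opcodes are passed through unchanged.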