75#define DEBUG_TYPE "arm-instrinfo"
77#define GET_INSTRINFO_CTOR_DTOR
78#include "ARMGenInstrInfo.inc"
82 cl::desc(
"Enable ARM 2-addr to 3-addr conv"));
96 { ARM::VMLAS, ARM::VMULS, ARM::VADDS,
false,
false },
97 { ARM::VMLSS, ARM::VMULS, ARM::VSUBS,
false,
false },
98 { ARM::VMLAD, ARM::VMULD, ARM::VADDD,
false,
false },
99 { ARM::VMLSD, ARM::VMULD, ARM::VSUBD,
false,
false },
100 { ARM::VNMLAS, ARM::VNMULS, ARM::VSUBS,
true,
false },
101 { ARM::VNMLSS, ARM::VMULS, ARM::VSUBS,
true,
false },
102 { ARM::VNMLAD, ARM::VNMULD, ARM::VSUBD,
true,
false },
103 { ARM::VNMLSD, ARM::VMULD, ARM::VSUBD,
true,
false },
106 { ARM::VMLAfd, ARM::VMULfd, ARM::VADDfd,
false,
false },
107 { ARM::VMLSfd, ARM::VMULfd, ARM::VSUBfd,
false,
false },
108 { ARM::VMLAfq, ARM::VMULfq, ARM::VADDfq,
false,
false },
109 { ARM::VMLSfq, ARM::VMULfq, ARM::VSUBfq,
false,
false },
110 { ARM::VMLAslfd, ARM::VMULslfd, ARM::VADDfd,
false,
true },
111 { ARM::VMLSslfd, ARM::VMULslfd, ARM::VSUBfd,
false,
true },
112 { ARM::VMLAslfq, ARM::VMULslfq, ARM::VADDfq,
false,
true },
113 { ARM::VMLSslfq, ARM::VMULslfq, ARM::VSUBfq,
false,
true },
119 for (
unsigned i = 0, e = std::size(
ARM_MLxTable); i != e; ++i) {
132 if (usePreRAHazardRecognizer()) {
134 static_cast<const ARMSubtarget *
>(STI)->getInstrItineraryData();
154 std::make_unique<ARMBankConflictHazardRecognizer>(DAG, 0x4,
true));
191 default:
return nullptr;
217 unsigned OffImm =
MI.getOperand(NumOps - 2).getImm();
230 get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
235 }
else if (Amt != 0) {
239 get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
248 get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
261 get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
268 get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
277 std::vector<MachineInstr*> NewMIs;
281 BuildMI(MF,
MI.getDebugLoc(),
get(MemOpc),
MI.getOperand(0).getReg())
292 NewMIs.push_back(MemMI);
293 NewMIs.push_back(UpdateMI);
297 BuildMI(MF,
MI.getDebugLoc(),
get(MemOpc),
MI.getOperand(0).getReg())
310 NewMIs.push_back(UpdateMI);
311 NewMIs.push_back(MemMI);
317 if (MO.isReg() && MO.getReg().isVirtual()) {
322 MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
326 if (MO.isUse() && MO.isKill()) {
327 for (
unsigned j = 0; j < 2; ++j) {
333 if (VI.removeKill(
MI))
334 VI.Kills.push_back(NewMI);
360 bool AllowModify)
const {
375 bool CantAnalyze =
false;
379 while (
I->isDebugInstr() || !
I->isTerminator() ||
381 I->getOpcode() == ARM::t2DoLoopStartTP){
393 TBB =
I->getOperand(0).getMBB();
399 assert(!FBB &&
"FBB should have been null.");
401 TBB =
I->getOperand(0).getMBB();
402 Cond.push_back(
I->getOperand(1));
403 Cond.push_back(
I->getOperand(2));
404 }
else if (
I->isReturn()) {
407 }
else if (
I->getOpcode() == ARM::t2LoopEnd &&
414 TBB =
I->getOperand(1).getMBB();
416 Cond.push_back(
I->getOperand(0));
473 int *BytesRemoved)
const {
474 assert(!BytesRemoved &&
"code size not handled");
485 I->eraseFromParent();
495 I->eraseFromParent();
504 int *BytesAdded)
const {
505 assert(!BytesAdded &&
"code size not handled");
514 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
516 "ARM branch conditions have two or three components!");
526 }
else if (
Cond.size() == 2) {
537 if (
Cond.size() == 2)
542 else if (
Cond.size() == 3)
553 if (
Cond.size() == 2) {
565 while (++
I != E &&
I->isInsideBundle()) {
566 int PIdx =
I->findFirstPredOperandIdx();
567 if (PIdx != -1 &&
I->getOperand(PIdx).getImm() !=
ARMCC::AL)
573 int PIdx =
MI.findFirstPredOperandIdx();
574 return PIdx != -1 &&
MI.getOperand(PIdx).getImm() !=
ARMCC::AL;
582 std::string GenericComment =
584 if (!GenericComment.empty())
585 return GenericComment;
589 return std::string();
593 int FirstPredOp =
MI.findFirstPredOperandIdx();
594 if (FirstPredOp != (
int) OpIdx)
595 return std::string();
597 std::string
CC =
"CC::";
604 unsigned Opc =
MI.getOpcode();
613 int PIdx =
MI.findFirstPredOperandIdx();
616 PMO.
setImm(Pred[0].getImm());
617 MI.getOperand(PIdx+1).setReg(Pred[1].
getReg());
624 "CPSR def isn't expected operand");
625 assert((
MI.getOperand(1).isDead() ||
626 MI.getOperand(1).getReg() != ARM::CPSR) &&
627 "if conversion tried to stop defining used CPSR");
628 MI.getOperand(1).setReg(ARM::NoRegister);
638 if (Pred1.
size() > 2 || Pred2.
size() > 2)
663 std::vector<MachineOperand> &Pred,
664 bool SkipDead)
const {
667 bool ClobbersCPSR = MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR);
668 bool IsCPSR = MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR;
669 if (ClobbersCPSR || IsCPSR) {
687 for (
const auto &MO :
MI.operands())
688 if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
694 switch (
MI->getOpcode()) {
695 default:
return true;
726 if (!
MI.isPredicable())
764 if (!MO.isReg() || MO.isUndef() || MO.isUse())
766 if (MO.getReg() != ARM::CPSR)
786 switch (
MI.getOpcode()) {
794 case TargetOpcode::BUNDLE:
795 return getInstBundleLength(
MI);
796 case ARM::CONSTPOOL_ENTRY:
797 case ARM::JUMPTABLE_INSTS:
798 case ARM::JUMPTABLE_ADDRS:
799 case ARM::JUMPTABLE_TBB:
800 case ARM::JUMPTABLE_TBH:
803 return MI.getOperand(2).getImm();
805 return MI.getOperand(1).getImm();
807 case ARM::INLINEASM_BR: {
809 unsigned Size = getInlineAsmLength(
MI.getOperand(0).getSymbolName(), *MAI);
817unsigned ARMBaseInstrInfo::getInstBundleLength(
const MachineInstr &
MI)
const {
821 while (++
I != E &&
I->isInsideBundle()) {
822 assert(!
I->isBundle() &&
"No nested bundle!");
830 unsigned DestReg,
bool KillSrc,
832 unsigned Opc = Subtarget.isThumb()
833 ? (Subtarget.
isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
850 unsigned SrcReg,
bool KillSrc,
852 unsigned Opc = Subtarget.isThumb()
853 ? (Subtarget.
isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
887 unsigned Cond,
unsigned Inactive) {
896 bool GPRDest = ARM::GPRRegClass.contains(DestReg);
897 bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
899 if (GPRDest && GPRSrc) {
907 bool SPRDest = ARM::SPRRegClass.contains(DestReg);
908 bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);
911 if (SPRDest && SPRSrc)
913 else if (GPRDest && SPRSrc)
915 else if (SPRDest && GPRSrc)
917 else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) && Subtarget.hasFP64())
919 else if (ARM::QPRRegClass.
contains(DestReg, SrcReg))
920 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MQPRCopy;
925 if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR)
927 if (Opc == ARM::MVE_VORR)
929 else if (Opc != ARM::MQPRCopy)
935 unsigned BeginIdx = 0;
936 unsigned SubRegs = 0;
940 if (ARM::QQPRRegClass.
contains(DestReg, SrcReg)) {
941 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
942 BeginIdx = ARM::qsub_0;
944 }
else if (ARM::QQQQPRRegClass.
contains(DestReg, SrcReg)) {
945 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
946 BeginIdx = ARM::qsub_0;
949 }
else if (ARM::DPairRegClass.
contains(DestReg, SrcReg)) {
951 BeginIdx = ARM::dsub_0;
953 }
else if (ARM::DTripleRegClass.
contains(DestReg, SrcReg)) {
955 BeginIdx = ARM::dsub_0;
957 }
else if (ARM::DQuadRegClass.
contains(DestReg, SrcReg)) {
959 BeginIdx = ARM::dsub_0;
961 }
else if (ARM::GPRPairRegClass.
contains(DestReg, SrcReg)) {
962 Opc = Subtarget.
isThumb2() ? ARM::tMOVr : ARM::MOVr;
963 BeginIdx = ARM::gsub_0;
965 }
else if (ARM::DPairSpcRegClass.
contains(DestReg, SrcReg)) {
967 BeginIdx = ARM::dsub_0;
970 }
else if (ARM::DTripleSpcRegClass.
contains(DestReg, SrcReg)) {
972 BeginIdx = ARM::dsub_0;
975 }
else if (ARM::DQuadSpcRegClass.
contains(DestReg, SrcReg)) {
977 BeginIdx = ARM::dsub_0;
980 }
else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) &&
981 !Subtarget.hasFP64()) {
983 BeginIdx = ARM::ssub_0;
985 }
else if (SrcReg == ARM::CPSR) {
988 }
else if (DestReg == ARM::CPSR) {
991 }
else if (DestReg == ARM::VPR) {
997 }
else if (SrcReg == ARM::VPR) {
1003 }
else if (DestReg == ARM::FPSCR_NZCV) {
1005 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
1009 }
else if (SrcReg == ARM::FPSCR_NZCV) {
1011 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
1017 assert(Opc &&
"Impossible reg-to-reg copy");
1023 if (
TRI->regsOverlap(SrcReg,
TRI->getSubReg(DestReg, BeginIdx))) {
1024 BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
1030 for (
unsigned i = 0; i != SubRegs; ++i) {
1031 Register Dst =
TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
1032 Register Src =
TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
1033 assert(Dst && Src &&
"Bad sub-register");
1035 assert(!DstRegs.
count(Src) &&
"destructive vector copy");
1040 if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR) {
1044 if (Opc == ARM::MVE_VORR)
1049 if (Opc == ARM::MOVr)
1058std::optional<DestSourcePair>
1067 if (!
MI.isMoveReg() ||
1068 (
MI.getOpcode() == ARM::VORRq &&
1069 MI.getOperand(1).getReg() !=
MI.getOperand(2).getReg()))
1070 return std::nullopt;
1074std::optional<ParamLoadedValue>
1078 Register DstReg = DstSrcPair->Destination->getReg();
1099 return std::nullopt;
1106 unsigned SubIdx,
unsigned State,
1109 return MIB.
addReg(Reg, State);
1112 return MIB.
addReg(
TRI->getSubReg(Reg, SubIdx), State);
1113 return MIB.
addReg(Reg, State, SubIdx);
1118 Register SrcReg,
bool isKill,
int FI,
1130 switch (
TRI->getSpillSize(*RC)) {
1132 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1143 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1150 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1157 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1168 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1175 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1176 if (Subtarget.hasV5TEOps()) {
1196 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1212 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1213 Subtarget.hasMVEIntegerOps()) {
1218 .addMemOperand(MMO);
1224 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1227 Subtarget.hasNEON()) {
1241 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0,
TRI);
1248 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1249 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1250 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1252 Subtarget.hasNEON()) {
1261 }
else if (Subtarget.hasMVEIntegerOps()) {
1273 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0,
TRI);
1274 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, 0,
TRI);
1281 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1282 Subtarget.hasMVEIntegerOps()) {
1287 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1293 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0,
TRI);
1294 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, 0,
TRI);
1295 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_3, 0,
TRI);
1296 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_4, 0,
TRI);
1297 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_5, 0,
TRI);
1298 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_6, 0,
TRI);
1309 int &FrameIndex)
const {
1310 switch (
MI.getOpcode()) {
1314 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1315 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1316 MI.getOperand(3).getImm() == 0) {
1317 FrameIndex =
MI.getOperand(1).getIndex();
1318 return MI.getOperand(0).getReg();
1326 case ARM::VSTR_P0_off:
1327 case ARM::MVE_VSTRWU32:
1328 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1329 MI.getOperand(2).getImm() == 0) {
1330 FrameIndex =
MI.getOperand(1).getIndex();
1331 return MI.getOperand(0).getReg();
1335 case ARM::VST1d64TPseudo:
1336 case ARM::VST1d64QPseudo:
1337 if (
MI.getOperand(0).isFI() &&
MI.getOperand(2).getSubReg() == 0) {
1338 FrameIndex =
MI.getOperand(0).getIndex();
1339 return MI.getOperand(2).getReg();
1343 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1344 FrameIndex =
MI.getOperand(1).getIndex();
1345 return MI.getOperand(0).getReg();
1348 case ARM::MQQPRStore:
1349 case ARM::MQQQQPRStore:
1350 if (
MI.getOperand(1).isFI()) {
1351 FrameIndex =
MI.getOperand(1).getIndex();
1352 return MI.getOperand(0).getReg();
1361 int &FrameIndex)
const {
1363 if (
MI.mayStore() && hasStoreToStackSlot(
MI, Accesses) &&
1364 Accesses.
size() == 1) {
1366 cast<FixedStackPseudoSourceValue>(Accesses.
front()->getPseudoValue())
1388 switch (
TRI->getSpillSize(*RC)) {
1390 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1400 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1406 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1412 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1422 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1428 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1431 if (Subtarget.hasV5TEOps()) {
1454 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1467 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1468 Subtarget.hasMVEIntegerOps()) {
1470 MIB.addFrameIndex(FI)
1472 .addMemOperand(MMO);
1478 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1480 Subtarget.hasNEON()) {
1501 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1502 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1503 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1505 Subtarget.hasNEON()) {
1511 }
else if (Subtarget.hasMVEIntegerOps()) {
1531 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1532 Subtarget.hasMVEIntegerOps()) {
1536 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1560 int &FrameIndex)
const {
1561 switch (
MI.getOpcode()) {
1565 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1566 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1567 MI.getOperand(3).getImm() == 0) {
1568 FrameIndex =
MI.getOperand(1).getIndex();
1569 return MI.getOperand(0).getReg();
1577 case ARM::VLDR_P0_off:
1578 case ARM::MVE_VLDRWU32:
1579 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1580 MI.getOperand(2).getImm() == 0) {
1581 FrameIndex =
MI.getOperand(1).getIndex();
1582 return MI.getOperand(0).getReg();
1586 case ARM::VLD1d8TPseudo:
1587 case ARM::VLD1d16TPseudo:
1588 case ARM::VLD1d32TPseudo:
1589 case ARM::VLD1d64TPseudo:
1590 case ARM::VLD1d8QPseudo:
1591 case ARM::VLD1d16QPseudo:
1592 case ARM::VLD1d32QPseudo:
1593 case ARM::VLD1d64QPseudo:
1594 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1595 FrameIndex =
MI.getOperand(1).getIndex();
1596 return MI.getOperand(0).getReg();
1600 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1601 FrameIndex =
MI.getOperand(1).getIndex();
1602 return MI.getOperand(0).getReg();
1605 case ARM::MQQPRLoad:
1606 case ARM::MQQQQPRLoad:
1607 if (
MI.getOperand(1).isFI()) {
1608 FrameIndex =
MI.getOperand(1).getIndex();
1609 return MI.getOperand(0).getReg();
1618 int &FrameIndex)
const {
1620 if (
MI.mayLoad() && hasLoadFromStackSlot(
MI, Accesses) &&
1621 Accesses.
size() == 1) {
1623 cast<FixedStackPseudoSourceValue>(Accesses.
front()->getPseudoValue())
1634 bool isThumb2 = Subtarget.
isThumb2();
1641 if (isThumb1 || !
MI->getOperand(1).isDead()) {
1643 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA_UPD
1644 : isThumb1 ? ARM::tLDMIA_UPD
1648 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
1651 if (isThumb1 || !
MI->getOperand(0).isDead()) {
1653 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA_UPD
1654 : isThumb1 ? ARM::tSTMIA_UPD
1658 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
1673 [&
TRI](
const unsigned &Reg1,
const unsigned &Reg2) ->
bool {
1674 return TRI.getEncodingValue(Reg1) <
1675 TRI.getEncodingValue(Reg2);
1678 for (
const auto &Reg : ScratchRegs) {
1687 if (
MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
1688 expandLoadStackGuard(
MI);
1689 MI.getParent()->erase(
MI);
1693 if (
MI.getOpcode() == ARM::MEMCPY) {
1702 if (!
MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())
1707 Register DstRegS =
MI.getOperand(0).getReg();
1708 Register SrcRegS =
MI.getOperand(1).getReg();
1709 if (!ARM::SPRRegClass.
contains(DstRegS, SrcRegS))
1713 unsigned DstRegD =
TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
1715 unsigned SrcRegD =
TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
1717 if (!DstRegD || !SrcRegD)
1723 if (!
MI.definesRegister(DstRegD,
TRI) ||
MI.readsRegister(DstRegD,
TRI))
1727 if (
MI.getOperand(0).isDead())
1736 int ImpDefIdx =
MI.findRegisterDefOperandIdx(DstRegD,
nullptr);
1737 if (ImpDefIdx != -1)
1738 MI.removeOperand(ImpDefIdx);
1741 MI.setDesc(
get(ARM::VMOVD));
1742 MI.getOperand(0).setReg(DstRegD);
1743 MI.getOperand(1).setReg(SrcRegD);
1750 MI.getOperand(1).setIsUndef();
1755 if (
MI.getOperand(1).isKill()) {
1756 MI.getOperand(1).setIsKill(
false);
1757 MI.addRegisterKilled(SrcRegS,
TRI,
true);
1771 assert(MCPE.isMachineConstantPoolEntry() &&
1772 "Expecting a machine constantpool entry!");
1786 cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId,
ARMCP::CPValue,
1791 cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
1794 Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
1802 cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
1822 case ARM::tLDRpci_pic:
1823 case ARM::t2LDRpci_pic: {
1843 switch (
I->getOpcode()) {
1844 case ARM::tLDRpci_pic:
1845 case ARM::t2LDRpci_pic: {
1847 unsigned CPI =
I->getOperand(1).getIndex();
1849 I->getOperand(1).setIndex(CPI);
1850 I->getOperand(2).setImm(PCLabelId);
1854 if (!
I->isBundledWithSucc())
1865 if (Opcode == ARM::t2LDRpci || Opcode == ARM::t2LDRpci_pic ||
1866 Opcode == ARM::tLDRpci || Opcode == ARM::tLDRpci_pic ||
1867 Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1868 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1869 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1870 Opcode == ARM::t2MOV_ga_pcrel) {
1881 if (Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1882 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1883 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1884 Opcode == ARM::t2MOV_ga_pcrel)
1896 if (isARMCP0 && isARMCP1) {
1902 }
else if (!isARMCP0 && !isARMCP1) {
1906 }
else if (Opcode == ARM::PICLDR) {
1914 if (Addr0 != Addr1) {
1950 int64_t &Offset2)
const {
1957 auto IsLoadOpcode = [&](
unsigned Opcode) {
1972 case ARM::t2LDRSHi8:
1974 case ARM::t2LDRBi12:
1975 case ARM::t2LDRSHi12:
1994 if (isa<ConstantSDNode>(Load1->
getOperand(1)) &&
1996 Offset1 = cast<ConstantSDNode>(Load1->
getOperand(1))->getSExtValue();
1997 Offset2 = cast<ConstantSDNode>(Load2->
getOperand(1))->getSExtValue();
2016 int64_t Offset1, int64_t Offset2,
2017 unsigned NumLoads)
const {
2021 assert(Offset2 > Offset1);
2023 if ((Offset2 - Offset1) / 8 > 64)
2054 if (
MI.isDebugInstr())
2058 if (
MI.isTerminator() ||
MI.isPosition())
2062 if (
MI.getOpcode() == TargetOpcode::INLINEASM_BR)
2076 while (++
I !=
MBB->
end() &&
I->isDebugInstr())
2078 if (
I !=
MBB->
end() &&
I->getOpcode() == ARM::t2IT)
2089 if (!
MI.isCall() &&
MI.definesRegister(ARM::SP,
nullptr))
2097 unsigned NumCycles,
unsigned ExtraPredCycles,
2107 if (!Pred->
empty()) {
2109 if (LastMI->
getOpcode() == ARM::t2Bcc) {
2118 MBB, 0, 0, Probability);
2123 unsigned TCycles,
unsigned TExtra,
2125 unsigned FCycles,
unsigned FExtra,
2142 const unsigned ScalingUpFactor = 1024;
2144 unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
2145 unsigned UnpredCost;
2146 if (!Subtarget.hasBranchPredictor()) {
2149 unsigned NotTakenBranchCost = 1;
2151 unsigned TUnpredCycles, FUnpredCycles;
2154 TUnpredCycles = TCycles + NotTakenBranchCost;
2155 FUnpredCycles = TakenBranchCost;
2158 TUnpredCycles = TCycles + TakenBranchCost;
2159 FUnpredCycles = FCycles + NotTakenBranchCost;
2162 PredCost -= 1 * ScalingUpFactor;
2165 unsigned TUnpredCost = Probability.
scale(TUnpredCycles * ScalingUpFactor);
2166 unsigned FUnpredCost = Probability.
getCompl().
scale(FUnpredCycles * ScalingUpFactor);
2167 UnpredCost = TUnpredCost + FUnpredCost;
2170 if (Subtarget.
isThumb2() && TCycles + FCycles > 4) {
2171 PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
2174 unsigned TUnpredCost = Probability.
scale(TCycles * ScalingUpFactor);
2175 unsigned FUnpredCost =
2177 UnpredCost = TUnpredCost + FUnpredCost;
2178 UnpredCost += 1 * ScalingUpFactor;
2182 return PredCost <= UnpredCost;
2187 unsigned NumInsts)
const {
2195 unsigned MaxInsts = Subtarget.
restrictIT() ? 1 : 4;
2204 if (
MI.getOpcode() == ARM::t2Bcc &&
2227 return Subtarget.isProfitableToUnpredicate();
2235 int PIdx =
MI.findFirstPredOperandIdx();
2241 PredReg =
MI.getOperand(PIdx+1).getReg();
2250 if (Opc == ARM::t2B)
2259 unsigned OpIdx2)
const {
2260 switch (
MI.getOpcode()) {
2262 case ARM::t2MOVCCr: {
2287 if (!Reg.isVirtual())
2289 if (!
MRI.hasOneNonDBGUse(Reg))
2301 if (MO.isFI() || MO.isCPI() || MO.isJTI())
2308 if (MO.getReg().isPhysical())
2310 if (MO.isDef() && !MO.isDead())
2313 bool DontMoveAcrossStores =
true;
2314 if (!
MI->isSafeToMove(DontMoveAcrossStores))
2321 unsigned &TrueOp,
unsigned &FalseOp,
2322 bool &Optimizable)
const {
2323 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2324 "Unknown select instruction");
2333 Cond.push_back(
MI.getOperand(3));
2334 Cond.push_back(
MI.getOperand(4));
2343 bool PreferFalse)
const {
2344 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2345 "Unknown select instruction");
2348 bool Invert = !
DefMI;
2350 DefMI = canFoldIntoMOVCC(
MI.getOperand(1).getReg(),
MRI,
this);
2357 Register DestReg =
MI.getOperand(0).getReg();
2360 if (!
MRI.constrainRegClass(DestReg, FalseClass))
2362 if (!
MRI.constrainRegClass(DestReg, TrueClass))
2373 i != e && !DefDesc.
operands()[i].isPredicate(); ++i)
2376 unsigned CondCode =
MI.getOperand(3).getImm();
2381 NewMI.
add(
MI.getOperand(4));
2392 NewMI.
add(FalseReg);
2423 {ARM::ADDSri, ARM::ADDri},
2424 {ARM::ADDSrr, ARM::ADDrr},
2425 {ARM::ADDSrsi, ARM::ADDrsi},
2426 {ARM::ADDSrsr, ARM::ADDrsr},
2428 {ARM::SUBSri, ARM::SUBri},
2429 {ARM::SUBSrr, ARM::SUBrr},
2430 {ARM::SUBSrsi, ARM::SUBrsi},
2431 {ARM::SUBSrsr, ARM::SUBrsr},
2433 {ARM::RSBSri, ARM::RSBri},
2434 {ARM::RSBSrsi, ARM::RSBrsi},
2435 {ARM::RSBSrsr, ARM::RSBrsr},
2437 {ARM::tADDSi3, ARM::tADDi3},
2438 {ARM::tADDSi8, ARM::tADDi8},
2439 {ARM::tADDSrr, ARM::tADDrr},
2440 {ARM::tADCS, ARM::tADC},
2442 {ARM::tSUBSi3, ARM::tSUBi3},
2443 {ARM::tSUBSi8, ARM::tSUBi8},
2444 {ARM::tSUBSrr, ARM::tSUBrr},
2445 {ARM::tSBCS, ARM::tSBC},
2446 {ARM::tRSBS, ARM::tRSB},
2447 {ARM::tLSLSri, ARM::tLSLri},
2449 {ARM::t2ADDSri, ARM::t2ADDri},
2450 {ARM::t2ADDSrr, ARM::t2ADDrr},
2451 {ARM::t2ADDSrs, ARM::t2ADDrs},
2453 {ARM::t2SUBSri, ARM::t2SUBri},
2454 {ARM::t2SUBSrr, ARM::t2SUBrr},
2455 {ARM::t2SUBSrs, ARM::t2SUBrs},
2457 {ARM::t2RSBSri, ARM::t2RSBri},
2458 {ARM::t2RSBSrs, ARM::t2RSBrs},
2463 if (OldOpc == Entry.PseudoOpc)
2464 return Entry.MachineOpc;
2475 if (NumBytes == 0 && DestReg != BaseReg) {
2484 bool isSub = NumBytes < 0;
2485 if (isSub) NumBytes = -NumBytes;
2489 unsigned ThisVal = NumBytes & llvm::rotr<uint32_t>(0xFF, RotAmt);
2490 assert(ThisVal &&
"Didn't extract field correctly");
2493 NumBytes &= ~ThisVal;
2498 unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
2511 unsigned NumBytes) {
2522 if (!IsPush && !IsPop)
2525 bool IsVFPPushPop =
MI->getOpcode() == ARM::VSTMDDB_UPD ||
2526 MI->getOpcode() == ARM::VLDMDIA_UPD;
2527 bool IsT1PushPop =
MI->getOpcode() == ARM::tPUSH ||
2528 MI->getOpcode() == ARM::tPOP ||
2529 MI->getOpcode() == ARM::tPOP_RET;
2531 assert((IsT1PushPop || (
MI->getOperand(0).getReg() == ARM::SP &&
2532 MI->getOperand(1).getReg() == ARM::SP)) &&
2533 "trying to fold sp update into non-sp-updating push/pop");
2538 if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
2543 int RegListIdx = IsT1PushPop ? 2 : 4;
2546 unsigned RegsNeeded;
2549 RegsNeeded = NumBytes / 8;
2550 RegClass = &ARM::DPRRegClass;
2552 RegsNeeded = NumBytes / 4;
2553 RegClass = &ARM::GPRRegClass;
2563 unsigned FirstRegEnc = -1;
2566 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i) {
2571 TRI->getEncodingValue(MO.
getReg()) < FirstRegEnc)
2572 FirstRegEnc =
TRI->getEncodingValue(MO.
getReg());
2575 const MCPhysReg *CSRegs =
TRI->getCalleeSavedRegs(&MF);
2578 for (
int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
2580 unsigned CurReg = RegClass->
getRegister(CurRegEnc);
2581 if (IsT1PushPop && CurRegEnc >
TRI->getEncodingValue(ARM::R7))
2588 false,
false,
true));
2598 MI->getParent()->computeRegisterLiveness(
TRI, CurReg,
MI) !=
2620 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i)
2621 MI->removeOperand(i);
2634 unsigned Opcode =
MI.getOpcode();
2640 if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
2643 if (Opcode == ARM::ADDri) {
2644 Offset +=
MI.getOperand(FrameRegIdx+1).getImm();
2647 MI.setDesc(
TII.get(ARM::MOVr));
2648 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2649 MI.removeOperand(FrameRegIdx+1);
2655 MI.setDesc(
TII.get(ARM::SUBri));
2661 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2662 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(
Offset);
2670 unsigned ThisImmVal =
Offset & llvm::rotr<uint32_t>(0xFF, RotAmt);
2677 "Bit extraction didn't work?");
2678 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
2680 unsigned ImmIdx = 0;
2682 unsigned NumBits = 0;
2686 ImmIdx = FrameRegIdx + 1;
2687 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2691 ImmIdx = FrameRegIdx+2;
2698 ImmIdx = FrameRegIdx+2;
2709 ImmIdx = FrameRegIdx+1;
2717 ImmIdx = FrameRegIdx+1;
2727 ImmIdx = FrameRegIdx+1;
2728 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2737 Offset += InstrOffs * Scale;
2738 assert((
Offset & (Scale-1)) == 0 &&
"Can't encode this offset!");
2748 int ImmedOffset =
Offset / Scale;
2749 unsigned Mask = (1 << NumBits) - 1;
2750 if ((
unsigned)
Offset <= Mask * Scale) {
2752 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2758 ImmedOffset = -ImmedOffset;
2760 ImmedOffset |= 1 << NumBits;
2768 ImmedOffset = ImmedOffset & Mask;
2771 ImmedOffset = -ImmedOffset;
2773 ImmedOffset |= 1 << NumBits;
2789 Register &SrcReg2, int64_t &CmpMask,
2790 int64_t &CmpValue)
const {
2791 switch (
MI.getOpcode()) {
2796 SrcReg =
MI.getOperand(0).getReg();
2799 CmpValue =
MI.getOperand(1).getImm();
2804 SrcReg =
MI.getOperand(0).getReg();
2805 SrcReg2 =
MI.getOperand(1).getReg();
2811 SrcReg =
MI.getOperand(0).getReg();
2813 CmpMask =
MI.getOperand(1).getImm();
2826 int CmpMask,
bool CommonUse) {
2827 switch (
MI->getOpcode()) {
2830 if (CmpMask !=
MI->getOperand(2).getImm())
2832 if (SrcReg ==
MI->getOperand(CommonUse ? 1 : 0).getReg())
2922 switch (
MI->getOpcode()) {
2923 default:
return false;
3019 if (!
MI)
return false;
3022 if (CmpMask != ~0) {
3026 UI =
MRI->use_instr_begin(SrcReg), UE =
MRI->use_instr_end();
3028 if (UI->getParent() != CmpInstr.
getParent())
3037 if (!
MI)
return false;
3046 if (
I ==
B)
return false;
3057 else if (
MI->getParent() != CmpInstr.
getParent() || CmpValue != 0) {
3062 if (CmpInstr.
getOpcode() == ARM::CMPri ||
3070 bool IsThumb1 =
false;
3087 if (
MI && IsThumb1) {
3089 if (
I != E && !
MI->readsRegister(ARM::CPSR,
TRI)) {
3090 bool CanReorder =
true;
3091 for (;
I != E; --
I) {
3092 if (
I->getOpcode() != ARM::tMOVi8) {
3098 MI =
MI->removeFromParent();
3109 bool SubAddIsThumb1 =
false;
3124 if (Instr.modifiesRegister(ARM::CPSR,
TRI) ||
3125 Instr.readsRegister(ARM::CPSR,
TRI))
3147 IsThumb1 = SubAddIsThumb1;
3162 bool isSafe =
false;
3165 while (!isSafe && ++
I != E) {
3167 for (
unsigned IO = 0, EO = Instr.getNumOperands();
3168 !isSafe && IO != EO; ++IO) {
3182 bool IsInstrVSel =
true;
3183 switch (Instr.getOpcode()) {
3185 IsInstrVSel =
false;
3219 bool IsSub = Opc == ARM::SUBrr || Opc == ARM::t2SUBrr ||
3220 Opc == ARM::SUBri || Opc == ARM::t2SUBri ||
3221 Opc == ARM::tSUBrr || Opc == ARM::tSUBi3 ||
3223 unsigned OpI = Opc != ARM::tSUBrr ? 1 : 2;
3235 std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
3269 if (Succ->isLiveIn(ARM::CPSR))
3276 unsigned CPSRRegNum =
MI->getNumExplicitOperands() - 1;
3277 MI->getOperand(CPSRRegNum).setReg(ARM::CPSR);
3278 MI->getOperand(CPSRRegNum).setIsDef(
true);
3286 for (
unsigned i = 0, e = OperandsToUpdate.
size(); i < e; i++)
3287 OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second);
3289 MI->clearRegisterDeads(ARM::CPSR);
3303 int64_t CmpMask, CmpValue;
3305 if (Next !=
MI.getParent()->end() &&
3316 unsigned DefOpc =
DefMI.getOpcode();
3317 if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm &&
3318 DefOpc != ARM::tMOVi32imm)
3320 if (!
DefMI.getOperand(1).isImm())
3324 if (!
MRI->hasOneNonDBGUse(Reg))
3340 if (
UseMI.getOperand(NumOps - 1).
getReg() == ARM::CPSR)
3346 unsigned UseOpc =
UseMI.getOpcode();
3347 unsigned NewUseOpc = 0;
3349 uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
3350 bool Commute =
false;
3352 default:
return false;
3360 case ARM::t2EORrr: {
3366 if (UseOpc == ARM::SUBrr && Commute)
3372 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri;
3375 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::SUBri : ARM::ADDri;
3389 case ARM::ORRrr: NewUseOpc = ARM::ORRri;
break;
3390 case ARM::EORrr: NewUseOpc = ARM::EORri;
break;
3394 case ARM::t2SUBrr: {
3395 if (UseOpc == ARM::t2SUBrr && Commute)
3400 const bool ToSP =
DefMI.getOperand(0).
getReg() == ARM::SP;
3401 const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
3402 const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
3404 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB;
3407 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD;
3422 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri;
break;
3423 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri;
break;
3430 unsigned OpIdx = Commute ? 2 : 1;
3432 bool isKill =
UseMI.getOperand(OpIdx).isKill();
3434 Register NewReg =
MRI->createVirtualRegister(TRC);
3442 UseMI.getOperand(1).setReg(NewReg);
3443 UseMI.getOperand(1).setIsKill();
3444 UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
3445 DefMI.eraseFromParent();
3452 case ARM::t2ADDspImm:
3453 case ARM::t2SUBspImm:
3463 switch (
MI.getOpcode()) {
3467 assert(UOps >= 0 &&
"bad # UOps");
3475 unsigned ShOpVal =
MI.getOperand(3).getImm();
3480 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3488 if (!
MI.getOperand(2).getReg())
3491 unsigned ShOpVal =
MI.getOperand(3).getImm();
3496 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3506 case ARM::LDRSB_POST:
3507 case ARM::LDRSH_POST: {
3510 return (Rt == Rm) ? 4 : 3;
3513 case ARM::LDR_PRE_REG:
3514 case ARM::LDRB_PRE_REG: {
3519 unsigned ShOpVal =
MI.getOperand(4).getImm();
3524 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3530 case ARM::STR_PRE_REG:
3531 case ARM::STRB_PRE_REG: {
3532 unsigned ShOpVal =
MI.getOperand(4).getImm();
3537 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3544 case ARM::STRH_PRE: {
3554 case ARM::LDR_POST_REG:
3555 case ARM::LDRB_POST_REG:
3556 case ARM::LDRH_POST: {
3559 return (Rt == Rm) ? 3 : 2;
3562 case ARM::LDR_PRE_IMM:
3563 case ARM::LDRB_PRE_IMM:
3564 case ARM::LDR_POST_IMM:
3565 case ARM::LDRB_POST_IMM:
3566 case ARM::STRB_POST_IMM:
3567 case ARM::STRB_POST_REG:
3568 case ARM::STRB_PRE_IMM:
3569 case ARM::STRH_POST:
3570 case ARM::STR_POST_IMM:
3571 case ARM::STR_POST_REG:
3572 case ARM::STR_PRE_IMM:
3575 case ARM::LDRSB_PRE:
3576 case ARM::LDRSH_PRE: {
3583 unsigned ShOpVal =
MI.getOperand(4).getImm();
3588 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3601 return (Rt == Rn) ? 3 : 2;
3612 case ARM::LDRD_POST:
3613 case ARM::t2LDRD_POST:
3616 case ARM::STRD_POST:
3617 case ARM::t2STRD_POST:
3620 case ARM::LDRD_PRE: {
3627 return (Rt == Rn) ? 4 : 3;
3630 case ARM::t2LDRD_PRE: {
3633 return (Rt == Rn) ? 4 : 3;
3636 case ARM::STRD_PRE: {
3644 case ARM::t2STRD_PRE:
3647 case ARM::t2LDR_POST:
3648 case ARM::t2LDRB_POST:
3649 case ARM::t2LDRB_PRE:
3650 case ARM::t2LDRSBi12:
3651 case ARM::t2LDRSBi8:
3652 case ARM::t2LDRSBpci:
3654 case ARM::t2LDRH_POST:
3655 case ARM::t2LDRH_PRE:
3657 case ARM::t2LDRSB_POST:
3658 case ARM::t2LDRSB_PRE:
3659 case ARM::t2LDRSH_POST:
3660 case ARM::t2LDRSH_PRE:
3661 case ARM::t2LDRSHi12:
3662 case ARM::t2LDRSHi8:
3663 case ARM::t2LDRSHpci:
3667 case ARM::t2LDRDi8: {
3670 return (Rt == Rn) ? 3 : 2;
3673 case ARM::t2STRB_POST:
3674 case ARM::t2STRB_PRE:
3677 case ARM::t2STRH_POST:
3678 case ARM::t2STRH_PRE:
3680 case ARM::t2STR_POST:
3681 case ARM::t2STR_PRE:
3712 E =
MI.memoperands_end();
3714 Size += (*I)->getSize().getValue();
3721 return std::min(
Size / 4, 16U);
3726 unsigned UOps = 1 + NumRegs;
3730 case ARM::VLDMDIA_UPD:
3731 case ARM::VLDMDDB_UPD:
3732 case ARM::VLDMSIA_UPD:
3733 case ARM::VLDMSDB_UPD:
3734 case ARM::VSTMDIA_UPD:
3735 case ARM::VSTMDDB_UPD:
3736 case ARM::VSTMSIA_UPD:
3737 case ARM::VSTMSDB_UPD:
3738 case ARM::LDMIA_UPD:
3739 case ARM::LDMDA_UPD:
3740 case ARM::LDMDB_UPD:
3741 case ARM::LDMIB_UPD:
3742 case ARM::STMIA_UPD:
3743 case ARM::STMDA_UPD:
3744 case ARM::STMDB_UPD:
3745 case ARM::STMIB_UPD:
3746 case ARM::tLDMIA_UPD:
3747 case ARM::tSTMIA_UPD:
3748 case ARM::t2LDMIA_UPD:
3749 case ARM::t2LDMDB_UPD:
3750 case ARM::t2STMIA_UPD:
3751 case ARM::t2STMDB_UPD:
3754 case ARM::LDMIA_RET:
3756 case ARM::t2LDMIA_RET:
3765 if (!ItinData || ItinData->
isEmpty())
3769 unsigned Class =
Desc.getSchedClass();
3771 if (ItinUOps >= 0) {
3778 unsigned Opc =
MI.getOpcode();
3797 case ARM::VLDMDIA_UPD:
3798 case ARM::VLDMDDB_UPD:
3800 case ARM::VLDMSIA_UPD:
3801 case ARM::VLDMSDB_UPD:
3803 case ARM::VSTMDIA_UPD:
3804 case ARM::VSTMDDB_UPD:
3806 case ARM::VSTMSIA_UPD:
3807 case ARM::VSTMSDB_UPD: {
3808 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands();
3809 return (NumRegs / 2) + (NumRegs % 2) + 1;
3812 case ARM::LDMIA_RET:
3817 case ARM::LDMIA_UPD:
3818 case ARM::LDMDA_UPD:
3819 case ARM::LDMDB_UPD:
3820 case ARM::LDMIB_UPD:
3825 case ARM::STMIA_UPD:
3826 case ARM::STMDA_UPD:
3827 case ARM::STMDB_UPD:
3828 case ARM::STMIB_UPD:
3830 case ARM::tLDMIA_UPD:
3831 case ARM::tSTMIA_UPD:
3835 case ARM::t2LDMIA_RET:
3838 case ARM::t2LDMIA_UPD:
3839 case ARM::t2LDMDB_UPD:
3842 case ARM::t2STMIA_UPD:
3843 case ARM::t2STMDB_UPD: {
3844 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands() + 1;
3856 unsigned UOps = (NumRegs / 2);
3862 unsigned UOps = (NumRegs / 2);
3865 if ((NumRegs % 2) || !
MI.hasOneMemOperand() ||
3876std::optional<unsigned>
3879 unsigned DefIdx,
unsigned DefAlign)
const {
3888 DefCycle = RegNo / 2 + 1;
3893 bool isSLoad =
false;
3898 case ARM::VLDMSIA_UPD:
3899 case ARM::VLDMSDB_UPD:
3906 if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3910 DefCycle = RegNo + 2;
3916std::optional<unsigned>
3919 unsigned DefIdx,
unsigned DefAlign)
const {
3929 DefCycle = RegNo / 2;
3935 DefCycle = (RegNo / 2);
3938 if ((RegNo % 2) || DefAlign < 8)
3944 DefCycle = RegNo + 2;
3950std::optional<unsigned>
3953 unsigned UseIdx,
unsigned UseAlign)
const {
3961 UseCycle = RegNo / 2 + 1;
3966 bool isSStore =
false;
3971 case ARM::VSTMSIA_UPD:
3972 case ARM::VSTMSDB_UPD:
3979 if ((isSStore && (RegNo % 2)) || UseAlign < 8)
3983 UseCycle = RegNo + 2;
3989std::optional<unsigned>
3992 unsigned UseIdx,
unsigned UseAlign)
const {
3999 UseCycle = RegNo / 2;
4005 UseCycle = (RegNo / 2);
4008 if ((RegNo % 2) || UseAlign < 8)
4019 unsigned DefIdx,
unsigned DefAlign,
const MCInstrDesc &UseMCID,
4020 unsigned UseIdx,
unsigned UseAlign)
const {
4030 std::optional<unsigned> DefCycle;
4031 bool LdmBypass =
false;
4038 case ARM::VLDMDIA_UPD:
4039 case ARM::VLDMDDB_UPD:
4041 case ARM::VLDMSIA_UPD:
4042 case ARM::VLDMSDB_UPD:
4043 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
4046 case ARM::LDMIA_RET:
4051 case ARM::LDMIA_UPD:
4052 case ARM::LDMDA_UPD:
4053 case ARM::LDMDB_UPD:
4054 case ARM::LDMIB_UPD:
4056 case ARM::tLDMIA_UPD:
4058 case ARM::t2LDMIA_RET:
4061 case ARM::t2LDMIA_UPD:
4062 case ARM::t2LDMDB_UPD:
4064 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
4072 std::optional<unsigned> UseCycle;
4079 case ARM::VSTMDIA_UPD:
4080 case ARM::VSTMDDB_UPD:
4082 case ARM::VSTMSIA_UPD:
4083 case ARM::VSTMSDB_UPD:
4084 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
4091 case ARM::STMIA_UPD:
4092 case ARM::STMDA_UPD:
4093 case ARM::STMDB_UPD:
4094 case ARM::STMIB_UPD:
4095 case ARM::tSTMIA_UPD:
4100 case ARM::t2STMIA_UPD:
4101 case ARM::t2STMDB_UPD:
4102 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
4110 if (UseCycle > *DefCycle + 1)
4111 return std::nullopt;
4113 UseCycle = *DefCycle - *UseCycle + 1;
4114 if (UseCycle > 0u) {
4120 UseCycle = *UseCycle - 1;
4122 UseClass, UseIdx)) {
4123 UseCycle = *UseCycle - 1;
4132 unsigned &DefIdx,
unsigned &Dist) {
4137 assert(
II->isInsideBundle() &&
"Empty bundle?");
4140 while (
II->isInsideBundle()) {
4141 Idx =
II->findRegisterDefOperandIdx(Reg,
TRI,
false,
true);
4148 assert(
Idx != -1 &&
"Cannot find bundled definition!");
4155 unsigned &UseIdx,
unsigned &Dist) {
4159 assert(
II->isInsideBundle() &&
"Empty bundle?");
4164 while (
II != E &&
II->isInsideBundle()) {
4165 Idx =
II->findRegisterUseOperandIdx(Reg,
TRI,
false);
4168 if (
II->getOpcode() != ARM::t2IT)
4196 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4206 case ARM::t2LDRSHs: {
4208 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4209 if (ShAmt == 0 || ShAmt == 2)
4214 }
else if (Subtarget.
isSwift()) {
4221 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4226 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4237 case ARM::t2LDRSHs: {
4239 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4240 if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
4247 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) {
4254 case ARM::VLD1q8wb_fixed:
4255 case ARM::VLD1q16wb_fixed:
4256 case ARM::VLD1q32wb_fixed:
4257 case ARM::VLD1q64wb_fixed:
4258 case ARM::VLD1q8wb_register:
4259 case ARM::VLD1q16wb_register:
4260 case ARM::VLD1q32wb_register:
4261 case ARM::VLD1q64wb_register:
4268 case ARM::VLD2d8wb_fixed:
4269 case ARM::VLD2d16wb_fixed:
4270 case ARM::VLD2d32wb_fixed:
4271 case ARM::VLD2q8wb_fixed:
4272 case ARM::VLD2q16wb_fixed:
4273 case ARM::VLD2q32wb_fixed:
4274 case ARM::VLD2d8wb_register:
4275 case ARM::VLD2d16wb_register:
4276 case ARM::VLD2d32wb_register:
4277 case ARM::VLD2q8wb_register:
4278 case ARM::VLD2q16wb_register:
4279 case ARM::VLD2q32wb_register:
4284 case ARM::VLD3d8_UPD:
4285 case ARM::VLD3d16_UPD:
4286 case ARM::VLD3d32_UPD:
4287 case ARM::VLD1d64Twb_fixed:
4288 case ARM::VLD1d64Twb_register:
4289 case ARM::VLD3q8_UPD:
4290 case ARM::VLD3q16_UPD:
4291 case ARM::VLD3q32_UPD:
4296 case ARM::VLD4d8_UPD:
4297 case ARM::VLD4d16_UPD:
4298 case ARM::VLD4d32_UPD:
4299 case ARM::VLD1d64Qwb_fixed:
4300 case ARM::VLD1d64Qwb_register:
4301 case ARM::VLD4q8_UPD:
4302 case ARM::VLD4q16_UPD:
4303 case ARM::VLD4q32_UPD:
4304 case ARM::VLD1DUPq8:
4305 case ARM::VLD1DUPq16:
4306 case ARM::VLD1DUPq32:
4307 case ARM::VLD1DUPq8wb_fixed:
4308 case ARM::VLD1DUPq16wb_fixed:
4309 case ARM::VLD1DUPq32wb_fixed:
4310 case ARM::VLD1DUPq8wb_register:
4311 case ARM::VLD1DUPq16wb_register:
4312 case ARM::VLD1DUPq32wb_register:
4313 case ARM::VLD2DUPd8:
4314 case ARM::VLD2DUPd16:
4315 case ARM::VLD2DUPd32:
4316 case ARM::VLD2DUPd8wb_fixed:
4317 case ARM::VLD2DUPd16wb_fixed:
4318 case ARM::VLD2DUPd32wb_fixed:
4319 case ARM::VLD2DUPd8wb_register:
4320 case ARM::VLD2DUPd16wb_register:
4321 case ARM::VLD2DUPd32wb_register:
4322 case ARM::VLD4DUPd8:
4323 case ARM::VLD4DUPd16:
4324 case ARM::VLD4DUPd32:
4325 case ARM::VLD4DUPd8_UPD:
4326 case ARM::VLD4DUPd16_UPD:
4327 case ARM::VLD4DUPd32_UPD:
4329 case ARM::VLD1LNd16:
4330 case ARM::VLD1LNd32:
4331 case ARM::VLD1LNd8_UPD:
4332 case ARM::VLD1LNd16_UPD:
4333 case ARM::VLD1LNd32_UPD:
4335 case ARM::VLD2LNd16:
4336 case ARM::VLD2LNd32:
4337 case ARM::VLD2LNq16:
4338 case ARM::VLD2LNq32:
4339 case ARM::VLD2LNd8_UPD:
4340 case ARM::VLD2LNd16_UPD:
4341 case ARM::VLD2LNd32_UPD:
4342 case ARM::VLD2LNq16_UPD:
4343 case ARM::VLD2LNq32_UPD:
4345 case ARM::VLD4LNd16:
4346 case ARM::VLD4LNd32:
4347 case ARM::VLD4LNq16:
4348 case ARM::VLD4LNq32:
4349 case ARM::VLD4LNd8_UPD:
4350 case ARM::VLD4LNd16_UPD:
4351 case ARM::VLD4LNd32_UPD:
4352 case ARM::VLD4LNq16_UPD:
4353 case ARM::VLD4LNq32_UPD:
4367 if (!ItinData || ItinData->
isEmpty())
4368 return std::nullopt;
4374 unsigned DefAdj = 0;
4375 if (
DefMI.isBundle())
4384 unsigned UseAdj = 0;
4385 if (
UseMI.isBundle()) {
4389 return std::nullopt;
4392 return getOperandLatencyImpl(
4393 ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->
getDesc(), DefAdj, DefMO,
4394 Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->
getDesc(), UseAdj);
4397std::optional<unsigned> ARMBaseInstrInfo::getOperandLatencyImpl(
4399 unsigned DefIdx,
const MCInstrDesc &DefMCID,
unsigned DefAdj,
4401 unsigned UseIdx,
const MCInstrDesc &UseMCID,
unsigned UseAdj)
const {
4402 if (Reg == ARM::CPSR) {
4403 if (
DefMI.getOpcode() == ARM::FMSTAT) {
4405 return Subtarget.
isLikeA9() ? 1 : 20;
4409 if (
UseMI.isBranch())
4429 return std::nullopt;
4431 unsigned DefAlign =
DefMI.hasOneMemOperand()
4434 unsigned UseAlign =
UseMI.hasOneMemOperand()
4440 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4443 return std::nullopt;
4446 int Adj = DefAdj + UseAdj;
4450 if (Adj >= 0 || (
int)*
Latency > -Adj) {
4457std::optional<unsigned>
4459 SDNode *DefNode,
unsigned DefIdx,
4460 SDNode *UseNode,
unsigned UseIdx)
const {
4466 if (isZeroCost(DefMCID.
Opcode))
4469 if (!ItinData || ItinData->
isEmpty())
4470 return DefMCID.
mayLoad() ? 3 : 1;
4473 std::optional<unsigned>
Latency =
4476 int Threshold = 1 + Adj;
4481 auto *DefMN = cast<MachineSDNode>(DefNode);
4482 unsigned DefAlign = !DefMN->memoperands_empty()
4483 ? (*DefMN->memoperands_begin())->
getAlign().value()
4485 auto *UseMN = cast<MachineSDNode>(UseNode);
4486 unsigned UseAlign = !UseMN->memoperands_empty()
4487 ? (*UseMN->memoperands_begin())->
getAlign().value()
4490 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4492 return std::nullopt;
4513 case ARM::t2LDRSHs: {
4516 if (ShAmt == 0 || ShAmt == 2)
4531 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4548 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment())
4555 case ARM::VLD1q8wb_register:
4556 case ARM::VLD1q16wb_register:
4557 case ARM::VLD1q32wb_register:
4558 case ARM::VLD1q64wb_register:
4559 case ARM::VLD1q8wb_fixed:
4560 case ARM::VLD1q16wb_fixed:
4561 case ARM::VLD1q32wb_fixed:
4562 case ARM::VLD1q64wb_fixed:
4566 case ARM::VLD2q8Pseudo:
4567 case ARM::VLD2q16Pseudo:
4568 case ARM::VLD2q32Pseudo:
4569 case ARM::VLD2d8wb_fixed:
4570 case ARM::VLD2d16wb_fixed:
4571 case ARM::VLD2d32wb_fixed:
4572 case ARM::VLD2q8PseudoWB_fixed:
4573 case ARM::VLD2q16PseudoWB_fixed:
4574 case ARM::VLD2q32PseudoWB_fixed:
4575 case ARM::VLD2d8wb_register:
4576 case ARM::VLD2d16wb_register:
4577 case ARM::VLD2d32wb_register:
4578 case ARM::VLD2q8PseudoWB_register:
4579 case ARM::VLD2q16PseudoWB_register:
4580 case ARM::VLD2q32PseudoWB_register:
4581 case ARM::VLD3d8Pseudo:
4582 case ARM::VLD3d16Pseudo:
4583 case ARM::VLD3d32Pseudo:
4584 case ARM::VLD1d8TPseudo:
4585 case ARM::VLD1d16TPseudo:
4586 case ARM::VLD1d32TPseudo:
4587 case ARM::VLD1d64TPseudo:
4588 case ARM::VLD1d64TPseudoWB_fixed:
4589 case ARM::VLD1d64TPseudoWB_register:
4590 case ARM::VLD3d8Pseudo_UPD:
4591 case ARM::VLD3d16Pseudo_UPD:
4592 case ARM::VLD3d32Pseudo_UPD:
4593 case ARM::VLD3q8Pseudo_UPD:
4594 case ARM::VLD3q16Pseudo_UPD:
4595 case ARM::VLD3q32Pseudo_UPD:
4596 case ARM::VLD3q8oddPseudo:
4597 case ARM::VLD3q16oddPseudo:
4598 case ARM::VLD3q32oddPseudo:
4599 case ARM::VLD3q8oddPseudo_UPD:
4600 case ARM::VLD3q16oddPseudo_UPD:
4601 case ARM::VLD3q32oddPseudo_UPD:
4602 case ARM::VLD4d8Pseudo:
4603 case ARM::VLD4d16Pseudo:
4604 case ARM::VLD4d32Pseudo:
4605 case ARM::VLD1d8QPseudo:
4606 case ARM::VLD1d16QPseudo:
4607 case ARM::VLD1d32QPseudo:
4608 case ARM::VLD1d64QPseudo:
4609 case ARM::VLD1d64QPseudoWB_fixed:
4610 case ARM::VLD1d64QPseudoWB_register:
4611 case ARM::VLD1q8HighQPseudo:
4612 case ARM::VLD1q8LowQPseudo_UPD:
4613 case ARM::VLD1q8HighTPseudo:
4614 case ARM::VLD1q8LowTPseudo_UPD:
4615 case ARM::VLD1q16HighQPseudo:
4616 case ARM::VLD1q16LowQPseudo_UPD:
4617 case ARM::VLD1q16HighTPseudo:
4618 case ARM::VLD1q16LowTPseudo_UPD:
4619 case ARM::VLD1q32HighQPseudo:
4620 case ARM::VLD1q32LowQPseudo_UPD:
4621 case ARM::VLD1q32HighTPseudo:
4622 case ARM::VLD1q32LowTPseudo_UPD:
4623 case ARM::VLD1q64HighQPseudo:
4624 case ARM::VLD1q64LowQPseudo_UPD:
4625 case ARM::VLD1q64HighTPseudo:
4626 case ARM::VLD1q64LowTPseudo_UPD:
4627 case ARM::VLD4d8Pseudo_UPD:
4628 case ARM::VLD4d16Pseudo_UPD:
4629 case ARM::VLD4d32Pseudo_UPD:
4630 case ARM::VLD4q8Pseudo_UPD:
4631 case ARM::VLD4q16Pseudo_UPD:
4632 case ARM::VLD4q32Pseudo_UPD:
4633 case ARM::VLD4q8oddPseudo:
4634 case ARM::VLD4q16oddPseudo:
4635 case ARM::VLD4q32oddPseudo:
4636 case ARM::VLD4q8oddPseudo_UPD:
4637 case ARM::VLD4q16oddPseudo_UPD:
4638 case ARM::VLD4q32oddPseudo_UPD:
4639 case ARM::VLD1DUPq8:
4640 case ARM::VLD1DUPq16:
4641 case ARM::VLD1DUPq32:
4642 case ARM::VLD1DUPq8wb_fixed:
4643 case ARM::VLD1DUPq16wb_fixed:
4644 case ARM::VLD1DUPq32wb_fixed:
4645 case ARM::VLD1DUPq8wb_register:
4646 case ARM::VLD1DUPq16wb_register:
4647 case ARM::VLD1DUPq32wb_register:
4648 case ARM::VLD2DUPd8:
4649 case ARM::VLD2DUPd16:
4650 case ARM::VLD2DUPd32:
4651 case ARM::VLD2DUPd8wb_fixed:
4652 case ARM::VLD2DUPd16wb_fixed:
4653 case ARM::VLD2DUPd32wb_fixed:
4654 case ARM::VLD2DUPd8wb_register:
4655 case ARM::VLD2DUPd16wb_register:
4656 case ARM::VLD2DUPd32wb_register:
4657 case ARM::VLD2DUPq8EvenPseudo:
4658 case ARM::VLD2DUPq8OddPseudo:
4659 case ARM::VLD2DUPq16EvenPseudo:
4660 case ARM::VLD2DUPq16OddPseudo:
4661 case ARM::VLD2DUPq32EvenPseudo:
4662 case ARM::VLD2DUPq32OddPseudo:
4663 case ARM::VLD3DUPq8EvenPseudo:
4664 case ARM::VLD3DUPq8OddPseudo:
4665 case ARM::VLD3DUPq16EvenPseudo:
4666 case ARM::VLD3DUPq16OddPseudo:
4667 case ARM::VLD3DUPq32EvenPseudo:
4668 case ARM::VLD3DUPq32OddPseudo:
4669 case ARM::VLD4DUPd8Pseudo:
4670 case ARM::VLD4DUPd16Pseudo:
4671 case ARM::VLD4DUPd32Pseudo:
4672 case ARM::VLD4DUPd8Pseudo_UPD:
4673 case ARM::VLD4DUPd16Pseudo_UPD:
4674 case ARM::VLD4DUPd32Pseudo_UPD:
4675 case ARM::VLD4DUPq8EvenPseudo:
4676 case ARM::VLD4DUPq8OddPseudo:
4677 case ARM::VLD4DUPq16EvenPseudo:
4678 case ARM::VLD4DUPq16OddPseudo:
4679 case ARM::VLD4DUPq32EvenPseudo:
4680 case ARM::VLD4DUPq32OddPseudo:
4681 case ARM::VLD1LNq8Pseudo:
4682 case ARM::VLD1LNq16Pseudo:
4683 case ARM::VLD1LNq32Pseudo:
4684 case ARM::VLD1LNq8Pseudo_UPD:
4685 case ARM::VLD1LNq16Pseudo_UPD:
4686 case ARM::VLD1LNq32Pseudo_UPD:
4687 case ARM::VLD2LNd8Pseudo:
4688 case ARM::VLD2LNd16Pseudo:
4689 case ARM::VLD2LNd32Pseudo:
4690 case ARM::VLD2LNq16Pseudo:
4691 case ARM::VLD2LNq32Pseudo:
4692 case ARM::VLD2LNd8Pseudo_UPD:
4693 case ARM::VLD2LNd16Pseudo_UPD:
4694 case ARM::VLD2LNd32Pseudo_UPD:
4695 case ARM::VLD2LNq16Pseudo_UPD:
4696 case ARM::VLD2LNq32Pseudo_UPD:
4697 case ARM::VLD4LNd8Pseudo:
4698 case ARM::VLD4LNd16Pseudo:
4699 case ARM::VLD4LNd32Pseudo:
4700 case ARM::VLD4LNq16Pseudo:
4701 case ARM::VLD4LNq32Pseudo:
4702 case ARM::VLD4LNd8Pseudo_UPD:
4703 case ARM::VLD4LNd16Pseudo_UPD:
4704 case ARM::VLD4LNd32Pseudo_UPD:
4705 case ARM::VLD4LNq16Pseudo_UPD:
4706 case ARM::VLD4LNq32Pseudo_UPD:
4716unsigned ARMBaseInstrInfo::getPredicationCost(
const MachineInstr &
MI)
const {
4717 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4727 !Subtarget.cheapPredicableCPSRDef())) {
4737 unsigned *PredCost)
const {
4738 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4744 if (
MI.isBundle()) {
4748 while (++
I != E &&
I->isInsideBundle()) {
4749 if (
I->getOpcode() != ARM::t2IT)
4750 Latency += getInstrLatency(ItinData, *
I, PredCost);
4757 !Subtarget.cheapPredicableCPSRDef()))) {
4765 return MI.mayLoad() ? 3 : 1;
4778 MI.hasOneMemOperand() ? (*
MI.memoperands_begin())->
getAlign().value() : 0;
4780 if (Adj >= 0 || (
int)
Latency > -Adj) {
4788 if (!
Node->isMachineOpcode())
4791 if (!ItinData || ItinData->
isEmpty())
4794 unsigned Opcode =
Node->getMachineOpcode();
4804bool ARMBaseInstrInfo::hasHighOperandLatency(
const TargetSchedModel &SchedModel,
4809 unsigned UseIdx)
const {
4812 if (Subtarget.nonpipelinedVFP() &&
4827 unsigned DefIdx)
const {
4829 if (!ItinData || ItinData->
isEmpty())
4834 unsigned DefClass =
DefMI.getDesc().getSchedClass();
4835 std::optional<unsigned> DefCycle =
4837 return DefCycle && DefCycle <= 2U;
4845 ErrInfo =
"Pseudo flag setting opcodes only exist in Selection DAG";
4848 if (
MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) {
4850 if (!ARM::hGPRRegClass.
contains(
MI.getOperand(0).getReg()) &&
4851 !ARM::hGPRRegClass.contains(
MI.getOperand(1).getReg())) {
4852 ErrInfo =
"Non-flag-setting Thumb1 mov is v6-only";
4856 if (
MI.getOpcode() == ARM::tPUSH ||
4857 MI.getOpcode() == ARM::tPOP ||
4858 MI.getOpcode() == ARM::tPOP_RET) {
4860 if (MO.isImplicit() || !MO.isReg())
4863 if (Reg < ARM::R0 || Reg > ARM::R7) {
4864 if (!(
MI.getOpcode() == ARM::tPUSH && Reg == ARM::LR) &&
4865 !(
MI.getOpcode() == ARM::tPOP_RET && Reg == ARM::PC)) {
4866 ErrInfo =
"Unsupported register in Thumb1 push/pop";
4872 if (
MI.getOpcode() == ARM::MVE_VMOV_q_rr) {
4873 assert(
MI.getOperand(4).isImm() &&
MI.getOperand(5).isImm());
4874 if ((
MI.getOperand(4).getImm() != 2 &&
MI.getOperand(4).getImm() != 3) ||
4875 MI.getOperand(4).getImm() !=
MI.getOperand(5).getImm() + 2) {
4876 ErrInfo =
"Incorrect array index for MVE_VMOV_q_rr";
4897 for (
auto Op :
MI.operands()) {
4904 ErrInfo =
"Incorrect AddrMode Imm for instruction";
4914 unsigned LoadImmOpc,
4915 unsigned LoadOpc)
const {
4917 "ROPI/RWPI not currently supported with stack guard");
4925 if (LoadImmOpc == ARM::MRC || LoadImmOpc == ARM::t2MRC) {
4927 "TLS stack protector requires hardware TLS register");
4938 Offset = M.getStackProtectorGuardOffset();
4943 unsigned AddOpc = (LoadImmOpc == ARM::MRC) ? ARM::ADDri : ARM::t2ADDri;
4953 cast<GlobalValue>((*
MI->memoperands_begin())->getValue());
4962 else if (IsIndirect)
4964 }
else if (IsIndirect) {
4968 if (LoadImmOpc == ARM::tMOVi32imm) {
4971 ARMSysReg::lookupMClassSysRegByName(
"apsr_nzcvq")->Encoding;
5007 unsigned &AddSubOpc,
5008 bool &NegAcc,
bool &HasLane)
const {
5010 if (
I == MLxEntryMap.
end())
5014 MulOpc = Entry.MulOpc;
5015 AddSubOpc = Entry.AddSubOpc;
5016 NegAcc = Entry.NegAcc;
5017 HasLane = Entry.HasLane;
5041std::pair<uint16_t, uint16_t>
5045 if (Subtarget.hasNEON()) {
5054 (
MI.getOpcode() == ARM::VMOVRS ||
MI.getOpcode() == ARM::VMOVSR ||
5055 MI.getOpcode() == ARM::VMOVS))
5062 return std::make_pair(
ExeNEON, 0);
5067 return std::make_pair(
ExeNEON, 0);
5070 return std::make_pair(
ExeVFP, 0);
5076 unsigned SReg,
unsigned &Lane) {
5077 unsigned DReg =
TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
5080 if (DReg != ARM::NoRegister)
5084 DReg =
TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
5086 assert(DReg &&
"S-register with no D super-register?");
5107 unsigned Lane,
unsigned &ImplicitSReg) {
5110 if (
MI.definesRegister(DReg,
TRI) ||
MI.readsRegister(DReg,
TRI)) {
5116 ImplicitSReg =
TRI->getSubReg(DReg,
5117 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
5119 MI.getParent()->computeRegisterLiveness(
TRI, ImplicitSReg,
MI);
5134 unsigned DstReg, SrcReg, DReg;
5138 switch (
MI.getOpcode()) {
5150 assert(Subtarget.hasNEON() &&
"VORRd requires NEON");
5153 DstReg =
MI.getOperand(0).getReg();
5154 SrcReg =
MI.getOperand(1).getReg();
5156 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5157 MI.removeOperand(i - 1);
5160 MI.setDesc(
get(ARM::VORRd));
5172 DstReg =
MI.getOperand(0).getReg();
5173 SrcReg =
MI.getOperand(1).getReg();
5175 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5176 MI.removeOperand(i - 1);
5183 MI.setDesc(
get(ARM::VGETLNi32));
5199 DstReg =
MI.getOperand(0).getReg();
5200 SrcReg =
MI.getOperand(1).getReg();
5204 unsigned ImplicitSReg;
5208 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5209 MI.removeOperand(i - 1);
5213 MI.setDesc(
get(ARM::VSETLNi32));
5223 if (ImplicitSReg != 0)
5232 DstReg =
MI.getOperand(0).getReg();
5233 SrcReg =
MI.getOperand(1).getReg();
5235 unsigned DstLane = 0, SrcLane = 0, DDst, DSrc;
5239 unsigned ImplicitSReg;
5243 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5244 MI.removeOperand(i - 1);
5249 MI.setDesc(
get(ARM::VDUPLN32d));
5259 if (ImplicitSReg != 0)
5283 unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
5284 bool CurUndef = !
MI.readsRegister(CurReg,
TRI);
5287 CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
5288 CurUndef = !
MI.readsRegister(CurReg,
TRI);
5293 if (SrcLane == DstLane)
5296 MI.setDesc(
get(ARM::VEXTd32));
5301 CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
5302 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5305 CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
5306 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5311 if (SrcLane != DstLane)
5317 if (ImplicitSReg != 0)
5344 if (!PartialUpdateClearance)
5355 switch (
MI.getOpcode()) {
5361 case ARM::VMOVv4i16:
5362 case ARM::VMOVv2i32:
5363 case ARM::VMOVv2f32:
5364 case ARM::VMOVv1i64:
5365 UseOp =
MI.findRegisterUseOperandIdx(Reg,
TRI,
false);
5369 case ARM::VLD1LNd32:
5378 if (UseOp != -1 &&
MI.getOperand(UseOp).readsReg())
5382 if (Reg.isVirtual()) {
5384 if (!MO.
getSubReg() ||
MI.readsVirtualRegister(Reg))
5386 }
else if (ARM::SPRRegClass.
contains(Reg)) {
5388 unsigned DReg =
TRI->getMatchingSuperReg(Reg, ARM::ssub_0,
5390 if (!DReg || !
MI.definesRegister(DReg,
TRI))
5396 return PartialUpdateClearance;
5403 assert(OpNum <
MI.getDesc().getNumDefs() &&
"OpNum is not a def");
5408 assert(Reg.isPhysical() &&
"Can't break virtual register dependencies.");
5409 unsigned DReg = Reg;
5412 if (ARM::SPRRegClass.
contains(Reg)) {
5413 DReg = ARM::D0 + (Reg - ARM::S0) / 2;
5414 assert(
TRI->isSuperRegister(Reg, DReg) &&
"Register enums broken");
5417 assert(ARM::DPRRegClass.
contains(DReg) &&
"Can only break D-reg deps");
5418 assert(
MI.definesRegister(DReg,
TRI) &&
"MI doesn't clobber full D-reg");
5431 MI.addRegisterKilled(DReg,
TRI,
true);
5435 return Subtarget.hasFeature(ARM::HasV6KOps);
5439 if (
MI->getNumOperands() < 4)
5441 unsigned ShOpVal =
MI->getOperand(3).getImm();
5445 ((ShImm == 1 || ShImm == 2) &&
5455 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5456 assert(
MI.isRegSequenceLike() &&
"Invalid kind of instruction");
5458 switch (
MI.getOpcode()) {
5470 MOReg = &
MI.getOperand(2);
5482 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5483 assert(
MI.isExtractSubregLike() &&
"Invalid kind of instruction");
5485 switch (
MI.getOpcode()) {
5496 InputReg.
SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1;
5505 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5506 assert(
MI.isInsertSubregLike() &&
"Invalid kind of instruction");
5508 switch (
MI.getOpcode()) {
5509 case ARM::VSETLNi32:
5510 case ARM::MVE_VMOV_to_lane_32:
5521 InsertedReg.
Reg = MOInsertedReg.
getReg();
5529std::pair<unsigned, unsigned>
5532 return std::make_pair(TF & Mask, TF & ~Mask);
5537 using namespace ARMII;
5539 static const std::pair<unsigned, const char *> TargetFlags[] = {
5540 {MO_LO16,
"arm-lo16"}, {MO_HI16,
"arm-hi16"},
5541 {MO_LO_0_7,
"arm-lo-0-7"}, {MO_HI_0_7,
"arm-hi-0-7"},
5542 {MO_LO_8_15,
"arm-lo-8-15"}, {MO_HI_8_15,
"arm-hi-8-15"},
5549 using namespace ARMII;
5551 static const std::pair<unsigned, const char *> TargetFlags[] = {
5552 {MO_COFFSTUB,
"arm-coffstub"},
5553 {MO_GOT,
"arm-got"},
5554 {MO_SBREL,
"arm-sbrel"},
5555 {MO_DLLIMPORT,
"arm-dllimport"},
5556 {MO_SECREL,
"arm-secrel"},
5557 {MO_NONLAZY,
"arm-nonlazy"}};
5561std::optional<RegImmPair>
5564 unsigned Opcode =
MI.getOpcode();
5571 return std::nullopt;
5574 if (Opcode == ARM::SUBri)
5576 else if (Opcode != ARM::ADDri)
5577 return std::nullopt;
5582 if (!
MI.getOperand(1).isReg() || !
MI.getOperand(2).isImm())
5583 return std::nullopt;
5585 Offset =
MI.getOperand(2).getImm() * Sign;
5593 for (
auto I =
From;
I != To; ++
I)
5594 if (
I->modifiesRegister(Reg,
TRI))
5607 if (CmpMI->modifiesRegister(ARM::CPSR,
TRI))
5609 if (CmpMI->readsRegister(ARM::CPSR,
TRI))
5615 if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
5617 Register Reg = CmpMI->getOperand(0).getReg();
5620 if (Pred !=
ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
5633 if (Subtarget->isThumb()) {
5635 return ForCodesize ? 2 : 1;
5636 if (Subtarget->hasV6T2Ops() && (Val <= 0xffff ||
5639 return ForCodesize ? 4 : 1;
5641 return ForCodesize ? 4 : 2;
5643 return ForCodesize ? 4 : 2;
5645 return ForCodesize ? 4 : 2;
5648 return ForCodesize ? 4 : 1;
5650 return ForCodesize ? 4 : 1;
5651 if (Subtarget->hasV6T2Ops() && Val <= 0xffff)
5652 return ForCodesize ? 4 : 1;
5654 return ForCodesize ? 8 : 2;
5656 return ForCodesize ? 8 : 2;
5659 return ForCodesize ? 8 : 2;
5660 return ForCodesize ? 8 : 3;
5809 : CallTailCall(target.
isThumb() ? 4 : 4),
5810 FrameTailCall(target.
isThumb() ? 0 : 0),
5811 CallThunk(target.
isThumb() ? 4 : 4),
5812 FrameThunk(target.
isThumb() ? 0 : 0),
5813 CallNoLRSave(target.
isThumb() ? 4 : 4),
5814 FrameNoLRSave(target.
isThumb() ? 2 : 4),
5815 CallRegSave(target.
isThumb() ? 8 : 12),
5816 FrameRegSave(target.
isThumb() ? 2 : 4),
5817 CallDefault(target.
isThumb() ? 8 : 12),
5818 FrameDefault(target.
isThumb() ? 2 : 4),
5819 SaveRestoreLROnStack(target.
isThumb() ? 8 : 8) {}
5832 for (
Register Reg : ARM::rGPRRegClass) {
5833 if (!(Reg < regsReserved.
size() && regsReserved.
test(Reg)) &&
5836 C.isAvailableAcrossAndOutOfSeq(Reg,
TRI) &&
5837 C.isAvailableInsideSeq(Reg,
TRI))
5851 for (;
I != E; ++
I) {
5855 if (
MI.modifiesRegister(ARM::LR, &
TRI))
5859 unsigned Opcode =
MI.getOpcode();
5860 if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR ||
5861 Opcode == ARM::SUBS_PC_LR || Opcode == ARM::tBX_RET ||
5862 Opcode == ARM::tBXNS_RET) {
5868 if (
MI.readsRegister(ARM::LR, &
TRI))
5874std::optional<outliner::OutlinedFunction>
5877 std::vector<outliner::Candidate> &RepeatedSequenceLocs)
const {
5878 unsigned SequenceSize = 0;
5879 for (
auto &
MI : RepeatedSequenceLocs[0])
5883 unsigned FlagsSetInAll = 0xF;
5888 FlagsSetInAll &=
C.Flags;
5907 return C.isAnyUnavailableAcrossOrOutOfSeq({ARM::R12, ARM::CPSR},
TRI);
5915 llvm::erase_if(RepeatedSequenceLocs, CantGuaranteeValueAcrossCall);
5918 if (RepeatedSequenceLocs.size() < 2)
5919 return std::nullopt;
5938 if (std::distance(RepeatedSequenceLocs.begin(), NoBTI) >
5939 std::distance(NoBTI, RepeatedSequenceLocs.end()))
5940 RepeatedSequenceLocs.erase(NoBTI, RepeatedSequenceLocs.end());
5942 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoBTI);
5944 if (RepeatedSequenceLocs.size() < 2)
5945 return std::nullopt;
5955 if (std::distance(RepeatedSequenceLocs.begin(), NoPAC) >
5956 std::distance(NoPAC, RepeatedSequenceLocs.end()))
5957 RepeatedSequenceLocs.erase(NoPAC, RepeatedSequenceLocs.end());
5959 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoPAC);
5961 if (RepeatedSequenceLocs.size() < 2)
5962 return std::nullopt;
5967 unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back().getOpcode();
5970 auto SetCandidateCallInfo =
5971 [&RepeatedSequenceLocs](
unsigned CallID,
unsigned NumBytesForCall) {
5973 C.setCallInfo(CallID, NumBytesForCall);
5978 const auto &SomeMFI =
5981 if (SomeMFI.branchTargetEnforcement()) {
5990 if (SomeMFI.shouldSignReturnAddress(
true)) {
6000 if (RepeatedSequenceLocs[0].back().isTerminator()) {
6004 }
else if (LastInstrOpcode == ARM::BL || LastInstrOpcode == ARM::BLX ||
6005 LastInstrOpcode == ARM::BLX_noip || LastInstrOpcode == ARM::tBL ||
6006 LastInstrOpcode == ARM::tBLXr ||
6007 LastInstrOpcode == ARM::tBLXr_noip ||
6008 LastInstrOpcode == ARM::tBLXi) {
6016 unsigned NumBytesNoStackCalls = 0;
6017 std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
6022 const auto Last =
C.getMBB()->rbegin();
6023 const bool LRIsAvailable =
6024 C.getMBB()->isReturnBlock() && !
Last->isCall()
6027 :
C.isAvailableAcrossAndOutOfSeq(ARM::LR,
TRI);
6028 if (LRIsAvailable) {
6032 CandidatesWithoutStackFixups.push_back(
C);
6037 else if (findRegisterToSaveLRTo(
C)) {
6041 CandidatesWithoutStackFixups.push_back(
C);
6046 else if (
C.isAvailableInsideSeq(ARM::SP,
TRI)) {
6049 CandidatesWithoutStackFixups.push_back(
C);
6055 NumBytesNoStackCalls += SequenceSize;
6061 if (NumBytesNoStackCalls <=
6062 RepeatedSequenceLocs.size() * Costs.
CallDefault) {
6063 RepeatedSequenceLocs = CandidatesWithoutStackFixups;
6065 if (RepeatedSequenceLocs.size() < 2)
6066 return std::nullopt;
6073 if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
6092 NumBytesToCreateFrame, FrameID);
6095bool ARMBaseInstrInfo::checkAndUpdateStackOffset(
MachineInstr *
MI,
6098 int SPIdx =
MI->findRegisterUseOperandIdx(ARM::SP,
nullptr);
6123 unsigned NumOps =
MI->getDesc().getNumOperands();
6124 unsigned ImmIdx = NumOps - 3;
6128 int64_t OffVal =
Offset.getImm();
6134 unsigned NumBits = 0;
6163 assert((
Fixup & 3) == 0 &&
"Can't encode this offset!");
6183 assert(((OffVal * Scale +
Fixup) & (Scale - 1)) == 0 &&
6184 "Can't encode this offset!");
6185 OffVal +=
Fixup / Scale;
6187 unsigned Mask = (1 << NumBits) - 1;
6189 if (OffVal <= Mask) {
6191 MI->getOperand(ImmIdx).setImm(OffVal);
6199 Function &
F, std::vector<outliner::Candidate> &Candidates)
const {
6203 const Function &CFn =
C.getMF()->getFunction();
6207 ARMGenInstrInfo::mergeOutliningCandidateAttributes(
F, Candidates);
6215 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
6234 unsigned &Flags)
const {
6238 "Suitable Machine Function for outlining must track liveness");
6246 bool R12AvailableInBlock = LRU.
available(ARM::R12);
6247 bool CPSRAvailableInBlock = LRU.
available(ARM::CPSR);
6251 if (R12AvailableInBlock && CPSRAvailableInBlock)
6252 Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead;
6259 if (R12AvailableInBlock && !LRU.
available(ARM::R12))
6261 if (CPSRAvailableInBlock && !LRU.
available(ARM::CPSR))
6267 Flags |= MachineOutlinerMBBFlags::HasCalls;
6271 bool LRIsAvailable =
6276 Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;
6284 unsigned Flags)
const {
6290 unsigned Opc =
MI.getOpcode();
6291 if (Opc == ARM::tPICADD || Opc == ARM::PICADD || Opc == ARM::PICSTR ||
6292 Opc == ARM::PICSTRB || Opc == ARM::PICSTRH || Opc == ARM::PICLDR ||
6293 Opc == ARM::PICLDRB || Opc == ARM::PICLDRH || Opc == ARM::PICLDRSB ||
6294 Opc == ARM::PICLDRSH || Opc == ARM::t2LDRpci_pic ||
6295 Opc == ARM::t2MOVi16_ga_pcrel || Opc == ARM::t2MOVTi16_ga_pcrel ||
6296 Opc == ARM::t2MOV_ga_pcrel)
6300 if (Opc == ARM::t2BF_LabelPseudo || Opc == ARM::t2DoLoopStart ||
6301 Opc == ARM::t2DoLoopStartTP || Opc == ARM::t2WhileLoopStart ||
6302 Opc == ARM::t2WhileLoopStartLR || Opc == ARM::t2WhileLoopStartTP ||
6303 Opc == ARM::t2LoopDec || Opc == ARM::t2LoopEnd ||
6304 Opc == ARM::t2LoopEndDec)
6313 if (
MI.isTerminator())
6319 if (
MI.readsRegister(ARM::LR,
TRI) ||
MI.readsRegister(ARM::PC,
TRI))
6327 if (MOP.isGlobal()) {
6328 Callee = dyn_cast<Function>(MOP.getGlobal());
6336 (Callee->getName() ==
"\01__gnu_mcount_nc" ||
6337 Callee->getName() ==
"\01mcount" || Callee->getName() ==
"__mcount"))
6345 if (Opc == ARM::BL || Opc == ARM::tBL || Opc == ARM::BLX ||
6346 Opc == ARM::BLX_noip || Opc == ARM::tBLXr || Opc == ARM::tBLXr_noip ||
6351 return UnknownCallOutlineType;
6359 return UnknownCallOutlineType;
6367 return UnknownCallOutlineType;
6375 if (
MI.modifiesRegister(ARM::LR,
TRI) ||
MI.modifiesRegister(ARM::PC,
TRI))
6379 if (
MI.modifiesRegister(ARM::SP,
TRI) ||
MI.readsRegister(ARM::SP,
TRI)) {
6392 bool MightNeedStackFixUp =
6393 (Flags & (MachineOutlinerMBBFlags::LRUnavailableSomewhere |
6394 MachineOutlinerMBBFlags::HasCalls));
6396 if (!MightNeedStackFixUp)
6402 if (
MI.modifiesRegister(ARM::SP,
TRI))
6416 if (
MI.readsRegister(ARM::ITSTATE,
TRI) ||
6417 MI.modifiesRegister(ARM::ITSTATE,
TRI))
6421 if (
MI.isCFIInstruction())
6452 unsigned Opc = Subtarget.isThumb() ? ARM::t2STR_PRE : ARM::STR_PRE_IMM;
6467 int64_t StackPosEntry =
6477 unsigned DwarfLR =
MRI->getDwarfRegNum(ARM::LR,
true);
6485 unsigned DwarfRAC =
MRI->getDwarfRegNum(ARM::RA_AUTH_CODE,
true);
6499 unsigned DwarfLR =
MRI->getDwarfRegNum(ARM::LR,
true);
6500 unsigned DwarfReg =
MRI->getDwarfRegNum(Reg,
true);
6511 bool CFI,
bool Auth)
const {
6527 unsigned Opc = Subtarget.isThumb() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
6531 if (!Subtarget.isThumb())
6542 unsigned DwarfLR =
MRI->getDwarfRegNum(ARM::LR,
true);
6543 int64_t StackPosEntry =
6550 int64_t LRPosEntry =
6557 unsigned DwarfRAC =
MRI->getDwarfRegNum(ARM::RA_AUTH_CODE,
true);
6570void ARMBaseInstrInfo::emitCFIForLRRestoreFromReg(
6574 unsigned DwarfLR =
MRI->getDwarfRegNum(ARM::LR,
true);
6576 int64_t LRPosEntry =
6590 bool isThumb = Subtarget.isThumb();
6591 unsigned FuncOp =
isThumb ? 2 : 0;
6592 unsigned Opc = Call->getOperand(FuncOp).isReg()
6593 ?
isThumb ? ARM::tTAILJMPr : ARM::TAILJMPr
6598 .
add(Call->getOperand(FuncOp));
6599 if (
isThumb && !Call->getOperand(FuncOp).isReg())
6601 Call->eraseFromParent();
6606 return MI.isCall() && !
MI.isReturn();
6614 Et = std::prev(
MBB.
end());
6626 ->shouldSignReturnAddress(
true);
6627 saveLROnStack(
MBB, It,
true, Auth);
6632 "Can only fix up stack references once");
6633 fixupPostOutline(
MBB);
6636 restoreLRFromStack(
MBB, Et,
true, Auth);
6656 fixupPostOutline(
MBB);
6665 bool isThumb = Subtarget.isThumb();
6671 ? Subtarget.
isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND
6682 Opc =
isThumb ? ARM::tBL : ARM::BL;
6698 Register Reg = findRegisterToSaveLRTo(
C);
6699 assert(Reg != 0 &&
"No callee-saved register available?");
6704 emitCFIForLRSaveToReg(
MBB, It, Reg);
6708 emitCFIForLRRestoreFromReg(
MBB, It);
6728bool ARMBaseInstrInfo::isReallyTriviallyReMaterializable(
6762 static int constexpr MAX_STAGES = 30;
6763 static int constexpr LAST_IS_USE = MAX_STAGES;
6764 static int constexpr SEEN_AS_LIVE = MAX_STAGES + 1;
6765 typedef std::bitset<MAX_STAGES + 2> IterNeed;
6766 typedef std::map<unsigned, IterNeed> IterNeeds;
6769 const IterNeeds &CIN);
6781 : EndLoop(EndLoop), LoopCount(LoopCount),
6783 TII(MF->getSubtarget().getInstrInfo()) {}
6785 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
6787 return MI == EndLoop ||
MI == LoopCount;
6791 if (tooMuchRegisterPressure(SSD, SMS))
6797 std::optional<bool> createTripCountGreaterCondition(
6808 }
else if (EndLoop->
getOpcode() == ARM::t2LoopEnd) {
6813 if (
I.getOpcode() == ARM::t2LoopDec)
6815 assert(LoopDec &&
"Unable to find copied LoopDec");
6821 .
addReg(ARM::NoRegister);
6831 void adjustTripCount(
int TripCountAdjust)
override {}
6833 void disposed()
override {}
6837 const IterNeeds &CIN) {
6839 for (
const auto &
N : CIN) {
6840 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6841 for (
int I = 0;
I < Cnt; ++
I)
6846 for (
const auto &
N : CIN) {
6847 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6848 for (
int I = 0;
I < Cnt; ++
I)
6856 IterNeeds CrossIterationNeeds;
6861 for (
auto &SU : SSD.
SUnits) {
6864 for (
auto &S : SU.Succs)
6867 if (
Reg.isVirtual())
6868 CrossIterationNeeds.insert(std::make_pair(
Reg.id(), IterNeed()))
6869 .first->second.set(0);
6870 }
else if (S.isAssignedRegDep()) {
6872 if (OStg >= 0 && OStg != Stg) {
6874 if (
Reg.isVirtual())
6875 CrossIterationNeeds.insert(std::make_pair(
Reg.id(), IterNeed()))
6876 .first->second |= ((1 << (OStg - Stg)) - 1);
6885 std::vector<SUnit *> ProposedSchedule;
6889 std::deque<SUnit *> Instrs =
6891 std::sort(Instrs.begin(), Instrs.end(),
6892 [](
SUnit *
A,
SUnit *
B) { return A->NodeNum > B->NodeNum; });
6893 for (
SUnit *SU : Instrs)
6894 ProposedSchedule.push_back(SU);
6900 for (
auto *SU : ProposedSchedule)
6904 if (!MO.isReg() || !MO.getReg())
6907 auto CIter = CrossIterationNeeds.find(
Reg.id());
6908 if (CIter == CrossIterationNeeds.end() || CIter->second[LAST_IS_USE] ||
6909 CIter->second[SEEN_AS_LIVE])
6911 if (MO.isDef() && !MO.isDead())
6912 CIter->second.set(SEEN_AS_LIVE);
6913 else if (MO.isUse())
6914 CIter->second.set(LAST_IS_USE);
6916 for (
auto &CI : CrossIterationNeeds)
6917 CI.second.reset(LAST_IS_USE);
6923 RPTracker.init(MF, &RegClassInfo,
nullptr, EndLoop->
getParent(),
6927 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6929 for (
auto *SU : ProposedSchedule) {
6931 RPTracker.setPos(std::next(CurInstI));
6937 if (!MO.isReg() || !MO.getReg())
6940 if (MO.isDef() && !MO.isDead()) {
6941 auto CIter = CrossIterationNeeds.find(
Reg.id());
6942 if (CIter != CrossIterationNeeds.end()) {
6943 CIter->second.reset(0);
6944 CIter->second.reset(SEEN_AS_LIVE);
6948 for (
auto &S : SU->Preds) {
6950 if (S.isAssignedRegDep()) {
6952 auto CIter = CrossIterationNeeds.find(
Reg.id());
6953 if (CIter != CrossIterationNeeds.end()) {
6955 assert(Stg2 <= Stg &&
"Data dependence upon earlier stage");
6956 if (Stg - Stg2 < MAX_STAGES)
6957 CIter->second.set(Stg - Stg2);
6958 CIter->second.set(SEEN_AS_LIVE);
6963 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6966 auto &
P = RPTracker.getPressure().MaxSetPressure;
6967 for (
unsigned I = 0, E =
P.size();
I < E; ++
I)
6968 if (
P[
I] >
TRI->getRegPressureSetLimit(*MF,
I)) {
6976std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
6980 if (Preheader == LoopBB)
6981 Preheader = *std::next(LoopBB->
pred_begin());
6983 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2Bcc) {
6989 for (
auto &L : LoopBB->
instrs()) {
6996 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, CCSetter);
7010 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2LoopEnd) {
7011 for (
auto &L : LoopBB->
instrs())
7016 Register LoopDecResult =
I->getOperand(0).getReg();
7019 if (!LoopDec || LoopDec->
getOpcode() != ARM::t2LoopDec)
7022 for (
auto &J : Preheader->
instrs())
7023 if (J.getOpcode() == ARM::t2DoLoopStart)
7027 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, LoopDec);
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineOutlinerClass
Constants defining how certain sequences should be outlined.
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
@ MachineOutlinerRegSave
Emit a call and tail-call.
@ MachineOutlinerNoLRSave
Only emit a branch.
@ MachineOutlinerThunk
Emit a call and return.
static bool isLoad(int Opcode)
static bool isThumb(const MCSubtargetInfo &STI)
static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI, MachineInstr &MI, unsigned DReg, unsigned Lane, unsigned &ImplicitSReg)
getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane, set ImplicitSReg to a register n...
static const MachineInstr * getBundledUseMI(const TargetRegisterInfo *TRI, const MachineInstr &MI, unsigned Reg, unsigned &UseIdx, unsigned &Dist)
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI)
Create a copy of a const pool value.
static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg, int CmpMask, bool CommonUse)
isSuitableForMask - Identify a suitable 'and' instruction that operates on the given source register ...
static cl::opt< bool > EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden, cl::desc("Enable ARM 2-addr to 3-addr conv"))
static int adjustDefLatency(const ARMSubtarget &Subtarget, const MachineInstr &DefMI, const MCInstrDesc &DefMCID, unsigned DefAlign)
Return the number of cycles to add to (or subtract from) the static itinerary based on the def opcode...
static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, const MachineInstr &MI)
static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[]
static bool isEligibleForITBlock(const MachineInstr *MI)
static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC)
getCmpToAddCondition - assume the flags are set by CMP(a,b), return the condition code if we modify t...
static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1)
static bool isLRAvailable(const TargetRegisterInfo &TRI, MachineBasicBlock::reverse_iterator I, MachineBasicBlock::reverse_iterator E)
static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI, unsigned SReg, unsigned &Lane)
static const ARM_MLxEntry ARM_MLxTable[]
static bool isRedundantFlagInstr(const MachineInstr *CmpI, Register SrcReg, Register SrcReg2, int64_t ImmValue, const MachineInstr *OI, bool &IsThumb1)
isRedundantFlagInstr - check whether the first instruction, whose only purpose is to update flags,...
static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc, unsigned NumRegs)
static const MachineInstr * getBundledDefMI(const TargetRegisterInfo *TRI, const MachineInstr *MI, unsigned Reg, unsigned &DefIdx, unsigned &Dist)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
This file contains the declarations for the subclasses of Constant, which represent the different fla...
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Looks at all the uses of the given value Returns the Liveness deduced from the uses of this value Adds all uses that cause the result to be MaybeLive to MaybeLiveRetUses If the result is Live
This file defines the DenseMap class.
const HexagonInstrInfo * TII
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
Module.h This file contains the declarations for the Module class.
uint64_t IntrinsicInst * II
PowerPC TLS Dynamic Call Fixup
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the SmallSet class.
This file defines the SmallVector class.
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static X86::CondCode getSwappedCondition(X86::CondCode CC)
Assuming the flags are set by MI(a,b), return the condition code if we modify the instructions such t...
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const override
static bool isCPSRDefined(const MachineInstr &MI)
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
optimizeCompareInstr - Convert the instruction to set the zero flag so that we can remove a "comparis...
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const override
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
foldImmediate - 'Reg' is known to be defined by a move immediate instruction, try to fold the immedia...
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const override
const MachineInstrBuilder & AddDReg(MachineInstrBuilder &MIB, unsigned Reg, unsigned SubIdx, unsigned State, const TargetRegisterInfo *TRI) const
unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const override
virtual unsigned getUnindexedOpcode(unsigned Opc) const =0
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
unsigned getPartialRegUpdateClearance(const MachineInstr &, unsigned, const TargetRegisterInfo *) const override
unsigned getNumLDMAddresses(const MachineInstr &MI) const
Get the number of addresses by LDM or VLDM or zero for unknown.
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI) const override
void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const override
virtual const ARMBaseRegisterInfo & getRegisterInfo() const =0
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Returns the size of the specified MachineInstr.
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
ARM supports the MachineOutliner.
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
Enable outlining by default at -Oz.
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is an instruction that moves/copies value from one register to an...
MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const override
ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *II, const ScheduleDAGMI *DAG) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
ARMBaseInstrInfo(const ARMSubtarget &STI)
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool isPredicated(const MachineInstr &MI) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
void expandLoadStackGuardBase(MachineBasicBlock::iterator MI, unsigned LoadImmOpc, unsigned LoadOpc) const
bool isPredicable(const MachineInstr &MI) const override
isPredicable - Return true if the specified instruction can be predicated.
Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const override
Specialization of TargetInstrInfo::describeLoadedValue, used to enhance debug entry value description...
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, unsigned NumInsts) const override
void copyToCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned SrcReg, bool KillSrc, const ARMSubtarget &Subtarget) const
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const override
areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to determine if two loads are lo...
std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const override
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const override
bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const override
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
std::optional< outliner::OutlinedFunction > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs) const override
bool expandPostRAPseudo(MachineInstr &MI) const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const override
bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const override
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const override
shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to determine (in conjunction w...
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const override
void copyFromCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned DestReg, bool KillSrc, const ARMSubtarget &Subtarget) const
std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const override
VFP/NEON execution domains.
bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const override
bool isFpMLxInstruction(unsigned Opcode) const
isFpMLxInstruction - Return true if the specified opcode is a fp MLA / MLS instruction.
bool isSwiftFastImmShift(const MachineInstr *MI) const
Returns true if the instruction has a shift by immediate that can be executed in one cycle less.
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2 if h...
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
void breakPartialRegDependency(MachineInstr &, unsigned, const TargetRegisterInfo *TRI) const override
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
const ARMSubtarget & getSubtarget() const
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Commutes the operands in the given instruction.
bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const override
bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const override
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
bool shouldSink(const MachineInstr &MI) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
ARMConstantPoolConstant - ARM-specific constant pool values for Constants, Functions,...
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
ARMConstantPoolMBB - ARM-specific constantpool value of a machine basic block.
ARMConstantPoolSymbol - ARM-specific constantpool values for external symbols.
ARMConstantPoolValue - ARM specific constantpool value.
bool isMachineBasicBlock() const
bool isGlobalValue() const
ARMCP::ARMCPModifier getModifier() const
bool mustAddCurrentAddress() const
virtual bool hasSameValue(ARMConstantPoolValue *ACPV)
hasSameValue - Return true if this ARM constpool value can share the same constantpool entry as anoth...
bool isBlockAddress() const
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
bool isThumb2Function() const
bool branchTargetEnforcement() const
unsigned createPICLabelUId()
bool isThumb1OnlyFunction() const
bool isThumbFunction() const
bool shouldSignReturnAddress() const
bool isTargetMachO() const
ARMLdStMultipleTiming getLdStMultipleTiming() const
const ARMBaseInstrInfo * getInstrInfo() const override
bool isThumb1Only() const
bool isReadTPSoft() const
bool isGVIndirectSymbol(const GlobalValue *GV) const
True if the GV will be accessed via an indirect symbol.
unsigned getMispredictionPenalty() const
const ARMBaseRegisterInfo * getRegisterInfo() const override
unsigned getReturnOpcode() const
Returns the correct return opcode for the current feature set.
Align getStackAlignment() const
getStackAlignment - Returns the minimum alignment known to hold of the stack frame on entry to the fu...
bool enableMachinePipeliner() const override
Returns true if machine pipeliner should be enabled.
bool isTargetCOFF() const
unsigned getPartialUpdateClearance() const
@ DoubleIssueCheckUnalignedAccess
Can load/store 2 registers/cycle, but needs an extra cycle if the access is not 64-bit aligned.
@ SingleIssue
Can load/store 1 register/cycle.
@ DoubleIssue
Can load/store 2 registers/cycle.
@ SingleIssuePlusExtras
Can load/store 1 register/cycle, but needs an extra cycle for address computation and potentially als...
int getPreISelOperandLatencyAdjustment() const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool test(unsigned Idx) const
size_type size() const
size - Returns the number of bits in this bitvector.
uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
This class represents an Operation in the Expression.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
A possibly irreducible generalization of a Loop.
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Reverses the branch condition of the specified condition list, returning false on success and true if...
Itinerary data supplied by a subtarget to be used by a target.
int getNumMicroOps(unsigned ItinClassIndx) const
Return the number of micro-ops that the given class decodes to.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
std::optional< unsigned > getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Return true if there is a pipeline forwarding between instructions of itinerary classes DefClass and ...
bool isEmpty() const
Returns true if there are no itineraries.
A set of register units used to track register liveness.
bool available(MCPhysReg Reg) const
Returns true if no part of physical register Reg is live.
void addLiveOuts(const MachineBasicBlock &MBB)
Adds registers living out of block MBB.
void accumulate(const MachineInstr &MI)
Adds all register units used, defined or clobbered in MI.
void addVirtualRegisterDead(Register IncomingReg, MachineInstr &MI, bool AddIfNotFound=false)
addVirtualRegisterDead - Add information about the fact that the specified register is dead after bei...
void addVirtualRegisterKilled(Register IncomingReg, MachineInstr &MI, bool AddIfNotFound=false)
addVirtualRegisterKilled - Add information about the fact that the specified register is killed after...
VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
This class is intended to be used as a base class for asm properties and features specific to the tar...
static MCCFIInstruction createUndefined(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_undefined From now on the previous value of Register can't be restored anymore.
static MCCFIInstruction createRestore(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_restore says that the rule for Register is now the same as it was at the beginning of the functi...
static MCCFIInstruction createRegister(MCSymbol *L, unsigned Register1, unsigned Register2, SMLoc Loc={})
.cfi_register Previous value of Register1 is saved in register Register2.
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
static MCCFIInstruction cfiDefCfaOffset(MCSymbol *L, int64_t Offset, SMLoc Loc={})
.cfi_def_cfa_offset modifies a rule for computing CFA.
Describe properties that are true of each instruction in the target description file.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayLoad() const
Return true if this instruction could possibly read memory.
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
bool hasImplicitDefOfPhysReg(unsigned Reg, const MCRegisterInfo *MRI=nullptr) const
Return true if this instruction implicitly defines the specified physical register.
bool isCall() const
Return true if the instruction is a call.
unsigned getSize() const
Return the number of bytes in the encoding of this instruction, or zero if the encoding size cannot b...
unsigned getOpcode() const
Return the opcode number for this descriptor.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
bool isValid() const
isValid - Returns true until all the operands have been visited.
unsigned pred_size() const
instr_iterator instr_begin()
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
bool isLiveIn(MCPhysReg Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
bool isReturnBlock() const
Convenience function that returns true if the block ends in a return instruction.
Instructions::iterator instr_iterator
pred_iterator pred_begin()
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
instr_iterator instr_end()
Instructions::const_iterator const_instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< succ_iterator > successors()
reverse_iterator rbegin()
LivenessQueryResult
Possible outcome of a register liveness query to computeRegisterLiveness()
@ LQR_Dead
Register is known to be fully dead.
@ LQR_Live
Register is known to be (at least partially) live.
@ LQR_Unknown
Register liveness not decidable from local neighborhood.
This class is a data container for one entry in a MachineConstantPool.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
union llvm::MachineConstantPoolEntry::@204 Val
The constant itself.
MachineConstantPoolValue * MachineCPVal
const Constant * ConstVal
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
const std::vector< MachineConstantPoolEntry > & getConstants() const
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getNumObjects() const
Return the number of objects.
unsigned addFrameInst(const MCCFIInstruction &Inst)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
bool isCall(QueryType Type=AnyInBundle) const
unsigned getNumOperands() const
Retuns the total number of operands.
int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool isRegSequence() const
bool isInsertSubreg() const
void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
bool hasOptionalDef(QueryType Type=IgnoreBundle) const
Set if this instruction has an optional definition, e.g.
void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
const MachineOperand & getOperand(unsigned i) const
void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineFunction * getMachineFunction(const Function &F) const
Returns the MachineFunction associated to IR function F if there is one, otherwise nullptr.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
const GlobalValue * getGlobal() const
void setImplicit(bool Val=true)
void setImm(int64_t immVal)
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
void setIsDead(bool Val=true)
void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
defusechain_iterator - This class provides iterator support for machine operands in the function that...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
bool tracksLiveness() const
tracksLiveness - Returns true when tracking register liveness accurately.
const TargetRegisterInfo * getTargetRegisterInfo() const
A Module instance is used to store all the information related to an LLVM module.
void AddHazardRecognizer(std::unique_ptr< ScheduleHazardRecognizer > &&)
Track the current register pressure at some position in the instruction stream, and remember the high...
void increaseRegPressure(Register RegUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
void decreaseRegPressure(Register RegUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
void runOnMachineFunction(const MachineFunction &MF)
runOnFunction - Prepare to answer questions about MF.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
@ Anti
A register anti-dependence (aka WAR).
This class represents the scheduled code.
unsigned getMaxStageCount()
Return the maximum stage count needed for this schedule.
int stageScheduled(SUnit *SU) const
Return the stage for a scheduled instruction.
int getInitiationInterval() const
Return the initiation interval for this schedule.
std::deque< SUnit * > & getInstructions(int cycle)
Return the instructions that are scheduled at the specified cycle.
int getFirstCycle() const
Return the first cycle in the completed schedule.
int getFinalCycle() const
Return the last cycle in the finalized schedule.
Scheduling unit. This is a node in the scheduling DAG.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
std::vector< SUnit > SUnits
The scheduling units.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
This class builds the dependence graph for the instructions in a loop, and attempts to schedule the i...
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const
Compute operand latency based on the available machine model.
const InstrItineraryData * getInstrItineraries() const
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static CondCodes getOppositeCondition(CondCodes CC)
@ MO_OPTION_MASK
MO_OPTION_MASK - Most flags are mutually exclusive; this mask selects just that part of the flag set.
@ MO_NONLAZY
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which,...
@ MO_DLLIMPORT
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
@ MO_GOT
MO_GOT - On a symbol operand, this represents a GOT relative relocation.
@ MO_COFFSTUB
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the "....
AddrMode
ARM Addressing Modes.
unsigned char getAM3Offset(unsigned AM3Opc)
unsigned char getAM5FP16Offset(unsigned AM5Opc)
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
ShiftOpc getAM2ShiftOpc(unsigned AM2Opc)
unsigned getAM2Offset(unsigned AM2Opc)
unsigned getSOImmValRotate(unsigned Imm)
getSOImmValRotate - Try to handle Imm with an immediate shifter operand, computing the rotate amount ...
bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting an 8-bit im...
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
ShiftOpc getSORegShOp(unsigned Op)
AddrOpc getAM5Op(unsigned AM5Opc)
bool isSOImmTwoPartValNeg(unsigned V)
isSOImmTwoPartValNeg - Return true if the specified value can be obtained by two SOImmVal,...
unsigned getSOImmTwoPartSecond(unsigned V)
getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal, return the second chunk of ...
bool isSOImmTwoPartVal(unsigned V)
isSOImmTwoPartVal - Return true if the specified value can be obtained by or'ing together two SOImmVa...
AddrOpc getAM5FP16Op(unsigned AM5Opc)
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
unsigned getT2SOImmTwoPartSecond(unsigned Imm)
unsigned getT2SOImmTwoPartFirst(unsigned Imm)
bool isT2SOImmTwoPartVal(unsigned Imm)
unsigned char getAM5Offset(unsigned AM5Opc)
unsigned getSOImmTwoPartFirst(unsigned V)
getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal, return the first chunk of it...
AddrOpc getAM2Op(unsigned AM2Opc)
AddrOpc getAM3Op(unsigned AM3Opc)
@ C
The default llvm calling convention, compatible with C.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
Reg
All possible values of the reg field in the ModR/M byte.
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
static bool isIndirectCall(const MachineInstr &MI)
MachineInstr * findCMPToFoldIntoCBZ(MachineInstr *Br, const TargetRegisterInfo *TRI)
Search backwards from a tBcc to find a tCMPi8 against 0, meaning we can convert them to a tCBZ or tCB...
static bool isCondBranchOpcode(int Opc)
bool HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns true if Val1 has a lower Constant Materialization Cost than Val2.
static bool isPushOpcode(int Opc)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond)
static bool isVCTP(const MachineInstr *MI)
bool IsCPSRDead< MachineInstr >(const MachineInstr *MI)
unsigned getBLXpredOpcode(const MachineFunction &MF)
static bool isIndirectBranchOpcode(int Opc)
bool isLegalAddressImm(unsigned Opcode, int Imm, const TargetInstrInfo *TII)
bool registerDefinedBetween(unsigned Reg, MachineBasicBlock::iterator From, MachineBasicBlock::iterator To, const TargetRegisterInfo *TRI)
Return true if Reg is defined between From and To.
MaybeAlign getAlign(const Function &F, unsigned Index)
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
static bool isSEHInstruction(const MachineInstr &MI)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget, MachineFunction &MF, MachineInstr *MI, unsigned NumBytes)
Tries to add registers to the reglist of a given base-updating push/pop instruction to adjust the sta...
static bool isARMLowRegister(unsigned Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
auto reverse(ContainerTy &&C)
static bool isJumpTableBranchOpcode(int Opc)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static bool isPopOpcode(int Opc)
void addPredicatedMveVpredROp(MachineInstrBuilder &MIB, unsigned Cond, unsigned Inactive)
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
unsigned getUndefRegState(bool B)
void addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB, Register DestReg)
unsigned ConstantMaterializationCost(unsigned Val, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns the number of instructions required to materialize the given constant in a register,...
unsigned getKillRegState(bool B)
bool rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, Register FrameReg, int &Offset, const ARMBaseInstrInfo &TII)
rewriteARMFrameIndex / rewriteT2FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
static bool isIndirectControlFlowNotComingBack(const MachineInstr &MI)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
ARMCC::CondCodes getInstrPredicate(const MachineInstr &MI, Register &PredReg)
getInstrPredicate - If instruction is predicated, returns its predicate condition,...
unsigned getMatchingCondBranchOpcode(unsigned Opc)
static bool isCalleeSavedRegister(unsigned Reg, const MCPhysReg *CSRegs)
static bool isUncondBranchOpcode(int Opc)
auto partition(R &&Range, UnaryPredicate P)
Provide wrappers to std::partition which take ranges instead of having to pass begin/end explicitly.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
unsigned gettBLXrOpcode(const MachineFunction &MF)
static bool isSpeculationBarrierEndBBOpcode(int Opc)
unsigned getBLXOpcode(const MachineFunction &MF)
void addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB)
bool isV8EligibleForIT(const InstrType *Instr)
void emitARMRegPlusImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, const DebugLoc &dl, Register DestReg, Register BaseReg, int NumBytes, ARMCC::CondCodes Pred, Register PredReg, const ARMBaseInstrInfo &TII, unsigned MIFlags=0)
emitARMRegPlusImmediate / emitT2RegPlusImmediate - Emits a series of instructions to materialize a des...
unsigned convertAddSubFlagsOpcode(unsigned OldOpc)
Map pseudo instructions that imply an 'S' bit onto real opcodes.
ARM_MLxEntry - Record information about MLA / MLS instructions.
Map pseudo instructions that imply an 'S' bit onto real opcodes.
OutlinerCosts(const ARMSubtarget &target)
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Description of the encoding of one expression Op.
static constexpr LaneBitmask getAll()
static constexpr LaneBitmask getNone()
VarInfo - This represents the regions where a virtual register is live in the program.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Used to describe a register and immediate addition.
RegisterPressure computed within a region of instructions delimited by TopPos and BottomPos.
A pair composed of a pair of a register and a sub-register index, and another sub-register index.
A pair composed of a register and a sub-register index.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.
unsigned FrameConstructionID
Target-defined identifier for constructing a frame for this function.
std::vector< Candidate > Candidates