73#define DEBUG_TYPE "arm-instrinfo"
75#define GET_INSTRINFO_CTOR_DTOR
76#include "ARMGenInstrInfo.inc"
80 cl::desc(
"Enable ARM 2-addr to 3-addr conv"));
94 { ARM::VMLAS, ARM::VMULS, ARM::VADDS,
false,
false },
95 { ARM::VMLSS, ARM::VMULS, ARM::VSUBS,
false,
false },
96 { ARM::VMLAD, ARM::VMULD, ARM::VADDD,
false,
false },
97 { ARM::VMLSD, ARM::VMULD, ARM::VSUBD,
false,
false },
98 { ARM::VNMLAS, ARM::VNMULS, ARM::VSUBS,
true,
false },
99 { ARM::VNMLSS, ARM::VMULS, ARM::VSUBS,
true,
false },
100 { ARM::VNMLAD, ARM::VNMULD, ARM::VSUBD,
true,
false },
101 { ARM::VNMLSD, ARM::VMULD, ARM::VSUBD,
true,
false },
104 { ARM::VMLAfd, ARM::VMULfd, ARM::VADDfd,
false,
false },
105 { ARM::VMLSfd, ARM::VMULfd, ARM::VSUBfd,
false,
false },
106 { ARM::VMLAfq, ARM::VMULfq, ARM::VADDfq,
false,
false },
107 { ARM::VMLSfq, ARM::VMULfq, ARM::VSUBfq,
false,
false },
108 { ARM::VMLAslfd, ARM::VMULslfd, ARM::VADDfd,
false,
true },
109 { ARM::VMLSslfd, ARM::VMULslfd, ARM::VSUBfd,
false,
true },
110 { ARM::VMLAslfq, ARM::VMULslfq, ARM::VADDfq,
false,
true },
111 { ARM::VMLSslfq, ARM::VMULslfq, ARM::VSUBfq,
false,
true },
117 for (
unsigned i = 0, e = std::size(
ARM_MLxTable); i != e; ++i) {
130 if (usePreRAHazardRecognizer()) {
132 static_cast<const ARMSubtarget *
>(STI)->getInstrItineraryData();
152 std::make_unique<ARMBankConflictHazardRecognizer>(DAG, 0x4,
true));
189 default:
return nullptr;
215 unsigned OffImm =
MI.getOperand(NumOps - 2).getImm();
228 get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
233 }
else if (Amt != 0) {
237 get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
246 get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
259 get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
266 get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
275 std::vector<MachineInstr*> NewMIs;
279 BuildMI(MF,
MI.getDebugLoc(),
get(MemOpc),
MI.getOperand(0).getReg())
290 NewMIs.push_back(MemMI);
291 NewMIs.push_back(UpdateMI);
295 BuildMI(MF,
MI.getDebugLoc(),
get(MemOpc),
MI.getOperand(0).getReg())
308 NewMIs.push_back(UpdateMI);
309 NewMIs.push_back(MemMI);
315 if (MO.isReg() && MO.getReg().isVirtual()) {
320 MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
324 if (MO.isUse() && MO.isKill()) {
325 for (
unsigned j = 0; j < 2; ++j) {
331 if (VI.removeKill(
MI))
332 VI.Kills.push_back(NewMI);
358 bool AllowModify)
const {
373 bool CantAnalyze =
false;
377 while (
I->isDebugInstr() || !
I->isTerminator() ||
379 I->getOpcode() == ARM::t2DoLoopStartTP){
391 TBB =
I->getOperand(0).getMBB();
397 assert(!FBB &&
"FBB should have been null.");
399 TBB =
I->getOperand(0).getMBB();
400 Cond.push_back(
I->getOperand(1));
401 Cond.push_back(
I->getOperand(2));
402 }
else if (
I->isReturn()) {
405 }
else if (
I->getOpcode() == ARM::t2LoopEnd &&
412 TBB =
I->getOperand(1).getMBB();
414 Cond.push_back(
I->getOperand(0));
471 int *BytesRemoved)
const {
472 assert(!BytesRemoved &&
"code size not handled");
483 I->eraseFromParent();
493 I->eraseFromParent();
502 int *BytesAdded)
const {
503 assert(!BytesAdded &&
"code size not handled");
512 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
514 "ARM branch conditions have two or three components!");
524 }
else if (
Cond.size() == 2) {
535 if (
Cond.size() == 2)
540 else if (
Cond.size() == 3)
551 if (
Cond.size() == 2) {
563 while (++
I != E &&
I->isInsideBundle()) {
564 int PIdx =
I->findFirstPredOperandIdx();
565 if (PIdx != -1 &&
I->getOperand(PIdx).getImm() !=
ARMCC::AL)
571 int PIdx =
MI.findFirstPredOperandIdx();
572 return PIdx != -1 &&
MI.getOperand(PIdx).getImm() !=
ARMCC::AL;
580 std::string GenericComment =
582 if (!GenericComment.empty())
583 return GenericComment;
587 return std::string();
591 int FirstPredOp =
MI.findFirstPredOperandIdx();
592 if (FirstPredOp != (
int) OpIdx)
593 return std::string();
595 std::string
CC =
"CC::";
602 unsigned Opc =
MI.getOpcode();
611 int PIdx =
MI.findFirstPredOperandIdx();
614 PMO.
setImm(Pred[0].getImm());
615 MI.getOperand(PIdx+1).setReg(Pred[1].
getReg());
622 "CPSR def isn't expected operand");
623 assert((
MI.getOperand(1).isDead() ||
624 MI.getOperand(1).getReg() != ARM::CPSR) &&
625 "if conversion tried to stop defining used CPSR");
626 MI.getOperand(1).setReg(ARM::NoRegister);
636 if (Pred1.
size() > 2 || Pred2.
size() > 2)
661 std::vector<MachineOperand> &Pred,
662 bool SkipDead)
const {
665 bool ClobbersCPSR = MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR);
666 bool IsCPSR = MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR;
667 if (ClobbersCPSR || IsCPSR) {
685 for (
const auto &MO :
MI.operands())
686 if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
692 switch (
MI->getOpcode()) {
693 default:
return true;
724 if (!
MI.isPredicable())
762 if (!MO.isReg() || MO.isUndef() || MO.isUse())
764 if (MO.getReg() != ARM::CPSR)
784 switch (
MI.getOpcode()) {
792 case TargetOpcode::BUNDLE:
793 return getInstBundleLength(
MI);
794 case ARM::CONSTPOOL_ENTRY:
795 case ARM::JUMPTABLE_INSTS:
796 case ARM::JUMPTABLE_ADDRS:
797 case ARM::JUMPTABLE_TBB:
798 case ARM::JUMPTABLE_TBH:
801 return MI.getOperand(2).getImm();
803 return MI.getOperand(1).getImm();
805 case ARM::INLINEASM_BR: {
807 unsigned Size = getInlineAsmLength(
MI.getOperand(0).getSymbolName(), *MAI);
815unsigned ARMBaseInstrInfo::getInstBundleLength(
const MachineInstr &
MI)
const {
819 while (++
I != E &&
I->isInsideBundle()) {
820 assert(!
I->isBundle() &&
"No nested bundle!");
828 unsigned DestReg,
bool KillSrc,
830 unsigned Opc = Subtarget.isThumb()
831 ? (Subtarget.
isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
848 unsigned SrcReg,
bool KillSrc,
850 unsigned Opc = Subtarget.isThumb()
851 ? (Subtarget.
isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
885 unsigned Cond,
unsigned Inactive) {
895 bool RenamableSrc)
const {
896 bool GPRDest = ARM::GPRRegClass.contains(DestReg);
897 bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
899 if (GPRDest && GPRSrc) {
907 bool SPRDest = ARM::SPRRegClass.contains(DestReg);
908 bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);
911 if (SPRDest && SPRSrc)
913 else if (GPRDest && SPRSrc)
915 else if (SPRDest && GPRSrc)
917 else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) && Subtarget.hasFP64())
919 else if (ARM::QPRRegClass.
contains(DestReg, SrcReg))
920 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MQPRCopy;
925 if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR)
927 if (Opc == ARM::MVE_VORR)
929 else if (Opc != ARM::MQPRCopy)
935 unsigned BeginIdx = 0;
936 unsigned SubRegs = 0;
940 if (ARM::QQPRRegClass.
contains(DestReg, SrcReg)) {
941 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
942 BeginIdx = ARM::qsub_0;
944 }
else if (ARM::QQQQPRRegClass.
contains(DestReg, SrcReg)) {
945 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
946 BeginIdx = ARM::qsub_0;
949 }
else if (ARM::DPairRegClass.
contains(DestReg, SrcReg)) {
951 BeginIdx = ARM::dsub_0;
953 }
else if (ARM::DTripleRegClass.
contains(DestReg, SrcReg)) {
955 BeginIdx = ARM::dsub_0;
957 }
else if (ARM::DQuadRegClass.
contains(DestReg, SrcReg)) {
959 BeginIdx = ARM::dsub_0;
961 }
else if (ARM::GPRPairRegClass.
contains(DestReg, SrcReg)) {
962 Opc = Subtarget.
isThumb2() ? ARM::tMOVr : ARM::MOVr;
963 BeginIdx = ARM::gsub_0;
965 }
else if (ARM::DPairSpcRegClass.
contains(DestReg, SrcReg)) {
967 BeginIdx = ARM::dsub_0;
970 }
else if (ARM::DTripleSpcRegClass.
contains(DestReg, SrcReg)) {
972 BeginIdx = ARM::dsub_0;
975 }
else if (ARM::DQuadSpcRegClass.
contains(DestReg, SrcReg)) {
977 BeginIdx = ARM::dsub_0;
980 }
else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) &&
981 !Subtarget.hasFP64()) {
983 BeginIdx = ARM::ssub_0;
985 }
else if (SrcReg == ARM::CPSR) {
988 }
else if (DestReg == ARM::CPSR) {
991 }
else if (DestReg == ARM::VPR) {
997 }
else if (SrcReg == ARM::VPR) {
1003 }
else if (DestReg == ARM::FPSCR_NZCV) {
1005 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
1009 }
else if (SrcReg == ARM::FPSCR_NZCV) {
1011 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
1017 assert(Opc &&
"Impossible reg-to-reg copy");
1023 if (
TRI->regsOverlap(SrcReg,
TRI->getSubReg(DestReg, BeginIdx))) {
1024 BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
1030 for (
unsigned i = 0; i != SubRegs; ++i) {
1031 Register Dst =
TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
1032 Register Src =
TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
1033 assert(Dst && Src &&
"Bad sub-register");
1035 assert(!DstRegs.
count(Src) &&
"destructive vector copy");
1040 if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR) {
1044 if (Opc == ARM::MVE_VORR)
1049 if (Opc == ARM::MOVr)
1058std::optional<DestSourcePair>
1067 if (!
MI.isMoveReg() ||
1068 (
MI.getOpcode() == ARM::VORRq &&
1069 MI.getOperand(1).getReg() !=
MI.getOperand(2).getReg()))
1070 return std::nullopt;
1074std::optional<ParamLoadedValue>
1078 Register DstReg = DstSrcPair->Destination->getReg();
1099 return std::nullopt;
1106 unsigned SubIdx,
unsigned State,
1109 return MIB.
addReg(Reg, State);
1112 return MIB.
addReg(
TRI->getSubReg(Reg, SubIdx), State);
1113 return MIB.
addReg(Reg, State, SubIdx);
1118 Register SrcReg,
bool isKill,
int FI,
1130 switch (
TRI->getSpillSize(*RC)) {
1132 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1143 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1150 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1157 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1164 }
else if (ARM::cl_FPSCR_NZCVRegClass.hasSubClassEq(RC)) {
1175 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1182 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1183 if (Subtarget.hasV5TEOps()) {
1203 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1219 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1220 Subtarget.hasMVEIntegerOps()) {
1225 .addMemOperand(MMO);
1231 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1234 Subtarget.hasNEON()) {
1248 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0,
TRI);
1255 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1256 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1257 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1259 Subtarget.hasNEON()) {
1268 }
else if (Subtarget.hasMVEIntegerOps()) {
1280 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0,
TRI);
1281 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, 0,
TRI);
1288 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1289 Subtarget.hasMVEIntegerOps()) {
1294 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1300 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0,
TRI);
1301 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, 0,
TRI);
1302 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_3, 0,
TRI);
1303 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_4, 0,
TRI);
1304 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_5, 0,
TRI);
1305 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_6, 0,
TRI);
1316 int &FrameIndex)
const {
1317 switch (
MI.getOpcode()) {
1321 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1322 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1323 MI.getOperand(3).getImm() == 0) {
1324 FrameIndex =
MI.getOperand(1).getIndex();
1325 return MI.getOperand(0).getReg();
1334 case ARM::VSTR_P0_off:
1335 case ARM::VSTR_FPSCR_NZCVQC_off:
1336 case ARM::MVE_VSTRWU32:
1337 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1338 MI.getOperand(2).getImm() == 0) {
1339 FrameIndex =
MI.getOperand(1).getIndex();
1340 return MI.getOperand(0).getReg();
1344 case ARM::VST1d64TPseudo:
1345 case ARM::VST1d64QPseudo:
1346 if (
MI.getOperand(0).isFI() &&
MI.getOperand(2).getSubReg() == 0) {
1347 FrameIndex =
MI.getOperand(0).getIndex();
1348 return MI.getOperand(2).getReg();
1352 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1353 FrameIndex =
MI.getOperand(1).getIndex();
1354 return MI.getOperand(0).getReg();
1357 case ARM::MQQPRStore:
1358 case ARM::MQQQQPRStore:
1359 if (
MI.getOperand(1).isFI()) {
1360 FrameIndex =
MI.getOperand(1).getIndex();
1361 return MI.getOperand(0).getReg();
1370 int &FrameIndex)
const {
1372 if (
MI.mayStore() && hasStoreToStackSlot(
MI, Accesses) &&
1373 Accesses.
size() == 1) {
1375 cast<FixedStackPseudoSourceValue>(Accesses.
front()->getPseudoValue())
1397 switch (
TRI->getSpillSize(*RC)) {
1399 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1409 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1415 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1421 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1427 }
else if (ARM::cl_FPSCR_NZCVRegClass.hasSubClassEq(RC)) {
1437 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1443 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1446 if (Subtarget.hasV5TEOps()) {
1469 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1482 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1483 Subtarget.hasMVEIntegerOps()) {
1485 MIB.addFrameIndex(FI)
1487 .addMemOperand(MMO);
1493 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1495 Subtarget.hasNEON()) {
1516 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1517 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1518 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1520 Subtarget.hasNEON()) {
1526 }
else if (Subtarget.hasMVEIntegerOps()) {
1546 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1547 Subtarget.hasMVEIntegerOps()) {
1551 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1575 int &FrameIndex)
const {
1576 switch (
MI.getOpcode()) {
1580 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1581 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1582 MI.getOperand(3).getImm() == 0) {
1583 FrameIndex =
MI.getOperand(1).getIndex();
1584 return MI.getOperand(0).getReg();
1593 case ARM::VLDR_P0_off:
1594 case ARM::VLDR_FPSCR_NZCVQC_off:
1595 case ARM::MVE_VLDRWU32:
1596 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1597 MI.getOperand(2).getImm() == 0) {
1598 FrameIndex =
MI.getOperand(1).getIndex();
1599 return MI.getOperand(0).getReg();
1603 case ARM::VLD1d8TPseudo:
1604 case ARM::VLD1d16TPseudo:
1605 case ARM::VLD1d32TPseudo:
1606 case ARM::VLD1d64TPseudo:
1607 case ARM::VLD1d8QPseudo:
1608 case ARM::VLD1d16QPseudo:
1609 case ARM::VLD1d32QPseudo:
1610 case ARM::VLD1d64QPseudo:
1611 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1612 FrameIndex =
MI.getOperand(1).getIndex();
1613 return MI.getOperand(0).getReg();
1617 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1618 FrameIndex =
MI.getOperand(1).getIndex();
1619 return MI.getOperand(0).getReg();
1622 case ARM::MQQPRLoad:
1623 case ARM::MQQQQPRLoad:
1624 if (
MI.getOperand(1).isFI()) {
1625 FrameIndex =
MI.getOperand(1).getIndex();
1626 return MI.getOperand(0).getReg();
1635 int &FrameIndex)
const {
1637 if (
MI.mayLoad() && hasLoadFromStackSlot(
MI, Accesses) &&
1638 Accesses.
size() == 1) {
1640 cast<FixedStackPseudoSourceValue>(Accesses.
front()->getPseudoValue())
1651 bool isThumb2 = Subtarget.
isThumb2();
1658 if (isThumb1 || !
MI->getOperand(1).isDead()) {
1660 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA_UPD
1661 : isThumb1 ? ARM::tLDMIA_UPD
1665 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
1668 if (isThumb1 || !
MI->getOperand(0).isDead()) {
1670 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA_UPD
1671 : isThumb1 ? ARM::tSTMIA_UPD
1675 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
1690 [&
TRI](
const unsigned &Reg1,
const unsigned &Reg2) ->
bool {
1691 return TRI.getEncodingValue(Reg1) <
1692 TRI.getEncodingValue(Reg2);
1695 for (
const auto &Reg : ScratchRegs) {
1704 if (
MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
1705 expandLoadStackGuard(
MI);
1706 MI.getParent()->erase(
MI);
1710 if (
MI.getOpcode() == ARM::MEMCPY) {
1719 if (!
MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())
1724 Register DstRegS =
MI.getOperand(0).getReg();
1725 Register SrcRegS =
MI.getOperand(1).getReg();
1726 if (!ARM::SPRRegClass.
contains(DstRegS, SrcRegS))
1730 unsigned DstRegD =
TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0,
1732 unsigned SrcRegD =
TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0,
1734 if (!DstRegD || !SrcRegD)
1740 if (!
MI.definesRegister(DstRegD,
TRI) ||
MI.readsRegister(DstRegD,
TRI))
1744 if (
MI.getOperand(0).isDead())
1753 int ImpDefIdx =
MI.findRegisterDefOperandIdx(DstRegD,
nullptr);
1754 if (ImpDefIdx != -1)
1755 MI.removeOperand(ImpDefIdx);
1758 MI.setDesc(
get(ARM::VMOVD));
1759 MI.getOperand(0).setReg(DstRegD);
1760 MI.getOperand(1).setReg(SrcRegD);
1767 MI.getOperand(1).setIsUndef();
1772 if (
MI.getOperand(1).isKill()) {
1773 MI.getOperand(1).setIsKill(
false);
1774 MI.addRegisterKilled(SrcRegS,
TRI,
true);
1788 assert(MCPE.isMachineConstantPoolEntry() &&
1789 "Expecting a machine constantpool entry!");
1803 cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId,
ARMCP::CPValue,
1808 cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
1811 Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
1819 cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
1839 case ARM::tLDRpci_pic:
1840 case ARM::t2LDRpci_pic: {
1860 switch (
I->getOpcode()) {
1861 case ARM::tLDRpci_pic:
1862 case ARM::t2LDRpci_pic: {
1864 unsigned CPI =
I->getOperand(1).getIndex();
1866 I->getOperand(1).setIndex(CPI);
1867 I->getOperand(2).setImm(PCLabelId);
1871 if (!
I->isBundledWithSucc())
1882 if (Opcode == ARM::t2LDRpci || Opcode == ARM::t2LDRpci_pic ||
1883 Opcode == ARM::tLDRpci || Opcode == ARM::tLDRpci_pic ||
1884 Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1885 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1886 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1887 Opcode == ARM::t2MOV_ga_pcrel) {
1898 if (Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1899 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1900 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1901 Opcode == ARM::t2MOV_ga_pcrel)
1913 if (isARMCP0 && isARMCP1) {
1919 }
else if (!isARMCP0 && !isARMCP1) {
1923 }
else if (Opcode == ARM::PICLDR) {
1931 if (Addr0 != Addr1) {
1967 int64_t &Offset2)
const {
1974 auto IsLoadOpcode = [&](
unsigned Opcode) {
1989 case ARM::t2LDRSHi8:
1991 case ARM::t2LDRBi12:
1992 case ARM::t2LDRSHi12:
2011 if (isa<ConstantSDNode>(Load1->
getOperand(1)) &&
2013 Offset1 = cast<ConstantSDNode>(Load1->
getOperand(1))->getSExtValue();
2014 Offset2 = cast<ConstantSDNode>(Load2->
getOperand(1))->getSExtValue();
2033 int64_t Offset1, int64_t Offset2,
2034 unsigned NumLoads)
const {
2038 assert(Offset2 > Offset1);
2040 if ((Offset2 - Offset1) / 8 > 64)
2071 if (
MI.isDebugInstr())
2075 if (
MI.isTerminator() ||
MI.isPosition())
2079 if (
MI.getOpcode() == TargetOpcode::INLINEASM_BR)
2093 while (++
I !=
MBB->
end() &&
I->isDebugInstr())
2095 if (
I !=
MBB->
end() &&
I->getOpcode() == ARM::t2IT)
2106 if (!
MI.isCall() &&
MI.definesRegister(ARM::SP,
nullptr))
2114 unsigned NumCycles,
unsigned ExtraPredCycles,
2124 if (!Pred->
empty()) {
2126 if (LastMI->
getOpcode() == ARM::t2Bcc) {
2135 MBB, 0, 0, Probability);
2140 unsigned TCycles,
unsigned TExtra,
2142 unsigned FCycles,
unsigned FExtra,
2159 const unsigned ScalingUpFactor = 1024;
2161 unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
2162 unsigned UnpredCost;
2163 if (!Subtarget.hasBranchPredictor()) {
2166 unsigned NotTakenBranchCost = 1;
2168 unsigned TUnpredCycles, FUnpredCycles;
2171 TUnpredCycles = TCycles + NotTakenBranchCost;
2172 FUnpredCycles = TakenBranchCost;
2175 TUnpredCycles = TCycles + TakenBranchCost;
2176 FUnpredCycles = FCycles + NotTakenBranchCost;
2179 PredCost -= 1 * ScalingUpFactor;
2182 unsigned TUnpredCost = Probability.
scale(TUnpredCycles * ScalingUpFactor);
2183 unsigned FUnpredCost = Probability.
getCompl().
scale(FUnpredCycles * ScalingUpFactor);
2184 UnpredCost = TUnpredCost + FUnpredCost;
2187 if (Subtarget.
isThumb2() && TCycles + FCycles > 4) {
2188 PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
2191 unsigned TUnpredCost = Probability.
scale(TCycles * ScalingUpFactor);
2192 unsigned FUnpredCost =
2194 UnpredCost = TUnpredCost + FUnpredCost;
2195 UnpredCost += 1 * ScalingUpFactor;
2199 return PredCost <= UnpredCost;
2204 unsigned NumInsts)
const {
2212 unsigned MaxInsts = Subtarget.
restrictIT() ? 1 : 4;
2221 if (
MI.getOpcode() == ARM::t2Bcc &&
2244 return Subtarget.isProfitableToUnpredicate();
2252 int PIdx =
MI.findFirstPredOperandIdx();
2258 PredReg =
MI.getOperand(PIdx+1).getReg();
2267 if (Opc == ARM::t2B)
2276 unsigned OpIdx2)
const {
2277 switch (
MI.getOpcode()) {
2279 case ARM::t2MOVCCr: {
2304 if (!Reg.isVirtual())
2306 if (!
MRI.hasOneNonDBGUse(Reg))
2318 if (MO.isFI() || MO.isCPI() || MO.isJTI())
2325 if (MO.getReg().isPhysical())
2327 if (MO.isDef() && !MO.isDead())
2330 bool DontMoveAcrossStores =
true;
2331 if (!
MI->isSafeToMove(DontMoveAcrossStores))
2338 unsigned &TrueOp,
unsigned &FalseOp,
2339 bool &Optimizable)
const {
2340 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2341 "Unknown select instruction");
2350 Cond.push_back(
MI.getOperand(3));
2351 Cond.push_back(
MI.getOperand(4));
2360 bool PreferFalse)
const {
2361 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2362 "Unknown select instruction");
2365 bool Invert = !
DefMI;
2367 DefMI = canFoldIntoMOVCC(
MI.getOperand(1).getReg(),
MRI,
this);
2374 Register DestReg =
MI.getOperand(0).getReg();
2377 if (!
MRI.constrainRegClass(DestReg, FalseClass))
2379 if (!
MRI.constrainRegClass(DestReg, TrueClass))
2390 i != e && !DefDesc.
operands()[i].isPredicate(); ++i)
2393 unsigned CondCode =
MI.getOperand(3).getImm();
2398 NewMI.
add(
MI.getOperand(4));
2409 NewMI.
add(FalseReg);
2440 {ARM::ADDSri, ARM::ADDri},
2441 {ARM::ADDSrr, ARM::ADDrr},
2442 {ARM::ADDSrsi, ARM::ADDrsi},
2443 {ARM::ADDSrsr, ARM::ADDrsr},
2445 {ARM::SUBSri, ARM::SUBri},
2446 {ARM::SUBSrr, ARM::SUBrr},
2447 {ARM::SUBSrsi, ARM::SUBrsi},
2448 {ARM::SUBSrsr, ARM::SUBrsr},
2450 {ARM::RSBSri, ARM::RSBri},
2451 {ARM::RSBSrsi, ARM::RSBrsi},
2452 {ARM::RSBSrsr, ARM::RSBrsr},
2454 {ARM::tADDSi3, ARM::tADDi3},
2455 {ARM::tADDSi8, ARM::tADDi8},
2456 {ARM::tADDSrr, ARM::tADDrr},
2457 {ARM::tADCS, ARM::tADC},
2459 {ARM::tSUBSi3, ARM::tSUBi3},
2460 {ARM::tSUBSi8, ARM::tSUBi8},
2461 {ARM::tSUBSrr, ARM::tSUBrr},
2462 {ARM::tSBCS, ARM::tSBC},
2463 {ARM::tRSBS, ARM::tRSB},
2464 {ARM::tLSLSri, ARM::tLSLri},
2466 {ARM::t2ADDSri, ARM::t2ADDri},
2467 {ARM::t2ADDSrr, ARM::t2ADDrr},
2468 {ARM::t2ADDSrs, ARM::t2ADDrs},
2470 {ARM::t2SUBSri, ARM::t2SUBri},
2471 {ARM::t2SUBSrr, ARM::t2SUBrr},
2472 {ARM::t2SUBSrs, ARM::t2SUBrs},
2474 {ARM::t2RSBSri, ARM::t2RSBri},
2475 {ARM::t2RSBSrs, ARM::t2RSBrs},
2480 if (OldOpc == Entry.PseudoOpc)
2481 return Entry.MachineOpc;
2492 if (NumBytes == 0 && DestReg != BaseReg) {
2501 bool isSub = NumBytes < 0;
2502 if (isSub) NumBytes = -NumBytes;
2506 unsigned ThisVal = NumBytes & llvm::rotr<uint32_t>(0xFF, RotAmt);
2507 assert(ThisVal &&
"Didn't extract field correctly");
2510 NumBytes &= ~ThisVal;
2515 unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
2528 unsigned NumBytes) {
2539 if (!IsPush && !IsPop)
2542 bool IsVFPPushPop =
MI->getOpcode() == ARM::VSTMDDB_UPD ||
2543 MI->getOpcode() == ARM::VLDMDIA_UPD;
2544 bool IsT1PushPop =
MI->getOpcode() == ARM::tPUSH ||
2545 MI->getOpcode() == ARM::tPOP ||
2546 MI->getOpcode() == ARM::tPOP_RET;
2548 assert((IsT1PushPop || (
MI->getOperand(0).getReg() == ARM::SP &&
2549 MI->getOperand(1).getReg() == ARM::SP)) &&
2550 "trying to fold sp update into non-sp-updating push/pop");
2555 if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
2560 int RegListIdx = IsT1PushPop ? 2 : 4;
2563 unsigned RegsNeeded;
2566 RegsNeeded = NumBytes / 8;
2567 RegClass = &ARM::DPRRegClass;
2569 RegsNeeded = NumBytes / 4;
2570 RegClass = &ARM::GPRRegClass;
2580 unsigned FirstRegEnc = -1;
2583 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i) {
2588 TRI->getEncodingValue(MO.
getReg()) < FirstRegEnc)
2589 FirstRegEnc =
TRI->getEncodingValue(MO.
getReg());
2592 const MCPhysReg *CSRegs =
TRI->getCalleeSavedRegs(&MF);
2595 for (
int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
2597 unsigned CurReg = RegClass->
getRegister(CurRegEnc);
2598 if (IsT1PushPop && CurRegEnc >
TRI->getEncodingValue(ARM::R7))
2605 false,
false,
true));
2615 MI->getParent()->computeRegisterLiveness(
TRI, CurReg,
MI) !=
2637 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i)
2638 MI->removeOperand(i);
2651 unsigned Opcode =
MI.getOpcode();
2657 if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
2660 if (Opcode == ARM::ADDri) {
2661 Offset +=
MI.getOperand(FrameRegIdx+1).getImm();
2664 MI.setDesc(
TII.get(ARM::MOVr));
2665 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2666 MI.removeOperand(FrameRegIdx+1);
2672 MI.setDesc(
TII.get(ARM::SUBri));
2678 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2679 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(
Offset);
2687 unsigned ThisImmVal =
Offset & llvm::rotr<uint32_t>(0xFF, RotAmt);
2694 "Bit extraction didn't work?");
2695 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
2697 unsigned ImmIdx = 0;
2699 unsigned NumBits = 0;
2703 ImmIdx = FrameRegIdx + 1;
2704 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2708 ImmIdx = FrameRegIdx+2;
2715 ImmIdx = FrameRegIdx+2;
2726 ImmIdx = FrameRegIdx+1;
2734 ImmIdx = FrameRegIdx+1;
2744 ImmIdx = FrameRegIdx+1;
2745 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2754 Offset += InstrOffs * Scale;
2755 assert((
Offset & (Scale-1)) == 0 &&
"Can't encode this offset!");
2765 int ImmedOffset =
Offset / Scale;
2766 unsigned Mask = (1 << NumBits) - 1;
2767 if ((
unsigned)
Offset <= Mask * Scale) {
2769 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2775 ImmedOffset = -ImmedOffset;
2777 ImmedOffset |= 1 << NumBits;
2785 ImmedOffset = ImmedOffset & Mask;
2788 ImmedOffset = -ImmedOffset;
2790 ImmedOffset |= 1 << NumBits;
2806 Register &SrcReg2, int64_t &CmpMask,
2807 int64_t &CmpValue)
const {
2808 switch (
MI.getOpcode()) {
2813 SrcReg =
MI.getOperand(0).getReg();
2816 CmpValue =
MI.getOperand(1).getImm();
2821 SrcReg =
MI.getOperand(0).getReg();
2822 SrcReg2 =
MI.getOperand(1).getReg();
2828 SrcReg =
MI.getOperand(0).getReg();
2830 CmpMask =
MI.getOperand(1).getImm();
2843 int CmpMask,
bool CommonUse) {
2844 switch (
MI->getOpcode()) {
2847 if (CmpMask !=
MI->getOperand(2).getImm())
2849 if (SrcReg ==
MI->getOperand(CommonUse ? 1 : 0).getReg())
2939 switch (
MI->getOpcode()) {
2940 default:
return false;
3036 if (!
MI)
return false;
3039 if (CmpMask != ~0) {
3043 UI =
MRI->use_instr_begin(SrcReg), UE =
MRI->use_instr_end();
3045 if (UI->getParent() != CmpInstr.
getParent())
3054 if (!
MI)
return false;
3063 if (
I ==
B)
return false;
3074 else if (
MI->getParent() != CmpInstr.
getParent() || CmpValue != 0) {
3079 if (CmpInstr.
getOpcode() == ARM::CMPri ||
3087 bool IsThumb1 =
false;
3104 if (
MI && IsThumb1) {
3106 if (
I != E && !
MI->readsRegister(ARM::CPSR,
TRI)) {
3107 bool CanReorder =
true;
3108 for (;
I != E; --
I) {
3109 if (
I->getOpcode() != ARM::tMOVi8) {
3115 MI =
MI->removeFromParent();
3126 bool SubAddIsThumb1 =
false;
3141 if (Instr.modifiesRegister(ARM::CPSR,
TRI) ||
3142 Instr.readsRegister(ARM::CPSR,
TRI))
3164 IsThumb1 = SubAddIsThumb1;
3179 bool isSafe =
false;
3182 while (!isSafe && ++
I != E) {
3184 for (
unsigned IO = 0, EO = Instr.getNumOperands();
3185 !isSafe && IO != EO; ++IO) {
3199 bool IsInstrVSel =
true;
3200 switch (Instr.getOpcode()) {
3202 IsInstrVSel =
false;
3236 bool IsSub = Opc == ARM::SUBrr || Opc == ARM::t2SUBrr ||
3237 Opc == ARM::SUBri || Opc == ARM::t2SUBri ||
3238 Opc == ARM::tSUBrr || Opc == ARM::tSUBi3 ||
3240 unsigned OpI = Opc != ARM::tSUBrr ? 1 : 2;
3252 std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
3286 if (Succ->isLiveIn(ARM::CPSR))
3293 unsigned CPSRRegNum =
MI->getNumExplicitOperands() - 1;
3294 MI->getOperand(CPSRRegNum).setReg(ARM::CPSR);
3295 MI->getOperand(CPSRRegNum).setIsDef(
true);
3303 for (
unsigned i = 0, e = OperandsToUpdate.
size(); i < e; i++)
3304 OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second);
3306 MI->clearRegisterDeads(ARM::CPSR);
3320 int64_t CmpMask, CmpValue;
3322 if (Next !=
MI.getParent()->end() &&
3333 unsigned DefOpc =
DefMI.getOpcode();
3334 if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm &&
3335 DefOpc != ARM::tMOVi32imm)
3337 if (!
DefMI.getOperand(1).isImm())
3341 if (!
MRI->hasOneNonDBGUse(Reg))
3357 if (
UseMI.getOperand(NumOps - 1).
getReg() == ARM::CPSR)
3363 unsigned UseOpc =
UseMI.getOpcode();
3364 unsigned NewUseOpc = 0;
3366 uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
3367 bool Commute =
false;
3369 default:
return false;
3377 case ARM::t2EORrr: {
3383 if (UseOpc == ARM::SUBrr && Commute)
3389 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri;
3392 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::SUBri : ARM::ADDri;
3406 case ARM::ORRrr: NewUseOpc = ARM::ORRri;
break;
3407 case ARM::EORrr: NewUseOpc = ARM::EORri;
break;
3411 case ARM::t2SUBrr: {
3412 if (UseOpc == ARM::t2SUBrr && Commute)
3417 const bool ToSP =
DefMI.getOperand(0).
getReg() == ARM::SP;
3418 const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
3419 const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
3421 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB;
3424 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD;
3439 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri;
break;
3440 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri;
break;
3447 unsigned OpIdx = Commute ? 2 : 1;
3449 bool isKill =
UseMI.getOperand(OpIdx).isKill();
3451 Register NewReg =
MRI->createVirtualRegister(TRC);
3459 UseMI.getOperand(1).setReg(NewReg);
3460 UseMI.getOperand(1).setIsKill();
3461 UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
3462 DefMI.eraseFromParent();
3469 case ARM::t2ADDspImm:
3470 case ARM::t2SUBspImm:
3480 switch (
MI.getOpcode()) {
3484 assert(UOps >= 0 &&
"bad # UOps");
3492 unsigned ShOpVal =
MI.getOperand(3).getImm();
3497 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3505 if (!
MI.getOperand(2).getReg())
3508 unsigned ShOpVal =
MI.getOperand(3).getImm();
3513 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3523 case ARM::LDRSB_POST:
3524 case ARM::LDRSH_POST: {
3527 return (Rt == Rm) ? 4 : 3;
3530 case ARM::LDR_PRE_REG:
3531 case ARM::LDRB_PRE_REG: {
3536 unsigned ShOpVal =
MI.getOperand(4).getImm();
3541 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3547 case ARM::STR_PRE_REG:
3548 case ARM::STRB_PRE_REG: {
3549 unsigned ShOpVal =
MI.getOperand(4).getImm();
3554 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3561 case ARM::STRH_PRE: {
3571 case ARM::LDR_POST_REG:
3572 case ARM::LDRB_POST_REG:
3573 case ARM::LDRH_POST: {
3576 return (Rt == Rm) ? 3 : 2;
3579 case ARM::LDR_PRE_IMM:
3580 case ARM::LDRB_PRE_IMM:
3581 case ARM::LDR_POST_IMM:
3582 case ARM::LDRB_POST_IMM:
3583 case ARM::STRB_POST_IMM:
3584 case ARM::STRB_POST_REG:
3585 case ARM::STRB_PRE_IMM:
3586 case ARM::STRH_POST:
3587 case ARM::STR_POST_IMM:
3588 case ARM::STR_POST_REG:
3589 case ARM::STR_PRE_IMM:
3592 case ARM::LDRSB_PRE:
3593 case ARM::LDRSH_PRE: {
3600 unsigned ShOpVal =
MI.getOperand(4).getImm();
3605 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3618 return (Rt == Rn) ? 3 : 2;
3629 case ARM::LDRD_POST:
3630 case ARM::t2LDRD_POST:
3633 case ARM::STRD_POST:
3634 case ARM::t2STRD_POST:
3637 case ARM::LDRD_PRE: {
3644 return (Rt == Rn) ? 4 : 3;
3647 case ARM::t2LDRD_PRE: {
3650 return (Rt == Rn) ? 4 : 3;
3653 case ARM::STRD_PRE: {
3661 case ARM::t2STRD_PRE:
3664 case ARM::t2LDR_POST:
3665 case ARM::t2LDRB_POST:
3666 case ARM::t2LDRB_PRE:
3667 case ARM::t2LDRSBi12:
3668 case ARM::t2LDRSBi8:
3669 case ARM::t2LDRSBpci:
3671 case ARM::t2LDRH_POST:
3672 case ARM::t2LDRH_PRE:
3674 case ARM::t2LDRSB_POST:
3675 case ARM::t2LDRSB_PRE:
3676 case ARM::t2LDRSH_POST:
3677 case ARM::t2LDRSH_PRE:
3678 case ARM::t2LDRSHi12:
3679 case ARM::t2LDRSHi8:
3680 case ARM::t2LDRSHpci:
3684 case ARM::t2LDRDi8: {
3687 return (Rt == Rn) ? 3 : 2;
3690 case ARM::t2STRB_POST:
3691 case ARM::t2STRB_PRE:
3694 case ARM::t2STRH_POST:
3695 case ARM::t2STRH_PRE:
3697 case ARM::t2STR_POST:
3698 case ARM::t2STR_PRE:
3729 E =
MI.memoperands_end();
3731 Size += (*I)->getSize().getValue();
3738 return std::min(
Size / 4, 16U);
3743 unsigned UOps = 1 + NumRegs;
3747 case ARM::VLDMDIA_UPD:
3748 case ARM::VLDMDDB_UPD:
3749 case ARM::VLDMSIA_UPD:
3750 case ARM::VLDMSDB_UPD:
3751 case ARM::VSTMDIA_UPD:
3752 case ARM::VSTMDDB_UPD:
3753 case ARM::VSTMSIA_UPD:
3754 case ARM::VSTMSDB_UPD:
3755 case ARM::LDMIA_UPD:
3756 case ARM::LDMDA_UPD:
3757 case ARM::LDMDB_UPD:
3758 case ARM::LDMIB_UPD:
3759 case ARM::STMIA_UPD:
3760 case ARM::STMDA_UPD:
3761 case ARM::STMDB_UPD:
3762 case ARM::STMIB_UPD:
3763 case ARM::tLDMIA_UPD:
3764 case ARM::tSTMIA_UPD:
3765 case ARM::t2LDMIA_UPD:
3766 case ARM::t2LDMDB_UPD:
3767 case ARM::t2STMIA_UPD:
3768 case ARM::t2STMDB_UPD:
3771 case ARM::LDMIA_RET:
3773 case ARM::t2LDMIA_RET:
3782 if (!ItinData || ItinData->
isEmpty())
3786 unsigned Class =
Desc.getSchedClass();
3788 if (ItinUOps >= 0) {
3795 unsigned Opc =
MI.getOpcode();
3814 case ARM::VLDMDIA_UPD:
3815 case ARM::VLDMDDB_UPD:
3817 case ARM::VLDMSIA_UPD:
3818 case ARM::VLDMSDB_UPD:
3820 case ARM::VSTMDIA_UPD:
3821 case ARM::VSTMDDB_UPD:
3823 case ARM::VSTMSIA_UPD:
3824 case ARM::VSTMSDB_UPD: {
3825 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands();
3826 return (NumRegs / 2) + (NumRegs % 2) + 1;
3829 case ARM::LDMIA_RET:
3834 case ARM::LDMIA_UPD:
3835 case ARM::LDMDA_UPD:
3836 case ARM::LDMDB_UPD:
3837 case ARM::LDMIB_UPD:
3842 case ARM::STMIA_UPD:
3843 case ARM::STMDA_UPD:
3844 case ARM::STMDB_UPD:
3845 case ARM::STMIB_UPD:
3847 case ARM::tLDMIA_UPD:
3848 case ARM::tSTMIA_UPD:
3852 case ARM::t2LDMIA_RET:
3855 case ARM::t2LDMIA_UPD:
3856 case ARM::t2LDMDB_UPD:
3859 case ARM::t2STMIA_UPD:
3860 case ARM::t2STMDB_UPD: {
3861 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands() + 1;
3873 unsigned UOps = (NumRegs / 2);
3879 unsigned UOps = (NumRegs / 2);
3882 if ((NumRegs % 2) || !
MI.hasOneMemOperand() ||
3893std::optional<unsigned>
3896 unsigned DefIdx,
unsigned DefAlign)
const {
3905 DefCycle = RegNo / 2 + 1;
3910 bool isSLoad =
false;
3915 case ARM::VLDMSIA_UPD:
3916 case ARM::VLDMSDB_UPD:
3923 if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3927 DefCycle = RegNo + 2;
3933std::optional<unsigned>
3936 unsigned DefIdx,
unsigned DefAlign)
const {
3946 DefCycle = RegNo / 2;
3952 DefCycle = (RegNo / 2);
3955 if ((RegNo % 2) || DefAlign < 8)
3961 DefCycle = RegNo + 2;
3967std::optional<unsigned>
3970 unsigned UseIdx,
unsigned UseAlign)
const {
3978 UseCycle = RegNo / 2 + 1;
3983 bool isSStore =
false;
3988 case ARM::VSTMSIA_UPD:
3989 case ARM::VSTMSDB_UPD:
3996 if ((isSStore && (RegNo % 2)) || UseAlign < 8)
4000 UseCycle = RegNo + 2;
4006std::optional<unsigned>
4009 unsigned UseIdx,
unsigned UseAlign)
const {
4016 UseCycle = RegNo / 2;
4022 UseCycle = (RegNo / 2);
4025 if ((RegNo % 2) || UseAlign < 8)
4036 unsigned DefIdx,
unsigned DefAlign,
const MCInstrDesc &UseMCID,
4037 unsigned UseIdx,
unsigned UseAlign)
const {
4047 std::optional<unsigned> DefCycle;
4048 bool LdmBypass =
false;
4055 case ARM::VLDMDIA_UPD:
4056 case ARM::VLDMDDB_UPD:
4058 case ARM::VLDMSIA_UPD:
4059 case ARM::VLDMSDB_UPD:
4060 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
4063 case ARM::LDMIA_RET:
4068 case ARM::LDMIA_UPD:
4069 case ARM::LDMDA_UPD:
4070 case ARM::LDMDB_UPD:
4071 case ARM::LDMIB_UPD:
4073 case ARM::tLDMIA_UPD:
4075 case ARM::t2LDMIA_RET:
4078 case ARM::t2LDMIA_UPD:
4079 case ARM::t2LDMDB_UPD:
4081 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
4089 std::optional<unsigned> UseCycle;
4096 case ARM::VSTMDIA_UPD:
4097 case ARM::VSTMDDB_UPD:
4099 case ARM::VSTMSIA_UPD:
4100 case ARM::VSTMSDB_UPD:
4101 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
4108 case ARM::STMIA_UPD:
4109 case ARM::STMDA_UPD:
4110 case ARM::STMDB_UPD:
4111 case ARM::STMIB_UPD:
4112 case ARM::tSTMIA_UPD:
4117 case ARM::t2STMIA_UPD:
4118 case ARM::t2STMDB_UPD:
4119 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
4127 if (UseCycle > *DefCycle + 1)
4128 return std::nullopt;
4130 UseCycle = *DefCycle - *UseCycle + 1;
4131 if (UseCycle > 0u) {
4137 UseCycle = *UseCycle - 1;
4139 UseClass, UseIdx)) {
4140 UseCycle = *UseCycle - 1;
4149 unsigned &DefIdx,
unsigned &Dist) {
4154 assert(
II->isInsideBundle() &&
"Empty bundle?");
4157 while (
II->isInsideBundle()) {
4158 Idx =
II->findRegisterDefOperandIdx(Reg,
TRI,
false,
true);
4165 assert(
Idx != -1 &&
"Cannot find bundled definition!");
4172 unsigned &UseIdx,
unsigned &Dist) {
4176 assert(
II->isInsideBundle() &&
"Empty bundle?");
4181 while (
II != E &&
II->isInsideBundle()) {
4182 Idx =
II->findRegisterUseOperandIdx(Reg,
TRI,
false);
4185 if (
II->getOpcode() != ARM::t2IT)
4213 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4223 case ARM::t2LDRSHs: {
4225 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4226 if (ShAmt == 0 || ShAmt == 2)
4231 }
else if (Subtarget.
isSwift()) {
4238 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4243 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4254 case ARM::t2LDRSHs: {
4256 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4257 if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
4264 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) {
4271 case ARM::VLD1q8wb_fixed:
4272 case ARM::VLD1q16wb_fixed:
4273 case ARM::VLD1q32wb_fixed:
4274 case ARM::VLD1q64wb_fixed:
4275 case ARM::VLD1q8wb_register:
4276 case ARM::VLD1q16wb_register:
4277 case ARM::VLD1q32wb_register:
4278 case ARM::VLD1q64wb_register:
4285 case ARM::VLD2d8wb_fixed:
4286 case ARM::VLD2d16wb_fixed:
4287 case ARM::VLD2d32wb_fixed:
4288 case ARM::VLD2q8wb_fixed:
4289 case ARM::VLD2q16wb_fixed:
4290 case ARM::VLD2q32wb_fixed:
4291 case ARM::VLD2d8wb_register:
4292 case ARM::VLD2d16wb_register:
4293 case ARM::VLD2d32wb_register:
4294 case ARM::VLD2q8wb_register:
4295 case ARM::VLD2q16wb_register:
4296 case ARM::VLD2q32wb_register:
4301 case ARM::VLD3d8_UPD:
4302 case ARM::VLD3d16_UPD:
4303 case ARM::VLD3d32_UPD:
4304 case ARM::VLD1d64Twb_fixed:
4305 case ARM::VLD1d64Twb_register:
4306 case ARM::VLD3q8_UPD:
4307 case ARM::VLD3q16_UPD:
4308 case ARM::VLD3q32_UPD:
4313 case ARM::VLD4d8_UPD:
4314 case ARM::VLD4d16_UPD:
4315 case ARM::VLD4d32_UPD:
4316 case ARM::VLD1d64Qwb_fixed:
4317 case ARM::VLD1d64Qwb_register:
4318 case ARM::VLD4q8_UPD:
4319 case ARM::VLD4q16_UPD:
4320 case ARM::VLD4q32_UPD:
4321 case ARM::VLD1DUPq8:
4322 case ARM::VLD1DUPq16:
4323 case ARM::VLD1DUPq32:
4324 case ARM::VLD1DUPq8wb_fixed:
4325 case ARM::VLD1DUPq16wb_fixed:
4326 case ARM::VLD1DUPq32wb_fixed:
4327 case ARM::VLD1DUPq8wb_register:
4328 case ARM::VLD1DUPq16wb_register:
4329 case ARM::VLD1DUPq32wb_register:
4330 case ARM::VLD2DUPd8:
4331 case ARM::VLD2DUPd16:
4332 case ARM::VLD2DUPd32:
4333 case ARM::VLD2DUPd8wb_fixed:
4334 case ARM::VLD2DUPd16wb_fixed:
4335 case ARM::VLD2DUPd32wb_fixed:
4336 case ARM::VLD2DUPd8wb_register:
4337 case ARM::VLD2DUPd16wb_register:
4338 case ARM::VLD2DUPd32wb_register:
4339 case ARM::VLD4DUPd8:
4340 case ARM::VLD4DUPd16:
4341 case ARM::VLD4DUPd32:
4342 case ARM::VLD4DUPd8_UPD:
4343 case ARM::VLD4DUPd16_UPD:
4344 case ARM::VLD4DUPd32_UPD:
4346 case ARM::VLD1LNd16:
4347 case ARM::VLD1LNd32:
4348 case ARM::VLD1LNd8_UPD:
4349 case ARM::VLD1LNd16_UPD:
4350 case ARM::VLD1LNd32_UPD:
4352 case ARM::VLD2LNd16:
4353 case ARM::VLD2LNd32:
4354 case ARM::VLD2LNq16:
4355 case ARM::VLD2LNq32:
4356 case ARM::VLD2LNd8_UPD:
4357 case ARM::VLD2LNd16_UPD:
4358 case ARM::VLD2LNd32_UPD:
4359 case ARM::VLD2LNq16_UPD:
4360 case ARM::VLD2LNq32_UPD:
4362 case ARM::VLD4LNd16:
4363 case ARM::VLD4LNd32:
4364 case ARM::VLD4LNq16:
4365 case ARM::VLD4LNq32:
4366 case ARM::VLD4LNd8_UPD:
4367 case ARM::VLD4LNd16_UPD:
4368 case ARM::VLD4LNd32_UPD:
4369 case ARM::VLD4LNq16_UPD:
4370 case ARM::VLD4LNq32_UPD:
4384 if (!ItinData || ItinData->
isEmpty())
4385 return std::nullopt;
4391 unsigned DefAdj = 0;
4392 if (
DefMI.isBundle())
4401 unsigned UseAdj = 0;
4402 if (
UseMI.isBundle()) {
4406 return std::nullopt;
4409 return getOperandLatencyImpl(
4410 ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->
getDesc(), DefAdj, DefMO,
4411 Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->
getDesc(), UseAdj);
4414std::optional<unsigned> ARMBaseInstrInfo::getOperandLatencyImpl(
4416 unsigned DefIdx,
const MCInstrDesc &DefMCID,
unsigned DefAdj,
4418 unsigned UseIdx,
const MCInstrDesc &UseMCID,
unsigned UseAdj)
const {
4419 if (Reg == ARM::CPSR) {
4420 if (
DefMI.getOpcode() == ARM::FMSTAT) {
4422 return Subtarget.
isLikeA9() ? 1 : 20;
4426 if (
UseMI.isBranch())
4446 return std::nullopt;
4448 unsigned DefAlign =
DefMI.hasOneMemOperand()
4451 unsigned UseAlign =
UseMI.hasOneMemOperand()
4457 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4460 return std::nullopt;
4463 int Adj = DefAdj + UseAdj;
4467 if (Adj >= 0 || (
int)*
Latency > -Adj) {
4474std::optional<unsigned>
4476 SDNode *DefNode,
unsigned DefIdx,
4477 SDNode *UseNode,
unsigned UseIdx)
const {
4483 if (isZeroCost(DefMCID.
Opcode))
4486 if (!ItinData || ItinData->
isEmpty())
4487 return DefMCID.
mayLoad() ? 3 : 1;
4490 std::optional<unsigned>
Latency =
4493 int Threshold = 1 + Adj;
4498 auto *DefMN = cast<MachineSDNode>(DefNode);
4499 unsigned DefAlign = !DefMN->memoperands_empty()
4500 ? (*DefMN->memoperands_begin())->
getAlign().value()
4502 auto *UseMN = cast<MachineSDNode>(UseNode);
4503 unsigned UseAlign = !UseMN->memoperands_empty()
4504 ? (*UseMN->memoperands_begin())->
getAlign().value()
4507 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4509 return std::nullopt;
4530 case ARM::t2LDRSHs: {
4533 if (ShAmt == 0 || ShAmt == 2)
4548 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4565 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment())
4572 case ARM::VLD1q8wb_register:
4573 case ARM::VLD1q16wb_register:
4574 case ARM::VLD1q32wb_register:
4575 case ARM::VLD1q64wb_register:
4576 case ARM::VLD1q8wb_fixed:
4577 case ARM::VLD1q16wb_fixed:
4578 case ARM::VLD1q32wb_fixed:
4579 case ARM::VLD1q64wb_fixed:
4583 case ARM::VLD2q8Pseudo:
4584 case ARM::VLD2q16Pseudo:
4585 case ARM::VLD2q32Pseudo:
4586 case ARM::VLD2d8wb_fixed:
4587 case ARM::VLD2d16wb_fixed:
4588 case ARM::VLD2d32wb_fixed:
4589 case ARM::VLD2q8PseudoWB_fixed:
4590 case ARM::VLD2q16PseudoWB_fixed:
4591 case ARM::VLD2q32PseudoWB_fixed:
4592 case ARM::VLD2d8wb_register:
4593 case ARM::VLD2d16wb_register:
4594 case ARM::VLD2d32wb_register:
4595 case ARM::VLD2q8PseudoWB_register:
4596 case ARM::VLD2q16PseudoWB_register:
4597 case ARM::VLD2q32PseudoWB_register:
4598 case ARM::VLD3d8Pseudo:
4599 case ARM::VLD3d16Pseudo:
4600 case ARM::VLD3d32Pseudo:
4601 case ARM::VLD1d8TPseudo:
4602 case ARM::VLD1d16TPseudo:
4603 case ARM::VLD1d32TPseudo:
4604 case ARM::VLD1d64TPseudo:
4605 case ARM::VLD1d64TPseudoWB_fixed:
4606 case ARM::VLD1d64TPseudoWB_register:
4607 case ARM::VLD3d8Pseudo_UPD:
4608 case ARM::VLD3d16Pseudo_UPD:
4609 case ARM::VLD3d32Pseudo_UPD:
4610 case ARM::VLD3q8Pseudo_UPD:
4611 case ARM::VLD3q16Pseudo_UPD:
4612 case ARM::VLD3q32Pseudo_UPD:
4613 case ARM::VLD3q8oddPseudo:
4614 case ARM::VLD3q16oddPseudo:
4615 case ARM::VLD3q32oddPseudo:
4616 case ARM::VLD3q8oddPseudo_UPD:
4617 case ARM::VLD3q16oddPseudo_UPD:
4618 case ARM::VLD3q32oddPseudo_UPD:
4619 case ARM::VLD4d8Pseudo:
4620 case ARM::VLD4d16Pseudo:
4621 case ARM::VLD4d32Pseudo:
4622 case ARM::VLD1d8QPseudo:
4623 case ARM::VLD1d16QPseudo:
4624 case ARM::VLD1d32QPseudo:
4625 case ARM::VLD1d64QPseudo:
4626 case ARM::VLD1d64QPseudoWB_fixed:
4627 case ARM::VLD1d64QPseudoWB_register:
4628 case ARM::VLD1q8HighQPseudo:
4629 case ARM::VLD1q8LowQPseudo_UPD:
4630 case ARM::VLD1q8HighTPseudo:
4631 case ARM::VLD1q8LowTPseudo_UPD:
4632 case ARM::VLD1q16HighQPseudo:
4633 case ARM::VLD1q16LowQPseudo_UPD:
4634 case ARM::VLD1q16HighTPseudo:
4635 case ARM::VLD1q16LowTPseudo_UPD:
4636 case ARM::VLD1q32HighQPseudo:
4637 case ARM::VLD1q32LowQPseudo_UPD:
4638 case ARM::VLD1q32HighTPseudo:
4639 case ARM::VLD1q32LowTPseudo_UPD:
4640 case ARM::VLD1q64HighQPseudo:
4641 case ARM::VLD1q64LowQPseudo_UPD:
4642 case ARM::VLD1q64HighTPseudo:
4643 case ARM::VLD1q64LowTPseudo_UPD:
4644 case ARM::VLD4d8Pseudo_UPD:
4645 case ARM::VLD4d16Pseudo_UPD:
4646 case ARM::VLD4d32Pseudo_UPD:
4647 case ARM::VLD4q8Pseudo_UPD:
4648 case ARM::VLD4q16Pseudo_UPD:
4649 case ARM::VLD4q32Pseudo_UPD:
4650 case ARM::VLD4q8oddPseudo:
4651 case ARM::VLD4q16oddPseudo:
4652 case ARM::VLD4q32oddPseudo:
4653 case ARM::VLD4q8oddPseudo_UPD:
4654 case ARM::VLD4q16oddPseudo_UPD:
4655 case ARM::VLD4q32oddPseudo_UPD:
4656 case ARM::VLD1DUPq8:
4657 case ARM::VLD1DUPq16:
4658 case ARM::VLD1DUPq32:
4659 case ARM::VLD1DUPq8wb_fixed:
4660 case ARM::VLD1DUPq16wb_fixed:
4661 case ARM::VLD1DUPq32wb_fixed:
4662 case ARM::VLD1DUPq8wb_register:
4663 case ARM::VLD1DUPq16wb_register:
4664 case ARM::VLD1DUPq32wb_register:
4665 case ARM::VLD2DUPd8:
4666 case ARM::VLD2DUPd16:
4667 case ARM::VLD2DUPd32:
4668 case ARM::VLD2DUPd8wb_fixed:
4669 case ARM::VLD2DUPd16wb_fixed:
4670 case ARM::VLD2DUPd32wb_fixed:
4671 case ARM::VLD2DUPd8wb_register:
4672 case ARM::VLD2DUPd16wb_register:
4673 case ARM::VLD2DUPd32wb_register:
4674 case ARM::VLD2DUPq8EvenPseudo:
4675 case ARM::VLD2DUPq8OddPseudo:
4676 case ARM::VLD2DUPq16EvenPseudo:
4677 case ARM::VLD2DUPq16OddPseudo:
4678 case ARM::VLD2DUPq32EvenPseudo:
4679 case ARM::VLD2DUPq32OddPseudo:
4680 case ARM::VLD3DUPq8EvenPseudo:
4681 case ARM::VLD3DUPq8OddPseudo:
4682 case ARM::VLD3DUPq16EvenPseudo:
4683 case ARM::VLD3DUPq16OddPseudo:
4684 case ARM::VLD3DUPq32EvenPseudo:
4685 case ARM::VLD3DUPq32OddPseudo:
4686 case ARM::VLD4DUPd8Pseudo:
4687 case ARM::VLD4DUPd16Pseudo:
4688 case ARM::VLD4DUPd32Pseudo:
4689 case ARM::VLD4DUPd8Pseudo_UPD:
4690 case ARM::VLD4DUPd16Pseudo_UPD:
4691 case ARM::VLD4DUPd32Pseudo_UPD:
4692 case ARM::VLD4DUPq8EvenPseudo:
4693 case ARM::VLD4DUPq8OddPseudo:
4694 case ARM::VLD4DUPq16EvenPseudo:
4695 case ARM::VLD4DUPq16OddPseudo:
4696 case ARM::VLD4DUPq32EvenPseudo:
4697 case ARM::VLD4DUPq32OddPseudo:
4698 case ARM::VLD1LNq8Pseudo:
4699 case ARM::VLD1LNq16Pseudo:
4700 case ARM::VLD1LNq32Pseudo:
4701 case ARM::VLD1LNq8Pseudo_UPD:
4702 case ARM::VLD1LNq16Pseudo_UPD:
4703 case ARM::VLD1LNq32Pseudo_UPD:
4704 case ARM::VLD2LNd8Pseudo:
4705 case ARM::VLD2LNd16Pseudo:
4706 case ARM::VLD2LNd32Pseudo:
4707 case ARM::VLD2LNq16Pseudo:
4708 case ARM::VLD2LNq32Pseudo:
4709 case ARM::VLD2LNd8Pseudo_UPD:
4710 case ARM::VLD2LNd16Pseudo_UPD:
4711 case ARM::VLD2LNd32Pseudo_UPD:
4712 case ARM::VLD2LNq16Pseudo_UPD:
4713 case ARM::VLD2LNq32Pseudo_UPD:
4714 case ARM::VLD4LNd8Pseudo:
4715 case ARM::VLD4LNd16Pseudo:
4716 case ARM::VLD4LNd32Pseudo:
4717 case ARM::VLD4LNq16Pseudo:
4718 case ARM::VLD4LNq32Pseudo:
4719 case ARM::VLD4LNd8Pseudo_UPD:
4720 case ARM::VLD4LNd16Pseudo_UPD:
4721 case ARM::VLD4LNd32Pseudo_UPD:
4722 case ARM::VLD4LNq16Pseudo_UPD:
4723 case ARM::VLD4LNq32Pseudo_UPD:
4733unsigned ARMBaseInstrInfo::getPredicationCost(
const MachineInstr &
MI)
const {
4734 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4744 !Subtarget.cheapPredicableCPSRDef())) {
4754 unsigned *PredCost)
const {
4755 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4761 if (
MI.isBundle()) {
4765 while (++
I != E &&
I->isInsideBundle()) {
4766 if (
I->getOpcode() != ARM::t2IT)
4767 Latency += getInstrLatency(ItinData, *
I, PredCost);
4774 !Subtarget.cheapPredicableCPSRDef()))) {
4782 return MI.mayLoad() ? 3 : 1;
4795 MI.hasOneMemOperand() ? (*
MI.memoperands_begin())->
getAlign().value() : 0;
4797 if (Adj >= 0 || (
int)
Latency > -Adj) {
4805 if (!
Node->isMachineOpcode())
4808 if (!ItinData || ItinData->
isEmpty())
4811 unsigned Opcode =
Node->getMachineOpcode();
4821bool ARMBaseInstrInfo::hasHighOperandLatency(
const TargetSchedModel &SchedModel,
4826 unsigned UseIdx)
const {
4829 if (Subtarget.nonpipelinedVFP() &&
4844 unsigned DefIdx)
const {
4846 if (!ItinData || ItinData->
isEmpty())
4851 unsigned DefClass =
DefMI.getDesc().getSchedClass();
4852 std::optional<unsigned> DefCycle =
4854 return DefCycle && DefCycle <= 2U;
4862 ErrInfo =
"Pseudo flag setting opcodes only exist in Selection DAG";
4865 if (
MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) {
4867 if (!ARM::hGPRRegClass.
contains(
MI.getOperand(0).getReg()) &&
4868 !ARM::hGPRRegClass.contains(
MI.getOperand(1).getReg())) {
4869 ErrInfo =
"Non-flag-setting Thumb1 mov is v6-only";
4873 if (
MI.getOpcode() == ARM::tPUSH ||
4874 MI.getOpcode() == ARM::tPOP ||
4875 MI.getOpcode() == ARM::tPOP_RET) {
4877 if (MO.isImplicit() || !MO.isReg())
4880 if (Reg < ARM::R0 || Reg > ARM::R7) {
4881 if (!(
MI.getOpcode() == ARM::tPUSH && Reg == ARM::LR) &&
4882 !(
MI.getOpcode() == ARM::tPOP_RET && Reg == ARM::PC)) {
4883 ErrInfo =
"Unsupported register in Thumb1 push/pop";
4889 if (
MI.getOpcode() == ARM::MVE_VMOV_q_rr) {
4890 assert(
MI.getOperand(4).isImm() &&
MI.getOperand(5).isImm());
4891 if ((
MI.getOperand(4).getImm() != 2 &&
MI.getOperand(4).getImm() != 3) ||
4892 MI.getOperand(4).getImm() !=
MI.getOperand(5).getImm() + 2) {
4893 ErrInfo =
"Incorrect array index for MVE_VMOV_q_rr";
4914 for (
auto Op :
MI.operands()) {
4921 ErrInfo =
"Incorrect AddrMode Imm for instruction";
4931 unsigned LoadImmOpc,
4932 unsigned LoadOpc)
const {
4934 "ROPI/RWPI not currently supported with stack guard");
4942 if (LoadImmOpc == ARM::MRC || LoadImmOpc == ARM::t2MRC) {
4944 "TLS stack protector requires hardware TLS register");
4955 Offset = M.getStackProtectorGuardOffset();
4960 unsigned AddOpc = (LoadImmOpc == ARM::MRC) ? ARM::ADDri : ARM::t2ADDri;
4970 cast<GlobalValue>((*
MI->memoperands_begin())->getValue());
4979 else if (IsIndirect)
4981 }
else if (IsIndirect) {
4985 if (LoadImmOpc == ARM::tMOVi32imm) {
4988 ARMSysReg::lookupMClassSysRegByName(
"apsr_nzcvq")->Encoding;
5024 unsigned &AddSubOpc,
5025 bool &NegAcc,
bool &HasLane)
const {
5027 if (
I == MLxEntryMap.
end())
5031 MulOpc = Entry.MulOpc;
5032 AddSubOpc = Entry.AddSubOpc;
5033 NegAcc = Entry.NegAcc;
5034 HasLane = Entry.HasLane;
5058std::pair<uint16_t, uint16_t>
5062 if (Subtarget.hasNEON()) {
5071 (
MI.getOpcode() == ARM::VMOVRS ||
MI.getOpcode() == ARM::VMOVSR ||
5072 MI.getOpcode() == ARM::VMOVS))
5079 return std::make_pair(
ExeNEON, 0);
5084 return std::make_pair(
ExeNEON, 0);
5087 return std::make_pair(
ExeVFP, 0);
5093 unsigned SReg,
unsigned &Lane) {
5094 unsigned DReg =
TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
5097 if (DReg != ARM::NoRegister)
5101 DReg =
TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
5103 assert(DReg &&
"S-register with no D super-register?");
5124 unsigned Lane,
unsigned &ImplicitSReg) {
5127 if (
MI.definesRegister(DReg,
TRI) ||
MI.readsRegister(DReg,
TRI)) {
5133 ImplicitSReg =
TRI->getSubReg(DReg,
5134 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
5136 MI.getParent()->computeRegisterLiveness(
TRI, ImplicitSReg,
MI);
5151 unsigned DstReg, SrcReg, DReg;
5155 switch (
MI.getOpcode()) {
5167 assert(Subtarget.hasNEON() &&
"VORRd requires NEON");
5170 DstReg =
MI.getOperand(0).getReg();
5171 SrcReg =
MI.getOperand(1).getReg();
5173 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5174 MI.removeOperand(i - 1);
5177 MI.setDesc(
get(ARM::VORRd));
5189 DstReg =
MI.getOperand(0).getReg();
5190 SrcReg =
MI.getOperand(1).getReg();
5192 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5193 MI.removeOperand(i - 1);
5200 MI.setDesc(
get(ARM::VGETLNi32));
5216 DstReg =
MI.getOperand(0).getReg();
5217 SrcReg =
MI.getOperand(1).getReg();
5221 unsigned ImplicitSReg;
5225 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5226 MI.removeOperand(i - 1);
5230 MI.setDesc(
get(ARM::VSETLNi32));
5240 if (ImplicitSReg != 0)
5249 DstReg =
MI.getOperand(0).getReg();
5250 SrcReg =
MI.getOperand(1).getReg();
5252 unsigned DstLane = 0, SrcLane = 0, DDst, DSrc;
5256 unsigned ImplicitSReg;
5260 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5261 MI.removeOperand(i - 1);
5266 MI.setDesc(
get(ARM::VDUPLN32d));
5276 if (ImplicitSReg != 0)
5300 unsigned CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
5301 bool CurUndef = !
MI.readsRegister(CurReg,
TRI);
5304 CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
5305 CurUndef = !
MI.readsRegister(CurReg,
TRI);
5310 if (SrcLane == DstLane)
5313 MI.setDesc(
get(ARM::VEXTd32));
5318 CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
5319 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5322 CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
5323 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5328 if (SrcLane != DstLane)
5334 if (ImplicitSReg != 0)
5361 if (!PartialUpdateClearance)
5372 switch (
MI.getOpcode()) {
5378 case ARM::VMOVv4i16:
5379 case ARM::VMOVv2i32:
5380 case ARM::VMOVv2f32:
5381 case ARM::VMOVv1i64:
5382 UseOp =
MI.findRegisterUseOperandIdx(Reg,
TRI,
false);
5386 case ARM::VLD1LNd32:
5395 if (UseOp != -1 &&
MI.getOperand(UseOp).readsReg())
5399 if (Reg.isVirtual()) {
5401 if (!MO.
getSubReg() ||
MI.readsVirtualRegister(Reg))
5403 }
else if (ARM::SPRRegClass.
contains(Reg)) {
5405 unsigned DReg =
TRI->getMatchingSuperReg(Reg, ARM::ssub_0,
5407 if (!DReg || !
MI.definesRegister(DReg,
TRI))
5413 return PartialUpdateClearance;
5420 assert(OpNum <
MI.getDesc().getNumDefs() &&
"OpNum is not a def");
5425 assert(Reg.isPhysical() &&
"Can't break virtual register dependencies.");
5426 unsigned DReg = Reg;
5429 if (ARM::SPRRegClass.
contains(Reg)) {
5430 DReg = ARM::D0 + (Reg - ARM::S0) / 2;
5431 assert(
TRI->isSuperRegister(Reg, DReg) &&
"Register enums broken");
5434 assert(ARM::DPRRegClass.
contains(DReg) &&
"Can only break D-reg deps");
5435 assert(
MI.definesRegister(DReg,
TRI) &&
"MI doesn't clobber full D-reg");
5448 MI.addRegisterKilled(DReg,
TRI,
true);
5452 return Subtarget.hasFeature(ARM::HasV6KOps);
5456 if (
MI->getNumOperands() < 4)
5458 unsigned ShOpVal =
MI->getOperand(3).getImm();
5462 ((ShImm == 1 || ShImm == 2) &&
5472 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5473 assert(
MI.isRegSequenceLike() &&
"Invalid kind of instruction");
5475 switch (
MI.getOpcode()) {
5487 MOReg = &
MI.getOperand(2);
5499 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5500 assert(
MI.isExtractSubregLike() &&
"Invalid kind of instruction");
5502 switch (
MI.getOpcode()) {
5513 InputReg.
SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1;
5522 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5523 assert(
MI.isInsertSubregLike() &&
"Invalid kind of instruction");
5525 switch (
MI.getOpcode()) {
5526 case ARM::VSETLNi32:
5527 case ARM::MVE_VMOV_to_lane_32:
5538 InsertedReg.
Reg = MOInsertedReg.
getReg();
5546std::pair<unsigned, unsigned>
5549 return std::make_pair(TF & Mask, TF & ~Mask);
5554 using namespace ARMII;
5556 static const std::pair<unsigned, const char *> TargetFlags[] = {
5557 {MO_LO16,
"arm-lo16"}, {MO_HI16,
"arm-hi16"},
5558 {MO_LO_0_7,
"arm-lo-0-7"}, {MO_HI_0_7,
"arm-hi-0-7"},
5559 {MO_LO_8_15,
"arm-lo-8-15"}, {MO_HI_8_15,
"arm-hi-8-15"},
5566 using namespace ARMII;
5568 static const std::pair<unsigned, const char *> TargetFlags[] = {
5569 {MO_COFFSTUB,
"arm-coffstub"},
5570 {MO_GOT,
"arm-got"},
5571 {MO_SBREL,
"arm-sbrel"},
5572 {MO_DLLIMPORT,
"arm-dllimport"},
5573 {MO_SECREL,
"arm-secrel"},
5574 {MO_NONLAZY,
"arm-nonlazy"}};
5578std::optional<RegImmPair>
5581 unsigned Opcode =
MI.getOpcode();
5588 return std::nullopt;
5591 if (Opcode == ARM::SUBri)
5593 else if (Opcode != ARM::ADDri)
5594 return std::nullopt;
5599 if (!
MI.getOperand(1).isReg() || !
MI.getOperand(2).isImm())
5600 return std::nullopt;
5602 Offset =
MI.getOperand(2).getImm() * Sign;
5610 for (
auto I =
From;
I != To; ++
I)
5611 if (
I->modifiesRegister(Reg,
TRI))
5624 if (CmpMI->modifiesRegister(ARM::CPSR,
TRI))
5626 if (CmpMI->readsRegister(ARM::CPSR,
TRI))
5632 if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
5634 Register Reg = CmpMI->getOperand(0).getReg();
5637 if (Pred !=
ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
5650 if (Subtarget->isThumb()) {
5652 return ForCodesize ? 2 : 1;
5653 if (Subtarget->hasV6T2Ops() && (Val <= 0xffff ||
5656 return ForCodesize ? 4 : 1;
5658 return ForCodesize ? 4 : 2;
5660 return ForCodesize ? 4 : 2;
5662 return ForCodesize ? 4 : 2;
5665 return ForCodesize ? 4 : 1;
5667 return ForCodesize ? 4 : 1;
5668 if (Subtarget->hasV6T2Ops() && Val <= 0xffff)
5669 return ForCodesize ? 4 : 1;
5671 return ForCodesize ? 8 : 2;
5673 return ForCodesize ? 8 : 2;
5676 return ForCodesize ? 8 : 2;
5677 return ForCodesize ? 8 : 3;
5826 : CallTailCall(target.
isThumb() ? 4 : 4),
5827 FrameTailCall(target.
isThumb() ? 0 : 0),
5828 CallThunk(target.
isThumb() ? 4 : 4),
5829 FrameThunk(target.
isThumb() ? 0 : 0),
5830 CallNoLRSave(target.
isThumb() ? 4 : 4),
5831 FrameNoLRSave(target.
isThumb() ? 2 : 4),
5832 CallRegSave(target.
isThumb() ? 8 : 12),
5833 FrameRegSave(target.
isThumb() ? 2 : 4),
5834 CallDefault(target.
isThumb() ? 8 : 12),
5835 FrameDefault(target.
isThumb() ? 2 : 4),
5836 SaveRestoreLROnStack(target.
isThumb() ? 8 : 8) {}
5849 for (
Register Reg : ARM::rGPRRegClass) {
5850 if (!(Reg < regsReserved.
size() && regsReserved.
test(Reg)) &&
5853 C.isAvailableAcrossAndOutOfSeq(Reg,
TRI) &&
5854 C.isAvailableInsideSeq(Reg,
TRI))
5868 for (;
I != E; ++
I) {
5872 if (
MI.modifiesRegister(ARM::LR, &
TRI))
5876 unsigned Opcode =
MI.getOpcode();
5877 if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR ||
5878 Opcode == ARM::SUBS_PC_LR || Opcode == ARM::tBX_RET ||
5879 Opcode == ARM::tBXNS_RET) {
5885 if (
MI.readsRegister(ARM::LR, &
TRI))
5891std::optional<std::unique_ptr<outliner::OutlinedFunction>>
5894 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
5895 unsigned MinRepeats)
const {
5896 unsigned SequenceSize = 0;
5897 for (
auto &
MI : RepeatedSequenceLocs[0])
5901 unsigned FlagsSetInAll = 0xF;
5906 FlagsSetInAll &=
C.Flags;
5925 return C.isAnyUnavailableAcrossOrOutOfSeq({ARM::R12, ARM::CPSR},
TRI);
5933 llvm::erase_if(RepeatedSequenceLocs, CantGuaranteeValueAcrossCall);
5936 if (RepeatedSequenceLocs.size() < MinRepeats)
5937 return std::nullopt;
5956 if (std::distance(RepeatedSequenceLocs.begin(), NoBTI) >
5957 std::distance(NoBTI, RepeatedSequenceLocs.end()))
5958 RepeatedSequenceLocs.erase(NoBTI, RepeatedSequenceLocs.end());
5960 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoBTI);
5962 if (RepeatedSequenceLocs.size() < MinRepeats)
5963 return std::nullopt;
5973 if (std::distance(RepeatedSequenceLocs.begin(), NoPAC) >
5974 std::distance(NoPAC, RepeatedSequenceLocs.end()))
5975 RepeatedSequenceLocs.erase(NoPAC, RepeatedSequenceLocs.end());
5977 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoPAC);
5979 if (RepeatedSequenceLocs.size() < MinRepeats)
5980 return std::nullopt;
5985 unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back().getOpcode();
5988 auto SetCandidateCallInfo =
5989 [&RepeatedSequenceLocs](
unsigned CallID,
unsigned NumBytesForCall) {
5991 C.setCallInfo(CallID, NumBytesForCall);
5996 const auto &SomeMFI =
5999 if (SomeMFI.branchTargetEnforcement()) {
6008 if (SomeMFI.shouldSignReturnAddress(
true)) {
6018 if (RepeatedSequenceLocs[0].back().isTerminator()) {
6022 }
else if (LastInstrOpcode == ARM::BL || LastInstrOpcode == ARM::BLX ||
6023 LastInstrOpcode == ARM::BLX_noip || LastInstrOpcode == ARM::tBL ||
6024 LastInstrOpcode == ARM::tBLXr ||
6025 LastInstrOpcode == ARM::tBLXr_noip ||
6026 LastInstrOpcode == ARM::tBLXi) {
6034 unsigned NumBytesNoStackCalls = 0;
6035 std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
6040 const auto Last =
C.getMBB()->rbegin();
6041 const bool LRIsAvailable =
6042 C.getMBB()->isReturnBlock() && !
Last->isCall()
6045 :
C.isAvailableAcrossAndOutOfSeq(ARM::LR,
TRI);
6046 if (LRIsAvailable) {
6050 CandidatesWithoutStackFixups.push_back(
C);
6055 else if (findRegisterToSaveLRTo(
C)) {
6059 CandidatesWithoutStackFixups.push_back(
C);
6064 else if (
C.isAvailableInsideSeq(ARM::SP,
TRI)) {
6067 CandidatesWithoutStackFixups.push_back(
C);
6073 NumBytesNoStackCalls += SequenceSize;
6079 if (NumBytesNoStackCalls <=
6080 RepeatedSequenceLocs.size() * Costs.
CallDefault) {
6081 RepeatedSequenceLocs = CandidatesWithoutStackFixups;
6083 if (RepeatedSequenceLocs.size() < MinRepeats)
6084 return std::nullopt;
6091 if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
6109 return std::make_unique<outliner::OutlinedFunction>(
6110 RepeatedSequenceLocs, SequenceSize, NumBytesToCreateFrame, FrameID);
6113bool ARMBaseInstrInfo::checkAndUpdateStackOffset(
MachineInstr *
MI,
6116 int SPIdx =
MI->findRegisterUseOperandIdx(ARM::SP,
nullptr);
6141 unsigned NumOps =
MI->getDesc().getNumOperands();
6142 unsigned ImmIdx = NumOps - 3;
6146 int64_t OffVal =
Offset.getImm();
6152 unsigned NumBits = 0;
6181 assert((
Fixup & 3) == 0 &&
"Can't encode this offset!");
6201 assert(((OffVal * Scale +
Fixup) & (Scale - 1)) == 0 &&
6202 "Can't encode this offset!");
6203 OffVal +=
Fixup / Scale;
6205 unsigned Mask = (1 << NumBits) - 1;
6207 if (OffVal <= Mask) {
6209 MI->getOperand(ImmIdx).setImm(OffVal);
6217 Function &
F, std::vector<outliner::Candidate> &Candidates)
const {
6221 const Function &CFn =
C.getMF()->getFunction();
6228 ARMGenInstrInfo::mergeOutliningCandidateAttributes(
F, Candidates);
6236 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
6255 unsigned &Flags)
const {
6259 "Suitable Machine Function for outlining must track liveness");
6267 bool R12AvailableInBlock = LRU.
available(ARM::R12);
6268 bool CPSRAvailableInBlock = LRU.
available(ARM::CPSR);
6272 if (R12AvailableInBlock && CPSRAvailableInBlock)
6273 Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead;
6280 if (R12AvailableInBlock && !LRU.
available(ARM::R12))
6282 if (CPSRAvailableInBlock && !LRU.
available(ARM::CPSR))
6288 Flags |= MachineOutlinerMBBFlags::HasCalls;
6292 bool LRIsAvailable =
6297 Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;
6305 unsigned Flags)
const {
6311 unsigned Opc =
MI.getOpcode();
6312 if (Opc == ARM::tPICADD || Opc == ARM::PICADD || Opc == ARM::PICSTR ||
6313 Opc == ARM::PICSTRB || Opc == ARM::PICSTRH || Opc == ARM::PICLDR ||
6314 Opc == ARM::PICLDRB || Opc == ARM::PICLDRH || Opc == ARM::PICLDRSB ||
6315 Opc == ARM::PICLDRSH || Opc == ARM::t2LDRpci_pic ||
6316 Opc == ARM::t2MOVi16_ga_pcrel || Opc == ARM::t2MOVTi16_ga_pcrel ||
6317 Opc == ARM::t2MOV_ga_pcrel)
6321 if (Opc == ARM::t2BF_LabelPseudo || Opc == ARM::t2DoLoopStart ||
6322 Opc == ARM::t2DoLoopStartTP || Opc == ARM::t2WhileLoopStart ||
6323 Opc == ARM::t2WhileLoopStartLR || Opc == ARM::t2WhileLoopStartTP ||
6324 Opc == ARM::t2LoopDec || Opc == ARM::t2LoopEnd ||
6325 Opc == ARM::t2LoopEndDec)
6334 if (
MI.isTerminator())
6340 if (
MI.readsRegister(ARM::LR,
TRI) ||
MI.readsRegister(ARM::PC,
TRI))
6348 if (MOP.isGlobal()) {
6349 Callee = dyn_cast<Function>(MOP.getGlobal());
6357 (Callee->getName() ==
"\01__gnu_mcount_nc" ||
6358 Callee->getName() ==
"\01mcount" || Callee->getName() ==
"__mcount"))
6366 if (Opc == ARM::BL || Opc == ARM::tBL || Opc == ARM::BLX ||
6367 Opc == ARM::BLX_noip || Opc == ARM::tBLXr || Opc == ARM::tBLXr_noip ||
6372 return UnknownCallOutlineType;
6380 return UnknownCallOutlineType;
6388 return UnknownCallOutlineType;
6396 if (
MI.modifiesRegister(ARM::LR,
TRI) ||
MI.modifiesRegister(ARM::PC,
TRI))
6400 if (
MI.modifiesRegister(ARM::SP,
TRI) ||
MI.readsRegister(ARM::SP,
TRI)) {
6413 bool MightNeedStackFixUp =
6414 (Flags & (MachineOutlinerMBBFlags::LRUnavailableSomewhere |
6415 MachineOutlinerMBBFlags::HasCalls));
6417 if (!MightNeedStackFixUp)
6423 if (
MI.modifiesRegister(ARM::SP,
TRI))
6437 if (
MI.readsRegister(ARM::ITSTATE,
TRI) ||
6438 MI.modifiesRegister(ARM::ITSTATE,
TRI))
6442 if (
MI.isCFIInstruction())
6473 unsigned Opc = Subtarget.isThumb() ? ARM::t2STR_PRE : ARM::STR_PRE_IMM;
6488 int64_t StackPosEntry =
6498 unsigned DwarfLR =
MRI->getDwarfRegNum(ARM::LR,
true);
6506 unsigned DwarfRAC =
MRI->getDwarfRegNum(ARM::RA_AUTH_CODE,
true);
6520 unsigned DwarfLR =
MRI->getDwarfRegNum(ARM::LR,
true);
6521 unsigned DwarfReg =
MRI->getDwarfRegNum(Reg,
true);
6532 bool CFI,
bool Auth)
const {
6548 unsigned Opc = Subtarget.isThumb() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
6552 if (!Subtarget.isThumb())
6563 unsigned DwarfLR =
MRI->getDwarfRegNum(ARM::LR,
true);
6564 int64_t StackPosEntry =
6571 int64_t LRPosEntry =
6578 unsigned DwarfRAC =
MRI->getDwarfRegNum(ARM::RA_AUTH_CODE,
true);
6591void ARMBaseInstrInfo::emitCFIForLRRestoreFromReg(
6595 unsigned DwarfLR =
MRI->getDwarfRegNum(ARM::LR,
true);
6597 int64_t LRPosEntry =
6611 bool isThumb = Subtarget.isThumb();
6612 unsigned FuncOp =
isThumb ? 2 : 0;
6613 unsigned Opc = Call->getOperand(FuncOp).isReg()
6614 ?
isThumb ? ARM::tTAILJMPr : ARM::TAILJMPr
6619 .
add(Call->getOperand(FuncOp));
6620 if (
isThumb && !Call->getOperand(FuncOp).isReg())
6622 Call->eraseFromParent();
6627 return MI.isCall() && !
MI.isReturn();
6635 Et = std::prev(
MBB.
end());
6645 saveLROnStack(
MBB, It,
true, Auth);
6650 "Can only fix up stack references once");
6651 fixupPostOutline(
MBB);
6654 restoreLRFromStack(
MBB, Et,
true, Auth);
6674 fixupPostOutline(
MBB);
6683 bool isThumb = Subtarget.isThumb();
6689 ? Subtarget.
isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND
6700 Opc =
isThumb ? ARM::tBL : ARM::BL;
6716 Register Reg = findRegisterToSaveLRTo(
C);
6717 assert(Reg != 0 &&
"No callee-saved register available?");
6722 emitCFIForLRSaveToReg(
MBB, It, Reg);
6726 emitCFIForLRRestoreFromReg(
MBB, It);
6746bool ARMBaseInstrInfo::isReallyTriviallyReMaterializable(
6780 static int constexpr MAX_STAGES = 30;
6781 static int constexpr LAST_IS_USE = MAX_STAGES;
6782 static int constexpr SEEN_AS_LIVE = MAX_STAGES + 1;
6783 typedef std::bitset<MAX_STAGES + 2> IterNeed;
6784 typedef std::map<unsigned, IterNeed> IterNeeds;
6787 const IterNeeds &CIN);
6799 : EndLoop(EndLoop), LoopCount(LoopCount),
6801 TII(MF->getSubtarget().getInstrInfo()) {}
6803 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
6805 return MI == EndLoop ||
MI == LoopCount;
6809 if (tooMuchRegisterPressure(SSD, SMS))
6815 std::optional<bool> createTripCountGreaterCondition(
6826 }
else if (EndLoop->
getOpcode() == ARM::t2LoopEnd) {
6831 if (
I.getOpcode() == ARM::t2LoopDec)
6833 assert(LoopDec &&
"Unable to find copied LoopDec");
6839 .
addReg(ARM::NoRegister);
6849 void adjustTripCount(
int TripCountAdjust)
override {}
6851 void disposed()
override {}
6855 const IterNeeds &CIN) {
6857 for (
const auto &
N : CIN) {
6858 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6859 for (
int I = 0;
I < Cnt; ++
I)
6864 for (
const auto &
N : CIN) {
6865 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6866 for (
int I = 0;
I < Cnt; ++
I)
6874 IterNeeds CrossIterationNeeds;
6879 for (
auto &SU : SSD.
SUnits) {
6882 for (
auto &S : SU.Succs)
6885 if (
Reg.isVirtual())
6886 CrossIterationNeeds[
Reg.id()].set(0);
6887 }
else if (S.isAssignedRegDep()) {
6889 if (OStg >= 0 && OStg != Stg) {
6891 if (
Reg.isVirtual())
6892 CrossIterationNeeds[
Reg.id()] |= ((1 << (OStg - Stg)) - 1);
6901 std::vector<SUnit *> ProposedSchedule;
6905 std::deque<SUnit *> Instrs =
6907 std::sort(Instrs.begin(), Instrs.end(),
6908 [](
SUnit *
A,
SUnit *
B) { return A->NodeNum > B->NodeNum; });
6909 for (
SUnit *SU : Instrs)
6910 ProposedSchedule.push_back(SU);
6916 for (
auto *SU : ProposedSchedule)
6920 if (!MO.isReg() || !MO.getReg())
6923 auto CIter = CrossIterationNeeds.find(
Reg.id());
6924 if (CIter == CrossIterationNeeds.end() || CIter->second[LAST_IS_USE] ||
6925 CIter->second[SEEN_AS_LIVE])
6927 if (MO.isDef() && !MO.isDead())
6928 CIter->second.set(SEEN_AS_LIVE);
6929 else if (MO.isUse())
6930 CIter->second.set(LAST_IS_USE);
6932 for (
auto &CI : CrossIterationNeeds)
6933 CI.second.reset(LAST_IS_USE);
6939 RPTracker.init(MF, &RegClassInfo,
nullptr, EndLoop->
getParent(),
6942 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6944 for (
auto *SU : ProposedSchedule) {
6946 RPTracker.setPos(std::next(CurInstI));
6952 if (!MO.isReg() || !MO.getReg())
6955 if (MO.isDef() && !MO.isDead()) {
6956 auto CIter = CrossIterationNeeds.find(
Reg.id());
6957 if (CIter != CrossIterationNeeds.end()) {
6958 CIter->second.reset(0);
6959 CIter->second.reset(SEEN_AS_LIVE);
6963 for (
auto &S : SU->Preds) {
6965 if (S.isAssignedRegDep()) {
6967 auto CIter = CrossIterationNeeds.find(
Reg.id());
6968 if (CIter != CrossIterationNeeds.end()) {
6970 assert(Stg2 <= Stg &&
"Data dependence upon earlier stage");
6971 if (Stg - Stg2 < MAX_STAGES)
6972 CIter->second.set(Stg - Stg2);
6973 CIter->second.set(SEEN_AS_LIVE);
6978 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6981 auto &
P = RPTracker.getPressure().MaxSetPressure;
6982 for (
unsigned I = 0, E =
P.size();
I < E; ++
I) {
6984 if (
I == ARM::DQuad_with_ssub_0 ||
I == ARM::DTripleSpc_with_ssub_0 ||
6985 I == ARM::DTriple_with_qsub_0_in_QPR)
6997std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
7001 if (Preheader == LoopBB)
7002 Preheader = *std::next(LoopBB->
pred_begin());
7004 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2Bcc) {
7010 for (
auto &L : LoopBB->
instrs()) {
7017 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, CCSetter);
7031 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2LoopEnd) {
7032 for (
auto &L : LoopBB->
instrs())
7037 Register LoopDecResult =
I->getOperand(0).getReg();
7040 if (!LoopDec || LoopDec->
getOpcode() != ARM::t2LoopDec)
7043 for (
auto &J : Preheader->
instrs())
7044 if (J.getOpcode() == ARM::t2DoLoopStart)
7048 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, LoopDec);
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineOutlinerClass
Constants defining how certain sequences should be outlined.
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
@ MachineOutlinerRegSave
Emit a call and tail-call.
@ MachineOutlinerNoLRSave
Only emit a branch.
@ MachineOutlinerThunk
Emit a call and return.
static bool isLoad(int Opcode)
static bool isThumb(const MCSubtargetInfo &STI)
static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI, MachineInstr &MI, unsigned DReg, unsigned Lane, unsigned &ImplicitSReg)
getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane, set ImplicitSReg to a register n...
static const MachineInstr * getBundledUseMI(const TargetRegisterInfo *TRI, const MachineInstr &MI, unsigned Reg, unsigned &UseIdx, unsigned &Dist)
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI)
Create a copy of a const pool value.
static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg, int CmpMask, bool CommonUse)
isSuitableForMask - Identify a suitable 'and' instruction that operates on the given source register ...
static cl::opt< bool > EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden, cl::desc("Enable ARM 2-addr to 3-addr conv"))
static int adjustDefLatency(const ARMSubtarget &Subtarget, const MachineInstr &DefMI, const MCInstrDesc &DefMCID, unsigned DefAlign)
Return the number of cycles to add to (or subtract from) the static itinerary based on the def opcode...
static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, const MachineInstr &MI)
static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[]
static bool isEligibleForITBlock(const MachineInstr *MI)
static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC)
getCmpToAddCondition - assume the flags are set by CMP(a,b), return the condition code if we modify t...
static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1)
static bool isLRAvailable(const TargetRegisterInfo &TRI, MachineBasicBlock::reverse_iterator I, MachineBasicBlock::reverse_iterator E)
static unsigned getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI, unsigned SReg, unsigned &Lane)
static const ARM_MLxEntry ARM_MLxTable[]
static bool isRedundantFlagInstr(const MachineInstr *CmpI, Register SrcReg, Register SrcReg2, int64_t ImmValue, const MachineInstr *OI, bool &IsThumb1)
isRedundantFlagInstr - check whether the first instruction, whose only purpose is to update flags,...
static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc, unsigned NumRegs)
static const MachineInstr * getBundledDefMI(const TargetRegisterInfo *TRI, const MachineInstr *MI, unsigned Reg, unsigned &DefIdx, unsigned &Dist)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Looks at all the uses of the given value Returns the Liveness deduced from the uses of this value Adds all uses that cause the result to be MaybeLive to MaybeLiveRetUses If the result is Live
This file defines the DenseMap class.
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
uint64_t IntrinsicInst * II
PowerPC TLS Dynamic Call Fixup
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallSet class.
This file defines the SmallVector class.
static X86::CondCode getSwappedCondition(X86::CondCode CC)
Assuming the flags are set by MI(a,b), return the condition code if we modify the instructions such t...
static bool isCPSRDefined(const MachineInstr &MI)
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
optimizeCompareInstr - Convert the instruction to set the zero flag so that we can remove a "comparis...
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const override
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
foldImmediate - 'Reg' is known to be defined by a move immediate instruction, try to fold the immedia...
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const override
const MachineInstrBuilder & AddDReg(MachineInstrBuilder &MIB, unsigned Reg, unsigned SubIdx, unsigned State, const TargetRegisterInfo *TRI) const
unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const override
virtual unsigned getUnindexedOpcode(unsigned Opc) const =0
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
unsigned getPartialRegUpdateClearance(const MachineInstr &, unsigned, const TargetRegisterInfo *) const override
unsigned getNumLDMAddresses(const MachineInstr &MI) const
Get the number of addresses by LDM or VLDM or zero for unknown.
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI) const override
void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const override
virtual const ARMBaseRegisterInfo & getRegisterInfo() const =0
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Returns the size of the specified MachineInstr.
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
ARM supports the MachineOutliner.
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
Enable outlining by default at -Oz.
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is an instruction that moves/copies value from one register to an...
MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const override
ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *II, const ScheduleDAGMI *DAG) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
ARMBaseInstrInfo(const ARMSubtarget &STI)
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool isPredicated(const MachineInstr &MI) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
void expandLoadStackGuardBase(MachineBasicBlock::iterator MI, unsigned LoadImmOpc, unsigned LoadOpc) const
bool isPredicable(const MachineInstr &MI) const override
isPredicable - Return true if the specified instruction can be predicated.
Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const override
Specialization of TargetInstrInfo::describeLoadedValue, used to enhance debug entry value description...
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, unsigned NumInsts) const override
void copyToCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned SrcReg, bool KillSrc, const ARMSubtarget &Subtarget) const
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const override
areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to determine if two loads are lo...
std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const override
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const override
bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const override
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
bool expandPostRAPseudo(MachineInstr &MI) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const override
bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const override
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const override
shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to determine (in conjunction w...
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const override
void copyFromCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, unsigned DestReg, bool KillSrc, const ARMSubtarget &Subtarget) const
std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const override
VFP/NEON execution domains.
bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const override
bool isFpMLxInstruction(unsigned Opcode) const
isFpMLxInstruction - Return true if the specified opcode is a fp MLA / MLS instruction.
bool isSwiftFastImmShift(const MachineInstr *MI) const
Returns true if the instruction has a shift by immediate that can be executed in one cycle less.
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2 if h...
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
void breakPartialRegDependency(MachineInstr &, unsigned, const TargetRegisterInfo *TRI) const override
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
const ARMSubtarget & getSubtarget() const
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Commutes the operands in the given instruction.
bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const override
bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const override
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
bool shouldSink(const MachineInstr &MI) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
ARMConstantPoolConstant - ARM-specific constant pool values for Constants, Functions,...
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
ARMConstantPoolMBB - ARM-specific constantpool value of a machine basic block.
ARMConstantPoolSymbol - ARM-specific constantpool values for external symbols.
ARMConstantPoolValue - ARM specific constantpool value.
bool isMachineBasicBlock() const
bool isGlobalValue() const
ARMCP::ARMCPModifier getModifier() const
bool mustAddCurrentAddress() const
virtual bool hasSameValue(ARMConstantPoolValue *ACPV)
hasSameValue - Return true if this ARM constpool value can share the same constantpool entry as anoth...
bool isBlockAddress() const
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
bool isThumb2Function() const
bool branchTargetEnforcement() const
unsigned createPICLabelUId()
bool isThumb1OnlyFunction() const
bool isThumbFunction() const
bool shouldSignReturnAddress() const
bool isTargetMachO() const
ARMLdStMultipleTiming getLdStMultipleTiming() const
const ARMBaseInstrInfo * getInstrInfo() const override
bool isThumb1Only() const
bool isReadTPSoft() const
bool isGVIndirectSymbol(const GlobalValue *GV) const
True if the GV will be accessed via an indirect symbol.
unsigned getMispredictionPenalty() const
const ARMBaseRegisterInfo * getRegisterInfo() const override
unsigned getReturnOpcode() const
Returns the correct return opcode for the current feature set.
Align getStackAlignment() const
getStackAlignment - Returns the minimum alignment known to hold of the stack frame on entry to the fu...
bool enableMachinePipeliner() const override
Returns true if machine pipeliner should be enabled.
bool isTargetCOFF() const
unsigned getPartialUpdateClearance() const
@ DoubleIssueCheckUnalignedAccess
Can load/store 2 registers/cycle, but needs an extra cycle if the access is not 64-bit aligned.
@ SingleIssue
Can load/store 1 register/cycle.
@ DoubleIssue
Can load/store 2 registers/cycle.
@ SingleIssuePlusExtras
Can load/store 1 register/cycle, but needs an extra cycle for address computation and potentially als...
int getPreISelOperandLatencyAdjustment() const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool test(unsigned Idx) const
size_type size() const
size - Returns the number of bits in this bitvector.
uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
This class represents an Operation in the Expression.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
A possibly irreducible generalization of a Loop.
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Reverses the branch condition of the specified condition list, returning false on success and true if...
Itinerary data supplied by a subtarget to be used by a target.
int getNumMicroOps(unsigned ItinClassIndx) const
Return the number of micro-ops that the given class decodes to.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
std::optional< unsigned > getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Return true if there is a pipeline forwarding between instructions of itinerary classes DefClass and ...
bool isEmpty() const
Returns true if there are no itineraries.
A set of register units used to track register liveness.
bool available(MCPhysReg Reg) const
Returns true if no part of physical register Reg is live.
void addLiveOuts(const MachineBasicBlock &MBB)
Adds registers living out of block MBB.
void accumulate(const MachineInstr &MI)
Adds all register units used, defined or clobbered in MI.
void addVirtualRegisterDead(Register IncomingReg, MachineInstr &MI, bool AddIfNotFound=false)
addVirtualRegisterDead - Add information about the fact that the specified register is dead after bei...
void addVirtualRegisterKilled(Register IncomingReg, MachineInstr &MI, bool AddIfNotFound=false)
addVirtualRegisterKilled - Add information about the fact that the specified register is killed after...
VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
This class is intended to be used as a base class for asm properties and features specific to the tar...
static MCCFIInstruction createUndefined(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_undefined From now on the previous value of Register can't be restored anymore.
static MCCFIInstruction createRestore(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_restore says that the rule for Register is now the same as it was at the beginning of the functi...
static MCCFIInstruction createRegister(MCSymbol *L, unsigned Register1, unsigned Register2, SMLoc Loc={})
.cfi_register Previous value of Register1 is saved in register Register2.
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
static MCCFIInstruction cfiDefCfaOffset(MCSymbol *L, int64_t Offset, SMLoc Loc={})
.cfi_def_cfa_offset modifies a rule for computing CFA.
Describe properties that are true of each instruction in the target description file.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayLoad() const
Return true if this instruction could possibly read memory.
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
bool isCall() const
Return true if the instruction is a call.
unsigned getSize() const
Return the number of bytes in the encoding of this instruction, or zero if the encoding size cannot b...
unsigned getOpcode() const
Return the opcode number for this descriptor.
bool hasImplicitDefOfPhysReg(MCRegister Reg, const MCRegisterInfo *MRI=nullptr) const
Return true if this instruction implicitly defines the specified physical register.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
bool isValid() const
isValid - Returns true until all the operands have been visited.
unsigned pred_size() const
instr_iterator instr_begin()
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
bool isReturnBlock() const
Convenience function that returns true if the block ends in a return instruction.
Instructions::iterator instr_iterator
pred_iterator pred_begin()
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
instr_iterator instr_end()
Instructions::const_iterator const_instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< succ_iterator > successors()
reverse_iterator rbegin()
LivenessQueryResult
Possible outcome of a register liveness query to computeRegisterLiveness()
@ LQR_Dead
Register is known to be fully dead.
@ LQR_Live
Register is known to be (at least partially) live.
@ LQR_Unknown
Register liveness not decidable from local neighborhood.
bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
This class is a data container for one entry in a MachineConstantPool.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
union llvm::MachineConstantPoolEntry::@204 Val
The constant itself.
MachineConstantPoolValue * MachineCPVal
const Constant * ConstVal
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
const std::vector< MachineConstantPoolEntry > & getConstants() const
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getNumObjects() const
Return the number of objects.
unsigned addFrameInst(const MCCFIInstruction &Inst)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
bool isCall(QueryType Type=AnyInBundle) const
unsigned getNumOperands() const
Retuns the total number of operands.
int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool isRegSequence() const
bool isInsertSubreg() const
void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
bool hasOptionalDef(QueryType Type=IgnoreBundle) const
Set if this instruction has an optional definition, e.g.
void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
const MachineOperand & getOperand(unsigned i) const
void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineFunction * getMachineFunction(const Function &F) const
Returns the MachineFunction associated to IR function F if there is one, otherwise nullptr.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
const GlobalValue * getGlobal() const
void setImplicit(bool Val=true)
void setImm(int64_t immVal)
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
void setIsDead(bool Val=true)
void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
defusechain_iterator - This class provides iterator support for machine operands in the function that...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
bool tracksLiveness() const
tracksLiveness - Returns true when tracking register liveness accurately.
const TargetRegisterInfo * getTargetRegisterInfo() const
A Module instance is used to store all the information related to an LLVM module.
void AddHazardRecognizer(std::unique_ptr< ScheduleHazardRecognizer > &&)
Track the current register pressure at some position in the instruction stream, and remember the high...
void increaseRegPressure(Register RegUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
void decreaseRegPressure(Register RegUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
unsigned getRegPressureSetLimit(unsigned Idx) const
Get the register unit limit for the given pressure set index.
void runOnMachineFunction(const MachineFunction &MF)
runOnFunction - Prepare to answer questions about MF.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
@ Anti
A register anti-dependence (aka WAR).
This class represents the scheduled code.
unsigned getMaxStageCount()
Return the maximum stage count needed for this schedule.
int stageScheduled(SUnit *SU) const
Return the stage for a scheduled instruction.
int getInitiationInterval() const
Return the initiation interval for this schedule.
std::deque< SUnit * > & getInstructions(int cycle)
Return the instructions that are scheduled at the specified cycle.
int getFirstCycle() const
Return the first cycle in the completed schedule.
int getFinalCycle() const
Return the last cycle in the finalized schedule.
Scheduling unit. This is a node in the scheduling DAG.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
std::vector< SUnit > SUnits
The scheduling units.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
This class builds the dependence graph for the instructions in a loop, and attempts to schedule the i...
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const
Compute operand latency based on the available machine model.
const InstrItineraryData * getInstrItineraries() const
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static CondCodes getOppositeCondition(CondCodes CC)
@ MO_OPTION_MASK
MO_OPTION_MASK - Most flags are mutually exclusive; this mask selects just that part of the flag set.
@ MO_NONLAZY
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which,...
@ MO_DLLIMPORT
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
@ MO_GOT
MO_GOT - On a symbol operand, this represents a GOT relative relocation.
@ MO_COFFSTUB
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the "....
AddrMode
ARM Addressing Modes.
unsigned char getAM3Offset(unsigned AM3Opc)
unsigned char getAM5FP16Offset(unsigned AM5Opc)
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into a shifter_operand immed...
ShiftOpc getAM2ShiftOpc(unsigned AM2Opc)
unsigned getAM2Offset(unsigned AM2Opc)
unsigned getSOImmValRotate(unsigned Imm)
getSOImmValRotate - Try to handle Imm with an immediate shifter operand, computing the rotate amount ...
bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting an 8-bit im...
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
ShiftOpc getSORegShOp(unsigned Op)
AddrOpc getAM5Op(unsigned AM5Opc)
bool isSOImmTwoPartValNeg(unsigned V)
isSOImmTwoPartValNeg - Return true if the specified value can be obtained by two SOImmVal,...
unsigned getSOImmTwoPartSecond(unsigned V)
getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal, return the second chunk of ...
bool isSOImmTwoPartVal(unsigned V)
isSOImmTwoPartVal - Return true if the specified value can be obtained by or'ing together two SOImmVa...
AddrOpc getAM5FP16Op(unsigned AM5Opc)
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
unsigned getT2SOImmTwoPartSecond(unsigned Imm)
unsigned getT2SOImmTwoPartFirst(unsigned Imm)
bool isT2SOImmTwoPartVal(unsigned Imm)
unsigned char getAM5Offset(unsigned AM5Opc)
unsigned getSOImmTwoPartFirst(unsigned V)
getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal, return the first chunk of it...
AddrOpc getAM2Op(unsigned AM2Opc)
AddrOpc getAM3Op(unsigned AM3Opc)
@ C
The default llvm calling convention, compatible with C.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
Reg
All possible values of the reg field in the ModR/M byte.
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
static bool isIndirectCall(const MachineInstr &MI)
MachineInstr * findCMPToFoldIntoCBZ(MachineInstr *Br, const TargetRegisterInfo *TRI)
Search backwards from a tBcc to find a tCMPi8 against 0, meaning we can convert them to a tCBZ or tCB...
static bool isCondBranchOpcode(int Opc)
bool HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns true if Val1 has a lower Constant Materialization Cost than Val2.
static bool isPushOpcode(int Opc)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond)
static bool isVCTP(const MachineInstr *MI)
bool IsCPSRDead< MachineInstr >(const MachineInstr *MI)
unsigned getBLXpredOpcode(const MachineFunction &MF)
static bool isARMLowRegister(MCRegister Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
static bool isIndirectBranchOpcode(int Opc)
bool isLegalAddressImm(unsigned Opcode, int Imm, const TargetInstrInfo *TII)
bool registerDefinedBetween(unsigned Reg, MachineBasicBlock::iterator From, MachineBasicBlock::iterator To, const TargetRegisterInfo *TRI)
Return true if Reg is defined between From and To.
MaybeAlign getAlign(const Function &F, unsigned Index)
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
static bool isSEHInstruction(const MachineInstr &MI)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget, MachineFunction &MF, MachineInstr *MI, unsigned NumBytes)
Tries to add registers to the reglist of a given base-updating push/pop instruction to adjust the sta...
auto reverse(ContainerTy &&C)
static bool isJumpTableBranchOpcode(int Opc)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static bool isPopOpcode(int Opc)
void addPredicatedMveVpredROp(MachineInstrBuilder &MIB, unsigned Cond, unsigned Inactive)
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
unsigned getUndefRegState(bool B)
void addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB, Register DestReg)
unsigned ConstantMaterializationCost(unsigned Val, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns the number of instructions required to materialize the given constant in a register,...
unsigned getKillRegState(bool B)
bool rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, Register FrameReg, int &Offset, const ARMBaseInstrInfo &TII)
rewriteARMFrameIndex / rewriteT2FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
static bool isIndirectControlFlowNotComingBack(const MachineInstr &MI)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
ARMCC::CondCodes getInstrPredicate(const MachineInstr &MI, Register &PredReg)
getInstrPredicate - If instruction is predicated, returns its predicate condition,...
unsigned getMatchingCondBranchOpcode(unsigned Opc)
static bool isCalleeSavedRegister(unsigned Reg, const MCPhysReg *CSRegs)
static bool isUncondBranchOpcode(int Opc)
auto partition(R &&Range, UnaryPredicate P)
Provide wrappers to std::partition which take ranges instead of having to pass begin/end explicitly.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
unsigned gettBLXrOpcode(const MachineFunction &MF)
static bool isSpeculationBarrierEndBBOpcode(int Opc)
unsigned getBLXOpcode(const MachineFunction &MF)
void addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB)
bool isV8EligibleForIT(const InstrType *Instr)
void emitARMRegPlusImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, const DebugLoc &dl, Register DestReg, Register BaseReg, int NumBytes, ARMCC::CondCodes Pred, Register PredReg, const ARMBaseInstrInfo &TII, unsigned MIFlags=0)
emitARMRegPlusImmediate / emitT2RegPlusImmediate - Emits a series of instructions to materialize a des...
unsigned convertAddSubFlagsOpcode(unsigned OldOpc)
Map pseudo instructions that imply an 'S' bit onto real opcodes.
ARM_MLxEntry - Record information about MLA / MLS instructions.
Map pseudo instructions that imply an 'S' bit onto real opcodes.
OutlinerCosts(const ARMSubtarget &target)
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Description of the encoding of one expression Op.
static constexpr LaneBitmask getAll()
static constexpr LaneBitmask getNone()
VarInfo - This represents the regions where a virtual register is live in the program.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Used to describe a register and immediate addition.
RegisterPressure computed within a region of instructions delimited by TopPos and BottomPos.
A pair composed of a pair of a register and a sub-register index, and another sub-register index.
A pair composed of a register and a sub-register index.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.
unsigned FrameConstructionID
Target-defined identifier for constructing a frame for this function.
std::vector< Candidate > Candidates