73#define DEBUG_TYPE "arm-instrinfo"
75#define GET_INSTRINFO_CTOR_DTOR
76#include "ARMGenInstrInfo.inc"
80 cl::desc(
"Enable ARM 2-addr to 3-addr conv"));
94 { ARM::VMLAS, ARM::VMULS, ARM::VADDS,
false,
false },
95 { ARM::VMLSS, ARM::VMULS, ARM::VSUBS,
false,
false },
96 { ARM::VMLAD, ARM::VMULD, ARM::VADDD,
false,
false },
97 { ARM::VMLSD, ARM::VMULD, ARM::VSUBD,
false,
false },
98 { ARM::VNMLAS, ARM::VNMULS, ARM::VSUBS,
true,
false },
99 { ARM::VNMLSS, ARM::VMULS, ARM::VSUBS,
true,
false },
100 { ARM::VNMLAD, ARM::VNMULD, ARM::VSUBD,
true,
false },
101 { ARM::VNMLSD, ARM::VMULD, ARM::VSUBD,
true,
false },
104 { ARM::VMLAfd, ARM::VMULfd, ARM::VADDfd,
false,
false },
105 { ARM::VMLSfd, ARM::VMULfd, ARM::VSUBfd,
false,
false },
106 { ARM::VMLAfq, ARM::VMULfq, ARM::VADDfq,
false,
false },
107 { ARM::VMLSfq, ARM::VMULfq, ARM::VSUBfq,
false,
false },
108 { ARM::VMLAslfd, ARM::VMULslfd, ARM::VADDfd,
false,
true },
109 { ARM::VMLSslfd, ARM::VMULslfd, ARM::VSUBfd,
false,
true },
110 { ARM::VMLAslfq, ARM::VMULslfq, ARM::VADDfq,
false,
true },
111 { ARM::VMLSslfq, ARM::VMULslfq, ARM::VSUBfq,
false,
true },
117 for (
unsigned i = 0, e = std::size(
ARM_MLxTable); i != e; ++i) {
130 if (usePreRAHazardRecognizer()) {
132 static_cast<const ARMSubtarget *
>(STI)->getInstrItineraryData();
152 std::make_unique<ARMBankConflictHazardRecognizer>(DAG, 0x4,
true));
189 default:
return nullptr;
215 unsigned OffImm =
MI.getOperand(NumOps - 2).getImm();
228 get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
233 }
else if (Amt != 0) {
237 get(isSub ? ARM::SUBrsi : ARM::ADDrsi), WBReg)
246 get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
259 get(isSub ? ARM::SUBri : ARM::ADDri), WBReg)
266 get(isSub ? ARM::SUBrr : ARM::ADDrr), WBReg)
275 std::vector<MachineInstr*> NewMIs;
279 BuildMI(MF,
MI.getDebugLoc(),
get(MemOpc),
MI.getOperand(0).getReg())
290 NewMIs.push_back(MemMI);
291 NewMIs.push_back(UpdateMI);
295 BuildMI(MF,
MI.getDebugLoc(),
get(MemOpc),
MI.getOperand(0).getReg())
308 NewMIs.push_back(UpdateMI);
309 NewMIs.push_back(MemMI);
315 if (MO.isReg() && MO.getReg().isVirtual()) {
320 MachineInstr *NewMI = (Reg == WBReg) ? UpdateMI : MemMI;
324 if (MO.isUse() && MO.isKill()) {
325 for (
unsigned j = 0; j < 2; ++j) {
331 if (VI.removeKill(
MI))
332 VI.Kills.push_back(NewMI);
358 bool AllowModify)
const {
373 bool CantAnalyze =
false;
377 while (
I->isDebugInstr() || !
I->isTerminator() ||
379 I->getOpcode() == ARM::t2DoLoopStartTP){
391 TBB =
I->getOperand(0).getMBB();
397 assert(!FBB &&
"FBB should have been null.");
399 TBB =
I->getOperand(0).getMBB();
400 Cond.push_back(
I->getOperand(1));
401 Cond.push_back(
I->getOperand(2));
402 }
else if (
I->isReturn()) {
405 }
else if (
I->getOpcode() == ARM::t2LoopEnd &&
412 TBB =
I->getOperand(1).getMBB();
414 Cond.push_back(
I->getOperand(0));
471 int *BytesRemoved)
const {
472 assert(!BytesRemoved &&
"code size not handled");
483 I->eraseFromParent();
493 I->eraseFromParent();
502 int *BytesAdded)
const {
503 assert(!BytesAdded &&
"code size not handled");
512 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
514 "ARM branch conditions have two or three components!");
524 }
else if (
Cond.size() == 2) {
535 if (
Cond.size() == 2)
540 else if (
Cond.size() == 3)
551 if (
Cond.size() == 2) {
563 while (++
I != E &&
I->isInsideBundle()) {
564 int PIdx =
I->findFirstPredOperandIdx();
565 if (PIdx != -1 &&
I->getOperand(PIdx).getImm() !=
ARMCC::AL)
571 int PIdx =
MI.findFirstPredOperandIdx();
572 return PIdx != -1 &&
MI.getOperand(PIdx).getImm() !=
ARMCC::AL;
580 std::string GenericComment =
582 if (!GenericComment.empty())
583 return GenericComment;
587 return std::string();
591 int FirstPredOp =
MI.findFirstPredOperandIdx();
592 if (FirstPredOp != (
int) OpIdx)
593 return std::string();
595 std::string
CC =
"CC::";
602 unsigned Opc =
MI.getOpcode();
611 int PIdx =
MI.findFirstPredOperandIdx();
614 PMO.
setImm(Pred[0].getImm());
615 MI.getOperand(PIdx+1).setReg(Pred[1].
getReg());
622 "CPSR def isn't expected operand");
623 assert((
MI.getOperand(1).isDead() ||
624 MI.getOperand(1).getReg() != ARM::CPSR) &&
625 "if conversion tried to stop defining used CPSR");
626 MI.getOperand(1).setReg(ARM::NoRegister);
636 if (Pred1.
size() > 2 || Pred2.
size() > 2)
661 std::vector<MachineOperand> &Pred,
662 bool SkipDead)
const {
665 bool ClobbersCPSR = MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR);
666 bool IsCPSR = MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR;
667 if (ClobbersCPSR || IsCPSR) {
685 for (
const auto &MO :
MI.operands())
686 if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
692 switch (
MI->getOpcode()) {
693 default:
return true;
724 if (!
MI.isPredicable())
762 if (!MO.isReg() || MO.isUndef() || MO.isUse())
764 if (MO.getReg() != ARM::CPSR)
784 switch (
MI.getOpcode()) {
792 case TargetOpcode::BUNDLE:
793 return getInstBundleLength(
MI);
794 case ARM::CONSTPOOL_ENTRY:
795 case ARM::JUMPTABLE_INSTS:
796 case ARM::JUMPTABLE_ADDRS:
797 case ARM::JUMPTABLE_TBB:
798 case ARM::JUMPTABLE_TBH:
801 return MI.getOperand(2).getImm();
803 return MI.getOperand(1).getImm();
805 case ARM::INLINEASM_BR: {
807 unsigned Size = getInlineAsmLength(
MI.getOperand(0).getSymbolName(), *MAI);
815unsigned ARMBaseInstrInfo::getInstBundleLength(
const MachineInstr &
MI)
const {
819 while (++
I != E &&
I->isInsideBundle()) {
820 assert(!
I->isBundle() &&
"No nested bundle!");
830 unsigned Opc = Subtarget.isThumb()
831 ? (Subtarget.
isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
850 unsigned Opc = Subtarget.isThumb()
851 ? (Subtarget.
isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
885 unsigned Cond,
unsigned Inactive) {
895 bool RenamableSrc)
const {
896 bool GPRDest = ARM::GPRRegClass.contains(DestReg);
897 bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
899 if (GPRDest && GPRSrc) {
907 bool SPRDest = ARM::SPRRegClass.contains(DestReg);
908 bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);
911 if (SPRDest && SPRSrc)
913 else if (GPRDest && SPRSrc)
915 else if (SPRDest && GPRSrc)
917 else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) && Subtarget.hasFP64())
919 else if (ARM::QPRRegClass.
contains(DestReg, SrcReg))
920 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MQPRCopy;
925 if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR)
927 if (Opc == ARM::MVE_VORR)
929 else if (Opc != ARM::MQPRCopy)
935 unsigned BeginIdx = 0;
936 unsigned SubRegs = 0;
940 if (ARM::QQPRRegClass.
contains(DestReg, SrcReg)) {
941 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
942 BeginIdx = ARM::qsub_0;
944 }
else if (ARM::QQQQPRRegClass.
contains(DestReg, SrcReg)) {
945 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
946 BeginIdx = ARM::qsub_0;
949 }
else if (ARM::DPairRegClass.
contains(DestReg, SrcReg)) {
951 BeginIdx = ARM::dsub_0;
953 }
else if (ARM::DTripleRegClass.
contains(DestReg, SrcReg)) {
955 BeginIdx = ARM::dsub_0;
957 }
else if (ARM::DQuadRegClass.
contains(DestReg, SrcReg)) {
959 BeginIdx = ARM::dsub_0;
961 }
else if (ARM::GPRPairRegClass.
contains(DestReg, SrcReg)) {
962 Opc = Subtarget.
isThumb2() ? ARM::tMOVr : ARM::MOVr;
963 BeginIdx = ARM::gsub_0;
965 }
else if (ARM::DPairSpcRegClass.
contains(DestReg, SrcReg)) {
967 BeginIdx = ARM::dsub_0;
970 }
else if (ARM::DTripleSpcRegClass.
contains(DestReg, SrcReg)) {
972 BeginIdx = ARM::dsub_0;
975 }
else if (ARM::DQuadSpcRegClass.
contains(DestReg, SrcReg)) {
977 BeginIdx = ARM::dsub_0;
980 }
else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) &&
981 !Subtarget.hasFP64()) {
983 BeginIdx = ARM::ssub_0;
985 }
else if (SrcReg == ARM::CPSR) {
988 }
else if (DestReg == ARM::CPSR) {
991 }
else if (DestReg == ARM::VPR) {
997 }
else if (SrcReg == ARM::VPR) {
1003 }
else if (DestReg == ARM::FPSCR_NZCV) {
1005 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
1009 }
else if (SrcReg == ARM::FPSCR_NZCV) {
1011 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
1017 assert(Opc &&
"Impossible reg-to-reg copy");
1023 if (
TRI->regsOverlap(SrcReg,
TRI->getSubReg(DestReg, BeginIdx))) {
1024 BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
1030 for (
unsigned i = 0; i != SubRegs; ++i) {
1031 Register Dst =
TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
1032 Register Src =
TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
1033 assert(Dst && Src &&
"Bad sub-register");
1035 assert(!DstRegs.
count(Src) &&
"destructive vector copy");
1040 if (Opc == ARM::VORRq || Opc == ARM::MVE_VORR) {
1044 if (Opc == ARM::MVE_VORR)
1049 if (Opc == ARM::MOVr)
1058std::optional<DestSourcePair>
1067 if (!
MI.isMoveReg() ||
1068 (
MI.getOpcode() == ARM::VORRq &&
1069 MI.getOperand(1).getReg() !=
MI.getOperand(2).getReg()))
1070 return std::nullopt;
1074std::optional<ParamLoadedValue>
1078 Register DstReg = DstSrcPair->Destination->getReg();
1099 return std::nullopt;
1106 unsigned SubIdx,
unsigned State,
1109 return MIB.
addReg(Reg, State);
1112 return MIB.
addReg(
TRI->getSubReg(Reg, SubIdx), State);
1113 return MIB.
addReg(Reg, State, SubIdx);
1118 Register SrcReg,
bool isKill,
int FI,
1131 switch (
TRI->getSpillSize(*RC)) {
1133 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1144 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1151 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1158 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1165 }
else if (ARM::cl_FPSCR_NZCVRegClass.hasSubClassEq(RC)) {
1176 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1183 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1184 if (Subtarget.hasV5TEOps()) {
1204 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1220 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1221 Subtarget.hasMVEIntegerOps()) {
1226 .addMemOperand(MMO);
1232 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1235 Subtarget.hasNEON()) {
1249 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0,
TRI);
1256 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1257 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1258 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1260 Subtarget.hasNEON()) {
1269 }
else if (Subtarget.hasMVEIntegerOps()) {
1281 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0,
TRI);
1282 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, 0,
TRI);
1289 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1290 Subtarget.hasMVEIntegerOps()) {
1295 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1301 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, 0,
TRI);
1302 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, 0,
TRI);
1303 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_3, 0,
TRI);
1304 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_4, 0,
TRI);
1305 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_5, 0,
TRI);
1306 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_6, 0,
TRI);
1317 int &FrameIndex)
const {
1318 switch (
MI.getOpcode()) {
1322 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1323 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1324 MI.getOperand(3).getImm() == 0) {
1325 FrameIndex =
MI.getOperand(1).getIndex();
1326 return MI.getOperand(0).getReg();
1335 case ARM::VSTR_P0_off:
1336 case ARM::VSTR_FPSCR_NZCVQC_off:
1337 case ARM::MVE_VSTRWU32:
1338 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1339 MI.getOperand(2).getImm() == 0) {
1340 FrameIndex =
MI.getOperand(1).getIndex();
1341 return MI.getOperand(0).getReg();
1345 case ARM::VST1d64TPseudo:
1346 case ARM::VST1d64QPseudo:
1347 if (
MI.getOperand(0).isFI() &&
MI.getOperand(2).getSubReg() == 0) {
1348 FrameIndex =
MI.getOperand(0).getIndex();
1349 return MI.getOperand(2).getReg();
1353 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1354 FrameIndex =
MI.getOperand(1).getIndex();
1355 return MI.getOperand(0).getReg();
1358 case ARM::MQQPRStore:
1359 case ARM::MQQQQPRStore:
1360 if (
MI.getOperand(1).isFI()) {
1361 FrameIndex =
MI.getOperand(1).getIndex();
1362 return MI.getOperand(0).getReg();
1371 int &FrameIndex)
const {
1373 if (
MI.mayStore() && hasStoreToStackSlot(
MI, Accesses) &&
1374 Accesses.
size() == 1) {
1376 cast<FixedStackPseudoSourceValue>(Accesses.
front()->getPseudoValue())
1396 switch (
TRI->getSpillSize(*RC)) {
1398 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1408 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1414 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1420 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1426 }
else if (ARM::cl_FPSCR_NZCVRegClass.hasSubClassEq(RC)) {
1436 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1442 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1445 if (Subtarget.hasV5TEOps()) {
1468 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1481 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1482 Subtarget.hasMVEIntegerOps()) {
1484 MIB.addFrameIndex(FI)
1486 .addMemOperand(MMO);
1492 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1494 Subtarget.hasNEON()) {
1515 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1516 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1517 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1519 Subtarget.hasNEON()) {
1525 }
else if (Subtarget.hasMVEIntegerOps()) {
1545 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1546 Subtarget.hasMVEIntegerOps()) {
1550 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1574 int &FrameIndex)
const {
1575 switch (
MI.getOpcode()) {
1579 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1580 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1581 MI.getOperand(3).getImm() == 0) {
1582 FrameIndex =
MI.getOperand(1).getIndex();
1583 return MI.getOperand(0).getReg();
1592 case ARM::VLDR_P0_off:
1593 case ARM::VLDR_FPSCR_NZCVQC_off:
1594 case ARM::MVE_VLDRWU32:
1595 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1596 MI.getOperand(2).getImm() == 0) {
1597 FrameIndex =
MI.getOperand(1).getIndex();
1598 return MI.getOperand(0).getReg();
1602 case ARM::VLD1d8TPseudo:
1603 case ARM::VLD1d16TPseudo:
1604 case ARM::VLD1d32TPseudo:
1605 case ARM::VLD1d64TPseudo:
1606 case ARM::VLD1d8QPseudo:
1607 case ARM::VLD1d16QPseudo:
1608 case ARM::VLD1d32QPseudo:
1609 case ARM::VLD1d64QPseudo:
1610 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1611 FrameIndex =
MI.getOperand(1).getIndex();
1612 return MI.getOperand(0).getReg();
1616 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1617 FrameIndex =
MI.getOperand(1).getIndex();
1618 return MI.getOperand(0).getReg();
1621 case ARM::MQQPRLoad:
1622 case ARM::MQQQQPRLoad:
1623 if (
MI.getOperand(1).isFI()) {
1624 FrameIndex =
MI.getOperand(1).getIndex();
1625 return MI.getOperand(0).getReg();
1634 int &FrameIndex)
const {
1636 if (
MI.mayLoad() && hasLoadFromStackSlot(
MI, Accesses) &&
1637 Accesses.
size() == 1) {
1639 cast<FixedStackPseudoSourceValue>(Accesses.
front()->getPseudoValue())
1650 bool isThumb2 = Subtarget.
isThumb2();
1657 if (isThumb1 || !
MI->getOperand(1).isDead()) {
1659 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA_UPD
1660 : isThumb1 ? ARM::tLDMIA_UPD
1664 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
1667 if (isThumb1 || !
MI->getOperand(0).isDead()) {
1669 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA_UPD
1670 : isThumb1 ? ARM::tSTMIA_UPD
1674 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
1689 [&
TRI](
const unsigned &Reg1,
const unsigned &Reg2) ->
bool {
1690 return TRI.getEncodingValue(Reg1) <
1691 TRI.getEncodingValue(Reg2);
1694 for (
const auto &Reg : ScratchRegs) {
1703 if (
MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
1704 expandLoadStackGuard(
MI);
1705 MI.getParent()->erase(
MI);
1709 if (
MI.getOpcode() == ARM::MEMCPY) {
1718 if (!
MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())
1723 Register DstRegS =
MI.getOperand(0).getReg();
1724 Register SrcRegS =
MI.getOperand(1).getReg();
1725 if (!ARM::SPRRegClass.
contains(DstRegS, SrcRegS))
1730 TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0, &ARM::DPRRegClass);
1732 TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0, &ARM::DPRRegClass);
1733 if (!DstRegD || !SrcRegD)
1739 if (!
MI.definesRegister(DstRegD,
TRI) ||
MI.readsRegister(DstRegD,
TRI))
1743 if (
MI.getOperand(0).isDead())
1752 int ImpDefIdx =
MI.findRegisterDefOperandIdx(DstRegD,
nullptr);
1753 if (ImpDefIdx != -1)
1754 MI.removeOperand(ImpDefIdx);
1757 MI.setDesc(
get(ARM::VMOVD));
1758 MI.getOperand(0).setReg(DstRegD);
1759 MI.getOperand(1).setReg(SrcRegD);
1766 MI.getOperand(1).setIsUndef();
1771 if (
MI.getOperand(1).isKill()) {
1772 MI.getOperand(1).setIsKill(
false);
1773 MI.addRegisterKilled(SrcRegS,
TRI,
true);
1787 assert(MCPE.isMachineConstantPoolEntry() &&
1788 "Expecting a machine constantpool entry!");
1802 cast<ARMConstantPoolConstant>(ACPV)->getGV(), PCLabelId,
ARMCP::CPValue,
1807 cast<ARMConstantPoolSymbol>(ACPV)->getSymbol(), PCLabelId, 4);
1810 Create(cast<ARMConstantPoolConstant>(ACPV)->getBlockAddress(), PCLabelId,
1818 cast<ARMConstantPoolMBB>(ACPV)->getMBB(), PCLabelId, 4);
1838 case ARM::tLDRpci_pic:
1839 case ARM::t2LDRpci_pic: {
1859 switch (
I->getOpcode()) {
1860 case ARM::tLDRpci_pic:
1861 case ARM::t2LDRpci_pic: {
1863 unsigned CPI =
I->getOperand(1).getIndex();
1865 I->getOperand(1).setIndex(CPI);
1866 I->getOperand(2).setImm(PCLabelId);
1870 if (!
I->isBundledWithSucc())
1881 if (Opcode == ARM::t2LDRpci || Opcode == ARM::t2LDRpci_pic ||
1882 Opcode == ARM::tLDRpci || Opcode == ARM::tLDRpci_pic ||
1883 Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1884 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1885 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1886 Opcode == ARM::t2MOV_ga_pcrel) {
1897 if (Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1898 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1899 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1900 Opcode == ARM::t2MOV_ga_pcrel)
1912 if (isARMCP0 && isARMCP1) {
1918 }
else if (!isARMCP0 && !isARMCP1) {
1922 }
else if (Opcode == ARM::PICLDR) {
1930 if (Addr0 != Addr1) {
1966 int64_t &Offset2)
const {
1973 auto IsLoadOpcode = [&](
unsigned Opcode) {
1988 case ARM::t2LDRSHi8:
1990 case ARM::t2LDRBi12:
1991 case ARM::t2LDRSHi12:
2010 if (isa<ConstantSDNode>(Load1->
getOperand(1)) &&
2012 Offset1 = cast<ConstantSDNode>(Load1->
getOperand(1))->getSExtValue();
2013 Offset2 = cast<ConstantSDNode>(Load2->
getOperand(1))->getSExtValue();
2032 int64_t Offset1, int64_t Offset2,
2033 unsigned NumLoads)
const {
2037 assert(Offset2 > Offset1);
2039 if ((Offset2 - Offset1) / 8 > 64)
2070 if (
MI.isDebugInstr())
2074 if (
MI.isTerminator() ||
MI.isPosition())
2078 if (
MI.getOpcode() == TargetOpcode::INLINEASM_BR)
2092 while (++
I !=
MBB->
end() &&
I->isDebugInstr())
2094 if (
I !=
MBB->
end() &&
I->getOpcode() == ARM::t2IT)
2105 if (!
MI.isCall() &&
MI.definesRegister(ARM::SP,
nullptr))
2113 unsigned NumCycles,
unsigned ExtraPredCycles,
2123 if (!Pred->
empty()) {
2125 if (LastMI->
getOpcode() == ARM::t2Bcc) {
2134 MBB, 0, 0, Probability);
2139 unsigned TCycles,
unsigned TExtra,
2141 unsigned FCycles,
unsigned FExtra,
2158 const unsigned ScalingUpFactor = 1024;
2160 unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
2161 unsigned UnpredCost;
2162 if (!Subtarget.hasBranchPredictor()) {
2165 unsigned NotTakenBranchCost = 1;
2167 unsigned TUnpredCycles, FUnpredCycles;
2170 TUnpredCycles = TCycles + NotTakenBranchCost;
2171 FUnpredCycles = TakenBranchCost;
2174 TUnpredCycles = TCycles + TakenBranchCost;
2175 FUnpredCycles = FCycles + NotTakenBranchCost;
2178 PredCost -= 1 * ScalingUpFactor;
2181 unsigned TUnpredCost = Probability.
scale(TUnpredCycles * ScalingUpFactor);
2182 unsigned FUnpredCost = Probability.
getCompl().
scale(FUnpredCycles * ScalingUpFactor);
2183 UnpredCost = TUnpredCost + FUnpredCost;
2186 if (Subtarget.
isThumb2() && TCycles + FCycles > 4) {
2187 PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
2190 unsigned TUnpredCost = Probability.
scale(TCycles * ScalingUpFactor);
2191 unsigned FUnpredCost =
2193 UnpredCost = TUnpredCost + FUnpredCost;
2194 UnpredCost += 1 * ScalingUpFactor;
2198 return PredCost <= UnpredCost;
2203 unsigned NumInsts)
const {
2211 unsigned MaxInsts = Subtarget.
restrictIT() ? 1 : 4;
2220 if (
MI.getOpcode() == ARM::t2Bcc &&
2243 return Subtarget.isProfitableToUnpredicate();
2251 int PIdx =
MI.findFirstPredOperandIdx();
2257 PredReg =
MI.getOperand(PIdx+1).getReg();
2266 if (Opc == ARM::t2B)
2275 unsigned OpIdx2)
const {
2276 switch (
MI.getOpcode()) {
2278 case ARM::t2MOVCCr: {
2303 if (!Reg.isVirtual())
2305 if (!
MRI.hasOneNonDBGUse(Reg))
2317 if (MO.isFI() || MO.isCPI() || MO.isJTI())
2324 if (MO.getReg().isPhysical())
2326 if (MO.isDef() && !MO.isDead())
2329 bool DontMoveAcrossStores =
true;
2330 if (!
MI->isSafeToMove(DontMoveAcrossStores))
2337 unsigned &TrueOp,
unsigned &FalseOp,
2338 bool &Optimizable)
const {
2339 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2340 "Unknown select instruction");
2349 Cond.push_back(
MI.getOperand(3));
2350 Cond.push_back(
MI.getOperand(4));
2359 bool PreferFalse)
const {
2360 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2361 "Unknown select instruction");
2364 bool Invert = !
DefMI;
2366 DefMI = canFoldIntoMOVCC(
MI.getOperand(1).getReg(),
MRI,
this);
2373 Register DestReg =
MI.getOperand(0).getReg();
2376 if (!
MRI.constrainRegClass(DestReg, FalseClass))
2378 if (!
MRI.constrainRegClass(DestReg, TrueClass))
2389 i != e && !DefDesc.
operands()[i].isPredicate(); ++i)
2392 unsigned CondCode =
MI.getOperand(3).getImm();
2397 NewMI.
add(
MI.getOperand(4));
2408 NewMI.
add(FalseReg);
2439 {ARM::ADDSri, ARM::ADDri},
2440 {ARM::ADDSrr, ARM::ADDrr},
2441 {ARM::ADDSrsi, ARM::ADDrsi},
2442 {ARM::ADDSrsr, ARM::ADDrsr},
2444 {ARM::SUBSri, ARM::SUBri},
2445 {ARM::SUBSrr, ARM::SUBrr},
2446 {ARM::SUBSrsi, ARM::SUBrsi},
2447 {ARM::SUBSrsr, ARM::SUBrsr},
2449 {ARM::RSBSri, ARM::RSBri},
2450 {ARM::RSBSrsi, ARM::RSBrsi},
2451 {ARM::RSBSrsr, ARM::RSBrsr},
2453 {ARM::tADDSi3, ARM::tADDi3},
2454 {ARM::tADDSi8, ARM::tADDi8},
2455 {ARM::tADDSrr, ARM::tADDrr},
2456 {ARM::tADCS, ARM::tADC},
2458 {ARM::tSUBSi3, ARM::tSUBi3},
2459 {ARM::tSUBSi8, ARM::tSUBi8},
2460 {ARM::tSUBSrr, ARM::tSUBrr},
2461 {ARM::tSBCS, ARM::tSBC},
2462 {ARM::tRSBS, ARM::tRSB},
2463 {ARM::tLSLSri, ARM::tLSLri},
2465 {ARM::t2ADDSri, ARM::t2ADDri},
2466 {ARM::t2ADDSrr, ARM::t2ADDrr},
2467 {ARM::t2ADDSrs, ARM::t2ADDrs},
2469 {ARM::t2SUBSri, ARM::t2SUBri},
2470 {ARM::t2SUBSrr, ARM::t2SUBrr},
2471 {ARM::t2SUBSrs, ARM::t2SUBrs},
2473 {ARM::t2RSBSri, ARM::t2RSBri},
2474 {ARM::t2RSBSrs, ARM::t2RSBrs},
2479 if (OldOpc == Entry.PseudoOpc)
2480 return Entry.MachineOpc;
2491 if (NumBytes == 0 && DestReg != BaseReg) {
2500 bool isSub = NumBytes < 0;
2501 if (isSub) NumBytes = -NumBytes;
2505 unsigned ThisVal = NumBytes & llvm::rotr<uint32_t>(0xFF, RotAmt);
2506 assert(ThisVal &&
"Didn't extract field correctly");
2509 NumBytes &= ~ThisVal;
2514 unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
2527 unsigned NumBytes) {
2538 if (!IsPush && !IsPop)
2541 bool IsVFPPushPop =
MI->getOpcode() == ARM::VSTMDDB_UPD ||
2542 MI->getOpcode() == ARM::VLDMDIA_UPD;
2543 bool IsT1PushPop =
MI->getOpcode() == ARM::tPUSH ||
2544 MI->getOpcode() == ARM::tPOP ||
2545 MI->getOpcode() == ARM::tPOP_RET;
2547 assert((IsT1PushPop || (
MI->getOperand(0).getReg() == ARM::SP &&
2548 MI->getOperand(1).getReg() == ARM::SP)) &&
2549 "trying to fold sp update into non-sp-updating push/pop");
2554 if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
2559 int RegListIdx = IsT1PushPop ? 2 : 4;
2562 unsigned RegsNeeded;
2565 RegsNeeded = NumBytes / 8;
2566 RegClass = &ARM::DPRRegClass;
2568 RegsNeeded = NumBytes / 4;
2569 RegClass = &ARM::GPRRegClass;
2579 unsigned FirstRegEnc = -1;
2582 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i) {
2587 TRI->getEncodingValue(MO.
getReg()) < FirstRegEnc)
2588 FirstRegEnc =
TRI->getEncodingValue(MO.
getReg());
2591 const MCPhysReg *CSRegs =
TRI->getCalleeSavedRegs(&MF);
2594 for (
int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
2597 if (IsT1PushPop && CurRegEnc >
TRI->getEncodingValue(ARM::R7))
2604 false,
false,
true));
2614 MI->getParent()->computeRegisterLiveness(
TRI, CurReg,
MI) !=
2636 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i)
2637 MI->removeOperand(i);
2650 unsigned Opcode =
MI.getOpcode();
2656 if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
2659 if (Opcode == ARM::ADDri) {
2660 Offset +=
MI.getOperand(FrameRegIdx+1).getImm();
2663 MI.setDesc(
TII.get(ARM::MOVr));
2664 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2665 MI.removeOperand(FrameRegIdx+1);
2671 MI.setDesc(
TII.get(ARM::SUBri));
2677 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2678 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(
Offset);
2686 unsigned ThisImmVal =
Offset & llvm::rotr<uint32_t>(0xFF, RotAmt);
2693 "Bit extraction didn't work?");
2694 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
2696 unsigned ImmIdx = 0;
2698 unsigned NumBits = 0;
2702 ImmIdx = FrameRegIdx + 1;
2703 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2707 ImmIdx = FrameRegIdx+2;
2714 ImmIdx = FrameRegIdx+2;
2725 ImmIdx = FrameRegIdx+1;
2733 ImmIdx = FrameRegIdx+1;
2743 ImmIdx = FrameRegIdx+1;
2744 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2753 Offset += InstrOffs * Scale;
2754 assert((
Offset & (Scale-1)) == 0 &&
"Can't encode this offset!");
2764 int ImmedOffset =
Offset / Scale;
2765 unsigned Mask = (1 << NumBits) - 1;
2766 if ((
unsigned)
Offset <= Mask * Scale) {
2768 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2774 ImmedOffset = -ImmedOffset;
2776 ImmedOffset |= 1 << NumBits;
2784 ImmedOffset = ImmedOffset & Mask;
2787 ImmedOffset = -ImmedOffset;
2789 ImmedOffset |= 1 << NumBits;
2805 Register &SrcReg2, int64_t &CmpMask,
2806 int64_t &CmpValue)
const {
2807 switch (
MI.getOpcode()) {
2812 SrcReg =
MI.getOperand(0).getReg();
2815 CmpValue =
MI.getOperand(1).getImm();
2820 SrcReg =
MI.getOperand(0).getReg();
2821 SrcReg2 =
MI.getOperand(1).getReg();
2827 SrcReg =
MI.getOperand(0).getReg();
2829 CmpMask =
MI.getOperand(1).getImm();
2842 int CmpMask,
bool CommonUse) {
2843 switch (
MI->getOpcode()) {
2846 if (CmpMask !=
MI->getOperand(2).getImm())
2848 if (SrcReg ==
MI->getOperand(CommonUse ? 1 : 0).getReg())
2938 switch (
MI->getOpcode()) {
2939 default:
return false;
3035 if (!
MI)
return false;
3038 if (CmpMask != ~0) {
3042 UI =
MRI->use_instr_begin(SrcReg), UE =
MRI->use_instr_end();
3044 if (UI->getParent() != CmpInstr.
getParent())
3053 if (!
MI)
return false;
3062 if (
I ==
B)
return false;
3073 else if (
MI->getParent() != CmpInstr.
getParent() || CmpValue != 0) {
3078 if (CmpInstr.
getOpcode() == ARM::CMPri ||
3086 bool IsThumb1 =
false;
3103 if (
MI && IsThumb1) {
3105 if (
I != E && !
MI->readsRegister(ARM::CPSR,
TRI)) {
3106 bool CanReorder =
true;
3107 for (;
I != E; --
I) {
3108 if (
I->getOpcode() != ARM::tMOVi8) {
3114 MI =
MI->removeFromParent();
3125 bool SubAddIsThumb1 =
false;
3140 if (Instr.modifiesRegister(ARM::CPSR,
TRI) ||
3141 Instr.readsRegister(ARM::CPSR,
TRI))
3163 IsThumb1 = SubAddIsThumb1;
3178 bool isSafe =
false;
3181 while (!isSafe && ++
I != E) {
3183 for (
unsigned IO = 0, EO = Instr.getNumOperands();
3184 !isSafe && IO != EO; ++IO) {
3198 bool IsInstrVSel =
true;
3199 switch (Instr.getOpcode()) {
3201 IsInstrVSel =
false;
3235 bool IsSub = Opc == ARM::SUBrr || Opc == ARM::t2SUBrr ||
3236 Opc == ARM::SUBri || Opc == ARM::t2SUBri ||
3237 Opc == ARM::tSUBrr || Opc == ARM::tSUBi3 ||
3239 unsigned OpI = Opc != ARM::tSUBrr ? 1 : 2;
3251 std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
3285 if (Succ->isLiveIn(ARM::CPSR))
3292 unsigned CPSRRegNum =
MI->getNumExplicitOperands() - 1;
3293 MI->getOperand(CPSRRegNum).setReg(ARM::CPSR);
3294 MI->getOperand(CPSRRegNum).setIsDef(
true);
3302 for (
unsigned i = 0, e = OperandsToUpdate.
size(); i < e; i++)
3303 OperandsToUpdate[i].first->setImm(OperandsToUpdate[i].second);
3305 MI->clearRegisterDeads(ARM::CPSR);
3319 int64_t CmpMask, CmpValue;
3321 if (Next !=
MI.getParent()->end() &&
3332 unsigned DefOpc =
DefMI.getOpcode();
3333 if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm &&
3334 DefOpc != ARM::tMOVi32imm)
3336 if (!
DefMI.getOperand(1).isImm())
3340 if (!
MRI->hasOneNonDBGUse(Reg))
3356 if (
UseMI.getOperand(NumOps - 1).
getReg() == ARM::CPSR)
3362 unsigned UseOpc =
UseMI.getOpcode();
3363 unsigned NewUseOpc = 0;
3365 uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
3366 bool Commute =
false;
3368 default:
return false;
3376 case ARM::t2EORrr: {
3382 if (UseOpc == ARM::SUBrr && Commute)
3388 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri;
3391 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::SUBri : ARM::ADDri;
3405 case ARM::ORRrr: NewUseOpc = ARM::ORRri;
break;
3406 case ARM::EORrr: NewUseOpc = ARM::EORri;
break;
3410 case ARM::t2SUBrr: {
3411 if (UseOpc == ARM::t2SUBrr && Commute)
3416 const bool ToSP =
DefMI.getOperand(0).
getReg() == ARM::SP;
3417 const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
3418 const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
3420 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB;
3423 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD;
3438 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri;
break;
3439 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri;
break;
3446 unsigned OpIdx = Commute ? 2 : 1;
3448 bool isKill =
UseMI.getOperand(OpIdx).isKill();
3450 Register NewReg =
MRI->createVirtualRegister(TRC);
3458 UseMI.getOperand(1).setReg(NewReg);
3459 UseMI.getOperand(1).setIsKill();
3460 UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
3461 DefMI.eraseFromParent();
3468 case ARM::t2ADDspImm:
3469 case ARM::t2SUBspImm:
3479 switch (
MI.getOpcode()) {
3483 assert(UOps >= 0 &&
"bad # UOps");
3491 unsigned ShOpVal =
MI.getOperand(3).getImm();
3496 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3504 if (!
MI.getOperand(2).getReg())
3507 unsigned ShOpVal =
MI.getOperand(3).getImm();
3512 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3522 case ARM::LDRSB_POST:
3523 case ARM::LDRSH_POST: {
3526 return (Rt == Rm) ? 4 : 3;
3529 case ARM::LDR_PRE_REG:
3530 case ARM::LDRB_PRE_REG: {
3535 unsigned ShOpVal =
MI.getOperand(4).getImm();
3540 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3546 case ARM::STR_PRE_REG:
3547 case ARM::STRB_PRE_REG: {
3548 unsigned ShOpVal =
MI.getOperand(4).getImm();
3553 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3560 case ARM::STRH_PRE: {
3570 case ARM::LDR_POST_REG:
3571 case ARM::LDRB_POST_REG:
3572 case ARM::LDRH_POST: {
3575 return (Rt == Rm) ? 3 : 2;
3578 case ARM::LDR_PRE_IMM:
3579 case ARM::LDRB_PRE_IMM:
3580 case ARM::LDR_POST_IMM:
3581 case ARM::LDRB_POST_IMM:
3582 case ARM::STRB_POST_IMM:
3583 case ARM::STRB_POST_REG:
3584 case ARM::STRB_PRE_IMM:
3585 case ARM::STRH_POST:
3586 case ARM::STR_POST_IMM:
3587 case ARM::STR_POST_REG:
3588 case ARM::STR_PRE_IMM:
3591 case ARM::LDRSB_PRE:
3592 case ARM::LDRSH_PRE: {
3599 unsigned ShOpVal =
MI.getOperand(4).getImm();
3604 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3617 return (Rt == Rn) ? 3 : 2;
3628 case ARM::LDRD_POST:
3629 case ARM::t2LDRD_POST:
3632 case ARM::STRD_POST:
3633 case ARM::t2STRD_POST:
3636 case ARM::LDRD_PRE: {
3643 return (Rt == Rn) ? 4 : 3;
3646 case ARM::t2LDRD_PRE: {
3649 return (Rt == Rn) ? 4 : 3;
3652 case ARM::STRD_PRE: {
3660 case ARM::t2STRD_PRE:
3663 case ARM::t2LDR_POST:
3664 case ARM::t2LDRB_POST:
3665 case ARM::t2LDRB_PRE:
3666 case ARM::t2LDRSBi12:
3667 case ARM::t2LDRSBi8:
3668 case ARM::t2LDRSBpci:
3670 case ARM::t2LDRH_POST:
3671 case ARM::t2LDRH_PRE:
3673 case ARM::t2LDRSB_POST:
3674 case ARM::t2LDRSB_PRE:
3675 case ARM::t2LDRSH_POST:
3676 case ARM::t2LDRSH_PRE:
3677 case ARM::t2LDRSHi12:
3678 case ARM::t2LDRSHi8:
3679 case ARM::t2LDRSHpci:
3683 case ARM::t2LDRDi8: {
3686 return (Rt == Rn) ? 3 : 2;
3689 case ARM::t2STRB_POST:
3690 case ARM::t2STRB_PRE:
3693 case ARM::t2STRH_POST:
3694 case ARM::t2STRH_PRE:
3696 case ARM::t2STR_POST:
3697 case ARM::t2STR_PRE:
3728 E =
MI.memoperands_end();
3730 Size += (*I)->getSize().getValue();
3737 return std::min(
Size / 4, 16U);
3742 unsigned UOps = 1 + NumRegs;
3746 case ARM::VLDMDIA_UPD:
3747 case ARM::VLDMDDB_UPD:
3748 case ARM::VLDMSIA_UPD:
3749 case ARM::VLDMSDB_UPD:
3750 case ARM::VSTMDIA_UPD:
3751 case ARM::VSTMDDB_UPD:
3752 case ARM::VSTMSIA_UPD:
3753 case ARM::VSTMSDB_UPD:
3754 case ARM::LDMIA_UPD:
3755 case ARM::LDMDA_UPD:
3756 case ARM::LDMDB_UPD:
3757 case ARM::LDMIB_UPD:
3758 case ARM::STMIA_UPD:
3759 case ARM::STMDA_UPD:
3760 case ARM::STMDB_UPD:
3761 case ARM::STMIB_UPD:
3762 case ARM::tLDMIA_UPD:
3763 case ARM::tSTMIA_UPD:
3764 case ARM::t2LDMIA_UPD:
3765 case ARM::t2LDMDB_UPD:
3766 case ARM::t2STMIA_UPD:
3767 case ARM::t2STMDB_UPD:
3770 case ARM::LDMIA_RET:
3772 case ARM::t2LDMIA_RET:
3781 if (!ItinData || ItinData->
isEmpty())
3785 unsigned Class =
Desc.getSchedClass();
3787 if (ItinUOps >= 0) {
3794 unsigned Opc =
MI.getOpcode();
3813 case ARM::VLDMDIA_UPD:
3814 case ARM::VLDMDDB_UPD:
3816 case ARM::VLDMSIA_UPD:
3817 case ARM::VLDMSDB_UPD:
3819 case ARM::VSTMDIA_UPD:
3820 case ARM::VSTMDDB_UPD:
3822 case ARM::VSTMSIA_UPD:
3823 case ARM::VSTMSDB_UPD: {
3824 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands();
3825 return (NumRegs / 2) + (NumRegs % 2) + 1;
3828 case ARM::LDMIA_RET:
3833 case ARM::LDMIA_UPD:
3834 case ARM::LDMDA_UPD:
3835 case ARM::LDMDB_UPD:
3836 case ARM::LDMIB_UPD:
3841 case ARM::STMIA_UPD:
3842 case ARM::STMDA_UPD:
3843 case ARM::STMDB_UPD:
3844 case ARM::STMIB_UPD:
3846 case ARM::tLDMIA_UPD:
3847 case ARM::tSTMIA_UPD:
3851 case ARM::t2LDMIA_RET:
3854 case ARM::t2LDMIA_UPD:
3855 case ARM::t2LDMDB_UPD:
3858 case ARM::t2STMIA_UPD:
3859 case ARM::t2STMDB_UPD: {
3860 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands() + 1;
3872 unsigned UOps = (NumRegs / 2);
3878 unsigned UOps = (NumRegs / 2);
3881 if ((NumRegs % 2) || !
MI.hasOneMemOperand() ||
3892std::optional<unsigned>
3895 unsigned DefIdx,
unsigned DefAlign)
const {
3904 DefCycle = RegNo / 2 + 1;
3909 bool isSLoad =
false;
3914 case ARM::VLDMSIA_UPD:
3915 case ARM::VLDMSDB_UPD:
3922 if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3926 DefCycle = RegNo + 2;
3932std::optional<unsigned>
3935 unsigned DefIdx,
unsigned DefAlign)
const {
3945 DefCycle = RegNo / 2;
3951 DefCycle = (RegNo / 2);
3954 if ((RegNo % 2) || DefAlign < 8)
3960 DefCycle = RegNo + 2;
3966std::optional<unsigned>
3969 unsigned UseIdx,
unsigned UseAlign)
const {
3977 UseCycle = RegNo / 2 + 1;
3982 bool isSStore =
false;
3987 case ARM::VSTMSIA_UPD:
3988 case ARM::VSTMSDB_UPD:
3995 if ((isSStore && (RegNo % 2)) || UseAlign < 8)
3999 UseCycle = RegNo + 2;
4005std::optional<unsigned>
4008 unsigned UseIdx,
unsigned UseAlign)
const {
4015 UseCycle = RegNo / 2;
4021 UseCycle = (RegNo / 2);
4024 if ((RegNo % 2) || UseAlign < 8)
4035 unsigned DefIdx,
unsigned DefAlign,
const MCInstrDesc &UseMCID,
4036 unsigned UseIdx,
unsigned UseAlign)
const {
4046 std::optional<unsigned> DefCycle;
4047 bool LdmBypass =
false;
4054 case ARM::VLDMDIA_UPD:
4055 case ARM::VLDMDDB_UPD:
4057 case ARM::VLDMSIA_UPD:
4058 case ARM::VLDMSDB_UPD:
4059 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
4062 case ARM::LDMIA_RET:
4067 case ARM::LDMIA_UPD:
4068 case ARM::LDMDA_UPD:
4069 case ARM::LDMDB_UPD:
4070 case ARM::LDMIB_UPD:
4072 case ARM::tLDMIA_UPD:
4074 case ARM::t2LDMIA_RET:
4077 case ARM::t2LDMIA_UPD:
4078 case ARM::t2LDMDB_UPD:
4080 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
4088 std::optional<unsigned> UseCycle;
4095 case ARM::VSTMDIA_UPD:
4096 case ARM::VSTMDDB_UPD:
4098 case ARM::VSTMSIA_UPD:
4099 case ARM::VSTMSDB_UPD:
4100 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
4107 case ARM::STMIA_UPD:
4108 case ARM::STMDA_UPD:
4109 case ARM::STMDB_UPD:
4110 case ARM::STMIB_UPD:
4111 case ARM::tSTMIA_UPD:
4116 case ARM::t2STMIA_UPD:
4117 case ARM::t2STMDB_UPD:
4118 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
4126 if (UseCycle > *DefCycle + 1)
4127 return std::nullopt;
4129 UseCycle = *DefCycle - *UseCycle + 1;
4130 if (UseCycle > 0u) {
4136 UseCycle = *UseCycle - 1;
4138 UseClass, UseIdx)) {
4139 UseCycle = *UseCycle - 1;
4148 unsigned &DefIdx,
unsigned &Dist) {
4153 assert(
II->isInsideBundle() &&
"Empty bundle?");
4156 while (
II->isInsideBundle()) {
4157 Idx =
II->findRegisterDefOperandIdx(Reg,
TRI,
false,
true);
4164 assert(
Idx != -1 &&
"Cannot find bundled definition!");
4171 unsigned &UseIdx,
unsigned &Dist) {
4175 assert(
II->isInsideBundle() &&
"Empty bundle?");
4180 while (
II != E &&
II->isInsideBundle()) {
4181 Idx =
II->findRegisterUseOperandIdx(Reg,
TRI,
false);
4184 if (
II->getOpcode() != ARM::t2IT)
4212 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4222 case ARM::t2LDRSHs: {
4224 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4225 if (ShAmt == 0 || ShAmt == 2)
4230 }
else if (Subtarget.
isSwift()) {
4237 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4242 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4253 case ARM::t2LDRSHs: {
4255 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4256 if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
4263 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) {
4270 case ARM::VLD1q8wb_fixed:
4271 case ARM::VLD1q16wb_fixed:
4272 case ARM::VLD1q32wb_fixed:
4273 case ARM::VLD1q64wb_fixed:
4274 case ARM::VLD1q8wb_register:
4275 case ARM::VLD1q16wb_register:
4276 case ARM::VLD1q32wb_register:
4277 case ARM::VLD1q64wb_register:
4284 case ARM::VLD2d8wb_fixed:
4285 case ARM::VLD2d16wb_fixed:
4286 case ARM::VLD2d32wb_fixed:
4287 case ARM::VLD2q8wb_fixed:
4288 case ARM::VLD2q16wb_fixed:
4289 case ARM::VLD2q32wb_fixed:
4290 case ARM::VLD2d8wb_register:
4291 case ARM::VLD2d16wb_register:
4292 case ARM::VLD2d32wb_register:
4293 case ARM::VLD2q8wb_register:
4294 case ARM::VLD2q16wb_register:
4295 case ARM::VLD2q32wb_register:
4300 case ARM::VLD3d8_UPD:
4301 case ARM::VLD3d16_UPD:
4302 case ARM::VLD3d32_UPD:
4303 case ARM::VLD1d64Twb_fixed:
4304 case ARM::VLD1d64Twb_register:
4305 case ARM::VLD3q8_UPD:
4306 case ARM::VLD3q16_UPD:
4307 case ARM::VLD3q32_UPD:
4312 case ARM::VLD4d8_UPD:
4313 case ARM::VLD4d16_UPD:
4314 case ARM::VLD4d32_UPD:
4315 case ARM::VLD1d64Qwb_fixed:
4316 case ARM::VLD1d64Qwb_register:
4317 case ARM::VLD4q8_UPD:
4318 case ARM::VLD4q16_UPD:
4319 case ARM::VLD4q32_UPD:
4320 case ARM::VLD1DUPq8:
4321 case ARM::VLD1DUPq16:
4322 case ARM::VLD1DUPq32:
4323 case ARM::VLD1DUPq8wb_fixed:
4324 case ARM::VLD1DUPq16wb_fixed:
4325 case ARM::VLD1DUPq32wb_fixed:
4326 case ARM::VLD1DUPq8wb_register:
4327 case ARM::VLD1DUPq16wb_register:
4328 case ARM::VLD1DUPq32wb_register:
4329 case ARM::VLD2DUPd8:
4330 case ARM::VLD2DUPd16:
4331 case ARM::VLD2DUPd32:
4332 case ARM::VLD2DUPd8wb_fixed:
4333 case ARM::VLD2DUPd16wb_fixed:
4334 case ARM::VLD2DUPd32wb_fixed:
4335 case ARM::VLD2DUPd8wb_register:
4336 case ARM::VLD2DUPd16wb_register:
4337 case ARM::VLD2DUPd32wb_register:
4338 case ARM::VLD4DUPd8:
4339 case ARM::VLD4DUPd16:
4340 case ARM::VLD4DUPd32:
4341 case ARM::VLD4DUPd8_UPD:
4342 case ARM::VLD4DUPd16_UPD:
4343 case ARM::VLD4DUPd32_UPD:
4345 case ARM::VLD1LNd16:
4346 case ARM::VLD1LNd32:
4347 case ARM::VLD1LNd8_UPD:
4348 case ARM::VLD1LNd16_UPD:
4349 case ARM::VLD1LNd32_UPD:
4351 case ARM::VLD2LNd16:
4352 case ARM::VLD2LNd32:
4353 case ARM::VLD2LNq16:
4354 case ARM::VLD2LNq32:
4355 case ARM::VLD2LNd8_UPD:
4356 case ARM::VLD2LNd16_UPD:
4357 case ARM::VLD2LNd32_UPD:
4358 case ARM::VLD2LNq16_UPD:
4359 case ARM::VLD2LNq32_UPD:
4361 case ARM::VLD4LNd16:
4362 case ARM::VLD4LNd32:
4363 case ARM::VLD4LNq16:
4364 case ARM::VLD4LNq32:
4365 case ARM::VLD4LNd8_UPD:
4366 case ARM::VLD4LNd16_UPD:
4367 case ARM::VLD4LNd32_UPD:
4368 case ARM::VLD4LNq16_UPD:
4369 case ARM::VLD4LNq32_UPD:
4383 if (!ItinData || ItinData->
isEmpty())
4384 return std::nullopt;
4390 unsigned DefAdj = 0;
4391 if (
DefMI.isBundle())
4400 unsigned UseAdj = 0;
4401 if (
UseMI.isBundle()) {
4405 return std::nullopt;
4408 return getOperandLatencyImpl(
4409 ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->
getDesc(), DefAdj, DefMO,
4410 Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->
getDesc(), UseAdj);
4413std::optional<unsigned> ARMBaseInstrInfo::getOperandLatencyImpl(
4415 unsigned DefIdx,
const MCInstrDesc &DefMCID,
unsigned DefAdj,
4417 unsigned UseIdx,
const MCInstrDesc &UseMCID,
unsigned UseAdj)
const {
4418 if (Reg == ARM::CPSR) {
4419 if (
DefMI.getOpcode() == ARM::FMSTAT) {
4421 return Subtarget.
isLikeA9() ? 1 : 20;
4425 if (
UseMI.isBranch())
4445 return std::nullopt;
4447 unsigned DefAlign =
DefMI.hasOneMemOperand()
4450 unsigned UseAlign =
UseMI.hasOneMemOperand()
4456 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4459 return std::nullopt;
4462 int Adj = DefAdj + UseAdj;
4466 if (Adj >= 0 || (
int)*
Latency > -Adj) {
4473std::optional<unsigned>
4475 SDNode *DefNode,
unsigned DefIdx,
4476 SDNode *UseNode,
unsigned UseIdx)
const {
4482 if (isZeroCost(DefMCID.
Opcode))
4485 if (!ItinData || ItinData->
isEmpty())
4486 return DefMCID.
mayLoad() ? 3 : 1;
4489 std::optional<unsigned>
Latency =
4492 int Threshold = 1 + Adj;
4497 auto *DefMN = cast<MachineSDNode>(DefNode);
4498 unsigned DefAlign = !DefMN->memoperands_empty()
4499 ? (*DefMN->memoperands_begin())->
getAlign().value()
4501 auto *UseMN = cast<MachineSDNode>(UseNode);
4502 unsigned UseAlign = !UseMN->memoperands_empty()
4503 ? (*UseMN->memoperands_begin())->
getAlign().value()
4506 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4508 return std::nullopt;
4529 case ARM::t2LDRSHs: {
4532 if (ShAmt == 0 || ShAmt == 2)
4547 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4564 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment())
4571 case ARM::VLD1q8wb_register:
4572 case ARM::VLD1q16wb_register:
4573 case ARM::VLD1q32wb_register:
4574 case ARM::VLD1q64wb_register:
4575 case ARM::VLD1q8wb_fixed:
4576 case ARM::VLD1q16wb_fixed:
4577 case ARM::VLD1q32wb_fixed:
4578 case ARM::VLD1q64wb_fixed:
4582 case ARM::VLD2q8Pseudo:
4583 case ARM::VLD2q16Pseudo:
4584 case ARM::VLD2q32Pseudo:
4585 case ARM::VLD2d8wb_fixed:
4586 case ARM::VLD2d16wb_fixed:
4587 case ARM::VLD2d32wb_fixed:
4588 case ARM::VLD2q8PseudoWB_fixed:
4589 case ARM::VLD2q16PseudoWB_fixed:
4590 case ARM::VLD2q32PseudoWB_fixed:
4591 case ARM::VLD2d8wb_register:
4592 case ARM::VLD2d16wb_register:
4593 case ARM::VLD2d32wb_register:
4594 case ARM::VLD2q8PseudoWB_register:
4595 case ARM::VLD2q16PseudoWB_register:
4596 case ARM::VLD2q32PseudoWB_register:
4597 case ARM::VLD3d8Pseudo:
4598 case ARM::VLD3d16Pseudo:
4599 case ARM::VLD3d32Pseudo:
4600 case ARM::VLD1d8TPseudo:
4601 case ARM::VLD1d16TPseudo:
4602 case ARM::VLD1d32TPseudo:
4603 case ARM::VLD1d64TPseudo:
4604 case ARM::VLD1d64TPseudoWB_fixed:
4605 case ARM::VLD1d64TPseudoWB_register:
4606 case ARM::VLD3d8Pseudo_UPD:
4607 case ARM::VLD3d16Pseudo_UPD:
4608 case ARM::VLD3d32Pseudo_UPD:
4609 case ARM::VLD3q8Pseudo_UPD:
4610 case ARM::VLD3q16Pseudo_UPD:
4611 case ARM::VLD3q32Pseudo_UPD:
4612 case ARM::VLD3q8oddPseudo:
4613 case ARM::VLD3q16oddPseudo:
4614 case ARM::VLD3q32oddPseudo:
4615 case ARM::VLD3q8oddPseudo_UPD:
4616 case ARM::VLD3q16oddPseudo_UPD:
4617 case ARM::VLD3q32oddPseudo_UPD:
4618 case ARM::VLD4d8Pseudo:
4619 case ARM::VLD4d16Pseudo:
4620 case ARM::VLD4d32Pseudo:
4621 case ARM::VLD1d8QPseudo:
4622 case ARM::VLD1d16QPseudo:
4623 case ARM::VLD1d32QPseudo:
4624 case ARM::VLD1d64QPseudo:
4625 case ARM::VLD1d64QPseudoWB_fixed:
4626 case ARM::VLD1d64QPseudoWB_register:
4627 case ARM::VLD1q8HighQPseudo:
4628 case ARM::VLD1q8LowQPseudo_UPD:
4629 case ARM::VLD1q8HighTPseudo:
4630 case ARM::VLD1q8LowTPseudo_UPD:
4631 case ARM::VLD1q16HighQPseudo:
4632 case ARM::VLD1q16LowQPseudo_UPD:
4633 case ARM::VLD1q16HighTPseudo:
4634 case ARM::VLD1q16LowTPseudo_UPD:
4635 case ARM::VLD1q32HighQPseudo:
4636 case ARM::VLD1q32LowQPseudo_UPD:
4637 case ARM::VLD1q32HighTPseudo:
4638 case ARM::VLD1q32LowTPseudo_UPD:
4639 case ARM::VLD1q64HighQPseudo:
4640 case ARM::VLD1q64LowQPseudo_UPD:
4641 case ARM::VLD1q64HighTPseudo:
4642 case ARM::VLD1q64LowTPseudo_UPD:
4643 case ARM::VLD4d8Pseudo_UPD:
4644 case ARM::VLD4d16Pseudo_UPD:
4645 case ARM::VLD4d32Pseudo_UPD:
4646 case ARM::VLD4q8Pseudo_UPD:
4647 case ARM::VLD4q16Pseudo_UPD:
4648 case ARM::VLD4q32Pseudo_UPD:
4649 case ARM::VLD4q8oddPseudo:
4650 case ARM::VLD4q16oddPseudo:
4651 case ARM::VLD4q32oddPseudo:
4652 case ARM::VLD4q8oddPseudo_UPD:
4653 case ARM::VLD4q16oddPseudo_UPD:
4654 case ARM::VLD4q32oddPseudo_UPD:
4655 case ARM::VLD1DUPq8:
4656 case ARM::VLD1DUPq16:
4657 case ARM::VLD1DUPq32:
4658 case ARM::VLD1DUPq8wb_fixed:
4659 case ARM::VLD1DUPq16wb_fixed:
4660 case ARM::VLD1DUPq32wb_fixed:
4661 case ARM::VLD1DUPq8wb_register:
4662 case ARM::VLD1DUPq16wb_register:
4663 case ARM::VLD1DUPq32wb_register:
4664 case ARM::VLD2DUPd8:
4665 case ARM::VLD2DUPd16:
4666 case ARM::VLD2DUPd32:
4667 case ARM::VLD2DUPd8wb_fixed:
4668 case ARM::VLD2DUPd16wb_fixed:
4669 case ARM::VLD2DUPd32wb_fixed:
4670 case ARM::VLD2DUPd8wb_register:
4671 case ARM::VLD2DUPd16wb_register:
4672 case ARM::VLD2DUPd32wb_register:
4673 case ARM::VLD2DUPq8EvenPseudo:
4674 case ARM::VLD2DUPq8OddPseudo:
4675 case ARM::VLD2DUPq16EvenPseudo:
4676 case ARM::VLD2DUPq16OddPseudo:
4677 case ARM::VLD2DUPq32EvenPseudo:
4678 case ARM::VLD2DUPq32OddPseudo:
4679 case ARM::VLD3DUPq8EvenPseudo:
4680 case ARM::VLD3DUPq8OddPseudo:
4681 case ARM::VLD3DUPq16EvenPseudo:
4682 case ARM::VLD3DUPq16OddPseudo:
4683 case ARM::VLD3DUPq32EvenPseudo:
4684 case ARM::VLD3DUPq32OddPseudo:
4685 case ARM::VLD4DUPd8Pseudo:
4686 case ARM::VLD4DUPd16Pseudo:
4687 case ARM::VLD4DUPd32Pseudo:
4688 case ARM::VLD4DUPd8Pseudo_UPD:
4689 case ARM::VLD4DUPd16Pseudo_UPD:
4690 case ARM::VLD4DUPd32Pseudo_UPD:
4691 case ARM::VLD4DUPq8EvenPseudo:
4692 case ARM::VLD4DUPq8OddPseudo:
4693 case ARM::VLD4DUPq16EvenPseudo:
4694 case ARM::VLD4DUPq16OddPseudo:
4695 case ARM::VLD4DUPq32EvenPseudo:
4696 case ARM::VLD4DUPq32OddPseudo:
4697 case ARM::VLD1LNq8Pseudo:
4698 case ARM::VLD1LNq16Pseudo:
4699 case ARM::VLD1LNq32Pseudo:
4700 case ARM::VLD1LNq8Pseudo_UPD:
4701 case ARM::VLD1LNq16Pseudo_UPD:
4702 case ARM::VLD1LNq32Pseudo_UPD:
4703 case ARM::VLD2LNd8Pseudo:
4704 case ARM::VLD2LNd16Pseudo:
4705 case ARM::VLD2LNd32Pseudo:
4706 case ARM::VLD2LNq16Pseudo:
4707 case ARM::VLD2LNq32Pseudo:
4708 case ARM::VLD2LNd8Pseudo_UPD:
4709 case ARM::VLD2LNd16Pseudo_UPD:
4710 case ARM::VLD2LNd32Pseudo_UPD:
4711 case ARM::VLD2LNq16Pseudo_UPD:
4712 case ARM::VLD2LNq32Pseudo_UPD:
4713 case ARM::VLD4LNd8Pseudo:
4714 case ARM::VLD4LNd16Pseudo:
4715 case ARM::VLD4LNd32Pseudo:
4716 case ARM::VLD4LNq16Pseudo:
4717 case ARM::VLD4LNq32Pseudo:
4718 case ARM::VLD4LNd8Pseudo_UPD:
4719 case ARM::VLD4LNd16Pseudo_UPD:
4720 case ARM::VLD4LNd32Pseudo_UPD:
4721 case ARM::VLD4LNq16Pseudo_UPD:
4722 case ARM::VLD4LNq32Pseudo_UPD:
4732unsigned ARMBaseInstrInfo::getPredicationCost(
const MachineInstr &
MI)
const {
4733 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4743 !Subtarget.cheapPredicableCPSRDef())) {
4753 unsigned *PredCost)
const {
4754 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4760 if (
MI.isBundle()) {
4764 while (++
I != E &&
I->isInsideBundle()) {
4765 if (
I->getOpcode() != ARM::t2IT)
4766 Latency += getInstrLatency(ItinData, *
I, PredCost);
4773 !Subtarget.cheapPredicableCPSRDef()))) {
4781 return MI.mayLoad() ? 3 : 1;
4794 MI.hasOneMemOperand() ? (*
MI.memoperands_begin())->
getAlign().value() : 0;
4796 if (Adj >= 0 || (
int)
Latency > -Adj) {
4804 if (!
Node->isMachineOpcode())
4807 if (!ItinData || ItinData->
isEmpty())
4810 unsigned Opcode =
Node->getMachineOpcode();
4820bool ARMBaseInstrInfo::hasHighOperandLatency(
const TargetSchedModel &SchedModel,
4825 unsigned UseIdx)
const {
4828 if (Subtarget.nonpipelinedVFP() &&
4843 unsigned DefIdx)
const {
4845 if (!ItinData || ItinData->
isEmpty())
4850 unsigned DefClass =
DefMI.getDesc().getSchedClass();
4851 std::optional<unsigned> DefCycle =
4853 return DefCycle && DefCycle <= 2U;
4861 ErrInfo =
"Pseudo flag setting opcodes only exist in Selection DAG";
4864 if (
MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) {
4866 if (!ARM::hGPRRegClass.
contains(
MI.getOperand(0).getReg()) &&
4867 !ARM::hGPRRegClass.contains(
MI.getOperand(1).getReg())) {
4868 ErrInfo =
"Non-flag-setting Thumb1 mov is v6-only";
4872 if (
MI.getOpcode() == ARM::tPUSH ||
4873 MI.getOpcode() == ARM::tPOP ||
4874 MI.getOpcode() == ARM::tPOP_RET) {
4876 if (MO.isImplicit() || !MO.isReg())
4879 if (Reg < ARM::R0 || Reg > ARM::R7) {
4880 if (!(
MI.getOpcode() == ARM::tPUSH && Reg == ARM::LR) &&
4881 !(
MI.getOpcode() == ARM::tPOP_RET && Reg == ARM::PC)) {
4882 ErrInfo =
"Unsupported register in Thumb1 push/pop";
4888 if (
MI.getOpcode() == ARM::MVE_VMOV_q_rr) {
4889 assert(
MI.getOperand(4).isImm() &&
MI.getOperand(5).isImm());
4890 if ((
MI.getOperand(4).getImm() != 2 &&
MI.getOperand(4).getImm() != 3) ||
4891 MI.getOperand(4).getImm() !=
MI.getOperand(5).getImm() + 2) {
4892 ErrInfo =
"Incorrect array index for MVE_VMOV_q_rr";
4913 for (
auto Op :
MI.operands()) {
4920 ErrInfo =
"Incorrect AddrMode Imm for instruction";
4930 unsigned LoadImmOpc,
4931 unsigned LoadOpc)
const {
4933 "ROPI/RWPI not currently supported with stack guard");
4941 if (LoadImmOpc == ARM::MRC || LoadImmOpc == ARM::t2MRC) {
4943 "TLS stack protector requires hardware TLS register");
4954 Offset = M.getStackProtectorGuardOffset();
4959 unsigned AddOpc = (LoadImmOpc == ARM::MRC) ? ARM::ADDri : ARM::t2ADDri;
4969 cast<GlobalValue>((*
MI->memoperands_begin())->getValue());
4978 else if (IsIndirect)
4980 }
else if (IsIndirect) {
4984 if (LoadImmOpc == ARM::tMOVi32imm) {
4987 ARMSysReg::lookupMClassSysRegByName(
"apsr_nzcvq")->Encoding;
5023 unsigned &AddSubOpc,
5024 bool &NegAcc,
bool &HasLane)
const {
5026 if (
I == MLxEntryMap.
end())
5030 MulOpc = Entry.MulOpc;
5031 AddSubOpc = Entry.AddSubOpc;
5032 NegAcc = Entry.NegAcc;
5033 HasLane = Entry.HasLane;
5057std::pair<uint16_t, uint16_t>
5061 if (Subtarget.hasNEON()) {
5070 (
MI.getOpcode() == ARM::VMOVRS ||
MI.getOpcode() == ARM::VMOVSR ||
5071 MI.getOpcode() == ARM::VMOVS))
5078 return std::make_pair(
ExeNEON, 0);
5083 return std::make_pair(
ExeNEON, 0);
5086 return std::make_pair(
ExeVFP, 0);
5092 unsigned SReg,
unsigned &Lane) {
5094 TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
5101 DReg =
TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
5103 assert(DReg &&
"S-register with no D super-register?");
5128 if (
MI.definesRegister(DReg,
TRI) ||
MI.readsRegister(DReg,
TRI)) {
5134 ImplicitSReg =
TRI->getSubReg(DReg,
5135 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
5137 MI.getParent()->computeRegisterLiveness(
TRI, ImplicitSReg,
MI);
5152 unsigned DstReg, SrcReg;
5157 switch (
MI.getOpcode()) {
5169 assert(Subtarget.hasNEON() &&
"VORRd requires NEON");
5172 DstReg =
MI.getOperand(0).getReg();
5173 SrcReg =
MI.getOperand(1).getReg();
5175 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5176 MI.removeOperand(i - 1);
5179 MI.setDesc(
get(ARM::VORRd));
5191 DstReg =
MI.getOperand(0).getReg();
5192 SrcReg =
MI.getOperand(1).getReg();
5194 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5195 MI.removeOperand(i - 1);
5202 MI.setDesc(
get(ARM::VGETLNi32));
5218 DstReg =
MI.getOperand(0).getReg();
5219 SrcReg =
MI.getOperand(1).getReg();
5227 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5228 MI.removeOperand(i - 1);
5232 MI.setDesc(
get(ARM::VSETLNi32));
5251 DstReg =
MI.getOperand(0).getReg();
5252 SrcReg =
MI.getOperand(1).getReg();
5254 unsigned DstLane = 0, SrcLane = 0;
5263 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5264 MI.removeOperand(i - 1);
5269 MI.setDesc(
get(ARM::VDUPLN32d));
5303 MCRegister CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
5304 bool CurUndef = !
MI.readsRegister(CurReg,
TRI);
5307 CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
5308 CurUndef = !
MI.readsRegister(CurReg,
TRI);
5313 if (SrcLane == DstLane)
5316 MI.setDesc(
get(ARM::VEXTd32));
5321 CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
5322 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5325 CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
5326 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5331 if (SrcLane != DstLane)
5337 if (ImplicitSReg != 0)
5364 if (!PartialUpdateClearance)
5375 switch (
MI.getOpcode()) {
5381 case ARM::VMOVv4i16:
5382 case ARM::VMOVv2i32:
5383 case ARM::VMOVv2f32:
5384 case ARM::VMOVv1i64:
5385 UseOp =
MI.findRegisterUseOperandIdx(Reg,
TRI,
false);
5389 case ARM::VLD1LNd32:
5398 if (UseOp != -1 &&
MI.getOperand(UseOp).readsReg())
5402 if (Reg.isVirtual()) {
5404 if (!MO.
getSubReg() ||
MI.readsVirtualRegister(Reg))
5406 }
else if (ARM::SPRRegClass.
contains(Reg)) {
5409 TRI->getMatchingSuperReg(Reg, ARM::ssub_0, &ARM::DPRRegClass);
5410 if (!DReg || !
MI.definesRegister(DReg,
TRI))
5416 return PartialUpdateClearance;
5423 assert(OpNum <
MI.getDesc().getNumDefs() &&
"OpNum is not a def");
5428 assert(Reg.isPhysical() &&
"Can't break virtual register dependencies.");
5429 unsigned DReg = Reg;
5432 if (ARM::SPRRegClass.
contains(Reg)) {
5433 DReg = ARM::D0 + (Reg - ARM::S0) / 2;
5434 assert(
TRI->isSuperRegister(Reg, DReg) &&
"Register enums broken");
5437 assert(ARM::DPRRegClass.
contains(DReg) &&
"Can only break D-reg deps");
5438 assert(
MI.definesRegister(DReg,
TRI) &&
"MI doesn't clobber full D-reg");
5451 MI.addRegisterKilled(DReg,
TRI,
true);
5455 return Subtarget.hasFeature(ARM::HasV6KOps);
5459 if (
MI->getNumOperands() < 4)
5461 unsigned ShOpVal =
MI->getOperand(3).getImm();
5465 ((ShImm == 1 || ShImm == 2) &&
5475 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5476 assert(
MI.isRegSequenceLike() &&
"Invalid kind of instruction");
5478 switch (
MI.getOpcode()) {
5490 MOReg = &
MI.getOperand(2);
5502 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5503 assert(
MI.isExtractSubregLike() &&
"Invalid kind of instruction");
5505 switch (
MI.getOpcode()) {
5516 InputReg.
SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1;
5525 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5526 assert(
MI.isInsertSubregLike() &&
"Invalid kind of instruction");
5528 switch (
MI.getOpcode()) {
5529 case ARM::VSETLNi32:
5530 case ARM::MVE_VMOV_to_lane_32:
5541 InsertedReg.
Reg = MOInsertedReg.
getReg();
5549std::pair<unsigned, unsigned>
5552 return std::make_pair(TF & Mask, TF & ~Mask);
5557 using namespace ARMII;
5559 static const std::pair<unsigned, const char *> TargetFlags[] = {
5560 {MO_LO16,
"arm-lo16"}, {MO_HI16,
"arm-hi16"},
5561 {MO_LO_0_7,
"arm-lo-0-7"}, {MO_HI_0_7,
"arm-hi-0-7"},
5562 {MO_LO_8_15,
"arm-lo-8-15"}, {MO_HI_8_15,
"arm-hi-8-15"},
5569 using namespace ARMII;
5571 static const std::pair<unsigned, const char *> TargetFlags[] = {
5572 {MO_COFFSTUB,
"arm-coffstub"},
5573 {MO_GOT,
"arm-got"},
5574 {MO_SBREL,
"arm-sbrel"},
5575 {MO_DLLIMPORT,
"arm-dllimport"},
5576 {MO_SECREL,
"arm-secrel"},
5577 {MO_NONLAZY,
"arm-nonlazy"}};
5581std::optional<RegImmPair>
5584 unsigned Opcode =
MI.getOpcode();
5591 return std::nullopt;
5594 if (Opcode == ARM::SUBri)
5596 else if (Opcode != ARM::ADDri)
5597 return std::nullopt;
5602 if (!
MI.getOperand(1).isReg() || !
MI.getOperand(2).isImm())
5603 return std::nullopt;
5605 Offset =
MI.getOperand(2).getImm() * Sign;
5613 for (
auto I =
From;
I != To; ++
I)
5614 if (
I->modifiesRegister(Reg,
TRI))
5627 if (CmpMI->modifiesRegister(ARM::CPSR,
TRI))
5629 if (CmpMI->readsRegister(ARM::CPSR,
TRI))
5635 if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
5637 Register Reg = CmpMI->getOperand(0).getReg();
5640 if (Pred !=
ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
5653 if (Subtarget->isThumb()) {
5655 return ForCodesize ? 2 : 1;
5656 if (Subtarget->hasV6T2Ops() && (Val <= 0xffff ||
5659 return ForCodesize ? 4 : 1;
5661 return ForCodesize ? 4 : 2;
5663 return ForCodesize ? 4 : 2;
5665 return ForCodesize ? 4 : 2;
5668 return ForCodesize ? 4 : 1;
5670 return ForCodesize ? 4 : 1;
5671 if (Subtarget->hasV6T2Ops() && Val <= 0xffff)
5672 return ForCodesize ? 4 : 1;
5674 return ForCodesize ? 8 : 2;
5676 return ForCodesize ? 8 : 2;
5679 return ForCodesize ? 8 : 2;
5680 return ForCodesize ? 8 : 3;
5829 : CallTailCall(target.
isThumb() ? 4 : 4),
5830 FrameTailCall(target.
isThumb() ? 0 : 0),
5831 CallThunk(target.
isThumb() ? 4 : 4),
5832 FrameThunk(target.
isThumb() ? 0 : 0),
5833 CallNoLRSave(target.
isThumb() ? 4 : 4),
5834 FrameNoLRSave(target.
isThumb() ? 2 : 4),
5835 CallRegSave(target.
isThumb() ? 8 : 12),
5836 FrameRegSave(target.
isThumb() ? 2 : 4),
5837 CallDefault(target.
isThumb() ? 8 : 12),
5838 FrameDefault(target.
isThumb() ? 2 : 4),
5839 SaveRestoreLROnStack(target.
isThumb() ? 8 : 8) {}
5852 for (
Register Reg : ARM::rGPRRegClass) {
5853 if (!(Reg < regsReserved.
size() && regsReserved.
test(Reg)) &&
5856 C.isAvailableAcrossAndOutOfSeq(Reg,
TRI) &&
5857 C.isAvailableInsideSeq(Reg,
TRI))
5871 for (;
I != E; ++
I) {
5875 if (
MI.modifiesRegister(ARM::LR, &
TRI))
5879 unsigned Opcode =
MI.getOpcode();
5880 if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR ||
5881 Opcode == ARM::SUBS_PC_LR || Opcode == ARM::tBX_RET ||
5882 Opcode == ARM::tBXNS_RET) {
5888 if (
MI.readsRegister(ARM::LR, &
TRI))
5894std::optional<std::unique_ptr<outliner::OutlinedFunction>>
5897 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
5898 unsigned MinRepeats)
const {
5899 unsigned SequenceSize = 0;
5900 for (
auto &
MI : RepeatedSequenceLocs[0])
5904 unsigned FlagsSetInAll = 0xF;
5909 FlagsSetInAll &=
C.Flags;
5928 return C.isAnyUnavailableAcrossOrOutOfSeq({ARM::R12, ARM::CPSR},
TRI);
5936 llvm::erase_if(RepeatedSequenceLocs, CantGuaranteeValueAcrossCall);
5939 if (RepeatedSequenceLocs.size() < MinRepeats)
5940 return std::nullopt;
5959 if (std::distance(RepeatedSequenceLocs.begin(), NoBTI) >
5960 std::distance(NoBTI, RepeatedSequenceLocs.end()))
5961 RepeatedSequenceLocs.erase(NoBTI, RepeatedSequenceLocs.end());
5963 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoBTI);
5965 if (RepeatedSequenceLocs.size() < MinRepeats)
5966 return std::nullopt;
5976 if (std::distance(RepeatedSequenceLocs.begin(), NoPAC) >
5977 std::distance(NoPAC, RepeatedSequenceLocs.end()))
5978 RepeatedSequenceLocs.erase(NoPAC, RepeatedSequenceLocs.end());
5980 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoPAC);
5982 if (RepeatedSequenceLocs.size() < MinRepeats)
5983 return std::nullopt;
5988 unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back().getOpcode();
5991 auto SetCandidateCallInfo =
5992 [&RepeatedSequenceLocs](
unsigned CallID,
unsigned NumBytesForCall) {
5994 C.setCallInfo(CallID, NumBytesForCall);
5999 const auto &SomeMFI =
6002 if (SomeMFI.branchTargetEnforcement()) {
6011 if (SomeMFI.shouldSignReturnAddress(
true)) {
6021 if (RepeatedSequenceLocs[0].back().isTerminator()) {
6025 }
else if (LastInstrOpcode == ARM::BL || LastInstrOpcode == ARM::BLX ||
6026 LastInstrOpcode == ARM::BLX_noip || LastInstrOpcode == ARM::tBL ||
6027 LastInstrOpcode == ARM::tBLXr ||
6028 LastInstrOpcode == ARM::tBLXr_noip ||
6029 LastInstrOpcode == ARM::tBLXi) {
6037 unsigned NumBytesNoStackCalls = 0;
6038 std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
6043 const auto Last =
C.getMBB()->rbegin();
6044 const bool LRIsAvailable =
6045 C.getMBB()->isReturnBlock() && !
Last->isCall()
6048 :
C.isAvailableAcrossAndOutOfSeq(ARM::LR,
TRI);
6049 if (LRIsAvailable) {
6053 CandidatesWithoutStackFixups.push_back(
C);
6058 else if (findRegisterToSaveLRTo(
C)) {
6062 CandidatesWithoutStackFixups.push_back(
C);
6067 else if (
C.isAvailableInsideSeq(ARM::SP,
TRI)) {
6070 CandidatesWithoutStackFixups.push_back(
C);
6076 NumBytesNoStackCalls += SequenceSize;
6082 if (NumBytesNoStackCalls <=
6083 RepeatedSequenceLocs.size() * Costs.
CallDefault) {
6084 RepeatedSequenceLocs = CandidatesWithoutStackFixups;
6086 if (RepeatedSequenceLocs.size() < MinRepeats)
6087 return std::nullopt;
6094 if (FlagsSetInAll & MachineOutlinerMBBFlags::HasCalls) {
6112 return std::make_unique<outliner::OutlinedFunction>(
6113 RepeatedSequenceLocs, SequenceSize, NumBytesToCreateFrame, FrameID);
6116bool ARMBaseInstrInfo::checkAndUpdateStackOffset(
MachineInstr *
MI,
6119 int SPIdx =
MI->findRegisterUseOperandIdx(ARM::SP,
nullptr);
6144 unsigned NumOps =
MI->getDesc().getNumOperands();
6145 unsigned ImmIdx = NumOps - 3;
6149 int64_t OffVal =
Offset.getImm();
6155 unsigned NumBits = 0;
6184 assert((
Fixup & 3) == 0 &&
"Can't encode this offset!");
6204 assert(((OffVal * Scale +
Fixup) & (Scale - 1)) == 0 &&
6205 "Can't encode this offset!");
6206 OffVal +=
Fixup / Scale;
6208 unsigned Mask = (1 << NumBits) - 1;
6210 if (OffVal <= Mask) {
6212 MI->getOperand(ImmIdx).setImm(OffVal);
6220 Function &
F, std::vector<outliner::Candidate> &Candidates)
const {
6224 const Function &CFn =
C.getMF()->getFunction();
6231 ARMGenInstrInfo::mergeOutliningCandidateAttributes(
F, Candidates);
6239 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
6258 unsigned &Flags)
const {
6262 "Suitable Machine Function for outlining must track liveness");
6270 bool R12AvailableInBlock = LRU.
available(ARM::R12);
6271 bool CPSRAvailableInBlock = LRU.
available(ARM::CPSR);
6275 if (R12AvailableInBlock && CPSRAvailableInBlock)
6276 Flags |= MachineOutlinerMBBFlags::UnsafeRegsDead;
6283 if (R12AvailableInBlock && !LRU.
available(ARM::R12))
6285 if (CPSRAvailableInBlock && !LRU.
available(ARM::CPSR))
6291 Flags |= MachineOutlinerMBBFlags::HasCalls;
6295 bool LRIsAvailable =
6300 Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;
6308 unsigned Flags)
const {
6314 unsigned Opc =
MI.getOpcode();
6315 if (Opc == ARM::tPICADD || Opc == ARM::PICADD || Opc == ARM::PICSTR ||
6316 Opc == ARM::PICSTRB || Opc == ARM::PICSTRH || Opc == ARM::PICLDR ||
6317 Opc == ARM::PICLDRB || Opc == ARM::PICLDRH || Opc == ARM::PICLDRSB ||
6318 Opc == ARM::PICLDRSH || Opc == ARM::t2LDRpci_pic ||
6319 Opc == ARM::t2MOVi16_ga_pcrel || Opc == ARM::t2MOVTi16_ga_pcrel ||
6320 Opc == ARM::t2MOV_ga_pcrel)
6324 if (Opc == ARM::t2BF_LabelPseudo || Opc == ARM::t2DoLoopStart ||
6325 Opc == ARM::t2DoLoopStartTP || Opc == ARM::t2WhileLoopStart ||
6326 Opc == ARM::t2WhileLoopStartLR || Opc == ARM::t2WhileLoopStartTP ||
6327 Opc == ARM::t2LoopDec || Opc == ARM::t2LoopEnd ||
6328 Opc == ARM::t2LoopEndDec)
6337 if (
MI.isTerminator())
6343 if (
MI.readsRegister(ARM::LR,
TRI) ||
MI.readsRegister(ARM::PC,
TRI))
6351 if (MOP.isGlobal()) {
6352 Callee = dyn_cast<Function>(MOP.getGlobal());
6360 (Callee->getName() ==
"\01__gnu_mcount_nc" ||
6361 Callee->getName() ==
"\01mcount" || Callee->getName() ==
"__mcount"))
6369 if (Opc == ARM::BL || Opc == ARM::tBL || Opc == ARM::BLX ||
6370 Opc == ARM::BLX_noip || Opc == ARM::tBLXr || Opc == ARM::tBLXr_noip ||
6375 return UnknownCallOutlineType;
6383 return UnknownCallOutlineType;
6391 return UnknownCallOutlineType;
6399 if (
MI.modifiesRegister(ARM::LR,
TRI) ||
MI.modifiesRegister(ARM::PC,
TRI))
6403 if (
MI.modifiesRegister(ARM::SP,
TRI) ||
MI.readsRegister(ARM::SP,
TRI)) {
6416 bool MightNeedStackFixUp =
6417 (Flags & (MachineOutlinerMBBFlags::LRUnavailableSomewhere |
6418 MachineOutlinerMBBFlags::HasCalls));
6420 if (!MightNeedStackFixUp)
6426 if (
MI.modifiesRegister(ARM::SP,
TRI))
6440 if (
MI.readsRegister(ARM::ITSTATE,
TRI) ||
6441 MI.modifiesRegister(ARM::ITSTATE,
TRI))
6445 if (
MI.isCFIInstruction())
6476 unsigned Opc = Subtarget.isThumb() ? ARM::t2STR_PRE : ARM::STR_PRE_IMM;
6491 int64_t StackPosEntry =
6501 unsigned DwarfLR =
MRI->getDwarfRegNum(ARM::LR,
true);
6509 unsigned DwarfRAC =
MRI->getDwarfRegNum(ARM::RA_AUTH_CODE,
true);
6523 unsigned DwarfLR =
MRI->getDwarfRegNum(ARM::LR,
true);
6524 unsigned DwarfReg =
MRI->getDwarfRegNum(Reg,
true);
6535 bool CFI,
bool Auth)
const {
6551 unsigned Opc = Subtarget.isThumb() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
6555 if (!Subtarget.isThumb())
6566 unsigned DwarfLR =
MRI->getDwarfRegNum(ARM::LR,
true);
6567 int64_t StackPosEntry =
6574 int64_t LRPosEntry =
6581 unsigned DwarfRAC =
MRI->getDwarfRegNum(ARM::RA_AUTH_CODE,
true);
6594void ARMBaseInstrInfo::emitCFIForLRRestoreFromReg(
6598 unsigned DwarfLR =
MRI->getDwarfRegNum(ARM::LR,
true);
6600 int64_t LRPosEntry =
6614 bool isThumb = Subtarget.isThumb();
6615 unsigned FuncOp =
isThumb ? 2 : 0;
6616 unsigned Opc = Call->getOperand(FuncOp).isReg()
6617 ?
isThumb ? ARM::tTAILJMPr : ARM::TAILJMPr
6622 .
add(Call->getOperand(FuncOp));
6623 if (
isThumb && !Call->getOperand(FuncOp).isReg())
6625 Call->eraseFromParent();
6630 return MI.isCall() && !
MI.isReturn();
6638 Et = std::prev(
MBB.
end());
6648 saveLROnStack(
MBB, It,
true, Auth);
6653 "Can only fix up stack references once");
6654 fixupPostOutline(
MBB);
6657 restoreLRFromStack(
MBB, Et,
true, Auth);
6677 fixupPostOutline(
MBB);
6686 bool isThumb = Subtarget.isThumb();
6692 ? Subtarget.
isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND
6703 Opc =
isThumb ? ARM::tBL : ARM::BL;
6719 Register Reg = findRegisterToSaveLRTo(
C);
6720 assert(Reg != 0 &&
"No callee-saved register available?");
6725 emitCFIForLRSaveToReg(
MBB, It, Reg);
6729 emitCFIForLRRestoreFromReg(
MBB, It);
6749bool ARMBaseInstrInfo::isReallyTriviallyReMaterializable(
6783 static int constexpr MAX_STAGES = 30;
6784 static int constexpr LAST_IS_USE = MAX_STAGES;
6785 static int constexpr SEEN_AS_LIVE = MAX_STAGES + 1;
6786 typedef std::bitset<MAX_STAGES + 2> IterNeed;
6787 typedef std::map<unsigned, IterNeed> IterNeeds;
6790 const IterNeeds &CIN);
6802 : EndLoop(EndLoop), LoopCount(LoopCount),
6804 TII(MF->getSubtarget().getInstrInfo()) {}
6806 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
6808 return MI == EndLoop ||
MI == LoopCount;
6812 if (tooMuchRegisterPressure(SSD, SMS))
6818 std::optional<bool> createTripCountGreaterCondition(
6829 }
else if (EndLoop->
getOpcode() == ARM::t2LoopEnd) {
6834 if (
I.getOpcode() == ARM::t2LoopDec)
6836 assert(LoopDec &&
"Unable to find copied LoopDec");
6842 .
addReg(ARM::NoRegister);
6852 void adjustTripCount(
int TripCountAdjust)
override {}
6856 const IterNeeds &CIN) {
6858 for (
const auto &
N : CIN) {
6859 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6860 for (
int I = 0;
I < Cnt; ++
I)
6865 for (
const auto &
N : CIN) {
6866 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6867 for (
int I = 0;
I < Cnt; ++
I)
6875 IterNeeds CrossIterationNeeds;
6880 for (
auto &SU : SSD.
SUnits) {
6883 for (
auto &S : SU.Succs)
6886 if (
Reg.isVirtual())
6887 CrossIterationNeeds[
Reg.id()].set(0);
6888 }
else if (S.isAssignedRegDep()) {
6890 if (OStg >= 0 && OStg != Stg) {
6892 if (
Reg.isVirtual())
6893 CrossIterationNeeds[
Reg.id()] |= ((1 << (OStg - Stg)) - 1);
6902 std::vector<SUnit *> ProposedSchedule;
6906 std::deque<SUnit *> Instrs =
6908 std::sort(Instrs.begin(), Instrs.end(),
6909 [](
SUnit *
A,
SUnit *
B) { return A->NodeNum > B->NodeNum; });
6910 for (
SUnit *SU : Instrs)
6911 ProposedSchedule.push_back(SU);
6917 for (
auto *SU : ProposedSchedule)
6921 if (!MO.isReg() || !MO.getReg())
6924 auto CIter = CrossIterationNeeds.find(
Reg.id());
6925 if (CIter == CrossIterationNeeds.end() || CIter->second[LAST_IS_USE] ||
6926 CIter->second[SEEN_AS_LIVE])
6928 if (MO.isDef() && !MO.isDead())
6929 CIter->second.set(SEEN_AS_LIVE);
6930 else if (MO.isUse())
6931 CIter->second.set(LAST_IS_USE);
6933 for (
auto &CI : CrossIterationNeeds)
6934 CI.second.reset(LAST_IS_USE);
6940 RPTracker.init(MF, &RegClassInfo,
nullptr, EndLoop->
getParent(),
6943 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6945 for (
auto *SU : ProposedSchedule) {
6947 RPTracker.setPos(std::next(CurInstI));
6953 if (!MO.isReg() || !MO.getReg())
6956 if (MO.isDef() && !MO.isDead()) {
6957 auto CIter = CrossIterationNeeds.find(
Reg.id());
6958 if (CIter != CrossIterationNeeds.end()) {
6959 CIter->second.reset(0);
6960 CIter->second.reset(SEEN_AS_LIVE);
6964 for (
auto &S : SU->Preds) {
6966 if (S.isAssignedRegDep()) {
6968 auto CIter = CrossIterationNeeds.find(
Reg.id());
6969 if (CIter != CrossIterationNeeds.end()) {
6971 assert(Stg2 <= Stg &&
"Data dependence upon earlier stage");
6972 if (Stg - Stg2 < MAX_STAGES)
6973 CIter->second.set(Stg - Stg2);
6974 CIter->second.set(SEEN_AS_LIVE);
6979 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6982 auto &
P = RPTracker.getPressure().MaxSetPressure;
6983 for (
unsigned I = 0, E =
P.size();
I < E; ++
I) {
6985 if (
I == ARM::DQuad_with_ssub_0 ||
I == ARM::DTripleSpc_with_ssub_0 ||
6986 I == ARM::DTriple_with_qsub_0_in_QPR)
6998std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
7002 if (Preheader == LoopBB)
7003 Preheader = *std::next(LoopBB->
pred_begin());
7005 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2Bcc) {
7011 for (
auto &L : LoopBB->
instrs()) {
7018 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, CCSetter);
7032 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2LoopEnd) {
7033 for (
auto &L : LoopBB->
instrs())
7038 Register LoopDecResult =
I->getOperand(0).getReg();
7041 if (!LoopDec || LoopDec->
getOpcode() != ARM::t2LoopDec)
7044 for (
auto &J : Preheader->
instrs())
7045 if (J.getOpcode() == ARM::t2DoLoopStart)
7049 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, LoopDec);
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineOutlinerClass
Constants defining how certain sequences should be outlined.
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
@ MachineOutlinerRegSave
Emit a call and tail-call.
@ MachineOutlinerNoLRSave
Only emit a branch.
@ MachineOutlinerThunk
Emit a call and return.
static bool isLoad(int Opcode)
static bool isThumb(const MCSubtargetInfo &STI)
static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI, MachineInstr &MI, MCRegister DReg, unsigned Lane, MCRegister &ImplicitSReg)
getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane, set ImplicitSReg to a register n...
static const MachineInstr * getBundledUseMI(const TargetRegisterInfo *TRI, const MachineInstr &MI, unsigned Reg, unsigned &UseIdx, unsigned &Dist)
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI)
Create a copy of a const pool value.
static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg, int CmpMask, bool CommonUse)
isSuitableForMask - Identify a suitable 'and' instruction that operates on the given source register ...
static cl::opt< bool > EnableARM3Addr("enable-arm-3-addr-conv", cl::Hidden, cl::desc("Enable ARM 2-addr to 3-addr conv"))
static int adjustDefLatency(const ARMSubtarget &Subtarget, const MachineInstr &DefMI, const MCInstrDesc &DefMCID, unsigned DefAlign)
Return the number of cycles to add to (or subtract from) the static itinerary based on the def opcode...
static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, const MachineInstr &MI)
static MCRegister getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI, unsigned SReg, unsigned &Lane)
static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[]
static bool isEligibleForITBlock(const MachineInstr *MI)
static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC)
getCmpToAddCondition - assume the flags are set by CMP(a,b), return the condition code if we modify t...
static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1)
static bool isLRAvailable(const TargetRegisterInfo &TRI, MachineBasicBlock::reverse_iterator I, MachineBasicBlock::reverse_iterator E)
static const ARM_MLxEntry ARM_MLxTable[]
static bool isRedundantFlagInstr(const MachineInstr *CmpI, Register SrcReg, Register SrcReg2, int64_t ImmValue, const MachineInstr *OI, bool &IsThumb1)
isRedundantFlagInstr - check whether the first instruction, whose only purpose is to update flags,...
static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc, unsigned NumRegs)
static const MachineInstr * getBundledDefMI(const TargetRegisterInfo *TRI, const MachineInstr *MI, unsigned Reg, unsigned &DefIdx, unsigned &Dist)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
BlockVerifier::State From
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
Looks at all the uses of the given value Returns the Liveness deduced from the uses of this value Adds all uses that cause the result to be MaybeLive to MaybeLiveRetUses If the result is Live
This file defines the DenseMap class.
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
uint64_t IntrinsicInst * II
PowerPC TLS Dynamic Call Fixup
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallSet class.
This file defines the SmallVector class.
static X86::CondCode getSwappedCondition(X86::CondCode CC)
Assuming the flags are set by MI(a,b), return the condition code if we modify the instructions such t...
static bool isCPSRDefined(const MachineInstr &MI)
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
optimizeCompareInstr - Convert the instruction to set the zero flag so that we can remove a "comparis...
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const override
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
foldImmediate - 'Reg' is known to be defined by a move immediate instruction, try to fold the immedia...
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const override
const MachineInstrBuilder & AddDReg(MachineInstrBuilder &MIB, unsigned Reg, unsigned SubIdx, unsigned State, const TargetRegisterInfo *TRI) const
void copyFromCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MCRegister DestReg, bool KillSrc, const ARMSubtarget &Subtarget) const
unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const override
virtual unsigned getUnindexedOpcode(unsigned Opc) const =0
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
unsigned getPartialRegUpdateClearance(const MachineInstr &, unsigned, const TargetRegisterInfo *) const override
unsigned getNumLDMAddresses(const MachineInstr &MI) const
Get the number of addresses by LDM or VLDM or zero for unknown.
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI) const override
void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const override
virtual const ARMBaseRegisterInfo & getRegisterInfo() const =0
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Returns the size of the specified MachineInstr.
void copyToCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MCRegister SrcReg, bool KillSrc, const ARMSubtarget &Subtarget) const
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const override
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
ARM supports the MachineOutliner.
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, const TargetRegisterInfo &TRI) const override
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
Enable outlining by default at -Oz.
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is an instruction that moves/copies value from one register to an...
MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const override
ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *II, const ScheduleDAGMI *DAG) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
ARMBaseInstrInfo(const ARMSubtarget &STI)
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isPredicated(const MachineInstr &MI) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
void expandLoadStackGuardBase(MachineBasicBlock::iterator MI, unsigned LoadImmOpc, unsigned LoadOpc) const
bool isPredicable(const MachineInstr &MI) const override
isPredicable - Return true if the specified instruction can be predicated.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const override
Specialization of TargetInstrInfo::describeLoadedValue, used to enhance debug entry value description...
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, unsigned NumInsts) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const override
areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to determine if two loads are lo...
std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const override
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const override
bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const override
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
bool expandPostRAPseudo(MachineInstr &MI) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const override
bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const override
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const override
shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to determine (in conjunction w...
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const override
std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const override
VFP/NEON execution domains.
bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const override
bool isFpMLxInstruction(unsigned Opcode) const
isFpMLxInstruction - Return true if the specified opcode is a fp MLA / MLS instruction.
bool isSwiftFastImmShift(const MachineInstr *MI) const
Returns true if the instruction has a shift by immediate that can be executed in one cycle less.
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2 if h...
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
void breakPartialRegDependency(MachineInstr &, unsigned, const TargetRegisterInfo *TRI) const override
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
const ARMSubtarget & getSubtarget() const
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Commutes the operands in the given instruction.
bool analyzeSelect(const MachineInstr &MI, SmallVectorImpl< MachineOperand > &Cond, unsigned &TrueOp, unsigned &FalseOp, bool &Optimizable) const override
bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const override
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
bool shouldSink(const MachineInstr &MI) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
ARMConstantPoolConstant - ARM-specific constant pool values for Constants, Functions,...
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
ARMConstantPoolMBB - ARM-specific constantpool value of a machine basic block.
ARMConstantPoolSymbol - ARM-specific constantpool values for external symbols.
ARMConstantPoolValue - ARM specific constantpool value.
bool isMachineBasicBlock() const
bool isGlobalValue() const
ARMCP::ARMCPModifier getModifier() const
bool mustAddCurrentAddress() const
virtual bool hasSameValue(ARMConstantPoolValue *ACPV)
hasSameValue - Return true if this ARM constpool value can share the same constantpool entry as anoth...
bool isBlockAddress() const
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
bool isThumb2Function() const
bool branchTargetEnforcement() const
unsigned createPICLabelUId()
bool isThumb1OnlyFunction() const
bool isThumbFunction() const
bool shouldSignReturnAddress() const
bool isTargetMachO() const
ARMLdStMultipleTiming getLdStMultipleTiming() const
const ARMBaseInstrInfo * getInstrInfo() const override
bool isThumb1Only() const
bool isReadTPSoft() const
bool isGVIndirectSymbol(const GlobalValue *GV) const
True if the GV will be accessed via an indirect symbol.
unsigned getMispredictionPenalty() const
const ARMBaseRegisterInfo * getRegisterInfo() const override
unsigned getReturnOpcode() const
Returns the correct return opcode for the current feature set.
Align getStackAlignment() const
getStackAlignment - Returns the minimum alignment known to hold of the stack frame on entry to the fu...
bool enableMachinePipeliner() const override
Returns true if machine pipeliner should be enabled.
bool isTargetCOFF() const
unsigned getPartialUpdateClearance() const
@ DoubleIssueCheckUnalignedAccess
Can load/store 2 registers/cycle, but needs an extra cycle if the access is not 64-bit aligned.
@ SingleIssue
Can load/store 1 register/cycle.
@ DoubleIssue
Can load/store 2 registers/cycle.
@ SingleIssuePlusExtras
Can load/store 1 register/cycle, but needs an extra cycle for address computation and potentially als...
int getPreISelOperandLatencyAdjustment() const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool test(unsigned Idx) const
size_type size() const
size - Returns the number of bits in this bitvector.
uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
This class represents an Operation in the Expression.
iterator find(const_arg_type_t< KeyT > Val)
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
A possibly irreducible generalization of a Loop.
bool hasDLLImportStorageClass() const
Module * getParent()
Get the module that this global value is contained inside of...
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Reverses the branch condition of the specified condition list, returning false on success and true if...
Itinerary data supplied by a subtarget to be used by a target.
int getNumMicroOps(unsigned ItinClassIndx) const
Return the number of micro-ops that the given class decodes to.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
std::optional< unsigned > getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Return true if there is a pipeline forwarding between instructions of itinerary classes DefClass and ...
bool isEmpty() const
Returns true if there are no itineraries.
A set of register units used to track register liveness.
bool available(MCPhysReg Reg) const
Returns true if no part of physical register Reg is live.
void addLiveOuts(const MachineBasicBlock &MBB)
Adds registers living out of block MBB.
void accumulate(const MachineInstr &MI)
Adds all register units used, defined or clobbered in MI.
void addVirtualRegisterDead(Register IncomingReg, MachineInstr &MI, bool AddIfNotFound=false)
addVirtualRegisterDead - Add information about the fact that the specified register is dead after bei...
void addVirtualRegisterKilled(Register IncomingReg, MachineInstr &MI, bool AddIfNotFound=false)
addVirtualRegisterKilled - Add information about the fact that the specified register is killed after...
VarInfo & getVarInfo(Register Reg)
getVarInfo - Return the VarInfo structure for the specified VIRTUAL register.
This class is intended to be used as a base class for asm properties and features specific to the tar...
static MCCFIInstruction createUndefined(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_undefined From now on the previous value of Register can't be restored anymore.
static MCCFIInstruction createRestore(MCSymbol *L, unsigned Register, SMLoc Loc={})
.cfi_restore says that the rule for Register is now the same as it was at the beginning of the functi...
static MCCFIInstruction createRegister(MCSymbol *L, unsigned Register1, unsigned Register2, SMLoc Loc={})
.cfi_register Previous value of Register1 is saved in register Register2.
static MCCFIInstruction createOffset(MCSymbol *L, unsigned Register, int64_t Offset, SMLoc Loc={})
.cfi_offset Previous value of Register is saved at offset Offset from CFA.
static MCCFIInstruction cfiDefCfaOffset(MCSymbol *L, int64_t Offset, SMLoc Loc={})
.cfi_def_cfa_offset modifies a rule for computing CFA.
Describe properties that are true of each instruction in the target description file.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayLoad() const
Return true if this instruction could possibly read memory.
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
bool isCall() const
Return true if the instruction is a call.
unsigned getSize() const
Return the number of bytes in the encoding of this instruction, or zero if the encoding size cannot b...
unsigned getOpcode() const
Return the opcode number for this descriptor.
bool hasImplicitDefOfPhysReg(MCRegister Reg, const MCRegisterInfo *MRI=nullptr) const
Return true if this instruction implicitly defines the specified physical register.
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc object...
Wrapper class representing physical registers. Should be passed by value.
bool isValid() const
isValid - Returns true until all the operands have been visited.
unsigned pred_size() const
instr_iterator instr_begin()
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
bool isReturnBlock() const
Convenience function that returns true if the block ends in a return instruction.
Instructions::iterator instr_iterator
pred_iterator pred_begin()
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
instr_iterator instr_end()
Instructions::const_iterator const_instr_iterator
void addLiveIn(MCRegister PhysReg, LaneBitmask LaneMask=LaneBitmask::getAll())
Adds the specified register as a live in.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
iterator_range< succ_iterator > successors()
reverse_iterator rbegin()
LivenessQueryResult
Possible outcome of a register liveness query to computeRegisterLiveness()
@ LQR_Dead
Register is known to be fully dead.
@ LQR_Live
Register is known to be (at least partially) live.
@ LQR_Unknown
Register liveness not decidable from local neighborhood.
bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
This class is a data container for one entry in a MachineConstantPool.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
union llvm::MachineConstantPoolEntry::@204 Val
The constant itself.
MachineConstantPoolValue * MachineCPVal
const Constant * ConstVal
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
const std::vector< MachineConstantPoolEntry > & getConstants() const
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getNumObjects() const
Return the number of objects.
unsigned addFrameInst(const MCCFIInstruction &Inst)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addCFIIndex(unsigned CFIIndex) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
bool readsRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr reads the specified register.
bool isCall(QueryType Type=AnyInBundle) const
unsigned getNumOperands() const
Retuns the total number of operands.
int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool isRegSequence() const
bool isInsertSubreg() const
void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
bool hasOptionalDef(QueryType Type=IgnoreBundle) const
Set if this instruction has an optional definition, e.g.
void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
const MachineOperand & getOperand(unsigned i) const
void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
MachineFunction * getMachineFunction(const Function &F) const
Returns the MachineFunction associated to IR function F if there is one, otherwise nullptr.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
const GlobalValue * getGlobal() const
void setImplicit(bool Val=true)
void setImm(int64_t immVal)
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
void setIsDead(bool Val=true)
void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
defusechain_iterator - This class provides iterator support for machine operands in the function that...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
bool tracksLiveness() const
tracksLiveness - Returns true when tracking register liveness accurately.
const TargetRegisterInfo * getTargetRegisterInfo() const
A Module instance is used to store all the information related to an LLVM module.
void AddHazardRecognizer(std::unique_ptr< ScheduleHazardRecognizer > &&)
Track the current register pressure at some position in the instruction stream, and remember the high...
void increaseRegPressure(Register RegUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
void decreaseRegPressure(Register RegUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
unsigned getRegPressureSetLimit(unsigned Idx) const
Get the register unit limit for the given pressure set index.
void runOnMachineFunction(const MachineFunction &MF)
runOnFunction - Prepare to answer questions about MF.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
@ Anti
A register anti-dependence (aka WAR).
This class represents the scheduled code.
unsigned getMaxStageCount()
Return the maximum stage count needed for this schedule.
int stageScheduled(SUnit *SU) const
Return the stage for a scheduled instruction.
int getInitiationInterval() const
Return the initiation interval for this schedule.
std::deque< SUnit * > & getInstructions(int cycle)
Return the instructions that are scheduled at the specified cycle.
int getFirstCycle() const
Return the first cycle in the completed schedule.
int getFinalCycle() const
Return the last cycle in the finalized schedule.
Scheduling unit. This is a node in the scheduling DAG.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
std::vector< SUnit > SUnits
The scheduling units.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
This class builds the dependence graph for the instructions in a loop, and attempts to schedule the i...
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const
Compute operand latency based on the available machine model.
const InstrItineraryData * getInstrItineraries() const
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
static CondCodes getOppositeCondition(CondCodes CC)
@ MO_OPTION_MASK
MO_OPTION_MASK - Most flags are mutually exclusive; this mask selects just that part of the flag set.
@ MO_NONLAZY
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which,...
@ MO_DLLIMPORT
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
@ MO_GOT
MO_GOT - On a symbol operand, this represents a GOT relative relocation.
@ MO_COFFSTUB
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the "....
AddrMode
ARM Addressing Modes.
unsigned char getAM3Offset(unsigned AM3Opc)
unsigned char getAM5FP16Offset(unsigned AM5Opc)
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
ShiftOpc getAM2ShiftOpc(unsigned AM2Opc)
unsigned getAM2Offset(unsigned AM2Opc)
unsigned getSOImmValRotate(unsigned Imm)
getSOImmValRotate - Try to handle Imm with an immediate shifter operand, computing the rotate amount ...
bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting a 8-bit im...
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
ShiftOpc getSORegShOp(unsigned Op)
AddrOpc getAM5Op(unsigned AM5Opc)
bool isSOImmTwoPartValNeg(unsigned V)
isSOImmTwoPartValNeg - Return true if the specified value can be obtained by two SOImmVal,...
unsigned getSOImmTwoPartSecond(unsigned V)
getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal, return the second chunk of ...
bool isSOImmTwoPartVal(unsigned V)
isSOImmTwoPartVal - Return true if the specified value can be obtained by or'ing together two SOImmVa...
AddrOpc getAM5FP16Op(unsigned AM5Opc)
unsigned getSORegOpc(ShiftOpc ShOp, unsigned Imm)
unsigned getT2SOImmTwoPartSecond(unsigned Imm)
unsigned getT2SOImmTwoPartFirst(unsigned Imm)
bool isT2SOImmTwoPartVal(unsigned Imm)
unsigned char getAM5Offset(unsigned AM5Opc)
unsigned getSOImmTwoPartFirst(unsigned V)
getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal, return the first chunk of it...
AddrOpc getAM2Op(unsigned AM2Opc)
AddrOpc getAM3Op(unsigned AM3Opc)
@ C
The default llvm calling convention, compatible with C.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
Reg
All possible values of the reg field in the ModR/M byte.
InstrType
Represents how an instruction should be mapped by the outliner.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
static bool isIndirectCall(const MachineInstr &MI)
MachineInstr * findCMPToFoldIntoCBZ(MachineInstr *Br, const TargetRegisterInfo *TRI)
Search backwards from a tBcc to find a tCMPi8 against 0, meaning we can convert them to a tCBZ or tCB...
static bool isCondBranchOpcode(int Opc)
bool HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns true if Val1 has a lower Constant Materialization Cost than Val2.
static bool isPushOpcode(int Opc)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond)
static bool isVCTP(const MachineInstr *MI)
bool IsCPSRDead< MachineInstr >(const MachineInstr *MI)
unsigned getBLXpredOpcode(const MachineFunction &MF)
static bool isARMLowRegister(MCRegister Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
static bool isIndirectBranchOpcode(int Opc)
bool isLegalAddressImm(unsigned Opcode, int Imm, const TargetInstrInfo *TII)
bool registerDefinedBetween(unsigned Reg, MachineBasicBlock::iterator From, MachineBasicBlock::iterator To, const TargetRegisterInfo *TRI)
Return true if Reg is defined between From and To.
MaybeAlign getAlign(const Function &F, unsigned Index)
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
static bool isSEHInstruction(const MachineInstr &MI)
static bool isCalleeSavedRegister(MCRegister Reg, const MCPhysReg *CSRegs)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget, MachineFunction &MF, MachineInstr *MI, unsigned NumBytes)
Tries to add registers to the reglist of a given base-updating push/pop instruction to adjust the sta...
auto reverse(ContainerTy &&C)
static bool isJumpTableBranchOpcode(int Opc)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static bool isPopOpcode(int Opc)
void addPredicatedMveVpredROp(MachineInstrBuilder &MIB, unsigned Cond, unsigned Inactive)
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
unsigned getUndefRegState(bool B)
void addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB, Register DestReg)
unsigned ConstantMaterializationCost(unsigned Val, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns the number of instructions required to materialize the given constant in a register,...
unsigned getKillRegState(bool B)
bool rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, Register FrameReg, int &Offset, const ARMBaseInstrInfo &TII)
rewriteARMFrameIndex / rewriteT2FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
static bool isIndirectControlFlowNotComingBack(const MachineInstr &MI)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
ARMCC::CondCodes getInstrPredicate(const MachineInstr &MI, Register &PredReg)
getInstrPredicate - If instruction is predicated, returns its predicate condition,...
unsigned getMatchingCondBranchOpcode(unsigned Opc)
static bool isUncondBranchOpcode(int Opc)
auto partition(R &&Range, UnaryPredicate P)
Provide wrappers to std::partition which take ranges instead of having to pass begin/end explicitly.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
unsigned gettBLXrOpcode(const MachineFunction &MF)
static bool isSpeculationBarrierEndBBOpcode(int Opc)
unsigned getBLXOpcode(const MachineFunction &MF)
void addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB)
bool isV8EligibleForIT(const InstrType *Instr)
void emitARMRegPlusImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, const DebugLoc &dl, Register DestReg, Register BaseReg, int NumBytes, ARMCC::CondCodes Pred, Register PredReg, const ARMBaseInstrInfo &TII, unsigned MIFlags=0)
emitARMRegPlusImmediate / emitT2RegPlusImmediate - Emits a series of instructions to materialize a des...
unsigned convertAddSubFlagsOpcode(unsigned OldOpc)
Map pseudo instructions that imply an 'S' bit onto real opcodes.
ARM_MLxEntry - Record information about MLA / MLS instructions.
Map pseudo instructions that imply an 'S' bit onto real opcodes.
OutlinerCosts(const ARMSubtarget &target)
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t value() const
This is a hole in the type system and should not be abused.
Description of the encoding of one expression Op.
static constexpr LaneBitmask getAll()
static constexpr LaneBitmask getNone()
VarInfo - This represents the regions where a virtual register is live in the program.
static MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Used to describe a register and immediate addition.
RegisterPressure computed within a region of instructions delimited by TopPos and BottomPos.
A pair composed of a pair of a register and a sub-register index, and another sub-register index.
A pair composed of a register and a sub-register index.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.
unsigned FrameConstructionID
Target-defined identifier for constructing a frame for this function.
std::vector< Candidate > Candidates