73#define DEBUG_TYPE "arm-instrinfo"
75#define GET_INSTRINFO_CTOR_DTOR
76#include "ARMGenInstrInfo.inc"
90 { ARM::VMLAS, ARM::VMULS, ARM::VADDS,
false,
false },
91 { ARM::VMLSS, ARM::VMULS, ARM::VSUBS,
false,
false },
92 { ARM::VMLAD, ARM::VMULD, ARM::VADDD,
false,
false },
93 { ARM::VMLSD, ARM::VMULD, ARM::VSUBD,
false,
false },
94 { ARM::VNMLAS, ARM::VNMULS, ARM::VSUBS,
true,
false },
95 { ARM::VNMLSS, ARM::VMULS, ARM::VSUBS,
true,
false },
96 { ARM::VNMLAD, ARM::VNMULD, ARM::VSUBD,
true,
false },
97 { ARM::VNMLSD, ARM::VMULD, ARM::VSUBD,
true,
false },
100 { ARM::VMLAfd, ARM::VMULfd, ARM::VADDfd,
false,
false },
101 { ARM::VMLSfd, ARM::VMULfd, ARM::VSUBfd,
false,
false },
102 { ARM::VMLAfq, ARM::VMULfq, ARM::VADDfq,
false,
false },
103 { ARM::VMLSfq, ARM::VMULfq, ARM::VSUBfq,
false,
false },
104 { ARM::VMLAslfd, ARM::VMULslfd, ARM::VADDfd,
false,
true },
105 { ARM::VMLSslfd, ARM::VMULslfd, ARM::VSUBfd,
false,
true },
106 { ARM::VMLAslfq, ARM::VMULslfq, ARM::VADDfq,
false,
true },
107 { ARM::VMLSslfq, ARM::VMULslfq, ARM::VSUBfq,
false,
true },
114 for (
unsigned i = 0, e = std::size(
ARM_MLxTable); i != e; ++i) {
115 if (!MLxEntryMap.insert(std::make_pair(
ARM_MLxTable[i].MLxOpc, i)).second)
127 if (usePreRAHazardRecognizer()) {
129 static_cast<const ARMSubtarget *
>(STI)->getInstrItineraryData();
149 std::make_unique<ARMBankConflictHazardRecognizer>(DAG, 0x4,
true));
165 if (Subtarget.isThumb2() || Subtarget.hasVFP2Base())
186 bool AllowModify)
const {
191 if (
I ==
MBB.instr_begin())
201 bool CantAnalyze =
false;
205 while (
I->isDebugInstr() || !
I->isTerminator() ||
207 I->getOpcode() == ARM::t2DoLoopStartTP){
208 if (
I ==
MBB.instr_begin())
219 TBB =
I->getOperand(0).getMBB();
225 assert(!FBB &&
"FBB should have been null.");
227 TBB =
I->getOperand(0).getMBB();
228 Cond.push_back(
I->getOperand(1));
229 Cond.push_back(
I->getOperand(2));
230 }
else if (
I->isReturn()) {
233 }
else if (
I->getOpcode() == ARM::t2LoopEnd &&
240 TBB =
I->getOperand(1).getMBB();
242 Cond.push_back(
I->getOperand(0));
264 while (DI !=
MBB.instr_end()) {
287 if (
I ==
MBB.instr_begin())
299 int *BytesRemoved)
const {
300 assert(!BytesRemoved &&
"code size not handled");
311 I->eraseFromParent();
315 if (
I ==
MBB.begin())
return 1;
321 I->eraseFromParent();
330 int *BytesAdded)
const {
331 assert(!BytesAdded &&
"code size not handled");
340 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
342 "ARM branch conditions have two or three components!");
352 }
else if (
Cond.size() == 2) {
363 if (
Cond.size() == 2)
368 else if (
Cond.size() == 3)
379 if (
Cond.size() == 2) {
391 while (++
I != E &&
I->isInsideBundle()) {
392 int PIdx =
I->findFirstPredOperandIdx();
393 if (PIdx != -1 &&
I->getOperand(PIdx).getImm() !=
ARMCC::AL)
399 int PIdx =
MI.findFirstPredOperandIdx();
400 return PIdx != -1 &&
MI.getOperand(PIdx).getImm() !=
ARMCC::AL;
408 std::string GenericComment =
410 if (!GenericComment.empty())
411 return GenericComment;
415 return std::string();
419 int FirstPredOp =
MI.findFirstPredOperandIdx();
420 if (FirstPredOp != (
int)
OpIdx)
421 return std::string();
423 std::string CC =
"CC::";
430 unsigned Opc =
MI.getOpcode();
439 int PIdx =
MI.findFirstPredOperandIdx();
443 MI.getOperand(PIdx+1).setReg(Pred[1].
getReg());
450 "CPSR def isn't expected operand");
451 assert((
MI.getOperand(1).isDead() ||
452 MI.getOperand(1).getReg() != ARM::CPSR) &&
453 "if conversion tried to stop defining used CPSR");
454 MI.getOperand(1).setReg(ARM::NoRegister);
464 if (Pred1.
size() > 2 || Pred2.
size() > 2)
489 std::vector<MachineOperand> &Pred,
490 bool SkipDead)
const {
493 bool ClobbersCPSR = MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR);
494 bool IsCPSR = MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR;
495 if (ClobbersCPSR || IsCPSR) {
513 for (
const auto &MO :
MI.operands())
514 if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
520 switch (
MI->getOpcode()) {
521 default:
return true;
552 if (!
MI.isPredicable())
590 if (!MO.isReg() || MO.isUndef() || MO.isUse())
592 if (MO.getReg() != ARM::CPSR)
612 switch (
MI.getOpcode()) {
619 return MCID.getSize();
620 case TargetOpcode::BUNDLE:
621 return getInstBundleLength(
MI);
622 case TargetOpcode::COPY:
627 case ARM::CONSTPOOL_ENTRY:
628 case ARM::JUMPTABLE_INSTS:
629 case ARM::JUMPTABLE_ADDRS:
630 case ARM::JUMPTABLE_TBB:
631 case ARM::JUMPTABLE_TBH:
634 return MI.getOperand(2).getImm();
636 return MI.getOperand(1).getImm();
638 case ARM::INLINEASM_BR: {
640 unsigned Size = getInlineAsmLength(
MI.getOperand(0).getSymbolName(), *MAI);
648unsigned ARMBaseInstrInfo::getInstBundleLength(
const MachineInstr &
MI)
const {
652 while (++
I != E &&
I->isInsideBundle()) {
653 assert(!
I->isBundle() &&
"No nested bundle!");
663 unsigned Opc = Subtarget.isThumb()
664 ? (Subtarget.isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
672 if (Subtarget.isMClass())
683 unsigned Opc = Subtarget.isThumb()
684 ? (Subtarget.isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
689 if (Subtarget.isMClass())
718 unsigned Cond,
unsigned Inactive) {
728 bool RenamableSrc)
const {
729 bool GPRDest = ARM::GPRRegClass.contains(DestReg);
730 bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
732 if (GPRDest && GPRSrc) {
740 bool SPRDest = ARM::SPRRegClass.contains(DestReg);
741 bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);
744 if (SPRDest && SPRSrc)
746 else if (GPRDest && SPRSrc)
748 else if (SPRDest && GPRSrc)
750 else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) && Subtarget.hasFP64())
752 else if (ARM::QPRRegClass.
contains(DestReg, SrcReg))
753 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MQPRCopy;
758 if (
Opc == ARM::VORRq ||
Opc == ARM::MVE_VORR)
760 if (
Opc == ARM::MVE_VORR)
762 else if (
Opc != ARM::MQPRCopy)
768 unsigned BeginIdx = 0;
769 unsigned SubRegs = 0;
773 if (ARM::QQPRRegClass.
contains(DestReg, SrcReg)) {
774 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
775 BeginIdx = ARM::qsub_0;
777 }
else if (ARM::QQQQPRRegClass.
contains(DestReg, SrcReg)) {
778 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
779 BeginIdx = ARM::qsub_0;
782 }
else if (ARM::DPairRegClass.
contains(DestReg, SrcReg)) {
784 BeginIdx = ARM::dsub_0;
786 }
else if (ARM::DTripleRegClass.
contains(DestReg, SrcReg)) {
788 BeginIdx = ARM::dsub_0;
790 }
else if (ARM::DQuadRegClass.
contains(DestReg, SrcReg)) {
792 BeginIdx = ARM::dsub_0;
794 }
else if (ARM::GPRPairRegClass.
contains(DestReg, SrcReg)) {
795 Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
796 BeginIdx = ARM::gsub_0;
798 }
else if (ARM::DPairSpcRegClass.
contains(DestReg, SrcReg)) {
800 BeginIdx = ARM::dsub_0;
803 }
else if (ARM::DTripleSpcRegClass.
contains(DestReg, SrcReg)) {
805 BeginIdx = ARM::dsub_0;
808 }
else if (ARM::DQuadSpcRegClass.
contains(DestReg, SrcReg)) {
810 BeginIdx = ARM::dsub_0;
813 }
else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) &&
814 !Subtarget.hasFP64()) {
816 BeginIdx = ARM::ssub_0;
818 }
else if (SrcReg == ARM::CPSR) {
821 }
else if (DestReg == ARM::CPSR) {
824 }
else if (DestReg == ARM::VPR) {
830 }
else if (SrcReg == ARM::VPR) {
836 }
else if (DestReg == ARM::FPSCR_NZCV) {
838 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
842 }
else if (SrcReg == ARM::FPSCR_NZCV) {
844 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
850 assert(
Opc &&
"Impossible reg-to-reg copy");
856 if (
TRI->regsOverlap(SrcReg,
TRI->getSubReg(DestReg, BeginIdx))) {
857 BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
863 for (
unsigned i = 0; i != SubRegs; ++i) {
864 Register Dst =
TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
865 Register Src =
TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
866 assert(Dst && Src &&
"Bad sub-register");
868 assert(!DstRegs.
count(Src) &&
"destructive vector copy");
873 if (
Opc == ARM::VORRq ||
Opc == ARM::MVE_VORR) {
877 if (
Opc == ARM::MVE_VORR)
882 if (
Opc == ARM::MOVr)
891std::optional<DestSourcePair>
900 if (!
MI.isMoveReg() ||
901 (
MI.getOpcode() == ARM::VORRq &&
902 MI.getOperand(1).getReg() !=
MI.getOperand(2).getReg()))
907std::optional<ParamLoadedValue>
911 Register DstReg = DstSrcPair->Destination->getReg();
942 return MIB.
addReg(Reg, State);
946 return MIB.
addReg(Reg, State, SubIdx);
951 Register SrcReg,
bool isKill,
int FI,
964 switch (
TRI.getSpillSize(*RC)) {
966 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
977 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
984 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
991 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
998 }
else if (ARM::cl_FPSCR_NZCVRegClass.hasSubClassEq(RC)) {
1009 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1016 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1017 if (Subtarget.hasV5TEOps()) {
1020 AddDReg(MIB, SrcReg, ARM::gsub_1, {});
1031 AddDReg(MIB, SrcReg, ARM::gsub_1, {});
1037 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1053 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1054 Subtarget.hasMVEIntegerOps()) {
1059 .addMemOperand(MMO);
1065 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1068 Subtarget.hasNEON()) {
1082 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, {});
1083 AddDReg(MIB, SrcReg, ARM::dsub_2, {});
1089 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1090 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1091 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1093 Subtarget.hasNEON()) {
1102 }
else if (Subtarget.hasMVEIntegerOps()) {
1114 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, {});
1115 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, {});
1116 AddDReg(MIB, SrcReg, ARM::dsub_3, {});
1122 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1123 Subtarget.hasMVEIntegerOps()) {
1128 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1134 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, {});
1135 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, {});
1136 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_3, {});
1137 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_4, {});
1138 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_5, {});
1139 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_6, {});
1140 AddDReg(MIB, SrcReg, ARM::dsub_7, {});
1150 int &FrameIndex)
const {
1151 switch (
MI.getOpcode()) {
1155 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1156 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1157 MI.getOperand(3).getImm() == 0) {
1158 FrameIndex =
MI.getOperand(1).getIndex();
1159 return MI.getOperand(0).getReg();
1168 case ARM::VSTR_P0_off:
1169 case ARM::VSTR_FPSCR_NZCVQC_off:
1170 case ARM::MVE_VSTRWU32:
1171 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1172 MI.getOperand(2).getImm() == 0) {
1173 FrameIndex =
MI.getOperand(1).getIndex();
1174 return MI.getOperand(0).getReg();
1178 case ARM::VST1d64TPseudo:
1179 case ARM::VST1d64QPseudo:
1180 if (
MI.getOperand(0).isFI() &&
MI.getOperand(2).getSubReg() == 0) {
1181 FrameIndex =
MI.getOperand(0).getIndex();
1182 return MI.getOperand(2).getReg();
1186 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1187 FrameIndex =
MI.getOperand(1).getIndex();
1188 return MI.getOperand(0).getReg();
1191 case ARM::MQQPRStore:
1192 case ARM::MQQQQPRStore:
1193 if (
MI.getOperand(1).isFI()) {
1194 FrameIndex =
MI.getOperand(1).getIndex();
1195 return MI.getOperand(0).getReg();
1204 int &FrameIndex)
const {
1206 if (
MI.mayStore() && hasStoreToStackSlot(
MI,
Accesses) &&
1223 if (
I !=
MBB.end())
DL =
I->getDebugLoc();
1232 switch (
TRI.getSpillSize(*RC)) {
1234 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1244 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1250 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1256 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1262 }
else if (ARM::cl_FPSCR_NZCVRegClass.hasSubClassEq(RC)) {
1272 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1278 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1281 if (Subtarget.hasV5TEOps()) {
1304 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1317 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1318 Subtarget.hasMVEIntegerOps()) {
1320 MIB.addFrameIndex(FI)
1322 .addMemOperand(MMO);
1328 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1330 Subtarget.hasNEON()) {
1351 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1352 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1353 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1355 Subtarget.hasNEON()) {
1361 }
else if (Subtarget.hasMVEIntegerOps()) {
1381 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1382 Subtarget.hasMVEIntegerOps()) {
1386 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1410 int &FrameIndex)
const {
1411 switch (
MI.getOpcode()) {
1415 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1416 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1417 MI.getOperand(3).getImm() == 0) {
1418 FrameIndex =
MI.getOperand(1).getIndex();
1419 return MI.getOperand(0).getReg();
1428 case ARM::VLDR_P0_off:
1429 case ARM::VLDR_FPSCR_NZCVQC_off:
1430 case ARM::MVE_VLDRWU32:
1431 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1432 MI.getOperand(2).getImm() == 0) {
1433 FrameIndex =
MI.getOperand(1).getIndex();
1434 return MI.getOperand(0).getReg();
1438 case ARM::VLD1d8TPseudo:
1439 case ARM::VLD1d16TPseudo:
1440 case ARM::VLD1d32TPseudo:
1441 case ARM::VLD1d64TPseudo:
1442 case ARM::VLD1d8QPseudo:
1443 case ARM::VLD1d16QPseudo:
1444 case ARM::VLD1d32QPseudo:
1445 case ARM::VLD1d64QPseudo:
1446 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1447 FrameIndex =
MI.getOperand(1).getIndex();
1448 return MI.getOperand(0).getReg();
1452 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1453 FrameIndex =
MI.getOperand(1).getIndex();
1454 return MI.getOperand(0).getReg();
1457 case ARM::MQQPRLoad:
1458 case ARM::MQQQQPRLoad:
1459 if (
MI.getOperand(1).isFI()) {
1460 FrameIndex =
MI.getOperand(1).getIndex();
1461 return MI.getOperand(0).getReg();
1470 int &FrameIndex)
const {
1472 if (
MI.mayLoad() && hasLoadFromStackSlot(
MI,
Accesses) &&
1486 bool isThumb2 = Subtarget.
isThumb2();
1493 if (isThumb1 || !
MI->getOperand(1).isDead()) {
1495 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA_UPD
1496 : isThumb1 ? ARM::tLDMIA_UPD
1500 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
1503 if (isThumb1 || !
MI->getOperand(0).isDead()) {
1504 MachineOperand STWb(
MI->getOperand(0));
1505 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA_UPD
1506 : isThumb1 ? ARM::tSTMIA_UPD
1510 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
1513 MachineOperand LDBase(
MI->getOperand(3));
1516 MachineOperand STBase(
MI->getOperand(2));
1525 [&
TRI](
const unsigned &Reg1,
const unsigned &Reg2) ->
bool {
1526 return TRI.getEncodingValue(Reg1) <
1527 TRI.getEncodingValue(Reg2);
1530 for (
const auto &
Reg : ScratchRegs) {
1539 if (
MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
1540 expandLoadStackGuard(
MI);
1541 MI.getParent()->erase(
MI);
1545 if (
MI.getOpcode() == ARM::MEMCPY) {
1554 if (!
MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())
1559 Register DstRegS =
MI.getOperand(0).getReg();
1560 Register SrcRegS =
MI.getOperand(1).getReg();
1561 if (!ARM::SPRRegClass.
contains(DstRegS, SrcRegS))
1566 TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0, &ARM::DPRRegClass);
1568 TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0, &ARM::DPRRegClass);
1569 if (!DstRegD || !SrcRegD)
1575 if (!
MI.definesRegister(DstRegD,
TRI) ||
MI.readsRegister(DstRegD,
TRI))
1579 if (
MI.getOperand(0).isDead())
1588 int ImpDefIdx =
MI.findRegisterDefOperandIdx(DstRegD,
nullptr);
1589 if (ImpDefIdx != -1)
1590 MI.removeOperand(ImpDefIdx);
1593 MI.setDesc(
get(ARM::VMOVD));
1594 MI.getOperand(0).setReg(DstRegD);
1595 MI.getOperand(1).setReg(SrcRegD);
1602 MI.getOperand(1).setIsUndef();
1607 if (
MI.getOperand(1).isKill()) {
1608 MI.getOperand(1).setIsKill(
false);
1609 MI.addRegisterKilled(SrcRegS,
TRI,
true);
1623 assert(MCPE.isMachineConstantPoolEntry() &&
1624 "Expecting a machine constantpool entry!");
1674 case ARM::tLDRpci_pic:
1675 case ARM::t2LDRpci_pic: {
1695 switch (
I->getOpcode()) {
1696 case ARM::tLDRpci_pic:
1697 case ARM::t2LDRpci_pic: {
1699 unsigned CPI =
I->getOperand(1).getIndex();
1701 I->getOperand(1).setIndex(CPI);
1702 I->getOperand(2).setImm(PCLabelId);
1706 if (!
I->isBundledWithSucc())
1717 if (Opcode == ARM::t2LDRpci || Opcode == ARM::t2LDRpci_pic ||
1718 Opcode == ARM::tLDRpci || Opcode == ARM::tLDRpci_pic ||
1719 Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1720 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1721 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1722 Opcode == ARM::t2MOV_ga_pcrel) {
1733 if (Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1734 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1735 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1736 Opcode == ARM::t2MOV_ga_pcrel)
1748 if (isARMCP0 && isARMCP1) {
1754 }
else if (!isARMCP0 && !isARMCP1) {
1758 }
else if (Opcode == ARM::PICLDR) {
1766 if (Addr0 != Addr1) {
1802 int64_t &Offset2)
const {
1804 if (Subtarget.isThumb1Only())
return false;
1809 auto IsLoadOpcode = [&](
unsigned Opcode) {
1824 case ARM::t2LDRSHi8:
1826 case ARM::t2LDRBi12:
1827 case ARM::t2LDRSHi12:
1868 int64_t Offset1, int64_t Offset2,
1869 unsigned NumLoads)
const {
1871 if (Subtarget.isThumb1Only())
return false;
1873 assert(Offset2 > Offset1);
1875 if ((Offset2 - Offset1) / 8 > 64)
1906 if (
MI.isDebugInstr())
1910 if (
MI.isTerminator() ||
MI.isPosition())
1914 if (
MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1928 while (++
I !=
MBB->end() &&
I->isDebugInstr())
1930 if (
I !=
MBB->end() &&
I->getOpcode() == ARM::t2IT)
1941 if (!
MI.isCall() &&
MI.definesRegister(ARM::SP,
nullptr))
1949 unsigned NumCycles,
unsigned ExtraPredCycles,
1957 if (
MBB.getParent()->getFunction().hasOptSize()) {
1959 if (!Pred->empty()) {
1961 if (LastMI->
getOpcode() == ARM::t2Bcc) {
1970 MBB, 0, 0, Probability);
1975 unsigned TCycles,
unsigned TExtra,
1977 unsigned FCycles,
unsigned FExtra,
1986 if (Subtarget.isThumb2() &&
TBB.getParent()->getFunction().hasMinSize()) {
1994 const unsigned ScalingUpFactor = 1024;
1996 unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
1997 unsigned UnpredCost;
1998 if (!Subtarget.hasBranchPredictor()) {
2001 unsigned NotTakenBranchCost = 1;
2002 unsigned TakenBranchCost = Subtarget.getMispredictionPenalty();
2003 unsigned TUnpredCycles, FUnpredCycles;
2006 TUnpredCycles = TCycles + NotTakenBranchCost;
2007 FUnpredCycles = TakenBranchCost;
2010 TUnpredCycles = TCycles + TakenBranchCost;
2011 FUnpredCycles = FCycles + NotTakenBranchCost;
2014 PredCost -= 1 * ScalingUpFactor;
2017 unsigned TUnpredCost = Probability.
scale(TUnpredCycles * ScalingUpFactor);
2018 unsigned FUnpredCost = Probability.
getCompl().
scale(FUnpredCycles * ScalingUpFactor);
2019 UnpredCost = TUnpredCost + FUnpredCost;
2022 if (Subtarget.isThumb2() && TCycles + FCycles > 4) {
2023 PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
2026 unsigned TUnpredCost = Probability.
scale(TCycles * ScalingUpFactor);
2027 unsigned FUnpredCost =
2029 UnpredCost = TUnpredCost + FUnpredCost;
2030 UnpredCost += 1 * ScalingUpFactor;
2031 UnpredCost += Subtarget.getMispredictionPenalty() * ScalingUpFactor / 10;
2034 return PredCost <= UnpredCost;
2039 unsigned NumInsts)
const {
2043 if (!Subtarget.isThumb2())
2047 unsigned MaxInsts = Subtarget.restrictIT() ? 1 : 4;
2056 if (
MI.getOpcode() == ARM::t2Bcc &&
2068 if (Subtarget.isThumb2())
2079 return Subtarget.isProfitableToUnpredicate();
2087 int PIdx =
MI.findFirstPredOperandIdx();
2093 PredReg =
MI.getOperand(PIdx+1).getReg();
2102 if (
Opc == ARM::t2B)
2111 unsigned OpIdx2)
const {
2112 switch (
MI.getOpcode()) {
2114 case ARM::t2MOVCCr: {
2119 if (CC ==
ARMCC::AL || PredReg != ARM::CPSR)
2139 if (!Reg.isVirtual())
2153 if (MO.isFI() || MO.isCPI() || MO.isJTI())
2160 if (MO.getReg().isPhysical())
2162 if (MO.isDef() && !MO.isDead())
2165 bool DontMoveAcrossStores =
true;
2166 if (!
MI->isSafeToMove(DontMoveAcrossStores))
2174 bool PreferFalse)
const {
2175 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2176 "Unknown select instruction");
2179 bool Invert = !
DefMI;
2181 DefMI = canFoldIntoMOVCC(
MI.getOperand(1).getReg(), MRI,
this);
2188 Register DestReg =
MI.getOperand(0).getReg();
2204 i != e && !DefDesc.
operands()[i].isPredicate(); ++i)
2207 unsigned CondCode =
MI.getOperand(3).getImm();
2212 NewMI.
add(
MI.getOperand(4));
2223 NewMI.
add(FalseReg);
2234 if (
DefMI->getParent() !=
MI.getParent())
2238 DefMI->eraseFromParent();
2254 {ARM::ADDSri, ARM::ADDri},
2255 {ARM::ADDSrr, ARM::ADDrr},
2256 {ARM::ADDSrsi, ARM::ADDrsi},
2257 {ARM::ADDSrsr, ARM::ADDrsr},
2259 {ARM::SUBSri, ARM::SUBri},
2260 {ARM::SUBSrr, ARM::SUBrr},
2261 {ARM::SUBSrsi, ARM::SUBrsi},
2262 {ARM::SUBSrsr, ARM::SUBrsr},
2264 {ARM::RSBSri, ARM::RSBri},
2265 {ARM::RSBSrsi, ARM::RSBrsi},
2266 {ARM::RSBSrsr, ARM::RSBrsr},
2268 {ARM::tADDSi3, ARM::tADDi3},
2269 {ARM::tADDSi8, ARM::tADDi8},
2270 {ARM::tADDSrr, ARM::tADDrr},
2271 {ARM::tADCS, ARM::tADC},
2273 {ARM::tSUBSi3, ARM::tSUBi3},
2274 {ARM::tSUBSi8, ARM::tSUBi8},
2275 {ARM::tSUBSrr, ARM::tSUBrr},
2276 {ARM::tSBCS, ARM::tSBC},
2277 {ARM::tRSBS, ARM::tRSB},
2278 {ARM::tLSLSri, ARM::tLSLri},
2280 {ARM::t2ADDSri, ARM::t2ADDri},
2281 {ARM::t2ADDSrr, ARM::t2ADDrr},
2282 {ARM::t2ADDSrs, ARM::t2ADDrs},
2284 {ARM::t2SUBSri, ARM::t2SUBri},
2285 {ARM::t2SUBSrr, ARM::t2SUBrr},
2286 {ARM::t2SUBSrs, ARM::t2SUBrs},
2288 {ARM::t2RSBSri, ARM::t2RSBri},
2289 {ARM::t2RSBSrs, ARM::t2RSBrs},
2294 if (OldOpc == Entry.PseudoOpc)
2295 return Entry.MachineOpc;
2306 if (NumBytes == 0 && DestReg != BaseReg) {
2315 bool isSub = NumBytes < 0;
2316 if (isSub) NumBytes = -NumBytes;
2321 assert(ThisVal &&
"Didn't extract field correctly");
2324 NumBytes &= ~ThisVal;
2329 unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
2342 unsigned NumBytes) {
2353 if (!IsPush && !IsPop)
2356 bool IsVFPPushPop =
MI->getOpcode() == ARM::VSTMDDB_UPD ||
2357 MI->getOpcode() == ARM::VLDMDIA_UPD;
2358 bool IsT1PushPop =
MI->getOpcode() == ARM::tPUSH ||
2359 MI->getOpcode() == ARM::tPOP ||
2360 MI->getOpcode() == ARM::tPOP_RET;
2362 assert((IsT1PushPop || (
MI->getOperand(0).getReg() == ARM::SP &&
2363 MI->getOperand(1).getReg() == ARM::SP)) &&
2364 "trying to fold sp update into non-sp-updating push/pop");
2369 if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
2374 int RegListIdx = IsT1PushPop ? 2 : 4;
2377 unsigned RegsNeeded;
2380 RegsNeeded = NumBytes / 8;
2381 RegClass = &ARM::DPRRegClass;
2383 RegsNeeded = NumBytes / 4;
2384 RegClass = &ARM::GPRRegClass;
2394 unsigned FirstRegEnc = -1;
2397 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i) {
2402 TRI->getEncodingValue(MO.
getReg()) < FirstRegEnc)
2403 FirstRegEnc =
TRI->getEncodingValue(MO.
getReg());
2406 const MCPhysReg *CSRegs =
TRI->getCalleeSavedRegs(&MF);
2409 for (
int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
2412 if (IsT1PushPop && CurRegEnc >
TRI->getEncodingValue(ARM::R7))
2419 false,
false,
true));
2429 MI->getParent()->computeRegisterLiveness(
TRI, CurReg,
MI) !=
2451 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i)
2452 MI->removeOperand(i);
2465 unsigned Opcode =
MI.getOpcode();
2471 if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
2474 if (Opcode == ARM::ADDri) {
2475 Offset +=
MI.getOperand(FrameRegIdx+1).getImm();
2478 MI.setDesc(
TII.get(ARM::MOVr));
2479 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2480 MI.removeOperand(FrameRegIdx+1);
2486 MI.setDesc(
TII.get(ARM::SUBri));
2492 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2493 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(
Offset);
2508 "Bit extraction didn't work?");
2509 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
2511 unsigned ImmIdx = 0;
2513 unsigned NumBits = 0;
2517 ImmIdx = FrameRegIdx + 1;
2518 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2522 ImmIdx = FrameRegIdx+2;
2529 ImmIdx = FrameRegIdx+2;
2540 ImmIdx = FrameRegIdx+1;
2548 ImmIdx = FrameRegIdx+1;
2558 ImmIdx = FrameRegIdx+1;
2559 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2568 Offset += InstrOffs * Scale;
2569 assert((
Offset & (Scale-1)) == 0 &&
"Can't encode this offset!");
2579 int ImmedOffset =
Offset / Scale;
2580 unsigned Mask = (1 << NumBits) - 1;
2581 if ((
unsigned)
Offset <= Mask * Scale) {
2583 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2589 ImmedOffset = -ImmedOffset;
2591 ImmedOffset |= 1 << NumBits;
2599 ImmedOffset = ImmedOffset & Mask;
2602 ImmedOffset = -ImmedOffset;
2604 ImmedOffset |= 1 << NumBits;
2620 Register &SrcReg2, int64_t &CmpMask,
2621 int64_t &CmpValue)
const {
2622 switch (
MI.getOpcode()) {
2627 SrcReg =
MI.getOperand(0).getReg();
2630 CmpValue =
MI.getOperand(1).getImm();
2635 SrcReg =
MI.getOperand(0).getReg();
2636 SrcReg2 =
MI.getOperand(1).getReg();
2642 SrcReg =
MI.getOperand(0).getReg();
2644 CmpMask =
MI.getOperand(1).getImm();
2657 int CmpMask,
bool CommonUse) {
2658 switch (
MI->getOpcode()) {
2661 if (CmpMask !=
MI->getOperand(2).getImm())
2663 if (SrcReg ==
MI->getOperand(CommonUse ? 1 : 0).getReg())
2753 switch (
MI->getOpcode()) {
2754 default:
return false;
2850 if (!
MI)
return false;
2853 if (CmpMask != ~0) {
2859 if (UI->getParent() != CmpInstr.
getParent())
2868 if (!
MI)
return false;
2877 if (
I ==
B)
return false;
2888 else if (
MI->getParent() != CmpInstr.
getParent() || CmpValue != 0) {
2893 if (CmpInstr.
getOpcode() == ARM::CMPri ||
2901 bool IsThumb1 =
false;
2918 if (
MI && IsThumb1) {
2920 if (
I != E && !
MI->readsRegister(ARM::CPSR,
TRI)) {
2921 bool CanReorder =
true;
2922 for (;
I != E; --
I) {
2923 if (
I->getOpcode() != ARM::tMOVi8) {
2929 MI =
MI->removeFromParent();
2940 bool SubAddIsThumb1 =
false;
2955 if (Instr.modifiesRegister(ARM::CPSR,
TRI) ||
2956 Instr.readsRegister(ARM::CPSR,
TRI))
2978 IsThumb1 = SubAddIsThumb1;
2993 bool isSafe =
false;
2996 while (!isSafe && ++
I != E) {
2998 for (
unsigned IO = 0, EO = Instr.getNumOperands();
2999 !isSafe && IO != EO; ++IO) {
3013 bool IsInstrVSel =
true;
3014 switch (Instr.getOpcode()) {
3016 IsInstrVSel =
false;
3050 bool IsSub =
Opc == ARM::SUBrr ||
Opc == ARM::t2SUBrr ||
3051 Opc == ARM::SUBri ||
Opc == ARM::t2SUBri ||
3052 Opc == ARM::tSUBrr ||
Opc == ARM::tSUBi3 ||
3054 unsigned OpI =
Opc != ARM::tSUBrr ? 1 : 2;
3066 std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
3100 if (Succ->isLiveIn(ARM::CPSR))
3107 unsigned CPSRRegNum =
MI->getNumExplicitOperands() - 1;
3108 MI->getOperand(CPSRRegNum).setReg(ARM::CPSR);
3109 MI->getOperand(CPSRRegNum).setIsDef(
true);
3117 for (
auto &[MO,
Cond] : OperandsToUpdate)
3120 MI->clearRegisterDeads(ARM::CPSR);
3134 int64_t CmpMask, CmpValue;
3136 if (
Next !=
MI.getParent()->end() &&
3147 unsigned DefOpc =
DefMI.getOpcode();
3148 if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm &&
3149 DefOpc != ARM::tMOVi32imm)
3151 if (!
DefMI.getOperand(1).isImm())
3171 if (
UseMI.getOperand(
NumOps - 1).getReg() == ARM::CPSR)
3177 unsigned UseOpc =
UseMI.getOpcode();
3178 unsigned NewUseOpc = 0;
3180 uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
3181 bool Commute =
false;
3183 default:
return false;
3191 case ARM::t2EORrr: {
3192 Commute =
UseMI.getOperand(2).getReg() != Reg;
3197 if (UseOpc == ARM::SUBrr && Commute)
3203 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri;
3206 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::SUBri : ARM::ADDri;
3220 case ARM::ORRrr: NewUseOpc = ARM::ORRri;
break;
3221 case ARM::EORrr: NewUseOpc = ARM::EORri;
break;
3225 case ARM::t2SUBrr: {
3226 if (UseOpc == ARM::t2SUBrr && Commute)
3231 const bool ToSP =
DefMI.getOperand(0).getReg() == ARM::SP;
3232 const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
3233 const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
3235 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB;
3238 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD;
3253 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri;
break;
3254 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri;
break;
3261 unsigned OpIdx = Commute ? 2 : 1;
3263 bool isKill =
UseMI.getOperand(
OpIdx).isKill();
3273 UseMI.getOperand(1).setReg(NewReg);
3274 UseMI.getOperand(1).setIsKill();
3275 UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
3276 DefMI.eraseFromParent();
3283 case ARM::t2ADDspImm:
3284 case ARM::t2SUBspImm:
3294 switch (
MI.getOpcode()) {
3298 assert(UOps >= 0 &&
"bad # UOps");
3306 unsigned ShOpVal =
MI.getOperand(3).getImm();
3311 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3319 if (!
MI.getOperand(2).getReg())
3322 unsigned ShOpVal =
MI.getOperand(3).getImm();
3327 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3337 case ARM::LDRSB_POST:
3338 case ARM::LDRSH_POST: {
3341 return (Rt == Rm) ? 4 : 3;
3344 case ARM::LDR_PRE_REG:
3345 case ARM::LDRB_PRE_REG: {
3350 unsigned ShOpVal =
MI.getOperand(4).getImm();
3355 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3361 case ARM::STR_PRE_REG:
3362 case ARM::STRB_PRE_REG: {
3363 unsigned ShOpVal =
MI.getOperand(4).getImm();
3368 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3375 case ARM::STRH_PRE: {
3385 case ARM::LDR_POST_REG:
3386 case ARM::LDRB_POST_REG:
3387 case ARM::LDRH_POST: {
3390 return (Rt == Rm) ? 3 : 2;
3393 case ARM::LDR_PRE_IMM:
3394 case ARM::LDRB_PRE_IMM:
3395 case ARM::LDR_POST_IMM:
3396 case ARM::LDRB_POST_IMM:
3397 case ARM::STRB_POST_IMM:
3398 case ARM::STRB_POST_REG:
3399 case ARM::STRB_PRE_IMM:
3400 case ARM::STRH_POST:
3401 case ARM::STR_POST_IMM:
3402 case ARM::STR_POST_REG:
3403 case ARM::STR_PRE_IMM:
3406 case ARM::LDRSB_PRE:
3407 case ARM::LDRSH_PRE: {
3414 unsigned ShOpVal =
MI.getOperand(4).getImm();
3419 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3432 return (Rt == Rn) ? 3 : 2;
3443 case ARM::LDRD_POST:
3444 case ARM::t2LDRD_POST:
3447 case ARM::STRD_POST:
3448 case ARM::t2STRD_POST:
3451 case ARM::LDRD_PRE: {
3458 return (Rt == Rn) ? 4 : 3;
3461 case ARM::t2LDRD_PRE: {
3464 return (Rt == Rn) ? 4 : 3;
3467 case ARM::STRD_PRE: {
3475 case ARM::t2STRD_PRE:
3478 case ARM::t2LDR_POST:
3479 case ARM::t2LDRB_POST:
3480 case ARM::t2LDRB_PRE:
3481 case ARM::t2LDRSBi12:
3482 case ARM::t2LDRSBi8:
3483 case ARM::t2LDRSBpci:
3485 case ARM::t2LDRH_POST:
3486 case ARM::t2LDRH_PRE:
3488 case ARM::t2LDRSB_POST:
3489 case ARM::t2LDRSB_PRE:
3490 case ARM::t2LDRSH_POST:
3491 case ARM::t2LDRSH_PRE:
3492 case ARM::t2LDRSHi12:
3493 case ARM::t2LDRSHi8:
3494 case ARM::t2LDRSHpci:
3498 case ARM::t2LDRDi8: {
3501 return (Rt == Rn) ? 3 : 2;
3504 case ARM::t2STRB_POST:
3505 case ARM::t2STRB_PRE:
3508 case ARM::t2STRH_POST:
3509 case ARM::t2STRH_PRE:
3511 case ARM::t2STR_POST:
3512 case ARM::t2STR_PRE:
3543 E =
MI.memoperands_end();
3545 Size += (*I)->getSize().getValue();
3552 return std::min(
Size / 4, 16U);
3557 unsigned UOps = 1 + NumRegs;
3561 case ARM::VLDMDIA_UPD:
3562 case ARM::VLDMDDB_UPD:
3563 case ARM::VLDMSIA_UPD:
3564 case ARM::VLDMSDB_UPD:
3565 case ARM::VSTMDIA_UPD:
3566 case ARM::VSTMDDB_UPD:
3567 case ARM::VSTMSIA_UPD:
3568 case ARM::VSTMSDB_UPD:
3569 case ARM::LDMIA_UPD:
3570 case ARM::LDMDA_UPD:
3571 case ARM::LDMDB_UPD:
3572 case ARM::LDMIB_UPD:
3573 case ARM::STMIA_UPD:
3574 case ARM::STMDA_UPD:
3575 case ARM::STMDB_UPD:
3576 case ARM::STMIB_UPD:
3577 case ARM::tLDMIA_UPD:
3578 case ARM::tSTMIA_UPD:
3579 case ARM::t2LDMIA_UPD:
3580 case ARM::t2LDMDB_UPD:
3581 case ARM::t2STMIA_UPD:
3582 case ARM::t2STMDB_UPD:
3585 case ARM::LDMIA_RET:
3587 case ARM::t2LDMIA_RET:
3596 if (!ItinData || ItinData->
isEmpty())
3600 unsigned Class =
Desc.getSchedClass();
3602 if (ItinUOps >= 0) {
3603 if (Subtarget.isSwift() && (
Desc.mayLoad() ||
Desc.mayStore()))
3609 unsigned Opc =
MI.getOpcode();
3628 case ARM::VLDMDIA_UPD:
3629 case ARM::VLDMDDB_UPD:
3631 case ARM::VLDMSIA_UPD:
3632 case ARM::VLDMSDB_UPD:
3634 case ARM::VSTMDIA_UPD:
3635 case ARM::VSTMDDB_UPD:
3637 case ARM::VSTMSIA_UPD:
3638 case ARM::VSTMSDB_UPD: {
3639 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands();
3640 return (NumRegs / 2) + (NumRegs % 2) + 1;
3643 case ARM::LDMIA_RET:
3648 case ARM::LDMIA_UPD:
3649 case ARM::LDMDA_UPD:
3650 case ARM::LDMDB_UPD:
3651 case ARM::LDMIB_UPD:
3656 case ARM::STMIA_UPD:
3657 case ARM::STMDA_UPD:
3658 case ARM::STMDB_UPD:
3659 case ARM::STMIB_UPD:
3661 case ARM::tLDMIA_UPD:
3662 case ARM::tSTMIA_UPD:
3666 case ARM::t2LDMIA_RET:
3669 case ARM::t2LDMIA_UPD:
3670 case ARM::t2LDMDB_UPD:
3673 case ARM::t2STMIA_UPD:
3674 case ARM::t2STMDB_UPD: {
3675 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands() + 1;
3676 switch (Subtarget.getLdStMultipleTiming()) {
3687 unsigned UOps = (NumRegs / 2);
3693 unsigned UOps = (NumRegs / 2);
3696 if ((NumRegs % 2) || !
MI.hasOneMemOperand() ||
3697 (*
MI.memoperands_begin())->getAlign() <
Align(8))
3707std::optional<unsigned>
3710 unsigned DefIdx,
unsigned DefAlign)
const {
3719 DefCycle = RegNo / 2 + 1;
3724 bool isSLoad =
false;
3729 case ARM::VLDMSIA_UPD:
3730 case ARM::VLDMSDB_UPD:
3737 if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3741 DefCycle = RegNo + 2;
3747std::optional<unsigned>
3750 unsigned DefIdx,
unsigned DefAlign)
const {
3757 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3760 DefCycle = RegNo / 2;
3765 }
else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3766 DefCycle = (RegNo / 2);
3769 if ((RegNo % 2) || DefAlign < 8)
3775 DefCycle = RegNo + 2;
3781std::optional<unsigned>
3784 unsigned UseIdx,
unsigned UseAlign)
const {
3790 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3792 UseCycle = RegNo / 2 + 1;
3795 }
else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3797 bool isSStore =
false;
3802 case ARM::VSTMSIA_UPD:
3803 case ARM::VSTMSDB_UPD:
3810 if ((isSStore && (RegNo % 2)) || UseAlign < 8)
3814 UseCycle = RegNo + 2;
3820std::optional<unsigned>
3823 unsigned UseIdx,
unsigned UseAlign)
const {
3829 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3830 UseCycle = RegNo / 2;
3835 }
else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3836 UseCycle = (RegNo / 2);
3839 if ((RegNo % 2) || UseAlign < 8)
3850 unsigned DefIdx,
unsigned DefAlign,
const MCInstrDesc &UseMCID,
3851 unsigned UseIdx,
unsigned UseAlign)
const {
3861 std::optional<unsigned> DefCycle;
3862 bool LdmBypass =
false;
3869 case ARM::VLDMDIA_UPD:
3870 case ARM::VLDMDDB_UPD:
3872 case ARM::VLDMSIA_UPD:
3873 case ARM::VLDMSDB_UPD:
3874 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3877 case ARM::LDMIA_RET:
3882 case ARM::LDMIA_UPD:
3883 case ARM::LDMDA_UPD:
3884 case ARM::LDMDB_UPD:
3885 case ARM::LDMIB_UPD:
3887 case ARM::tLDMIA_UPD:
3889 case ARM::t2LDMIA_RET:
3892 case ARM::t2LDMIA_UPD:
3893 case ARM::t2LDMDB_UPD:
3895 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3903 std::optional<unsigned> UseCycle;
3910 case ARM::VSTMDIA_UPD:
3911 case ARM::VSTMDDB_UPD:
3913 case ARM::VSTMSIA_UPD:
3914 case ARM::VSTMSDB_UPD:
3915 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
3922 case ARM::STMIA_UPD:
3923 case ARM::STMDA_UPD:
3924 case ARM::STMDB_UPD:
3925 case ARM::STMIB_UPD:
3926 case ARM::tSTMIA_UPD:
3931 case ARM::t2STMIA_UPD:
3932 case ARM::t2STMDB_UPD:
3933 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
3941 if (UseCycle > *DefCycle + 1)
3942 return std::nullopt;
3944 UseCycle = *DefCycle - *UseCycle + 1;
3945 if (UseCycle > 0u) {
3951 UseCycle = *UseCycle - 1;
3953 UseClass, UseIdx)) {
3954 UseCycle = *UseCycle - 1;
3963 unsigned &DefIdx,
unsigned &Dist) {
3968 assert(
II->isInsideBundle() &&
"Empty bundle?");
3971 while (
II->isInsideBundle()) {
3972 Idx =
II->findRegisterDefOperandIdx(
Reg,
TRI,
false,
true);
3979 assert(Idx != -1 &&
"Cannot find bundled definition!");
3986 unsigned &UseIdx,
unsigned &Dist) {
3990 assert(
II->isInsideBundle() &&
"Empty bundle?");
3995 while (
II !=
E &&
II->isInsideBundle()) {
3996 Idx =
II->findRegisterUseOperandIdx(
Reg,
TRI,
false);
3999 if (
II->getOpcode() != ARM::t2IT)
4027 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4037 case ARM::t2LDRSHs: {
4039 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4040 if (ShAmt == 0 || ShAmt == 2)
4045 }
else if (Subtarget.
isSwift()) {
4052 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4057 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4068 case ARM::t2LDRSHs: {
4070 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4071 if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
4078 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) {
4085 case ARM::VLD1q8wb_fixed:
4086 case ARM::VLD1q16wb_fixed:
4087 case ARM::VLD1q32wb_fixed:
4088 case ARM::VLD1q64wb_fixed:
4089 case ARM::VLD1q8wb_register:
4090 case ARM::VLD1q16wb_register:
4091 case ARM::VLD1q32wb_register:
4092 case ARM::VLD1q64wb_register:
4099 case ARM::VLD2d8wb_fixed:
4100 case ARM::VLD2d16wb_fixed:
4101 case ARM::VLD2d32wb_fixed:
4102 case ARM::VLD2q8wb_fixed:
4103 case ARM::VLD2q16wb_fixed:
4104 case ARM::VLD2q32wb_fixed:
4105 case ARM::VLD2d8wb_register:
4106 case ARM::VLD2d16wb_register:
4107 case ARM::VLD2d32wb_register:
4108 case ARM::VLD2q8wb_register:
4109 case ARM::VLD2q16wb_register:
4110 case ARM::VLD2q32wb_register:
4115 case ARM::VLD3d8_UPD:
4116 case ARM::VLD3d16_UPD:
4117 case ARM::VLD3d32_UPD:
4118 case ARM::VLD1d64Twb_fixed:
4119 case ARM::VLD1d64Twb_register:
4120 case ARM::VLD3q8_UPD:
4121 case ARM::VLD3q16_UPD:
4122 case ARM::VLD3q32_UPD:
4127 case ARM::VLD4d8_UPD:
4128 case ARM::VLD4d16_UPD:
4129 case ARM::VLD4d32_UPD:
4130 case ARM::VLD1d64Qwb_fixed:
4131 case ARM::VLD1d64Qwb_register:
4132 case ARM::VLD4q8_UPD:
4133 case ARM::VLD4q16_UPD:
4134 case ARM::VLD4q32_UPD:
4135 case ARM::VLD1DUPq8:
4136 case ARM::VLD1DUPq16:
4137 case ARM::VLD1DUPq32:
4138 case ARM::VLD1DUPq8wb_fixed:
4139 case ARM::VLD1DUPq16wb_fixed:
4140 case ARM::VLD1DUPq32wb_fixed:
4141 case ARM::VLD1DUPq8wb_register:
4142 case ARM::VLD1DUPq16wb_register:
4143 case ARM::VLD1DUPq32wb_register:
4144 case ARM::VLD2DUPd8:
4145 case ARM::VLD2DUPd16:
4146 case ARM::VLD2DUPd32:
4147 case ARM::VLD2DUPd8wb_fixed:
4148 case ARM::VLD2DUPd16wb_fixed:
4149 case ARM::VLD2DUPd32wb_fixed:
4150 case ARM::VLD2DUPd8wb_register:
4151 case ARM::VLD2DUPd16wb_register:
4152 case ARM::VLD2DUPd32wb_register:
4153 case ARM::VLD4DUPd8:
4154 case ARM::VLD4DUPd16:
4155 case ARM::VLD4DUPd32:
4156 case ARM::VLD4DUPd8_UPD:
4157 case ARM::VLD4DUPd16_UPD:
4158 case ARM::VLD4DUPd32_UPD:
4160 case ARM::VLD1LNd16:
4161 case ARM::VLD1LNd32:
4162 case ARM::VLD1LNd8_UPD:
4163 case ARM::VLD1LNd16_UPD:
4164 case ARM::VLD1LNd32_UPD:
4166 case ARM::VLD2LNd16:
4167 case ARM::VLD2LNd32:
4168 case ARM::VLD2LNq16:
4169 case ARM::VLD2LNq32:
4170 case ARM::VLD2LNd8_UPD:
4171 case ARM::VLD2LNd16_UPD:
4172 case ARM::VLD2LNd32_UPD:
4173 case ARM::VLD2LNq16_UPD:
4174 case ARM::VLD2LNq32_UPD:
4176 case ARM::VLD4LNd16:
4177 case ARM::VLD4LNd32:
4178 case ARM::VLD4LNq16:
4179 case ARM::VLD4LNq32:
4180 case ARM::VLD4LNd8_UPD:
4181 case ARM::VLD4LNd16_UPD:
4182 case ARM::VLD4LNd32_UPD:
4183 case ARM::VLD4LNq16_UPD:
4184 case ARM::VLD4LNq32_UPD:
4198 if (!ItinData || ItinData->
isEmpty())
4199 return std::nullopt;
4205 unsigned DefAdj = 0;
4206 if (
DefMI.isBundle())
4215 unsigned UseAdj = 0;
4216 if (
UseMI.isBundle()) {
4220 return std::nullopt;
4223 return getOperandLatencyImpl(
4224 ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->
getDesc(), DefAdj, DefMO,
4225 Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->
getDesc(), UseAdj);
4228std::optional<unsigned> ARMBaseInstrInfo::getOperandLatencyImpl(
4230 unsigned DefIdx,
const MCInstrDesc &DefMCID,
unsigned DefAdj,
4232 unsigned UseIdx,
const MCInstrDesc &UseMCID,
unsigned UseAdj)
const {
4233 if (Reg == ARM::CPSR) {
4234 if (
DefMI.getOpcode() == ARM::FMSTAT) {
4236 return Subtarget.
isLikeA9() ? 1 : 20;
4240 if (
UseMI.isBranch())
4259 return std::nullopt;
4261 unsigned DefAlign =
DefMI.hasOneMemOperand()
4262 ? (*
DefMI.memoperands_begin())->getAlign().value()
4264 unsigned UseAlign =
UseMI.hasOneMemOperand()
4265 ? (*
UseMI.memoperands_begin())->getAlign().value()
4270 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4273 return std::nullopt;
4276 int Adj = DefAdj + UseAdj;
4280 if (Adj >= 0 || (
int)*
Latency > -Adj) {
4287std::optional<unsigned>
4289 SDNode *DefNode,
unsigned DefIdx,
4290 SDNode *UseNode,
unsigned UseIdx)
const {
4296 if (isZeroCost(DefMCID.
Opcode))
4299 if (!ItinData || ItinData->
isEmpty())
4300 return DefMCID.
mayLoad() ? 3 : 1;
4303 std::optional<unsigned>
Latency =
4305 int Adj = Subtarget.getPreISelOperandLatencyAdjustment();
4306 int Threshold = 1 + Adj;
4312 unsigned DefAlign = !DefMN->memoperands_empty()
4313 ? (*DefMN->memoperands_begin())->getAlign().value()
4316 unsigned UseAlign = !UseMN->memoperands_empty()
4317 ? (*UseMN->memoperands_begin())->getAlign().value()
4320 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4322 return std::nullopt;
4325 (Subtarget.isCortexA8() || Subtarget.isLikeA9() ||
4326 Subtarget.isCortexA7())) {
4343 case ARM::t2LDRSHs: {
4346 if (ShAmt == 0 || ShAmt == 2)
4351 }
else if (DefIdx == 0 &&
Latency > 2U && Subtarget.isSwift()) {
4361 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4378 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment())
4385 case ARM::VLD1q8wb_register:
4386 case ARM::VLD1q16wb_register:
4387 case ARM::VLD1q32wb_register:
4388 case ARM::VLD1q64wb_register:
4389 case ARM::VLD1q8wb_fixed:
4390 case ARM::VLD1q16wb_fixed:
4391 case ARM::VLD1q32wb_fixed:
4392 case ARM::VLD1q64wb_fixed:
4396 case ARM::VLD2q8Pseudo:
4397 case ARM::VLD2q16Pseudo:
4398 case ARM::VLD2q32Pseudo:
4399 case ARM::VLD2d8wb_fixed:
4400 case ARM::VLD2d16wb_fixed:
4401 case ARM::VLD2d32wb_fixed:
4402 case ARM::VLD2q8PseudoWB_fixed:
4403 case ARM::VLD2q16PseudoWB_fixed:
4404 case ARM::VLD2q32PseudoWB_fixed:
4405 case ARM::VLD2d8wb_register:
4406 case ARM::VLD2d16wb_register:
4407 case ARM::VLD2d32wb_register:
4408 case ARM::VLD2q8PseudoWB_register:
4409 case ARM::VLD2q16PseudoWB_register:
4410 case ARM::VLD2q32PseudoWB_register:
4411 case ARM::VLD3d8Pseudo:
4412 case ARM::VLD3d16Pseudo:
4413 case ARM::VLD3d32Pseudo:
4414 case ARM::VLD1d8TPseudo:
4415 case ARM::VLD1d16TPseudo:
4416 case ARM::VLD1d32TPseudo:
4417 case ARM::VLD1d64TPseudo:
4418 case ARM::VLD1d64TPseudoWB_fixed:
4419 case ARM::VLD1d64TPseudoWB_register:
4420 case ARM::VLD3d8Pseudo_UPD:
4421 case ARM::VLD3d16Pseudo_UPD:
4422 case ARM::VLD3d32Pseudo_UPD:
4423 case ARM::VLD3q8Pseudo_UPD:
4424 case ARM::VLD3q16Pseudo_UPD:
4425 case ARM::VLD3q32Pseudo_UPD:
4426 case ARM::VLD3q8oddPseudo:
4427 case ARM::VLD3q16oddPseudo:
4428 case ARM::VLD3q32oddPseudo:
4429 case ARM::VLD3q8oddPseudo_UPD:
4430 case ARM::VLD3q16oddPseudo_UPD:
4431 case ARM::VLD3q32oddPseudo_UPD:
4432 case ARM::VLD4d8Pseudo:
4433 case ARM::VLD4d16Pseudo:
4434 case ARM::VLD4d32Pseudo:
4435 case ARM::VLD1d8QPseudo:
4436 case ARM::VLD1d16QPseudo:
4437 case ARM::VLD1d32QPseudo:
4438 case ARM::VLD1d64QPseudo:
4439 case ARM::VLD1d64QPseudoWB_fixed:
4440 case ARM::VLD1d64QPseudoWB_register:
4441 case ARM::VLD1q8HighQPseudo:
4442 case ARM::VLD1q8LowQPseudo_UPD:
4443 case ARM::VLD1q8HighTPseudo:
4444 case ARM::VLD1q8LowTPseudo_UPD:
4445 case ARM::VLD1q16HighQPseudo:
4446 case ARM::VLD1q16LowQPseudo_UPD:
4447 case ARM::VLD1q16HighTPseudo:
4448 case ARM::VLD1q16LowTPseudo_UPD:
4449 case ARM::VLD1q32HighQPseudo:
4450 case ARM::VLD1q32LowQPseudo_UPD:
4451 case ARM::VLD1q32HighTPseudo:
4452 case ARM::VLD1q32LowTPseudo_UPD:
4453 case ARM::VLD1q64HighQPseudo:
4454 case ARM::VLD1q64LowQPseudo_UPD:
4455 case ARM::VLD1q64HighTPseudo:
4456 case ARM::VLD1q64LowTPseudo_UPD:
4457 case ARM::VLD4d8Pseudo_UPD:
4458 case ARM::VLD4d16Pseudo_UPD:
4459 case ARM::VLD4d32Pseudo_UPD:
4460 case ARM::VLD4q8Pseudo_UPD:
4461 case ARM::VLD4q16Pseudo_UPD:
4462 case ARM::VLD4q32Pseudo_UPD:
4463 case ARM::VLD4q8oddPseudo:
4464 case ARM::VLD4q16oddPseudo:
4465 case ARM::VLD4q32oddPseudo:
4466 case ARM::VLD4q8oddPseudo_UPD:
4467 case ARM::VLD4q16oddPseudo_UPD:
4468 case ARM::VLD4q32oddPseudo_UPD:
4469 case ARM::VLD1DUPq8:
4470 case ARM::VLD1DUPq16:
4471 case ARM::VLD1DUPq32:
4472 case ARM::VLD1DUPq8wb_fixed:
4473 case ARM::VLD1DUPq16wb_fixed:
4474 case ARM::VLD1DUPq32wb_fixed:
4475 case ARM::VLD1DUPq8wb_register:
4476 case ARM::VLD1DUPq16wb_register:
4477 case ARM::VLD1DUPq32wb_register:
4478 case ARM::VLD2DUPd8:
4479 case ARM::VLD2DUPd16:
4480 case ARM::VLD2DUPd32:
4481 case ARM::VLD2DUPd8wb_fixed:
4482 case ARM::VLD2DUPd16wb_fixed:
4483 case ARM::VLD2DUPd32wb_fixed:
4484 case ARM::VLD2DUPd8wb_register:
4485 case ARM::VLD2DUPd16wb_register:
4486 case ARM::VLD2DUPd32wb_register:
4487 case ARM::VLD2DUPq8EvenPseudo:
4488 case ARM::VLD2DUPq8OddPseudo:
4489 case ARM::VLD2DUPq16EvenPseudo:
4490 case ARM::VLD2DUPq16OddPseudo:
4491 case ARM::VLD2DUPq32EvenPseudo:
4492 case ARM::VLD2DUPq32OddPseudo:
4493 case ARM::VLD3DUPq8EvenPseudo:
4494 case ARM::VLD3DUPq8OddPseudo:
4495 case ARM::VLD3DUPq16EvenPseudo:
4496 case ARM::VLD3DUPq16OddPseudo:
4497 case ARM::VLD3DUPq32EvenPseudo:
4498 case ARM::VLD3DUPq32OddPseudo:
4499 case ARM::VLD4DUPd8Pseudo:
4500 case ARM::VLD4DUPd16Pseudo:
4501 case ARM::VLD4DUPd32Pseudo:
4502 case ARM::VLD4DUPd8Pseudo_UPD:
4503 case ARM::VLD4DUPd16Pseudo_UPD:
4504 case ARM::VLD4DUPd32Pseudo_UPD:
4505 case ARM::VLD4DUPq8EvenPseudo:
4506 case ARM::VLD4DUPq8OddPseudo:
4507 case ARM::VLD4DUPq16EvenPseudo:
4508 case ARM::VLD4DUPq16OddPseudo:
4509 case ARM::VLD4DUPq32EvenPseudo:
4510 case ARM::VLD4DUPq32OddPseudo:
4511 case ARM::VLD1LNq8Pseudo:
4512 case ARM::VLD1LNq16Pseudo:
4513 case ARM::VLD1LNq32Pseudo:
4514 case ARM::VLD1LNq8Pseudo_UPD:
4515 case ARM::VLD1LNq16Pseudo_UPD:
4516 case ARM::VLD1LNq32Pseudo_UPD:
4517 case ARM::VLD2LNd8Pseudo:
4518 case ARM::VLD2LNd16Pseudo:
4519 case ARM::VLD2LNd32Pseudo:
4520 case ARM::VLD2LNq16Pseudo:
4521 case ARM::VLD2LNq32Pseudo:
4522 case ARM::VLD2LNd8Pseudo_UPD:
4523 case ARM::VLD2LNd16Pseudo_UPD:
4524 case ARM::VLD2LNd32Pseudo_UPD:
4525 case ARM::VLD2LNq16Pseudo_UPD:
4526 case ARM::VLD2LNq32Pseudo_UPD:
4527 case ARM::VLD4LNd8Pseudo:
4528 case ARM::VLD4LNd16Pseudo:
4529 case ARM::VLD4LNd32Pseudo:
4530 case ARM::VLD4LNq16Pseudo:
4531 case ARM::VLD4LNq32Pseudo:
4532 case ARM::VLD4LNd8Pseudo_UPD:
4533 case ARM::VLD4LNd16Pseudo_UPD:
4534 case ARM::VLD4LNd32Pseudo_UPD:
4535 case ARM::VLD4LNq16Pseudo_UPD:
4536 case ARM::VLD4LNq32Pseudo_UPD:
4546unsigned ARMBaseInstrInfo::getPredicationCost(
const MachineInstr &
MI)
const {
4547 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4556 if (
MCID.isCall() || (
MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
4557 !Subtarget.cheapPredicableCPSRDef())) {
4567 unsigned *PredCost)
const {
4568 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4574 if (
MI.isBundle()) {
4578 while (++
I !=
E &&
I->isInsideBundle()) {
4579 if (
I->getOpcode() != ARM::t2IT)
4580 Latency += getInstrLatency(ItinData, *
I, PredCost);
4585 const MCInstrDesc &MCID =
MI.getDesc();
4587 !Subtarget.cheapPredicableCPSRDef()))) {
4595 return MI.mayLoad() ? 3 : 1;
4608 MI.hasOneMemOperand() ? (*
MI.memoperands_begin())->getAlign().value() : 0;
4610 if (Adj >= 0 || (
int)
Latency > -Adj) {
4618 if (!
Node->isMachineOpcode())
4621 if (!ItinData || ItinData->
isEmpty())
4624 unsigned Opcode =
Node->getMachineOpcode();
4634bool ARMBaseInstrInfo::hasHighOperandLatency(
const TargetSchedModel &SchedModel,
4639 unsigned UseIdx)
const {
4642 if (Subtarget.nonpipelinedVFP() &&
4657 unsigned DefIdx)
const {
4659 if (!ItinData || ItinData->
isEmpty())
4664 unsigned DefClass =
DefMI.getDesc().getSchedClass();
4665 std::optional<unsigned> DefCycle =
4667 return DefCycle && DefCycle <= 2U;
4675 ErrInfo =
"Pseudo flag setting opcodes only exist in Selection DAG";
4678 if (
MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) {
4680 if (!ARM::hGPRRegClass.
contains(
MI.getOperand(0).getReg()) &&
4681 !ARM::hGPRRegClass.contains(
MI.getOperand(1).getReg())) {
4682 ErrInfo =
"Non-flag-setting Thumb1 mov is v6-only";
4686 if (
MI.getOpcode() == ARM::tPUSH ||
4687 MI.getOpcode() == ARM::tPOP ||
4688 MI.getOpcode() == ARM::tPOP_RET) {
4690 if (MO.isImplicit() || !MO.isReg())
4694 if (!(
MI.getOpcode() == ARM::tPUSH &&
Reg == ARM::LR) &&
4695 !(
MI.getOpcode() == ARM::tPOP_RET &&
Reg == ARM::PC)) {
4696 ErrInfo =
"Unsupported register in Thumb1 push/pop";
4702 if (
MI.getOpcode() == ARM::MVE_VMOV_q_rr) {
4703 assert(
MI.getOperand(4).isImm() &&
MI.getOperand(5).isImm());
4704 if ((
MI.getOperand(4).getImm() != 2 &&
MI.getOperand(4).getImm() != 3) ||
4705 MI.getOperand(4).getImm() !=
MI.getOperand(5).getImm() + 2) {
4706 ErrInfo =
"Incorrect array index for MVE_VMOV_q_rr";
4727 for (
auto Op :
MI.operands()) {
4734 ErrInfo =
"Incorrect AddrMode Imm for instruction";
4744 unsigned LoadImmOpc,
4745 unsigned LoadOpc)
const {
4746 assert(!Subtarget.isROPI() && !Subtarget.isRWPI() &&
4747 "ROPI/RWPI not currently supported with stack guard");
4755 if (LoadImmOpc == ARM::MRC || LoadImmOpc == ARM::t2MRC) {
4756 assert(!Subtarget.isReadTPSoft() &&
4757 "TLS stack protector requires hardware TLS register");
4767 Module &M = *
MBB.getParent()->getFunction().getParent();
4768 Offset = M.getStackProtectorGuardOffset();
4773 unsigned AddOpc = (LoadImmOpc == ARM::MRC) ? ARM::ADDri : ARM::t2ADDri;
4784 bool IsIndirect = Subtarget.isGVIndirectSymbol(GV);
4787 if (Subtarget.isTargetMachO()) {
4789 }
else if (Subtarget.isTargetCOFF()) {
4792 else if (IsIndirect)
4794 }
else if (IsIndirect) {
4798 if (LoadImmOpc == ARM::tMOVi32imm) {
4801 ARMSysReg::lookupMClassSysRegByName(
"apsr_nzcvq")->Encoding;
4837 unsigned &AddSubOpc,
4838 bool &NegAcc,
bool &HasLane)
const {
4840 if (
I == MLxEntryMap.end())
4844 MulOpc = Entry.MulOpc;
4845 AddSubOpc = Entry.AddSubOpc;
4846 NegAcc = Entry.NegAcc;
4847 HasLane = Entry.HasLane;
4871std::pair<uint16_t, uint16_t>
4875 if (Subtarget.hasNEON()) {
4884 (
MI.getOpcode() == ARM::VMOVRS ||
MI.getOpcode() == ARM::VMOVSR ||
4885 MI.getOpcode() == ARM::VMOVS))
4892 return std::make_pair(
ExeNEON, 0);
4897 return std::make_pair(
ExeNEON, 0);
4900 return std::make_pair(
ExeVFP, 0);
4906 unsigned SReg,
unsigned &Lane) {
4908 TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
4915 DReg =
TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
4917 assert(DReg &&
"S-register with no D super-register?");
4942 if (
MI.definesRegister(DReg,
TRI) ||
MI.readsRegister(DReg,
TRI)) {
4948 ImplicitSReg =
TRI->getSubReg(DReg,
4949 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
4951 MI.getParent()->computeRegisterLiveness(
TRI, ImplicitSReg,
MI);
4966 unsigned DstReg, SrcReg;
4971 switch (
MI.getOpcode()) {
4983 assert(Subtarget.hasNEON() &&
"VORRd requires NEON");
4986 DstReg =
MI.getOperand(0).getReg();
4987 SrcReg =
MI.getOperand(1).getReg();
4989 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
4990 MI.removeOperand(i - 1);
4993 MI.setDesc(
get(ARM::VORRd));
5005 DstReg =
MI.getOperand(0).getReg();
5006 SrcReg =
MI.getOperand(1).getReg();
5008 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5009 MI.removeOperand(i - 1);
5016 MI.setDesc(
get(ARM::VGETLNi32));
5032 DstReg =
MI.getOperand(0).getReg();
5033 SrcReg =
MI.getOperand(1).getReg();
5041 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5042 MI.removeOperand(i - 1);
5046 MI.setDesc(
get(ARM::VSETLNi32));
5065 DstReg =
MI.getOperand(0).getReg();
5066 SrcReg =
MI.getOperand(1).getReg();
5068 unsigned DstLane = 0, SrcLane = 0;
5077 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5078 MI.removeOperand(i - 1);
5083 MI.setDesc(
get(ARM::VDUPLN32d));
5117 MCRegister CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
5118 bool CurUndef = !
MI.readsRegister(CurReg,
TRI);
5121 CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
5122 CurUndef = !
MI.readsRegister(CurReg,
TRI);
5127 if (SrcLane == DstLane)
5130 MI.setDesc(
get(ARM::VEXTd32));
5135 CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
5136 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5139 CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
5140 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5145 if (SrcLane != DstLane)
5151 if (ImplicitSReg != 0)
5177 auto PartialUpdateClearance = Subtarget.getPartialUpdateClearance();
5178 if (!PartialUpdateClearance)
5189 switch (
MI.getOpcode()) {
5195 case ARM::VMOVv4i16:
5196 case ARM::VMOVv2i32:
5197 case ARM::VMOVv2f32:
5198 case ARM::VMOVv1i64:
5199 UseOp =
MI.findRegisterUseOperandIdx(Reg,
TRI,
false);
5203 case ARM::VLD1LNd32:
5212 if (UseOp != -1 &&
MI.getOperand(UseOp).readsReg())
5216 if (Reg.isVirtual()) {
5218 if (!MO.
getSubReg() ||
MI.readsVirtualRegister(Reg))
5220 }
else if (ARM::SPRRegClass.
contains(Reg)) {
5223 TRI->getMatchingSuperReg(Reg, ARM::ssub_0, &ARM::DPRRegClass);
5224 if (!DReg || !
MI.definesRegister(DReg,
TRI))
5230 return PartialUpdateClearance;
5237 assert(OpNum <
MI.getDesc().getNumDefs() &&
"OpNum is not a def");
5242 assert(Reg.isPhysical() &&
"Can't break virtual register dependencies.");
5243 unsigned DReg = Reg;
5246 if (ARM::SPRRegClass.
contains(Reg)) {
5247 DReg = ARM::D0 + (Reg - ARM::S0) / 2;
5248 assert(
TRI->isSuperRegister(Reg, DReg) &&
"Register enums broken");
5251 assert(ARM::DPRRegClass.
contains(DReg) &&
"Can only break D-reg deps");
5252 assert(
MI.definesRegister(DReg,
TRI) &&
"MI doesn't clobber full D-reg");
5265 MI.addRegisterKilled(DReg,
TRI,
true);
5269 return Subtarget.hasFeature(ARM::HasV6KOps);
5273 if (
MI->getNumOperands() < 4)
5275 unsigned ShOpVal =
MI->getOperand(3).getImm();
5279 ((ShImm == 1 || ShImm == 2) &&
5289 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5290 assert(
MI.isRegSequenceLike() &&
"Invalid kind of instruction");
5292 switch (
MI.getOpcode()) {
5304 MOReg = &
MI.getOperand(2);
5316 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5317 assert(
MI.isExtractSubregLike() &&
"Invalid kind of instruction");
5319 switch (
MI.getOpcode()) {
5330 InputReg.
SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1;
5339 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5340 assert(
MI.isInsertSubregLike() &&
"Invalid kind of instruction");
5342 switch (
MI.getOpcode()) {
5343 case ARM::VSETLNi32:
5344 case ARM::MVE_VMOV_to_lane_32:
5352 BaseReg.Reg = MOBaseReg.
getReg();
5355 InsertedReg.
Reg = MOInsertedReg.
getReg();
5363std::pair<unsigned, unsigned>
5366 return std::make_pair(TF & Mask, TF & ~Mask);
5371 using namespace ARMII;
5373 static const std::pair<unsigned, const char *> TargetFlags[] = {
5374 {MO_LO16,
"arm-lo16"}, {MO_HI16,
"arm-hi16"},
5375 {MO_LO_0_7,
"arm-lo-0-7"}, {MO_HI_0_7,
"arm-hi-0-7"},
5376 {MO_LO_8_15,
"arm-lo-8-15"}, {MO_HI_8_15,
"arm-hi-8-15"},
5383 using namespace ARMII;
5385 static const std::pair<unsigned, const char *> TargetFlags[] = {
5386 {MO_COFFSTUB,
"arm-coffstub"},
5387 {MO_GOT,
"arm-got"},
5388 {MO_SBREL,
"arm-sbrel"},
5389 {MO_DLLIMPORT,
"arm-dllimport"},
5390 {MO_SECREL,
"arm-secrel"},
5391 {MO_NONLAZY,
"arm-nonlazy"}};
5395std::optional<RegImmPair>
5398 unsigned Opcode =
MI.getOpcode();
5405 return std::nullopt;
5408 if (Opcode == ARM::SUBri)
5410 else if (Opcode != ARM::ADDri)
5411 return std::nullopt;
5416 if (!
MI.getOperand(1).isReg() || !
MI.getOperand(2).isImm())
5417 return std::nullopt;
5419 Offset =
MI.getOperand(2).getImm() * Sign;
5427 for (
auto I = From;
I != To; ++
I)
5428 if (
I->modifiesRegister(Reg,
TRI))
5441 if (CmpMI->modifiesRegister(ARM::CPSR,
TRI))
5443 if (CmpMI->readsRegister(ARM::CPSR,
TRI))
5449 if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
5451 Register Reg = CmpMI->getOperand(0).getReg();
5454 if (Pred !=
ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
5467 if (Subtarget->isThumb()) {
5469 return ForCodesize ? 2 : 1;
5470 if (Subtarget->hasV6T2Ops() && (Val <= 0xffff ||
5473 return ForCodesize ? 4 : 1;
5475 return ForCodesize ? 4 : 2;
5477 return ForCodesize ? 4 : 2;
5479 return ForCodesize ? 4 : 2;
5482 return ForCodesize ? 4 : 1;
5484 return ForCodesize ? 4 : 1;
5485 if (Subtarget->hasV6T2Ops() && Val <= 0xffff)
5486 return ForCodesize ? 4 : 1;
5488 return ForCodesize ? 8 : 2;
5490 return ForCodesize ? 8 : 2;
5493 return ForCodesize ? 8 : 2;
5494 return ForCodesize ? 8 : 3;
5658 MachineFunction *MF =
C.getMF();
5660 const ARMBaseRegisterInfo *ARI =
5661 static_cast<const ARMBaseRegisterInfo *
>(&
TRI);
5670 C.isAvailableAcrossAndOutOfSeq(
Reg,
TRI) &&
5671 C.isAvailableInsideSeq(
Reg,
TRI))
5685 for (;
I !=
E; ++
I) {
5689 if (
MI.modifiesRegister(ARM::LR, &
TRI))
5693 unsigned Opcode =
MI.getOpcode();
5694 if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR ||
5695 Opcode == ARM::SUBS_PC_LR || Opcode == ARM::tBX_RET ||
5696 Opcode == ARM::tBXNS_RET || Opcode == ARM::t2BXAUT_RET) {
5702 if (
MI.readsRegister(ARM::LR, &
TRI))
5708std::optional<std::unique_ptr<outliner::OutlinedFunction>>
5711 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
5712 unsigned MinRepeats)
const {
5713 unsigned SequenceSize = 0;
5714 for (
auto &
MI : RepeatedSequenceLocs[0])
5718 unsigned FlagsSetInAll = 0xF;
5723 FlagsSetInAll &=
C.Flags;
5742 return C.isAnyUnavailableAcrossOrOutOfSeq({ARM::R12, ARM::CPSR},
TRI);
5750 llvm::erase_if(RepeatedSequenceLocs, CantGuaranteeValueAcrossCall);
5753 if (RepeatedSequenceLocs.size() < MinRepeats)
5754 return std::nullopt;
5773 if (std::distance(RepeatedSequenceLocs.begin(), NoBTI) >
5774 std::distance(NoBTI, RepeatedSequenceLocs.end()))
5775 RepeatedSequenceLocs.erase(NoBTI, RepeatedSequenceLocs.end());
5777 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoBTI);
5779 if (RepeatedSequenceLocs.size() < MinRepeats)
5780 return std::nullopt;
5790 if (std::distance(RepeatedSequenceLocs.begin(), NoPAC) >
5791 std::distance(NoPAC, RepeatedSequenceLocs.end()))
5792 RepeatedSequenceLocs.erase(NoPAC, RepeatedSequenceLocs.end());
5794 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoPAC);
5796 if (RepeatedSequenceLocs.size() < MinRepeats)
5797 return std::nullopt;
5802 unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back().getOpcode();
5805 auto SetCandidateCallInfo =
5806 [&RepeatedSequenceLocs](
unsigned CallID,
unsigned NumBytesForCall) {
5808 C.setCallInfo(CallID, NumBytesForCall);
5813 const auto &SomeMFI =
5816 if (SomeMFI.branchTargetEnforcement()) {
5825 if (SomeMFI.shouldSignReturnAddress(
true)) {
5835 if (RepeatedSequenceLocs[0].back().isTerminator()) {
5839 }
else if (LastInstrOpcode == ARM::BL || LastInstrOpcode == ARM::BLX ||
5840 LastInstrOpcode == ARM::BLX_noip || LastInstrOpcode == ARM::tBL ||
5841 LastInstrOpcode == ARM::tBLXr ||
5842 LastInstrOpcode == ARM::tBLXr_noip ||
5843 LastInstrOpcode == ARM::tBLXi) {
5851 unsigned NumBytesNoStackCalls = 0;
5852 std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
5857 const auto Last =
C.getMBB()->rbegin();
5858 const bool LRIsAvailable =
5859 C.getMBB()->isReturnBlock() && !
Last->isCall()
5862 :
C.isAvailableAcrossAndOutOfSeq(ARM::LR,
TRI);
5863 if (LRIsAvailable) {
5867 CandidatesWithoutStackFixups.push_back(
C);
5872 else if (findRegisterToSaveLRTo(
C)) {
5876 CandidatesWithoutStackFixups.push_back(
C);
5881 else if (
C.isAvailableInsideSeq(ARM::SP,
TRI)) {
5884 CandidatesWithoutStackFixups.push_back(
C);
5890 NumBytesNoStackCalls += SequenceSize;
5896 if (NumBytesNoStackCalls <=
5897 RepeatedSequenceLocs.size() * Costs.
CallDefault) {
5898 RepeatedSequenceLocs = CandidatesWithoutStackFixups;
5900 if (RepeatedSequenceLocs.size() < MinRepeats)
5901 return std::nullopt;
5926 return std::make_unique<outliner::OutlinedFunction>(
5927 RepeatedSequenceLocs, SequenceSize, NumBytesToCreateFrame, FrameID);
5930bool ARMBaseInstrInfo::checkAndUpdateStackOffset(
MachineInstr *
MI,
5933 int SPIdx =
MI->findRegisterUseOperandIdx(ARM::SP,
nullptr);
5958 unsigned NumOps =
MI->getDesc().getNumOperands();
5959 unsigned ImmIdx =
NumOps - 3;
5963 int64_t OffVal =
Offset.getImm();
5969 unsigned NumBits = 0;
5998 assert((
Fixup & 3) == 0 &&
"Can't encode this offset!");
6018 assert(((OffVal * Scale +
Fixup) & (Scale - 1)) == 0 &&
6019 "Can't encode this offset!");
6020 OffVal +=
Fixup / Scale;
6022 unsigned Mask = (1 << NumBits) - 1;
6024 if (OffVal <= Mask) {
6026 MI->getOperand(ImmIdx).setImm(OffVal);
6034 Function &
F, std::vector<outliner::Candidate> &Candidates)
const {
6038 const Function &CFn =
C.getMF()->getFunction();
6045 ARMGenInstrInfo::mergeOutliningCandidateAttributes(
F, Candidates);
6053 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
6072 unsigned &Flags)
const {
6075 assert(
MBB.getParent()->getRegInfo().tracksLiveness() &&
6076 "Suitable Machine Function for outlining must track liveness");
6084 bool R12AvailableInBlock = LRU.
available(ARM::R12);
6085 bool CPSRAvailableInBlock = LRU.
available(ARM::CPSR);
6089 if (R12AvailableInBlock && CPSRAvailableInBlock)
6097 if (R12AvailableInBlock && !LRU.
available(ARM::R12))
6099 if (CPSRAvailableInBlock && !LRU.
available(ARM::CPSR))
6109 bool LRIsAvailable =
6110 MBB.isReturnBlock() && !
MBB.back().isCall()
6122 unsigned Flags)
const {
6128 unsigned Opc =
MI.getOpcode();
6129 if (
Opc == ARM::tPICADD ||
Opc == ARM::PICADD ||
Opc == ARM::PICSTR ||
6130 Opc == ARM::PICSTRB ||
Opc == ARM::PICSTRH ||
Opc == ARM::PICLDR ||
6131 Opc == ARM::PICLDRB ||
Opc == ARM::PICLDRH ||
Opc == ARM::PICLDRSB ||
6132 Opc == ARM::PICLDRSH ||
Opc == ARM::t2LDRpci_pic ||
6133 Opc == ARM::t2MOVi16_ga_pcrel ||
Opc == ARM::t2MOVTi16_ga_pcrel ||
6134 Opc == ARM::t2MOV_ga_pcrel)
6138 if (
Opc == ARM::t2BF_LabelPseudo ||
Opc == ARM::t2DoLoopStart ||
6139 Opc == ARM::t2DoLoopStartTP ||
Opc == ARM::t2WhileLoopStart ||
6140 Opc == ARM::t2WhileLoopStartLR ||
Opc == ARM::t2WhileLoopStartTP ||
6141 Opc == ARM::t2LoopDec ||
Opc == ARM::t2LoopEnd ||
6142 Opc == ARM::t2LoopEndDec)
6151 if (
MI.isTerminator())
6157 if (
MI.readsRegister(ARM::LR,
TRI) ||
MI.readsRegister(ARM::PC,
TRI))
6165 if (MOP.isGlobal()) {
6174 (Callee->getName() ==
"\01__gnu_mcount_nc" ||
6175 Callee->getName() ==
"\01mcount" || Callee->getName() ==
"__mcount"))
6183 if (
Opc == ARM::BL ||
Opc == ARM::tBL ||
Opc == ARM::BLX ||
6184 Opc == ARM::BLX_noip ||
Opc == ARM::tBLXr ||
Opc == ARM::tBLXr_noip ||
6189 return UnknownCallOutlineType;
6197 return UnknownCallOutlineType;
6205 return UnknownCallOutlineType;
6213 if (
MI.modifiesRegister(ARM::LR,
TRI) ||
MI.modifiesRegister(ARM::PC,
TRI))
6217 if (
MI.modifiesRegister(ARM::SP,
TRI) ||
MI.readsRegister(ARM::SP,
TRI)) {
6230 bool MightNeedStackFixUp =
6234 if (!MightNeedStackFixUp)
6240 if (
MI.modifiesRegister(ARM::SP,
TRI))
6245 if (checkAndUpdateStackOffset(&
MI, Subtarget.getStackAlignment().value(),
6254 if (
MI.readsRegister(ARM::ITSTATE,
TRI) ||
6255 MI.modifiesRegister(ARM::ITSTATE,
TRI))
6259 if (
MI.isCFIInstruction())
6274 int Align = std::max(Subtarget.getStackAlignment().value(), uint64_t(8));
6276 assert(Align >= 8 && Align <= 256);
6278 assert(Subtarget.isThumb2());
6290 unsigned Opc = Subtarget.isThumb() ? ARM::t2STR_PRE : ARM::STR_PRE_IMM;
6304 CFIBuilder.buildDefCFAOffset(Align);
6309 CFIBuilder.buildOffset(ARM::LR, -LROffset);
6312 CFIBuilder.buildOffset(ARM::RA_AUTH_CODE, -Align);
6318 bool CFI,
bool Auth)
const {
6319 int Align = Subtarget.getStackAlignment().value();
6322 assert(Subtarget.isThumb2());
6334 unsigned Opc = Subtarget.isThumb() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
6338 if (!Subtarget.isThumb())
6340 MIB.
addImm(Subtarget.getStackAlignment().value())
6348 CFIBuilder.buildDefCFAOffset(0);
6349 CFIBuilder.buildRestore(ARM::LR);
6351 CFIBuilder.buildUndefined(ARM::RA_AUTH_CODE);
6365 bool isThumb = Subtarget.isThumb();
6366 unsigned FuncOp =
isThumb ? 2 : 0;
6367 unsigned Opc =
Call->getOperand(FuncOp).isReg()
6368 ?
isThumb ? ARM::tTAILJMPr : ARM::TAILJMPr
6369 :
isThumb ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd
6373 .
add(
Call->getOperand(FuncOp));
6376 Call->eraseFromParent();
6381 return MI.isCall() && !
MI.isReturn();
6389 Et = std::prev(
MBB.end());
6394 if (!
MBB.isLiveIn(ARM::LR))
6395 MBB.addLiveIn(ARM::LR);
6399 saveLROnStack(
MBB, It,
true, Auth);
6404 "Can only fix up stack references once");
6405 fixupPostOutline(
MBB);
6408 restoreLRFromStack(
MBB, Et,
true, Auth);
6428 fixupPostOutline(
MBB);
6437 bool isThumb = Subtarget.isThumb();
6443 ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND
6449 It =
MBB.insert(It, MIB);
6463 It =
MBB.insert(It, CallMIB);
6470 Register Reg = findRegisterToSaveLRTo(
C);
6471 assert(Reg != 0 &&
"No callee-saved register available?");
6478 CallPt =
MBB.insert(It, CallMIB);
6486 if (!
MBB.isLiveIn(ARM::LR))
6487 MBB.addLiveIn(ARM::LR);
6490 CallPt =
MBB.insert(It, CallMIB);
6501bool ARMBaseInstrInfo::isReMaterializableImpl(
6535 static int constexpr MAX_STAGES = 30;
6536 static int constexpr LAST_IS_USE = MAX_STAGES;
6537 static int constexpr SEEN_AS_LIVE = MAX_STAGES + 1;
6538 typedef std::bitset<MAX_STAGES + 2> IterNeed;
6539 typedef std::map<Register, IterNeed> IterNeeds;
6542 const IterNeeds &CIN);
6554 : EndLoop(EndLoop), LoopCount(LoopCount),
6556 TII(MF->getSubtarget().getInstrInfo()) {}
6558 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
6560 return MI == EndLoop ||
MI == LoopCount;
6563 bool shouldUseSchedule(SwingSchedulerDAG &SSD, SMSchedule &SMS)
override {
6564 if (tooMuchRegisterPressure(SSD, SMS))
6570 std::optional<bool> createTripCountGreaterCondition(
6571 int TC, MachineBasicBlock &
MBB,
6572 SmallVectorImpl<MachineOperand> &
Cond)
override {
6581 }
else if (EndLoop->
getOpcode() == ARM::t2LoopEnd) {
6584 MachineInstr *LoopDec =
nullptr;
6586 if (
I.getOpcode() == ARM::t2LoopDec)
6588 assert(LoopDec &&
"Unable to find copied LoopDec");
6594 .
addReg(ARM::NoRegister);
6602 void setPreheader(MachineBasicBlock *NewPreheader)
override {}
6604 void adjustTripCount(
int TripCountAdjust)
override {}
6608 const IterNeeds &CIN) {
6610 for (
const auto &
N : CIN) {
6611 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6612 for (
int I = 0;
I < Cnt; ++
I)
6617 for (
const auto &
N : CIN) {
6618 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6619 for (
int I = 0;
I < Cnt; ++
I)
6627 IterNeeds CrossIterationNeeds;
6632 for (
auto &SU : SSD.
SUnits) {
6635 for (
auto &S : SU.Succs)
6639 CrossIterationNeeds[
Reg.
id()].set(0);
6640 }
else if (S.isAssignedRegDep()) {
6642 if (OStg >= 0 && OStg != Stg) {
6645 CrossIterationNeeds[
Reg.
id()] |= ((1 << (OStg - Stg)) - 1);
6654 std::vector<SUnit *> ProposedSchedule;
6658 std::deque<SUnit *> Instrs =
6660 std::sort(Instrs.begin(), Instrs.end(),
6661 [](
SUnit *
A,
SUnit *
B) { return A->NodeNum > B->NodeNum; });
6668 for (
auto *SU : ProposedSchedule)
6672 if (!MO.isReg() || !MO.getReg())
6675 auto CIter = CrossIterationNeeds.find(
Reg.
id());
6676 if (CIter == CrossIterationNeeds.end() || CIter->second[LAST_IS_USE] ||
6677 CIter->second[SEEN_AS_LIVE])
6679 if (MO.isDef() && !MO.isDead())
6680 CIter->second.set(SEEN_AS_LIVE);
6681 else if (MO.isUse())
6682 CIter->second.set(LAST_IS_USE);
6684 for (
auto &CI : CrossIterationNeeds)
6685 CI.second.reset(LAST_IS_USE);
6691 RPTracker.init(MF, &RegClassInfo,
nullptr, EndLoop->
getParent(),
6694 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6696 for (
auto *SU : ProposedSchedule) {
6698 RPTracker.setPos(std::next(CurInstI));
6704 if (!MO.isReg() || !MO.getReg())
6707 if (MO.isDef() && !MO.isDead()) {
6708 auto CIter = CrossIterationNeeds.find(
Reg.
id());
6709 if (CIter != CrossIterationNeeds.end()) {
6710 CIter->second.reset(0);
6711 CIter->second.reset(SEEN_AS_LIVE);
6715 for (
auto &S : SU->Preds) {
6717 if (S.isAssignedRegDep()) {
6719 auto CIter = CrossIterationNeeds.find(
Reg.
id());
6720 if (CIter != CrossIterationNeeds.end()) {
6722 assert(Stg2 <= Stg &&
"Data dependence upon earlier stage");
6723 if (Stg - Stg2 < MAX_STAGES)
6724 CIter->second.set(Stg - Stg2);
6725 CIter->second.set(SEEN_AS_LIVE);
6730 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6733 auto &
P = RPTracker.getPressure().MaxSetPressure;
6734 for (
unsigned I = 0,
E =
P.size();
I <
E; ++
I) {
6736 if (
I == ARM::DQuad_with_ssub_0 ||
I == ARM::DTripleSpc_with_ssub_0 ||
6737 I == ARM::DTriple_with_qsub_0_in_QPR)
6749std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
6753 if (Preheader == LoopBB)
6754 Preheader = *std::next(LoopBB->
pred_begin());
6756 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2Bcc) {
6762 for (
auto &L : LoopBB->
instrs()) {
6769 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, CCSetter);
6783 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2LoopEnd) {
6784 for (
auto &L : LoopBB->
instrs())
6789 Register LoopDecResult =
I->getOperand(0).getReg();
6792 if (!LoopDec || LoopDec->
getOpcode() != ARM::t2LoopDec)
6795 for (
auto &J : Preheader->
instrs())
6796 if (J.getOpcode() == ARM::t2DoLoopStart)
6800 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, LoopDec);
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineOutlinerClass
Constants defining how certain sequences should be outlined.
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
@ MachineOutlinerRegSave
Emit a call and tail-call.
@ MachineOutlinerNoLRSave
Only emit a branch.
@ MachineOutlinerThunk
Emit a call and return.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool isThumb(const MCSubtargetInfo &STI)
static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI, MachineInstr &MI, MCRegister DReg, unsigned Lane, MCRegister &ImplicitSReg)
getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane, set ImplicitSReg to a register n...
static const MachineInstr * getBundledUseMI(const TargetRegisterInfo *TRI, const MachineInstr &MI, unsigned Reg, unsigned &UseIdx, unsigned &Dist)
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI)
Create a copy of a const pool value.
static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg, int CmpMask, bool CommonUse)
isSuitableForMask - Identify a suitable 'and' instruction that operates on the given source register ...
static int adjustDefLatency(const ARMSubtarget &Subtarget, const MachineInstr &DefMI, const MCInstrDesc &DefMCID, unsigned DefAlign)
Return the number of cycles to add to (or subtract from) the static itinerary based on the def opcode...
static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, const MachineInstr &MI)
static MCRegister getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI, unsigned SReg, unsigned &Lane)
static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[]
static bool isEligibleForITBlock(const MachineInstr *MI)
static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC)
getCmpToAddCondition - assume the flags are set by CMP(a,b), return the condition code if we modify t...
static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1)
static bool isLRAvailable(const TargetRegisterInfo &TRI, MachineBasicBlock::reverse_iterator I, MachineBasicBlock::reverse_iterator E)
static const ARM_MLxEntry ARM_MLxTable[]
static bool isRedundantFlagInstr(const MachineInstr *CmpI, Register SrcReg, Register SrcReg2, int64_t ImmValue, const MachineInstr *OI, bool &IsThumb1)
isRedundantFlagInstr - check whether the first instruction, whose only purpose is to update flags,...
static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc, unsigned NumRegs)
static const MachineInstr * getBundledDefMI(const TargetRegisterInfo *TRI, const MachineInstr *MI, unsigned Reg, unsigned &DefIdx, unsigned &Dist)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
DXIL Forward Handle Accesses
This file defines the DenseMap class.
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
TargetInstrInfo::RegSubRegPair RegSubRegPair
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
PowerPC TLS Dynamic Call Fixup
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallSet class.
This file defines the SmallVector class.
static X86::CondCode getSwappedCondition(X86::CondCode CC)
Assuming the flags are set by MI(a,b), return the condition code if we modify the instructions such t...
static bool isCPSRDefined(const MachineInstr &MI)
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
optimizeCompareInstr - Convert the instruction to set the zero flag so that we can remove a "comparis...
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
foldImmediate - 'Reg' is known to be defined by a move immediate instruction, try to fold the immedia...
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
void copyFromCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MCRegister DestReg, bool KillSrc, const ARMSubtarget &Subtarget) const
unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
unsigned getPartialRegUpdateClearance(const MachineInstr &, unsigned, const TargetRegisterInfo *) const override
unsigned getNumLDMAddresses(const MachineInstr &MI) const
Get the number of addresses by LDM or VLDM or zero for unknown.
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI) const override
void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const override
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Returns the size of the specified MachineInstr.
void copyToCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MCRegister SrcReg, bool KillSrc, const ARMSubtarget &Subtarget) const
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const override
const MachineInstrBuilder & AddDReg(MachineInstrBuilder &MIB, unsigned Reg, unsigned SubIdx, RegState State) const
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
ARM supports the MachineOutliner.
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
Enable outlining by default at -Oz.
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is an instruction that moves/copies value from one register to an...
MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const override
ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *II, const ScheduleDAGMI *DAG) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool isPredicated(const MachineInstr &MI) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
void expandLoadStackGuardBase(MachineBasicBlock::iterator MI, unsigned LoadImmOpc, unsigned LoadOpc) const
bool isPredicable(const MachineInstr &MI) const override
isPredicable - Return true if the specified instruction can be predicated.
Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const override
Specialization of TargetInstrInfo::describeLoadedValue, used to enhance debug entry value description...
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, unsigned NumInsts) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
const ARMBaseRegisterInfo & getRegisterInfo() const
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const override
areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to determine if two loads are lo...
std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const override
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const override
bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const override
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
bool expandPostRAPseudo(MachineInstr &MI) const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const override
bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const override
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const override
shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to determine (in conjunction w...
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const override
std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const override
VFP/NEON execution domains.
bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isFpMLxInstruction(unsigned Opcode) const
isFpMLxInstruction - Return true if the specified opcode is a fp MLA / MLS instruction.
bool isSwiftFastImmShift(const MachineInstr *MI) const
Returns true if the instruction has a shift by immediate that can be executed in one cycle less.
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig, LaneBitmask UsedLanes=LaneBitmask::getAll()) const override
ARMBaseInstrInfo(const ARMSubtarget &STI, const ARMBaseRegisterInfo &TRI)
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2 if h...
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
void breakPartialRegDependency(MachineInstr &, unsigned, const TargetRegisterInfo *TRI) const override
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
const ARMSubtarget & getSubtarget() const
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Commutes the operands in the given instruction.
bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const override
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
bool shouldSink(const MachineInstr &MI) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
static ARMConstantPoolMBB * Create(LLVMContext &C, const MachineBasicBlock *mbb, unsigned ID, unsigned char PCAdj)
static ARMConstantPoolSymbol * Create(LLVMContext &C, StringRef s, unsigned ID, unsigned char PCAdj)
ARMConstantPoolValue - ARM specific constantpool value.
bool isMachineBasicBlock() const
bool isGlobalValue() const
ARMCP::ARMCPModifier getModifier() const
bool mustAddCurrentAddress() const
virtual bool hasSameValue(ARMConstantPoolValue *ACPV)
hasSameValue - Return true if this ARM constpool value can share the same constantpool entry as anoth...
bool isBlockAddress() const
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
bool isThumb2Function() const
bool branchTargetEnforcement() const
unsigned createPICLabelUId()
bool isThumb1OnlyFunction() const
bool isThumbFunction() const
bool shouldSignReturnAddress() const
const ARMBaseInstrInfo * getInstrInfo() const override
bool isThumb1Only() const
Align getStackAlignment() const
getStackAlignment - Returns the minimum alignment known to hold of the stack frame on entry to the fu...
bool enableMachinePipeliner() const override
Returns true if machine pipeliner should be enabled.
@ DoubleIssueCheckUnalignedAccess
Can load/store 2 registers/cycle, but needs an extra cycle if the access is not 64-bit aligned.
@ SingleIssue
Can load/store 1 register/cycle.
@ DoubleIssue
Can load/store 2 registers/cycle.
@ SingleIssuePlusExtras
Can load/store 1 register/cycle, but needs an extra cycle for address computation and potentially als...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool test(unsigned Idx) const
size_type size() const
size - Returns the number of bits in this bitvector.
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
Helper class for creating CFI instructions and inserting them into MIR.
void buildRegister(MCRegister Reg1, MCRegister Reg2) const
void buildRestore(MCRegister Reg) const
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
bool hasDLLImportStorageClass() const
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Reverses the branch condition of the specified condition list, returning false on success and true if...
Itinerary data supplied by a subtarget to be used by a target.
int getNumMicroOps(unsigned ItinClassIndx) const
Return the number of micro-ops that the given class decodes to.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
std::optional< unsigned > getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Return true if there is a pipeline forwarding between instructions of itinerary classes DefClass and ...
bool isEmpty() const
Returns true if there are no itineraries.
A set of register units used to track register liveness.
bool available(MCRegister Reg) const
Returns true if no part of physical register Reg is live.
LLVM_ABI void addLiveOuts(const MachineBasicBlock &MBB)
Adds registers living out of block MBB.
LLVM_ABI void accumulate(const MachineInstr &MI)
Adds all register units used, defined or clobbered in MI.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Describe properties that are true of each instruction in the target description file.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayLoad() const
Return true if this instruction could possibly read memory.
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
bool isCall() const
Return true if the instruction is a call.
unsigned getOpcode() const
Return the opcode number for this descriptor.
LLVM_ABI bool hasImplicitDefOfPhysReg(MCRegister Reg, const MCRegisterInfo *MRI=nullptr) const
Return true if this instruction implicitly defines the specified physical register.
Wrapper class representing physical registers. Should be passed by value.
bool isValid() const
isValid - Returns true until all the operands have been visited.
unsigned pred_size() const
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
Instructions::iterator instr_iterator
pred_iterator pred_begin()
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
MachineInstrBundleIterator< MachineInstr > iterator
LivenessQueryResult
Possible outcome of a register liveness query to computeRegisterLiveness()
@ LQR_Dead
Register is known to be fully dead.
@ LQR_Live
Register is known to be (at least partially) live.
@ LQR_Unknown
Register liveness not decidable from local neighborhood.
This class is a data container for one entry in a MachineConstantPool.
union llvm::MachineConstantPoolEntry::@004270020304201266316354007027341142157160323045 Val
The constant itself.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
MachineConstantPoolValue * MachineCPVal
const Constant * ConstVal
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
const std::vector< MachineConstantPoolEntry > & getConstants() const
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getNumObjects() const
Return the number of objects.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
ArrayRef< MachineMemOperand * >::iterator mmo_iterator
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
bool isCall(QueryType Type=AnyInBundle) const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool isRegSequence() const
bool isInsertSubreg() const
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
bool hasOptionalDef(QueryType Type=IgnoreBundle) const
Set if this instruction has an optional definition, e.g.
LLVM_ABI void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
LLVM_ABI MachineInstrBundleIterator< MachineInstr > eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
LLVM_ABI MachineFunction * getMachineFunction(const Function &F) const
Returns the MachineFunction associated to IR function F if there is one, otherwise nullptr.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
const GlobalValue * getGlobal() const
void setImplicit(bool Val=true)
void setImm(int64_t immVal)
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
LLVM_ABI bool hasOneNonDBGUse(Register RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug use of the specified register.
defusechain_instr_iterator< true, false, false, true > use_instr_iterator
use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the specified register,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
LLVM_ABI MachineInstr * getVRegDef(Register Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
use_instr_iterator use_instr_begin(Register RegNo) const
LLVM_ABI Register createVirtualRegister(const TargetRegisterClass *RegClass, StringRef Name="")
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
static use_instr_iterator use_instr_end()
const TargetRegisterInfo * getTargetRegisterInfo() const
LLVM_ABI const TargetRegisterClass * constrainRegClass(Register Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
LLVM_ABI MachineInstr * getUniqueVRegDef(Register Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
A Module instance is used to store all the information related to an LLVM module.
void AddHazardRecognizer(std::unique_ptr< ScheduleHazardRecognizer > &&)
Track the current register pressure at some position in the instruction stream, and remember the high...
LLVM_ABI void increaseRegPressure(VirtRegOrUnit VRegOrUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
LLVM_ABI void decreaseRegPressure(VirtRegOrUnit VRegOrUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
unsigned getRegPressureSetLimit(unsigned Idx) const
Get the register unit limit for the given pressure set index.
LLVM_ABI void runOnMachineFunction(const MachineFunction &MF, bool Rev=false)
runOnFunction - Prepare to answer questions about MF.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
constexpr unsigned id() const
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
@ Anti
A register anti-dependence (aka WAR).
This class represents the scheduled code.
unsigned getMaxStageCount()
Return the maximum stage count needed for this schedule.
int stageScheduled(SUnit *SU) const
Return the stage for a scheduled instruction.
int getInitiationInterval() const
Return the initiation interval for this schedule.
std::deque< SUnit * > & getInstructions(int cycle)
Return the instructions that are scheduled at the specified cycle.
int getFirstCycle() const
Return the first cycle in the completed schedule.
int getFinalCycle() const
Return the last cycle in the finalized schedule.
Scheduling unit. This is a node in the scheduling DAG.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
std::vector< SUnit > SUnits
The scheduling units.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
This class builds the dependence graph for the instructions in a loop, and attempts to schedule the i...
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
LLVM_ABI unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const
Compute operand latency based on the available machine model.
const InstrItineraryData * getInstrItineraries() const
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Wrapper class representing a virtual register or register unit.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
static CondCodes getOppositeCondition(CondCodes CC)
ARMII - This namespace holds all of the target specific flags that instruction info tracks.
@ MO_OPTION_MASK
MO_OPTION_MASK - Most flags are mutually exclusive; this mask selects just that part of the flag set.
@ MO_NONLAZY
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which,...
@ MO_DLLIMPORT
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
@ MO_GOT
MO_GOT - On a symbol operand, this represents a GOT relative relocation.
@ MO_COFFSTUB
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the "....
AddrMode
ARM Addressing Modes.
unsigned char getAM3Offset(unsigned AM3Opc)
unsigned char getAM5FP16Offset(unsigned AM5Opc)
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
ShiftOpc getAM2ShiftOpc(unsigned AM2Opc)
unsigned getAM2Offset(unsigned AM2Opc)
unsigned getSOImmValRotate(unsigned Imm)
getSOImmValRotate - Try to handle Imm with an immediate shifter operand, computing the rotate amount ...
bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting a 8-bit im...
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
ShiftOpc getSORegShOp(unsigned Op)
AddrOpc getAM5Op(unsigned AM5Opc)
bool isSOImmTwoPartValNeg(unsigned V)
isSOImmTwoPartValNeg - Return true if the specified value can be obtained by two SOImmVal,...
unsigned getSOImmTwoPartSecond(unsigned V)
getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal, return the second chunk of ...
bool isSOImmTwoPartVal(unsigned V)
isSOImmTwoPartVal - Return true if the specified value can be obtained by or'ing together two SOImmVa...
AddrOpc getAM5FP16Op(unsigned AM5Opc)
unsigned getT2SOImmTwoPartSecond(unsigned Imm)
unsigned getT2SOImmTwoPartFirst(unsigned Imm)
bool isT2SOImmTwoPartVal(unsigned Imm)
unsigned char getAM5Offset(unsigned AM5Opc)
unsigned getSOImmTwoPartFirst(unsigned V)
getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal, return the first chunk of it...
AddrOpc getAM2Op(unsigned AM2Opc)
AddrOpc getAM3Op(unsigned AM3Opc)
Define some predicates that are used for node matching.
@ C
The default llvm calling convention, compatible with C.
InstrType
Represents how an instruction should be mapped by the outliner.
NodeAddr< NodeBase * > Node
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
constexpr T rotr(T V, int R)
static bool isIndirectCall(const MachineInstr &MI)
MachineInstr * findCMPToFoldIntoCBZ(MachineInstr *Br, const TargetRegisterInfo *TRI)
Search backwards from a tBcc to find a tCMPi8 against 0, meaning we can convert them to a tCBZ or tCB...
static bool isCondBranchOpcode(int Opc)
bool HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns true if Val1 has a lower Constant Materialization Cost than Val2.
static bool isPushOpcode(int Opc)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond)
static bool isVCTP(const MachineInstr *MI)
RegState
Flags to represent properties of register accesses.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ Define
Register definition.
bool IsCPSRDead< MachineInstr >(const MachineInstr *MI)
constexpr RegState getKillRegState(bool B)
unsigned getBLXpredOpcode(const MachineFunction &MF)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
static bool isARMLowRegister(MCRegister Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
static bool isIndirectBranchOpcode(int Opc)
bool isLegalAddressImm(unsigned Opcode, int Imm, const TargetInstrInfo *TII)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool registerDefinedBetween(unsigned Reg, MachineBasicBlock::iterator From, MachineBasicBlock::iterator To, const TargetRegisterInfo *TRI)
Return true if Reg is defined between From and To.
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
static bool isSEHInstruction(const MachineInstr &MI)
static bool isCalleeSavedRegister(MCRegister Reg, const MCPhysReg *CSRegs)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget, MachineFunction &MF, MachineInstr *MI, unsigned NumBytes)
Tries to add registers to the reglist of a given base-updating push/pop instruction to adjust the sta...
auto reverse(ContainerTy &&C)
static bool isJumpTableBranchOpcode(int Opc)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static bool isPopOpcode(int Opc)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type argu...
void addPredicatedMveVpredROp(MachineInstrBuilder &MIB, unsigned Cond, unsigned Inactive)
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
void addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB, Register DestReg)
unsigned ConstantMaterializationCost(unsigned Val, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns the number of instructions required to materialize the given constant in a register,...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtua...
bool rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, Register FrameReg, int &Offset, const ARMBaseInstrInfo &TII)
rewriteARMFrameIndex / rewriteT2FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
static bool isIndirectControlFlowNotComingBack(const MachineInstr &MI)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
FunctionAddr VTableAddr Next
ARMCC::CondCodes getInstrPredicate(const MachineInstr &MI, Register &PredReg)
getInstrPredicate - If instruction is predicated, returns its predicate condition,...
unsigned getMatchingCondBranchOpcode(unsigned Opc)
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static bool isUncondBranchOpcode(int Opc)
auto partition(R &&Range, UnaryPredicate P)
Provide wrappers to std::partition which take ranges instead of having to pass begin/end explicitly.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
unsigned gettBLXrOpcode(const MachineFunction &MF)
static bool isSpeculationBarrierEndBBOpcode(int Opc)
unsigned getBLXOpcode(const MachineFunction &MF)
void addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB)
bool isV8EligibleForIT(const InstrType *Instr)
void emitARMRegPlusImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, const DebugLoc &dl, Register DestReg, Register BaseReg, int NumBytes, ARMCC::CondCodes Pred, Register PredReg, const ARMBaseInstrInfo &TII, unsigned MIFlags=0)
emitARMRegPlusImmediate / emitT2RegPlusImmediate - Emits a series of instructions to materialize a des...
constexpr RegState getUndefRegState(bool B)
unsigned convertAddSubFlagsOpcode(unsigned OldOpc)
Map pseudo instructions that imply an 'S' bit onto real opcodes.
ARM_MLxEntry - Record information about MLA / MLS instructions.
Map pseudo instructions that imply an 'S' bit onto real opcodes.
OutlinerCosts(const ARMSubtarget &target)
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
static constexpr LaneBitmask getAll()
static constexpr LaneBitmask getNone()
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Used to describe a register and immediate addition.
RegisterPressure computed within a region of instructions delimited by TopPos and BottomPos.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.