73#define DEBUG_TYPE "arm-instrinfo"
75#define GET_INSTRINFO_CTOR_DTOR
76#include "ARMGenInstrInfo.inc"
90 { ARM::VMLAS, ARM::VMULS, ARM::VADDS,
false,
false },
91 { ARM::VMLSS, ARM::VMULS, ARM::VSUBS,
false,
false },
92 { ARM::VMLAD, ARM::VMULD, ARM::VADDD,
false,
false },
93 { ARM::VMLSD, ARM::VMULD, ARM::VSUBD,
false,
false },
94 { ARM::VNMLAS, ARM::VNMULS, ARM::VSUBS,
true,
false },
95 { ARM::VNMLSS, ARM::VMULS, ARM::VSUBS,
true,
false },
96 { ARM::VNMLAD, ARM::VNMULD, ARM::VSUBD,
true,
false },
97 { ARM::VNMLSD, ARM::VMULD, ARM::VSUBD,
true,
false },
100 { ARM::VMLAfd, ARM::VMULfd, ARM::VADDfd,
false,
false },
101 { ARM::VMLSfd, ARM::VMULfd, ARM::VSUBfd,
false,
false },
102 { ARM::VMLAfq, ARM::VMULfq, ARM::VADDfq,
false,
false },
103 { ARM::VMLSfq, ARM::VMULfq, ARM::VSUBfq,
false,
false },
104 { ARM::VMLAslfd, ARM::VMULslfd, ARM::VADDfd,
false,
true },
105 { ARM::VMLSslfd, ARM::VMULslfd, ARM::VSUBfd,
false,
true },
106 { ARM::VMLAslfq, ARM::VMULslfq, ARM::VADDfq,
false,
true },
107 { ARM::VMLSslfq, ARM::VMULslfq, ARM::VSUBfq,
false,
true },
114 for (
unsigned i = 0, e = std::size(
ARM_MLxTable); i != e; ++i) {
115 if (!MLxEntryMap.insert(std::make_pair(
ARM_MLxTable[i].MLxOpc, i)).second)
127 if (usePreRAHazardRecognizer()) {
129 static_cast<const ARMSubtarget *
>(STI)->getInstrItineraryData();
149 std::make_unique<ARMBankConflictHazardRecognizer>(DAG, 0x4,
true));
165 if (Subtarget.isThumb2() || Subtarget.hasVFP2Base())
186 bool AllowModify)
const {
191 if (
I ==
MBB.instr_begin())
201 bool CantAnalyze =
false;
205 while (
I->isDebugInstr() || !
I->isTerminator() ||
207 I->getOpcode() == ARM::t2DoLoopStartTP){
208 if (
I ==
MBB.instr_begin())
219 TBB =
I->getOperand(0).getMBB();
225 assert(!FBB &&
"FBB should have been null.");
227 TBB =
I->getOperand(0).getMBB();
228 Cond.push_back(
I->getOperand(1));
229 Cond.push_back(
I->getOperand(2));
230 }
else if (
I->isReturn()) {
233 }
else if (
I->getOpcode() == ARM::t2LoopEnd &&
240 TBB =
I->getOperand(1).getMBB();
242 Cond.push_back(
I->getOperand(0));
264 while (DI !=
MBB.instr_end()) {
287 if (
I ==
MBB.instr_begin())
299 int *BytesRemoved)
const {
300 assert(!BytesRemoved &&
"code size not handled");
311 I->eraseFromParent();
315 if (
I ==
MBB.begin())
return 1;
321 I->eraseFromParent();
330 int *BytesAdded)
const {
331 assert(!BytesAdded &&
"code size not handled");
340 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
342 "ARM branch conditions have two or three components!");
352 }
else if (
Cond.size() == 2) {
363 if (
Cond.size() == 2)
368 else if (
Cond.size() == 3)
379 if (
Cond.size() == 2) {
391 while (++
I != E &&
I->isInsideBundle()) {
392 int PIdx =
I->findFirstPredOperandIdx();
393 if (PIdx != -1 &&
I->getOperand(PIdx).getImm() !=
ARMCC::AL)
399 int PIdx =
MI.findFirstPredOperandIdx();
400 return PIdx != -1 &&
MI.getOperand(PIdx).getImm() !=
ARMCC::AL;
408 std::string GenericComment =
410 if (!GenericComment.empty())
411 return GenericComment;
415 return std::string();
419 int FirstPredOp =
MI.findFirstPredOperandIdx();
420 if (FirstPredOp != (
int)
OpIdx)
421 return std::string();
423 std::string CC =
"CC::";
430 unsigned Opc =
MI.getOpcode();
439 int PIdx =
MI.findFirstPredOperandIdx();
443 MI.getOperand(PIdx+1).setReg(Pred[1].
getReg());
450 "CPSR def isn't expected operand");
451 assert((
MI.getOperand(1).isDead() ||
452 MI.getOperand(1).getReg() != ARM::CPSR) &&
453 "if conversion tried to stop defining used CPSR");
454 MI.getOperand(1).setReg(ARM::NoRegister);
464 if (Pred1.
size() > 2 || Pred2.
size() > 2)
489 std::vector<MachineOperand> &Pred,
490 bool SkipDead)
const {
493 bool ClobbersCPSR = MO.isRegMask() && MO.clobbersPhysReg(ARM::CPSR);
494 bool IsCPSR = MO.isReg() && MO.isDef() && MO.getReg() == ARM::CPSR;
495 if (ClobbersCPSR || IsCPSR) {
513 for (
const auto &MO :
MI.operands())
514 if (MO.isReg() && MO.getReg() == ARM::CPSR && MO.isDef() && !MO.isDead())
520 switch (
MI->getOpcode()) {
521 default:
return true;
552 if (!
MI.isPredicable())
590 if (!MO.isReg() || MO.isUndef() || MO.isUse())
592 if (MO.getReg() != ARM::CPSR)
612 switch (
MI.getOpcode()) {
619 return MCID.getSize();
620 case TargetOpcode::BUNDLE:
621 return getInstBundleLength(
MI);
622 case TargetOpcode::COPY:
627 case ARM::CONSTPOOL_ENTRY:
628 case ARM::JUMPTABLE_INSTS:
629 case ARM::JUMPTABLE_ADDRS:
630 case ARM::JUMPTABLE_TBB:
631 case ARM::JUMPTABLE_TBH:
634 return MI.getOperand(2).getImm();
636 return MI.getOperand(1).getImm();
638 case ARM::INLINEASM_BR: {
640 unsigned Size = getInlineAsmLength(
MI.getOperand(0).getSymbolName(), *MAI);
648unsigned ARMBaseInstrInfo::getInstBundleLength(
const MachineInstr &
MI)
const {
652 while (++
I != E &&
I->isInsideBundle()) {
653 assert(!
I->isBundle() &&
"No nested bundle!");
663 unsigned Opc = Subtarget.isThumb()
664 ? (Subtarget.isMClass() ? ARM::t2MRS_M : ARM::t2MRS_AR)
672 if (Subtarget.isMClass())
683 unsigned Opc = Subtarget.isThumb()
684 ? (Subtarget.isMClass() ? ARM::t2MSR_M : ARM::t2MSR_AR)
689 if (Subtarget.isMClass())
718 unsigned Cond,
unsigned Inactive) {
728 bool RenamableSrc)
const {
729 bool GPRDest = ARM::GPRRegClass.contains(DestReg);
730 bool GPRSrc = ARM::GPRRegClass.contains(SrcReg);
732 if (GPRDest && GPRSrc) {
740 bool SPRDest = ARM::SPRRegClass.contains(DestReg);
741 bool SPRSrc = ARM::SPRRegClass.contains(SrcReg);
744 if (SPRDest && SPRSrc)
746 else if (GPRDest && SPRSrc)
748 else if (SPRDest && GPRSrc)
750 else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) && Subtarget.hasFP64())
752 else if (ARM::QPRRegClass.
contains(DestReg, SrcReg))
753 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MQPRCopy;
758 if (
Opc == ARM::VORRq ||
Opc == ARM::MVE_VORR)
760 if (
Opc == ARM::MVE_VORR)
762 else if (
Opc != ARM::MQPRCopy)
768 unsigned BeginIdx = 0;
769 unsigned SubRegs = 0;
773 if (ARM::QQPRRegClass.
contains(DestReg, SrcReg)) {
774 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
775 BeginIdx = ARM::qsub_0;
777 }
else if (ARM::QQQQPRRegClass.
contains(DestReg, SrcReg)) {
778 Opc = Subtarget.hasNEON() ? ARM::VORRq : ARM::MVE_VORR;
779 BeginIdx = ARM::qsub_0;
782 }
else if (ARM::DPairRegClass.
contains(DestReg, SrcReg)) {
784 BeginIdx = ARM::dsub_0;
786 }
else if (ARM::DTripleRegClass.
contains(DestReg, SrcReg)) {
788 BeginIdx = ARM::dsub_0;
790 }
else if (ARM::DQuadRegClass.
contains(DestReg, SrcReg)) {
792 BeginIdx = ARM::dsub_0;
794 }
else if (ARM::GPRPairRegClass.
contains(DestReg, SrcReg)) {
795 Opc = Subtarget.isThumb2() ? ARM::tMOVr : ARM::MOVr;
796 BeginIdx = ARM::gsub_0;
798 }
else if (ARM::DPairSpcRegClass.
contains(DestReg, SrcReg)) {
800 BeginIdx = ARM::dsub_0;
803 }
else if (ARM::DTripleSpcRegClass.
contains(DestReg, SrcReg)) {
805 BeginIdx = ARM::dsub_0;
808 }
else if (ARM::DQuadSpcRegClass.
contains(DestReg, SrcReg)) {
810 BeginIdx = ARM::dsub_0;
813 }
else if (ARM::DPRRegClass.
contains(DestReg, SrcReg) &&
814 !Subtarget.hasFP64()) {
816 BeginIdx = ARM::ssub_0;
818 }
else if (SrcReg == ARM::CPSR) {
821 }
else if (DestReg == ARM::CPSR) {
824 }
else if (DestReg == ARM::VPR) {
830 }
else if (SrcReg == ARM::VPR) {
836 }
else if (DestReg == ARM::FPSCR_NZCV) {
838 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMSR_FPSCR_NZCVQC), DestReg)
842 }
else if (SrcReg == ARM::FPSCR_NZCV) {
844 BuildMI(
MBB,
I,
I->getDebugLoc(),
get(ARM::VMRS_FPSCR_NZCVQC), DestReg)
850 assert(
Opc &&
"Impossible reg-to-reg copy");
856 if (
TRI->regsOverlap(SrcReg,
TRI->getSubReg(DestReg, BeginIdx))) {
857 BeginIdx = BeginIdx + ((SubRegs - 1) * Spacing);
863 for (
unsigned i = 0; i != SubRegs; ++i) {
864 Register Dst =
TRI->getSubReg(DestReg, BeginIdx + i * Spacing);
865 Register Src =
TRI->getSubReg(SrcReg, BeginIdx + i * Spacing);
866 assert(Dst && Src &&
"Bad sub-register");
868 assert(!DstRegs.
count(Src) &&
"destructive vector copy");
873 if (
Opc == ARM::VORRq ||
Opc == ARM::MVE_VORR) {
877 if (
Opc == ARM::MVE_VORR)
882 if (
Opc == ARM::MOVr)
891std::optional<DestSourcePair>
900 if (!
MI.isMoveReg() ||
901 (
MI.getOpcode() == ARM::VORRq &&
902 MI.getOperand(1).getReg() !=
MI.getOperand(2).getReg()))
907std::optional<ParamLoadedValue>
911 Register DstReg = DstSrcPair->Destination->getReg();
942 return MIB.
addReg(Reg, State);
946 return MIB.
addReg(Reg, State, SubIdx);
951 Register SrcReg,
bool isKill,
int FI,
964 switch (
TRI.getSpillSize(*RC)) {
966 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
977 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
984 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
991 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
998 }
else if (ARM::cl_FPSCR_NZCVRegClass.hasSubClassEq(RC)) {
1009 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1016 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1017 if (Subtarget.hasV5TEOps()) {
1020 AddDReg(MIB, SrcReg, ARM::gsub_1, {});
1031 AddDReg(MIB, SrcReg, ARM::gsub_1, {});
1037 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1053 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1054 Subtarget.hasMVEIntegerOps()) {
1059 .addMemOperand(MMO);
1065 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1068 Subtarget.hasNEON()) {
1082 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, {});
1083 AddDReg(MIB, SrcReg, ARM::dsub_2, {});
1089 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1090 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1091 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1093 Subtarget.hasNEON()) {
1102 }
else if (Subtarget.hasMVEIntegerOps()) {
1114 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, {});
1115 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, {});
1116 AddDReg(MIB, SrcReg, ARM::dsub_3, {});
1122 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1123 Subtarget.hasMVEIntegerOps()) {
1128 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1134 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_1, {});
1135 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_2, {});
1136 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_3, {});
1137 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_4, {});
1138 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_5, {});
1139 MIB =
AddDReg(MIB, SrcReg, ARM::dsub_6, {});
1140 AddDReg(MIB, SrcReg, ARM::dsub_7, {});
1150 int &FrameIndex)
const {
1151 switch (
MI.getOpcode()) {
1155 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1156 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1157 MI.getOperand(3).getImm() == 0) {
1158 FrameIndex =
MI.getOperand(1).getIndex();
1159 return MI.getOperand(0).getReg();
1168 case ARM::VSTR_P0_off:
1169 case ARM::VSTR_FPSCR_NZCVQC_off:
1170 case ARM::MVE_VSTRWU32:
1171 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1172 MI.getOperand(2).getImm() == 0) {
1173 FrameIndex =
MI.getOperand(1).getIndex();
1174 return MI.getOperand(0).getReg();
1178 case ARM::VST1d64TPseudo:
1179 case ARM::VST1d64QPseudo:
1180 if (
MI.getOperand(0).isFI() &&
MI.getOperand(2).getSubReg() == 0) {
1181 FrameIndex =
MI.getOperand(0).getIndex();
1182 return MI.getOperand(2).getReg();
1186 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1187 FrameIndex =
MI.getOperand(1).getIndex();
1188 return MI.getOperand(0).getReg();
1191 case ARM::MQQPRStore:
1192 case ARM::MQQQQPRStore:
1193 if (
MI.getOperand(1).isFI()) {
1194 FrameIndex =
MI.getOperand(1).getIndex();
1195 return MI.getOperand(0).getReg();
1204 int &FrameIndex)
const {
1206 if (
MI.mayStore() && hasStoreToStackSlot(
MI,
Accesses) &&
1223 if (
I !=
MBB.end())
DL =
I->getDebugLoc();
1232 switch (
TRI.getSpillSize(*RC)) {
1234 if (ARM::HPRRegClass.hasSubClassEq(RC)) {
1244 if (ARM::GPRRegClass.hasSubClassEq(RC)) {
1250 }
else if (ARM::SPRRegClass.hasSubClassEq(RC)) {
1256 }
else if (ARM::VCCRRegClass.hasSubClassEq(RC)) {
1262 }
else if (ARM::cl_FPSCR_NZCVRegClass.hasSubClassEq(RC)) {
1272 if (ARM::DPRRegClass.hasSubClassEq(RC)) {
1278 }
else if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
1281 if (Subtarget.hasV5TEOps()) {
1304 if (ARM::DPairRegClass.hasSubClassEq(RC) && Subtarget.hasNEON()) {
1317 }
else if (ARM::QPRRegClass.hasSubClassEq(RC) &&
1318 Subtarget.hasMVEIntegerOps()) {
1320 MIB.addFrameIndex(FI)
1322 .addMemOperand(MMO);
1328 if (ARM::DTripleRegClass.hasSubClassEq(RC)) {
1330 Subtarget.hasNEON()) {
1351 if (ARM::QQPRRegClass.hasSubClassEq(RC) ||
1352 ARM::MQQPRRegClass.hasSubClassEq(RC) ||
1353 ARM::DQuadRegClass.hasSubClassEq(RC)) {
1355 Subtarget.hasNEON()) {
1361 }
else if (Subtarget.hasMVEIntegerOps()) {
1381 if (ARM::MQQQQPRRegClass.hasSubClassEq(RC) &&
1382 Subtarget.hasMVEIntegerOps()) {
1386 }
else if (ARM::QQQQPRRegClass.hasSubClassEq(RC)) {
1410 int &FrameIndex)
const {
1411 switch (
MI.getOpcode()) {
1415 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isReg() &&
1416 MI.getOperand(3).isImm() &&
MI.getOperand(2).getReg() == 0 &&
1417 MI.getOperand(3).getImm() == 0) {
1418 FrameIndex =
MI.getOperand(1).getIndex();
1419 return MI.getOperand(0).getReg();
1428 case ARM::VLDR_P0_off:
1429 case ARM::VLDR_FPSCR_NZCVQC_off:
1430 case ARM::MVE_VLDRWU32:
1431 if (
MI.getOperand(1).isFI() &&
MI.getOperand(2).isImm() &&
1432 MI.getOperand(2).getImm() == 0) {
1433 FrameIndex =
MI.getOperand(1).getIndex();
1434 return MI.getOperand(0).getReg();
1438 case ARM::VLD1d8TPseudo:
1439 case ARM::VLD1d16TPseudo:
1440 case ARM::VLD1d32TPseudo:
1441 case ARM::VLD1d64TPseudo:
1442 case ARM::VLD1d8QPseudo:
1443 case ARM::VLD1d16QPseudo:
1444 case ARM::VLD1d32QPseudo:
1445 case ARM::VLD1d64QPseudo:
1446 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1447 FrameIndex =
MI.getOperand(1).getIndex();
1448 return MI.getOperand(0).getReg();
1452 if (
MI.getOperand(1).isFI() &&
MI.getOperand(0).getSubReg() == 0) {
1453 FrameIndex =
MI.getOperand(1).getIndex();
1454 return MI.getOperand(0).getReg();
1457 case ARM::MQQPRLoad:
1458 case ARM::MQQQQPRLoad:
1459 if (
MI.getOperand(1).isFI()) {
1460 FrameIndex =
MI.getOperand(1).getIndex();
1461 return MI.getOperand(0).getReg();
1470 int &FrameIndex)
const {
1472 if (
MI.mayLoad() && hasLoadFromStackSlot(
MI,
Accesses) &&
1486 bool isThumb2 = Subtarget.
isThumb2();
1493 if (isThumb1 || !
MI->getOperand(1).isDead()) {
1495 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA_UPD
1496 : isThumb1 ? ARM::tLDMIA_UPD
1500 LDM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2LDMIA : ARM::LDMIA));
1503 if (isThumb1 || !
MI->getOperand(0).isDead()) {
1504 MachineOperand STWb(
MI->getOperand(0));
1505 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA_UPD
1506 : isThumb1 ? ARM::tSTMIA_UPD
1510 STM =
BuildMI(*BB,
MI, dl,
TII->get(isThumb2 ? ARM::t2STMIA : ARM::STMIA));
1513 MachineOperand LDBase(
MI->getOperand(3));
1516 MachineOperand STBase(
MI->getOperand(2));
1525 [&
TRI](
const unsigned &Reg1,
const unsigned &Reg2) ->
bool {
1526 return TRI.getEncodingValue(Reg1) <
1527 TRI.getEncodingValue(Reg2);
1530 for (
const auto &
Reg : ScratchRegs) {
1539 if (
MI.getOpcode() == TargetOpcode::LOAD_STACK_GUARD) {
1540 expandLoadStackGuard(
MI);
1541 MI.getParent()->erase(
MI);
1545 if (
MI.getOpcode() == ARM::MEMCPY) {
1554 if (!
MI.isCopy() || Subtarget.dontWidenVMOVS() || !Subtarget.hasFP64())
1559 Register DstRegS =
MI.getOperand(0).getReg();
1560 Register SrcRegS =
MI.getOperand(1).getReg();
1561 if (!ARM::SPRRegClass.
contains(DstRegS, SrcRegS))
1566 TRI->getMatchingSuperReg(DstRegS, ARM::ssub_0, &ARM::DPRRegClass);
1568 TRI->getMatchingSuperReg(SrcRegS, ARM::ssub_0, &ARM::DPRRegClass);
1569 if (!DstRegD || !SrcRegD)
1575 if (!
MI.definesRegister(DstRegD,
TRI) ||
MI.readsRegister(DstRegD,
TRI))
1579 if (
MI.getOperand(0).isDead())
1588 int ImpDefIdx =
MI.findRegisterDefOperandIdx(DstRegD,
nullptr);
1589 if (ImpDefIdx != -1)
1590 MI.removeOperand(ImpDefIdx);
1593 MI.setDesc(
get(ARM::VMOVD));
1594 MI.getOperand(0).setReg(DstRegD);
1595 MI.getOperand(1).setReg(SrcRegD);
1602 MI.getOperand(1).setIsUndef();
1607 if (
MI.getOperand(1).isKill()) {
1608 MI.getOperand(1).setIsKill(
false);
1609 MI.addRegisterKilled(SrcRegS,
TRI,
true);
1623 assert(MCPE.isMachineConstantPoolEntry() &&
1624 "Expecting a machine constantpool entry!");
1673 case ARM::tLDRpci_pic:
1674 case ARM::t2LDRpci_pic: {
1694 switch (
I->getOpcode()) {
1695 case ARM::tLDRpci_pic:
1696 case ARM::t2LDRpci_pic: {
1698 unsigned CPI =
I->getOperand(1).getIndex();
1700 I->getOperand(1).setIndex(CPI);
1701 I->getOperand(2).setImm(PCLabelId);
1705 if (!
I->isBundledWithSucc())
1716 if (Opcode == ARM::t2LDRpci || Opcode == ARM::t2LDRpci_pic ||
1717 Opcode == ARM::tLDRpci || Opcode == ARM::tLDRpci_pic ||
1718 Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1719 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1720 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1721 Opcode == ARM::t2MOV_ga_pcrel) {
1732 if (Opcode == ARM::LDRLIT_ga_pcrel || Opcode == ARM::LDRLIT_ga_pcrel_ldr ||
1733 Opcode == ARM::tLDRLIT_ga_pcrel || Opcode == ARM::t2LDRLIT_ga_pcrel ||
1734 Opcode == ARM::MOV_ga_pcrel || Opcode == ARM::MOV_ga_pcrel_ldr ||
1735 Opcode == ARM::t2MOV_ga_pcrel)
1747 if (isARMCP0 && isARMCP1) {
1753 }
else if (!isARMCP0 && !isARMCP1) {
1757 }
else if (Opcode == ARM::PICLDR) {
1765 if (Addr0 != Addr1) {
1801 int64_t &Offset2)
const {
1803 if (Subtarget.isThumb1Only())
return false;
1808 auto IsLoadOpcode = [&](
unsigned Opcode) {
1823 case ARM::t2LDRSHi8:
1825 case ARM::t2LDRBi12:
1826 case ARM::t2LDRSHi12:
1867 int64_t Offset1, int64_t Offset2,
1868 unsigned NumLoads)
const {
1870 if (Subtarget.isThumb1Only())
return false;
1872 assert(Offset2 > Offset1);
1874 if ((Offset2 - Offset1) / 8 > 64)
1905 if (
MI.isDebugInstr())
1909 if (
MI.isTerminator() ||
MI.isPosition())
1913 if (
MI.getOpcode() == TargetOpcode::INLINEASM_BR)
1927 while (++
I !=
MBB->end() &&
I->isDebugInstr())
1929 if (
I !=
MBB->end() &&
I->getOpcode() == ARM::t2IT)
1940 if (!
MI.isCall() &&
MI.definesRegister(ARM::SP,
nullptr))
1948 unsigned NumCycles,
unsigned ExtraPredCycles,
1956 if (
MBB.getParent()->getFunction().hasOptSize()) {
1958 if (!Pred->empty()) {
1960 if (LastMI->
getOpcode() == ARM::t2Bcc) {
1969 MBB, 0, 0, Probability);
1974 unsigned TCycles,
unsigned TExtra,
1976 unsigned FCycles,
unsigned FExtra,
1985 if (Subtarget.isThumb2() &&
TBB.getParent()->getFunction().hasMinSize()) {
1993 const unsigned ScalingUpFactor = 1024;
1995 unsigned PredCost = (TCycles + FCycles + TExtra + FExtra) * ScalingUpFactor;
1996 unsigned UnpredCost;
1997 if (!Subtarget.hasBranchPredictor()) {
2000 unsigned NotTakenBranchCost = 1;
2001 unsigned TakenBranchCost = Subtarget.getMispredictionPenalty();
2002 unsigned TUnpredCycles, FUnpredCycles;
2005 TUnpredCycles = TCycles + NotTakenBranchCost;
2006 FUnpredCycles = TakenBranchCost;
2009 TUnpredCycles = TCycles + TakenBranchCost;
2010 FUnpredCycles = FCycles + NotTakenBranchCost;
2013 PredCost -= 1 * ScalingUpFactor;
2016 unsigned TUnpredCost = Probability.
scale(TUnpredCycles * ScalingUpFactor);
2017 unsigned FUnpredCost = Probability.
getCompl().
scale(FUnpredCycles * ScalingUpFactor);
2018 UnpredCost = TUnpredCost + FUnpredCost;
2021 if (Subtarget.isThumb2() && TCycles + FCycles > 4) {
2022 PredCost += ((TCycles + FCycles - 4) / 4) * ScalingUpFactor;
2025 unsigned TUnpredCost = Probability.
scale(TCycles * ScalingUpFactor);
2026 unsigned FUnpredCost =
2028 UnpredCost = TUnpredCost + FUnpredCost;
2029 UnpredCost += 1 * ScalingUpFactor;
2030 UnpredCost += Subtarget.getMispredictionPenalty() * ScalingUpFactor / 10;
2033 return PredCost <= UnpredCost;
2038 unsigned NumInsts)
const {
2042 if (!Subtarget.isThumb2())
2046 unsigned MaxInsts = Subtarget.restrictIT() ? 1 : 4;
2055 if (
MI.getOpcode() == ARM::t2Bcc &&
2067 if (Subtarget.isThumb2())
2078 return Subtarget.isProfitableToUnpredicate();
2086 int PIdx =
MI.findFirstPredOperandIdx();
2092 PredReg =
MI.getOperand(PIdx+1).getReg();
2101 if (
Opc == ARM::t2B)
2110 unsigned OpIdx2)
const {
2111 switch (
MI.getOpcode()) {
2113 case ARM::t2MOVCCr: {
2118 if (CC ==
ARMCC::AL || PredReg != ARM::CPSR)
2138 if (!Reg.isVirtual())
2140 if (!
MRI.hasOneNonDBGUse(Reg))
2152 if (MO.isFI() || MO.isCPI() || MO.isJTI())
2159 if (MO.getReg().isPhysical())
2161 if (MO.isDef() && !MO.isDead())
2164 bool DontMoveAcrossStores =
true;
2165 if (!
MI->isSafeToMove(DontMoveAcrossStores))
2173 bool PreferFalse)
const {
2174 assert((
MI.getOpcode() == ARM::MOVCCr ||
MI.getOpcode() == ARM::t2MOVCCr) &&
2175 "Unknown select instruction");
2178 bool Invert = !
DefMI;
2180 DefMI = canFoldIntoMOVCC(
MI.getOperand(1).getReg(),
MRI,
this);
2187 Register DestReg =
MI.getOperand(0).getReg();
2190 if (!
MRI.constrainRegClass(DestReg, FalseClass))
2192 if (!
MRI.constrainRegClass(DestReg, TrueClass))
2203 i != e && !DefDesc.
operands()[i].isPredicate(); ++i)
2206 unsigned CondCode =
MI.getOperand(3).getImm();
2211 NewMI.
add(
MI.getOperand(4));
2222 NewMI.
add(FalseReg);
2233 if (
DefMI->getParent() !=
MI.getParent())
2237 DefMI->eraseFromParent();
2253 {ARM::ADDSri, ARM::ADDri},
2254 {ARM::ADDSrr, ARM::ADDrr},
2255 {ARM::ADDSrsi, ARM::ADDrsi},
2256 {ARM::ADDSrsr, ARM::ADDrsr},
2258 {ARM::SUBSri, ARM::SUBri},
2259 {ARM::SUBSrr, ARM::SUBrr},
2260 {ARM::SUBSrsi, ARM::SUBrsi},
2261 {ARM::SUBSrsr, ARM::SUBrsr},
2263 {ARM::RSBSri, ARM::RSBri},
2264 {ARM::RSBSrsi, ARM::RSBrsi},
2265 {ARM::RSBSrsr, ARM::RSBrsr},
2267 {ARM::tADDSi3, ARM::tADDi3},
2268 {ARM::tADDSi8, ARM::tADDi8},
2269 {ARM::tADDSrr, ARM::tADDrr},
2270 {ARM::tADCS, ARM::tADC},
2272 {ARM::tSUBSi3, ARM::tSUBi3},
2273 {ARM::tSUBSi8, ARM::tSUBi8},
2274 {ARM::tSUBSrr, ARM::tSUBrr},
2275 {ARM::tSBCS, ARM::tSBC},
2276 {ARM::tRSBS, ARM::tRSB},
2277 {ARM::tLSLSri, ARM::tLSLri},
2279 {ARM::t2ADDSri, ARM::t2ADDri},
2280 {ARM::t2ADDSrr, ARM::t2ADDrr},
2281 {ARM::t2ADDSrs, ARM::t2ADDrs},
2283 {ARM::t2SUBSri, ARM::t2SUBri},
2284 {ARM::t2SUBSrr, ARM::t2SUBrr},
2285 {ARM::t2SUBSrs, ARM::t2SUBrs},
2287 {ARM::t2RSBSri, ARM::t2RSBri},
2288 {ARM::t2RSBSrs, ARM::t2RSBrs},
2293 if (OldOpc == Entry.PseudoOpc)
2294 return Entry.MachineOpc;
2305 if (NumBytes == 0 && DestReg != BaseReg) {
2314 bool isSub = NumBytes < 0;
2315 if (isSub) NumBytes = -NumBytes;
2320 assert(ThisVal &&
"Didn't extract field correctly");
2323 NumBytes &= ~ThisVal;
2328 unsigned Opc = isSub ? ARM::SUBri : ARM::ADDri;
2341 unsigned NumBytes) {
2352 if (!IsPush && !IsPop)
2355 bool IsVFPPushPop =
MI->getOpcode() == ARM::VSTMDDB_UPD ||
2356 MI->getOpcode() == ARM::VLDMDIA_UPD;
2357 bool IsT1PushPop =
MI->getOpcode() == ARM::tPUSH ||
2358 MI->getOpcode() == ARM::tPOP ||
2359 MI->getOpcode() == ARM::tPOP_RET;
2361 assert((IsT1PushPop || (
MI->getOperand(0).getReg() == ARM::SP &&
2362 MI->getOperand(1).getReg() == ARM::SP)) &&
2363 "trying to fold sp update into non-sp-updating push/pop");
2368 if (NumBytes % (IsVFPPushPop ? 8 : 4) != 0)
2373 int RegListIdx = IsT1PushPop ? 2 : 4;
2376 unsigned RegsNeeded;
2379 RegsNeeded = NumBytes / 8;
2380 RegClass = &ARM::DPRRegClass;
2382 RegsNeeded = NumBytes / 4;
2383 RegClass = &ARM::GPRRegClass;
2393 unsigned FirstRegEnc = -1;
2396 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i) {
2401 TRI->getEncodingValue(MO.
getReg()) < FirstRegEnc)
2402 FirstRegEnc =
TRI->getEncodingValue(MO.
getReg());
2405 const MCPhysReg *CSRegs =
TRI->getCalleeSavedRegs(&MF);
2408 for (
int CurRegEnc = FirstRegEnc - 1; CurRegEnc >= 0 && RegsNeeded;
2411 if (IsT1PushPop && CurRegEnc >
TRI->getEncodingValue(ARM::R7))
2418 false,
false,
true));
2428 MI->getParent()->computeRegisterLiveness(
TRI, CurReg,
MI) !=
2450 for (
int i =
MI->getNumOperands() - 1; i >= RegListIdx; --i)
2451 MI->removeOperand(i);
2464 unsigned Opcode =
MI.getOpcode();
2470 if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
2473 if (Opcode == ARM::ADDri) {
2474 Offset +=
MI.getOperand(FrameRegIdx+1).getImm();
2477 MI.setDesc(
TII.get(ARM::MOVr));
2478 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2479 MI.removeOperand(FrameRegIdx+1);
2485 MI.setDesc(
TII.get(ARM::SUBri));
2491 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2492 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(
Offset);
2507 "Bit extraction didn't work?");
2508 MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
2510 unsigned ImmIdx = 0;
2512 unsigned NumBits = 0;
2516 ImmIdx = FrameRegIdx + 1;
2517 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2521 ImmIdx = FrameRegIdx+2;
2528 ImmIdx = FrameRegIdx+2;
2539 ImmIdx = FrameRegIdx+1;
2547 ImmIdx = FrameRegIdx+1;
2557 ImmIdx = FrameRegIdx+1;
2558 InstrOffs =
MI.getOperand(ImmIdx).getImm();
2567 Offset += InstrOffs * Scale;
2568 assert((
Offset & (Scale-1)) == 0 &&
"Can't encode this offset!");
2578 int ImmedOffset =
Offset / Scale;
2579 unsigned Mask = (1 << NumBits) - 1;
2580 if ((
unsigned)
Offset <= Mask * Scale) {
2582 MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg,
false);
2588 ImmedOffset = -ImmedOffset;
2590 ImmedOffset |= 1 << NumBits;
2598 ImmedOffset = ImmedOffset & Mask;
2601 ImmedOffset = -ImmedOffset;
2603 ImmedOffset |= 1 << NumBits;
2619 Register &SrcReg2, int64_t &CmpMask,
2620 int64_t &CmpValue)
const {
2621 switch (
MI.getOpcode()) {
2626 SrcReg =
MI.getOperand(0).getReg();
2629 CmpValue =
MI.getOperand(1).getImm();
2634 SrcReg =
MI.getOperand(0).getReg();
2635 SrcReg2 =
MI.getOperand(1).getReg();
2641 SrcReg =
MI.getOperand(0).getReg();
2643 CmpMask =
MI.getOperand(1).getImm();
2656 int CmpMask,
bool CommonUse) {
2657 switch (
MI->getOpcode()) {
2660 if (CmpMask !=
MI->getOperand(2).getImm())
2662 if (SrcReg ==
MI->getOperand(CommonUse ? 1 : 0).getReg())
2752 switch (
MI->getOpcode()) {
2753 default:
return false;
2849 if (!
MI)
return false;
2852 if (CmpMask != ~0) {
2856 UI =
MRI->use_instr_begin(SrcReg), UE =
MRI->use_instr_end();
2858 if (UI->getParent() != CmpInstr.
getParent())
2867 if (!
MI)
return false;
2876 if (
I ==
B)
return false;
2887 else if (
MI->getParent() != CmpInstr.
getParent() || CmpValue != 0) {
2892 if (CmpInstr.
getOpcode() == ARM::CMPri ||
2900 bool IsThumb1 =
false;
2917 if (
MI && IsThumb1) {
2919 if (
I != E && !
MI->readsRegister(ARM::CPSR,
TRI)) {
2920 bool CanReorder =
true;
2921 for (;
I != E; --
I) {
2922 if (
I->getOpcode() != ARM::tMOVi8) {
2928 MI =
MI->removeFromParent();
2939 bool SubAddIsThumb1 =
false;
2954 if (Instr.modifiesRegister(ARM::CPSR,
TRI) ||
2955 Instr.readsRegister(ARM::CPSR,
TRI))
2977 IsThumb1 = SubAddIsThumb1;
2992 bool isSafe =
false;
2995 while (!isSafe && ++
I != E) {
2997 for (
unsigned IO = 0, EO = Instr.getNumOperands();
2998 !isSafe && IO != EO; ++IO) {
3012 bool IsInstrVSel =
true;
3013 switch (Instr.getOpcode()) {
3015 IsInstrVSel =
false;
3049 bool IsSub =
Opc == ARM::SUBrr ||
Opc == ARM::t2SUBrr ||
3050 Opc == ARM::SUBri ||
Opc == ARM::t2SUBri ||
3051 Opc == ARM::tSUBrr ||
Opc == ARM::tSUBi3 ||
3053 unsigned OpI =
Opc != ARM::tSUBrr ? 1 : 2;
3065 std::make_pair(&((*I).getOperand(IO - 1)), NewCC));
3099 if (Succ->isLiveIn(ARM::CPSR))
3106 unsigned CPSRRegNum =
MI->getNumExplicitOperands() - 1;
3107 MI->getOperand(CPSRRegNum).setReg(ARM::CPSR);
3108 MI->getOperand(CPSRRegNum).setIsDef(
true);
3116 for (
auto &[MO,
Cond] : OperandsToUpdate)
3119 MI->clearRegisterDeads(ARM::CPSR);
3133 int64_t CmpMask, CmpValue;
3135 if (
Next !=
MI.getParent()->end() &&
3146 unsigned DefOpc =
DefMI.getOpcode();
3147 if (DefOpc != ARM::t2MOVi32imm && DefOpc != ARM::MOVi32imm &&
3148 DefOpc != ARM::tMOVi32imm)
3150 if (!
DefMI.getOperand(1).isImm())
3154 if (!
MRI->hasOneNonDBGUse(Reg))
3170 if (
UseMI.getOperand(
NumOps - 1).getReg() == ARM::CPSR)
3176 unsigned UseOpc =
UseMI.getOpcode();
3177 unsigned NewUseOpc = 0;
3179 uint32_t SOImmValV1 = 0, SOImmValV2 = 0;
3180 bool Commute =
false;
3182 default:
return false;
3190 case ARM::t2EORrr: {
3191 Commute =
UseMI.getOperand(2).getReg() != Reg;
3196 if (UseOpc == ARM::SUBrr && Commute)
3202 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::ADDri : ARM::SUBri;
3205 NewUseOpc = UseOpc == ARM::ADDrr ? ARM::SUBri : ARM::ADDri;
3219 case ARM::ORRrr: NewUseOpc = ARM::ORRri;
break;
3220 case ARM::EORrr: NewUseOpc = ARM::EORri;
break;
3224 case ARM::t2SUBrr: {
3225 if (UseOpc == ARM::t2SUBrr && Commute)
3230 const bool ToSP =
DefMI.getOperand(0).getReg() == ARM::SP;
3231 const unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
3232 const unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
3234 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2ADD : t2SUB;
3237 NewUseOpc = UseOpc == ARM::t2ADDrr ? t2SUB : t2ADD;
3252 case ARM::t2ORRrr: NewUseOpc = ARM::t2ORRri;
break;
3253 case ARM::t2EORrr: NewUseOpc = ARM::t2EORri;
break;
3260 unsigned OpIdx = Commute ? 2 : 1;
3262 bool isKill =
UseMI.getOperand(
OpIdx).isKill();
3264 Register NewReg =
MRI->createVirtualRegister(TRC);
3272 UseMI.getOperand(1).setReg(NewReg);
3273 UseMI.getOperand(1).setIsKill();
3274 UseMI.getOperand(2).ChangeToImmediate(SOImmValV2);
3275 DefMI.eraseFromParent();
3282 case ARM::t2ADDspImm:
3283 case ARM::t2SUBspImm:
3286 MRI->constrainRegClass(
UseMI.getOperand(0).getReg(), TRC);
3293 switch (
MI.getOpcode()) {
3297 assert(UOps >= 0 &&
"bad # UOps");
3305 unsigned ShOpVal =
MI.getOperand(3).getImm();
3310 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3318 if (!
MI.getOperand(2).getReg())
3321 unsigned ShOpVal =
MI.getOperand(3).getImm();
3326 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3336 case ARM::LDRSB_POST:
3337 case ARM::LDRSH_POST: {
3340 return (Rt == Rm) ? 4 : 3;
3343 case ARM::LDR_PRE_REG:
3344 case ARM::LDRB_PRE_REG: {
3349 unsigned ShOpVal =
MI.getOperand(4).getImm();
3354 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3360 case ARM::STR_PRE_REG:
3361 case ARM::STRB_PRE_REG: {
3362 unsigned ShOpVal =
MI.getOperand(4).getImm();
3367 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3374 case ARM::STRH_PRE: {
3384 case ARM::LDR_POST_REG:
3385 case ARM::LDRB_POST_REG:
3386 case ARM::LDRH_POST: {
3389 return (Rt == Rm) ? 3 : 2;
3392 case ARM::LDR_PRE_IMM:
3393 case ARM::LDRB_PRE_IMM:
3394 case ARM::LDR_POST_IMM:
3395 case ARM::LDRB_POST_IMM:
3396 case ARM::STRB_POST_IMM:
3397 case ARM::STRB_POST_REG:
3398 case ARM::STRB_PRE_IMM:
3399 case ARM::STRH_POST:
3400 case ARM::STR_POST_IMM:
3401 case ARM::STR_POST_REG:
3402 case ARM::STR_PRE_IMM:
3405 case ARM::LDRSB_PRE:
3406 case ARM::LDRSH_PRE: {
3413 unsigned ShOpVal =
MI.getOperand(4).getImm();
3418 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
3431 return (Rt == Rn) ? 3 : 2;
3442 case ARM::LDRD_POST:
3443 case ARM::t2LDRD_POST:
3446 case ARM::STRD_POST:
3447 case ARM::t2STRD_POST:
3450 case ARM::LDRD_PRE: {
3457 return (Rt == Rn) ? 4 : 3;
3460 case ARM::t2LDRD_PRE: {
3463 return (Rt == Rn) ? 4 : 3;
3466 case ARM::STRD_PRE: {
3474 case ARM::t2STRD_PRE:
3477 case ARM::t2LDR_POST:
3478 case ARM::t2LDRB_POST:
3479 case ARM::t2LDRB_PRE:
3480 case ARM::t2LDRSBi12:
3481 case ARM::t2LDRSBi8:
3482 case ARM::t2LDRSBpci:
3484 case ARM::t2LDRH_POST:
3485 case ARM::t2LDRH_PRE:
3487 case ARM::t2LDRSB_POST:
3488 case ARM::t2LDRSB_PRE:
3489 case ARM::t2LDRSH_POST:
3490 case ARM::t2LDRSH_PRE:
3491 case ARM::t2LDRSHi12:
3492 case ARM::t2LDRSHi8:
3493 case ARM::t2LDRSHpci:
3497 case ARM::t2LDRDi8: {
3500 return (Rt == Rn) ? 3 : 2;
3503 case ARM::t2STRB_POST:
3504 case ARM::t2STRB_PRE:
3507 case ARM::t2STRH_POST:
3508 case ARM::t2STRH_PRE:
3510 case ARM::t2STR_POST:
3511 case ARM::t2STR_PRE:
3542 E =
MI.memoperands_end();
3544 Size += (*I)->getSize().getValue();
3551 return std::min(
Size / 4, 16U);
3556 unsigned UOps = 1 + NumRegs;
3560 case ARM::VLDMDIA_UPD:
3561 case ARM::VLDMDDB_UPD:
3562 case ARM::VLDMSIA_UPD:
3563 case ARM::VLDMSDB_UPD:
3564 case ARM::VSTMDIA_UPD:
3565 case ARM::VSTMDDB_UPD:
3566 case ARM::VSTMSIA_UPD:
3567 case ARM::VSTMSDB_UPD:
3568 case ARM::LDMIA_UPD:
3569 case ARM::LDMDA_UPD:
3570 case ARM::LDMDB_UPD:
3571 case ARM::LDMIB_UPD:
3572 case ARM::STMIA_UPD:
3573 case ARM::STMDA_UPD:
3574 case ARM::STMDB_UPD:
3575 case ARM::STMIB_UPD:
3576 case ARM::tLDMIA_UPD:
3577 case ARM::tSTMIA_UPD:
3578 case ARM::t2LDMIA_UPD:
3579 case ARM::t2LDMDB_UPD:
3580 case ARM::t2STMIA_UPD:
3581 case ARM::t2STMDB_UPD:
3584 case ARM::LDMIA_RET:
3586 case ARM::t2LDMIA_RET:
3595 if (!ItinData || ItinData->
isEmpty())
3599 unsigned Class =
Desc.getSchedClass();
3601 if (ItinUOps >= 0) {
3602 if (Subtarget.isSwift() && (
Desc.mayLoad() ||
Desc.mayStore()))
3608 unsigned Opc =
MI.getOpcode();
3627 case ARM::VLDMDIA_UPD:
3628 case ARM::VLDMDDB_UPD:
3630 case ARM::VLDMSIA_UPD:
3631 case ARM::VLDMSDB_UPD:
3633 case ARM::VSTMDIA_UPD:
3634 case ARM::VSTMDDB_UPD:
3636 case ARM::VSTMSIA_UPD:
3637 case ARM::VSTMSDB_UPD: {
3638 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands();
3639 return (NumRegs / 2) + (NumRegs % 2) + 1;
3642 case ARM::LDMIA_RET:
3647 case ARM::LDMIA_UPD:
3648 case ARM::LDMDA_UPD:
3649 case ARM::LDMDB_UPD:
3650 case ARM::LDMIB_UPD:
3655 case ARM::STMIA_UPD:
3656 case ARM::STMDA_UPD:
3657 case ARM::STMDB_UPD:
3658 case ARM::STMIB_UPD:
3660 case ARM::tLDMIA_UPD:
3661 case ARM::tSTMIA_UPD:
3665 case ARM::t2LDMIA_RET:
3668 case ARM::t2LDMIA_UPD:
3669 case ARM::t2LDMDB_UPD:
3672 case ARM::t2STMIA_UPD:
3673 case ARM::t2STMDB_UPD: {
3674 unsigned NumRegs =
MI.getNumOperands() -
Desc.getNumOperands() + 1;
3675 switch (Subtarget.getLdStMultipleTiming()) {
3686 unsigned UOps = (NumRegs / 2);
3692 unsigned UOps = (NumRegs / 2);
3695 if ((NumRegs % 2) || !
MI.hasOneMemOperand() ||
3696 (*
MI.memoperands_begin())->getAlign() <
Align(8))
3706std::optional<unsigned>
3709 unsigned DefIdx,
unsigned DefAlign)
const {
3718 DefCycle = RegNo / 2 + 1;
3723 bool isSLoad =
false;
3728 case ARM::VLDMSIA_UPD:
3729 case ARM::VLDMSDB_UPD:
3736 if ((isSLoad && (RegNo % 2)) || DefAlign < 8)
3740 DefCycle = RegNo + 2;
3746std::optional<unsigned>
3749 unsigned DefIdx,
unsigned DefAlign)
const {
3756 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3759 DefCycle = RegNo / 2;
3764 }
else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3765 DefCycle = (RegNo / 2);
3768 if ((RegNo % 2) || DefAlign < 8)
3774 DefCycle = RegNo + 2;
3780std::optional<unsigned>
3783 unsigned UseIdx,
unsigned UseAlign)
const {
3789 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3791 UseCycle = RegNo / 2 + 1;
3794 }
else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3796 bool isSStore =
false;
3801 case ARM::VSTMSIA_UPD:
3802 case ARM::VSTMSDB_UPD:
3809 if ((isSStore && (RegNo % 2)) || UseAlign < 8)
3813 UseCycle = RegNo + 2;
3819std::optional<unsigned>
3822 unsigned UseIdx,
unsigned UseAlign)
const {
3828 if (Subtarget.isCortexA8() || Subtarget.isCortexA7()) {
3829 UseCycle = RegNo / 2;
3834 }
else if (Subtarget.isLikeA9() || Subtarget.isSwift()) {
3835 UseCycle = (RegNo / 2);
3838 if ((RegNo % 2) || UseAlign < 8)
3849 unsigned DefIdx,
unsigned DefAlign,
const MCInstrDesc &UseMCID,
3850 unsigned UseIdx,
unsigned UseAlign)
const {
3860 std::optional<unsigned> DefCycle;
3861 bool LdmBypass =
false;
3868 case ARM::VLDMDIA_UPD:
3869 case ARM::VLDMDDB_UPD:
3871 case ARM::VLDMSIA_UPD:
3872 case ARM::VLDMSDB_UPD:
3873 DefCycle = getVLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3876 case ARM::LDMIA_RET:
3881 case ARM::LDMIA_UPD:
3882 case ARM::LDMDA_UPD:
3883 case ARM::LDMDB_UPD:
3884 case ARM::LDMIB_UPD:
3886 case ARM::tLDMIA_UPD:
3888 case ARM::t2LDMIA_RET:
3891 case ARM::t2LDMIA_UPD:
3892 case ARM::t2LDMDB_UPD:
3894 DefCycle = getLDMDefCycle(ItinData, DefMCID, DefClass, DefIdx, DefAlign);
3902 std::optional<unsigned> UseCycle;
3909 case ARM::VSTMDIA_UPD:
3910 case ARM::VSTMDDB_UPD:
3912 case ARM::VSTMSIA_UPD:
3913 case ARM::VSTMSDB_UPD:
3914 UseCycle = getVSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
3921 case ARM::STMIA_UPD:
3922 case ARM::STMDA_UPD:
3923 case ARM::STMDB_UPD:
3924 case ARM::STMIB_UPD:
3925 case ARM::tSTMIA_UPD:
3930 case ARM::t2STMIA_UPD:
3931 case ARM::t2STMDB_UPD:
3932 UseCycle = getSTMUseCycle(ItinData, UseMCID, UseClass, UseIdx, UseAlign);
3940 if (UseCycle > *DefCycle + 1)
3941 return std::nullopt;
3943 UseCycle = *DefCycle - *UseCycle + 1;
3944 if (UseCycle > 0u) {
3950 UseCycle = *UseCycle - 1;
3952 UseClass, UseIdx)) {
3953 UseCycle = *UseCycle - 1;
3962 unsigned &DefIdx,
unsigned &Dist) {
3967 assert(
II->isInsideBundle() &&
"Empty bundle?");
3970 while (
II->isInsideBundle()) {
3971 Idx =
II->findRegisterDefOperandIdx(
Reg,
TRI,
false,
true);
3978 assert(Idx != -1 &&
"Cannot find bundled definition!");
3985 unsigned &UseIdx,
unsigned &Dist) {
3989 assert(
II->isInsideBundle() &&
"Empty bundle?");
3994 while (
II !=
E &&
II->isInsideBundle()) {
3995 Idx =
II->findRegisterUseOperandIdx(
Reg,
TRI,
false);
3998 if (
II->getOpcode() != ARM::t2IT)
4026 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4036 case ARM::t2LDRSHs: {
4038 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4039 if (ShAmt == 0 || ShAmt == 2)
4044 }
else if (Subtarget.
isSwift()) {
4051 unsigned ShOpVal =
DefMI.getOperand(3).getImm();
4056 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4067 case ARM::t2LDRSHs: {
4069 unsigned ShAmt =
DefMI.getOperand(3).getImm();
4070 if (ShAmt == 0 || ShAmt == 1 || ShAmt == 2 || ShAmt == 3)
4077 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment()) {
4084 case ARM::VLD1q8wb_fixed:
4085 case ARM::VLD1q16wb_fixed:
4086 case ARM::VLD1q32wb_fixed:
4087 case ARM::VLD1q64wb_fixed:
4088 case ARM::VLD1q8wb_register:
4089 case ARM::VLD1q16wb_register:
4090 case ARM::VLD1q32wb_register:
4091 case ARM::VLD1q64wb_register:
4098 case ARM::VLD2d8wb_fixed:
4099 case ARM::VLD2d16wb_fixed:
4100 case ARM::VLD2d32wb_fixed:
4101 case ARM::VLD2q8wb_fixed:
4102 case ARM::VLD2q16wb_fixed:
4103 case ARM::VLD2q32wb_fixed:
4104 case ARM::VLD2d8wb_register:
4105 case ARM::VLD2d16wb_register:
4106 case ARM::VLD2d32wb_register:
4107 case ARM::VLD2q8wb_register:
4108 case ARM::VLD2q16wb_register:
4109 case ARM::VLD2q32wb_register:
4114 case ARM::VLD3d8_UPD:
4115 case ARM::VLD3d16_UPD:
4116 case ARM::VLD3d32_UPD:
4117 case ARM::VLD1d64Twb_fixed:
4118 case ARM::VLD1d64Twb_register:
4119 case ARM::VLD3q8_UPD:
4120 case ARM::VLD3q16_UPD:
4121 case ARM::VLD3q32_UPD:
4126 case ARM::VLD4d8_UPD:
4127 case ARM::VLD4d16_UPD:
4128 case ARM::VLD4d32_UPD:
4129 case ARM::VLD1d64Qwb_fixed:
4130 case ARM::VLD1d64Qwb_register:
4131 case ARM::VLD4q8_UPD:
4132 case ARM::VLD4q16_UPD:
4133 case ARM::VLD4q32_UPD:
4134 case ARM::VLD1DUPq8:
4135 case ARM::VLD1DUPq16:
4136 case ARM::VLD1DUPq32:
4137 case ARM::VLD1DUPq8wb_fixed:
4138 case ARM::VLD1DUPq16wb_fixed:
4139 case ARM::VLD1DUPq32wb_fixed:
4140 case ARM::VLD1DUPq8wb_register:
4141 case ARM::VLD1DUPq16wb_register:
4142 case ARM::VLD1DUPq32wb_register:
4143 case ARM::VLD2DUPd8:
4144 case ARM::VLD2DUPd16:
4145 case ARM::VLD2DUPd32:
4146 case ARM::VLD2DUPd8wb_fixed:
4147 case ARM::VLD2DUPd16wb_fixed:
4148 case ARM::VLD2DUPd32wb_fixed:
4149 case ARM::VLD2DUPd8wb_register:
4150 case ARM::VLD2DUPd16wb_register:
4151 case ARM::VLD2DUPd32wb_register:
4152 case ARM::VLD4DUPd8:
4153 case ARM::VLD4DUPd16:
4154 case ARM::VLD4DUPd32:
4155 case ARM::VLD4DUPd8_UPD:
4156 case ARM::VLD4DUPd16_UPD:
4157 case ARM::VLD4DUPd32_UPD:
4159 case ARM::VLD1LNd16:
4160 case ARM::VLD1LNd32:
4161 case ARM::VLD1LNd8_UPD:
4162 case ARM::VLD1LNd16_UPD:
4163 case ARM::VLD1LNd32_UPD:
4165 case ARM::VLD2LNd16:
4166 case ARM::VLD2LNd32:
4167 case ARM::VLD2LNq16:
4168 case ARM::VLD2LNq32:
4169 case ARM::VLD2LNd8_UPD:
4170 case ARM::VLD2LNd16_UPD:
4171 case ARM::VLD2LNd32_UPD:
4172 case ARM::VLD2LNq16_UPD:
4173 case ARM::VLD2LNq32_UPD:
4175 case ARM::VLD4LNd16:
4176 case ARM::VLD4LNd32:
4177 case ARM::VLD4LNq16:
4178 case ARM::VLD4LNq32:
4179 case ARM::VLD4LNd8_UPD:
4180 case ARM::VLD4LNd16_UPD:
4181 case ARM::VLD4LNd32_UPD:
4182 case ARM::VLD4LNq16_UPD:
4183 case ARM::VLD4LNq32_UPD:
4197 if (!ItinData || ItinData->
isEmpty())
4198 return std::nullopt;
4204 unsigned DefAdj = 0;
4205 if (
DefMI.isBundle())
4214 unsigned UseAdj = 0;
4215 if (
UseMI.isBundle()) {
4219 return std::nullopt;
4222 return getOperandLatencyImpl(
4223 ItinData, *ResolvedDefMI, DefIdx, ResolvedDefMI->
getDesc(), DefAdj, DefMO,
4224 Reg, *ResolvedUseMI, UseIdx, ResolvedUseMI->
getDesc(), UseAdj);
4227std::optional<unsigned> ARMBaseInstrInfo::getOperandLatencyImpl(
4229 unsigned DefIdx,
const MCInstrDesc &DefMCID,
unsigned DefAdj,
4231 unsigned UseIdx,
const MCInstrDesc &UseMCID,
unsigned UseAdj)
const {
4232 if (Reg == ARM::CPSR) {
4233 if (
DefMI.getOpcode() == ARM::FMSTAT) {
4235 return Subtarget.
isLikeA9() ? 1 : 20;
4239 if (
UseMI.isBranch())
4258 return std::nullopt;
4260 unsigned DefAlign =
DefMI.hasOneMemOperand()
4261 ? (*
DefMI.memoperands_begin())->getAlign().value()
4263 unsigned UseAlign =
UseMI.hasOneMemOperand()
4264 ? (*
UseMI.memoperands_begin())->getAlign().value()
4269 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4272 return std::nullopt;
4275 int Adj = DefAdj + UseAdj;
4279 if (Adj >= 0 || (
int)*
Latency > -Adj) {
4286std::optional<unsigned>
4288 SDNode *DefNode,
unsigned DefIdx,
4289 SDNode *UseNode,
unsigned UseIdx)
const {
4295 if (isZeroCost(DefMCID.
Opcode))
4298 if (!ItinData || ItinData->
isEmpty())
4299 return DefMCID.
mayLoad() ? 3 : 1;
4302 std::optional<unsigned>
Latency =
4304 int Adj = Subtarget.getPreISelOperandLatencyAdjustment();
4305 int Threshold = 1 + Adj;
4311 unsigned DefAlign = !DefMN->memoperands_empty()
4312 ? (*DefMN->memoperands_begin())->getAlign().value()
4315 unsigned UseAlign = !UseMN->memoperands_empty()
4316 ? (*UseMN->memoperands_begin())->getAlign().value()
4319 ItinData, DefMCID, DefIdx, DefAlign, UseMCID, UseIdx, UseAlign);
4321 return std::nullopt;
4324 (Subtarget.isCortexA8() || Subtarget.isLikeA9() ||
4325 Subtarget.isCortexA7())) {
4342 case ARM::t2LDRSHs: {
4345 if (ShAmt == 0 || ShAmt == 2)
4350 }
else if (DefIdx == 0 &&
Latency > 2U && Subtarget.isSwift()) {
4360 ((ShImm == 1 || ShImm == 2 || ShImm == 3) &&
4377 if (DefAlign < 8 && Subtarget.checkVLDnAccessAlignment())
4384 case ARM::VLD1q8wb_register:
4385 case ARM::VLD1q16wb_register:
4386 case ARM::VLD1q32wb_register:
4387 case ARM::VLD1q64wb_register:
4388 case ARM::VLD1q8wb_fixed:
4389 case ARM::VLD1q16wb_fixed:
4390 case ARM::VLD1q32wb_fixed:
4391 case ARM::VLD1q64wb_fixed:
4395 case ARM::VLD2q8Pseudo:
4396 case ARM::VLD2q16Pseudo:
4397 case ARM::VLD2q32Pseudo:
4398 case ARM::VLD2d8wb_fixed:
4399 case ARM::VLD2d16wb_fixed:
4400 case ARM::VLD2d32wb_fixed:
4401 case ARM::VLD2q8PseudoWB_fixed:
4402 case ARM::VLD2q16PseudoWB_fixed:
4403 case ARM::VLD2q32PseudoWB_fixed:
4404 case ARM::VLD2d8wb_register:
4405 case ARM::VLD2d16wb_register:
4406 case ARM::VLD2d32wb_register:
4407 case ARM::VLD2q8PseudoWB_register:
4408 case ARM::VLD2q16PseudoWB_register:
4409 case ARM::VLD2q32PseudoWB_register:
4410 case ARM::VLD3d8Pseudo:
4411 case ARM::VLD3d16Pseudo:
4412 case ARM::VLD3d32Pseudo:
4413 case ARM::VLD1d8TPseudo:
4414 case ARM::VLD1d16TPseudo:
4415 case ARM::VLD1d32TPseudo:
4416 case ARM::VLD1d64TPseudo:
4417 case ARM::VLD1d64TPseudoWB_fixed:
4418 case ARM::VLD1d64TPseudoWB_register:
4419 case ARM::VLD3d8Pseudo_UPD:
4420 case ARM::VLD3d16Pseudo_UPD:
4421 case ARM::VLD3d32Pseudo_UPD:
4422 case ARM::VLD3q8Pseudo_UPD:
4423 case ARM::VLD3q16Pseudo_UPD:
4424 case ARM::VLD3q32Pseudo_UPD:
4425 case ARM::VLD3q8oddPseudo:
4426 case ARM::VLD3q16oddPseudo:
4427 case ARM::VLD3q32oddPseudo:
4428 case ARM::VLD3q8oddPseudo_UPD:
4429 case ARM::VLD3q16oddPseudo_UPD:
4430 case ARM::VLD3q32oddPseudo_UPD:
4431 case ARM::VLD4d8Pseudo:
4432 case ARM::VLD4d16Pseudo:
4433 case ARM::VLD4d32Pseudo:
4434 case ARM::VLD1d8QPseudo:
4435 case ARM::VLD1d16QPseudo:
4436 case ARM::VLD1d32QPseudo:
4437 case ARM::VLD1d64QPseudo:
4438 case ARM::VLD1d64QPseudoWB_fixed:
4439 case ARM::VLD1d64QPseudoWB_register:
4440 case ARM::VLD1q8HighQPseudo:
4441 case ARM::VLD1q8LowQPseudo_UPD:
4442 case ARM::VLD1q8HighTPseudo:
4443 case ARM::VLD1q8LowTPseudo_UPD:
4444 case ARM::VLD1q16HighQPseudo:
4445 case ARM::VLD1q16LowQPseudo_UPD:
4446 case ARM::VLD1q16HighTPseudo:
4447 case ARM::VLD1q16LowTPseudo_UPD:
4448 case ARM::VLD1q32HighQPseudo:
4449 case ARM::VLD1q32LowQPseudo_UPD:
4450 case ARM::VLD1q32HighTPseudo:
4451 case ARM::VLD1q32LowTPseudo_UPD:
4452 case ARM::VLD1q64HighQPseudo:
4453 case ARM::VLD1q64LowQPseudo_UPD:
4454 case ARM::VLD1q64HighTPseudo:
4455 case ARM::VLD1q64LowTPseudo_UPD:
4456 case ARM::VLD4d8Pseudo_UPD:
4457 case ARM::VLD4d16Pseudo_UPD:
4458 case ARM::VLD4d32Pseudo_UPD:
4459 case ARM::VLD4q8Pseudo_UPD:
4460 case ARM::VLD4q16Pseudo_UPD:
4461 case ARM::VLD4q32Pseudo_UPD:
4462 case ARM::VLD4q8oddPseudo:
4463 case ARM::VLD4q16oddPseudo:
4464 case ARM::VLD4q32oddPseudo:
4465 case ARM::VLD4q8oddPseudo_UPD:
4466 case ARM::VLD4q16oddPseudo_UPD:
4467 case ARM::VLD4q32oddPseudo_UPD:
4468 case ARM::VLD1DUPq8:
4469 case ARM::VLD1DUPq16:
4470 case ARM::VLD1DUPq32:
4471 case ARM::VLD1DUPq8wb_fixed:
4472 case ARM::VLD1DUPq16wb_fixed:
4473 case ARM::VLD1DUPq32wb_fixed:
4474 case ARM::VLD1DUPq8wb_register:
4475 case ARM::VLD1DUPq16wb_register:
4476 case ARM::VLD1DUPq32wb_register:
4477 case ARM::VLD2DUPd8:
4478 case ARM::VLD2DUPd16:
4479 case ARM::VLD2DUPd32:
4480 case ARM::VLD2DUPd8wb_fixed:
4481 case ARM::VLD2DUPd16wb_fixed:
4482 case ARM::VLD2DUPd32wb_fixed:
4483 case ARM::VLD2DUPd8wb_register:
4484 case ARM::VLD2DUPd16wb_register:
4485 case ARM::VLD2DUPd32wb_register:
4486 case ARM::VLD2DUPq8EvenPseudo:
4487 case ARM::VLD2DUPq8OddPseudo:
4488 case ARM::VLD2DUPq16EvenPseudo:
4489 case ARM::VLD2DUPq16OddPseudo:
4490 case ARM::VLD2DUPq32EvenPseudo:
4491 case ARM::VLD2DUPq32OddPseudo:
4492 case ARM::VLD3DUPq8EvenPseudo:
4493 case ARM::VLD3DUPq8OddPseudo:
4494 case ARM::VLD3DUPq16EvenPseudo:
4495 case ARM::VLD3DUPq16OddPseudo:
4496 case ARM::VLD3DUPq32EvenPseudo:
4497 case ARM::VLD3DUPq32OddPseudo:
4498 case ARM::VLD4DUPd8Pseudo:
4499 case ARM::VLD4DUPd16Pseudo:
4500 case ARM::VLD4DUPd32Pseudo:
4501 case ARM::VLD4DUPd8Pseudo_UPD:
4502 case ARM::VLD4DUPd16Pseudo_UPD:
4503 case ARM::VLD4DUPd32Pseudo_UPD:
4504 case ARM::VLD4DUPq8EvenPseudo:
4505 case ARM::VLD4DUPq8OddPseudo:
4506 case ARM::VLD4DUPq16EvenPseudo:
4507 case ARM::VLD4DUPq16OddPseudo:
4508 case ARM::VLD4DUPq32EvenPseudo:
4509 case ARM::VLD4DUPq32OddPseudo:
4510 case ARM::VLD1LNq8Pseudo:
4511 case ARM::VLD1LNq16Pseudo:
4512 case ARM::VLD1LNq32Pseudo:
4513 case ARM::VLD1LNq8Pseudo_UPD:
4514 case ARM::VLD1LNq16Pseudo_UPD:
4515 case ARM::VLD1LNq32Pseudo_UPD:
4516 case ARM::VLD2LNd8Pseudo:
4517 case ARM::VLD2LNd16Pseudo:
4518 case ARM::VLD2LNd32Pseudo:
4519 case ARM::VLD2LNq16Pseudo:
4520 case ARM::VLD2LNq32Pseudo:
4521 case ARM::VLD2LNd8Pseudo_UPD:
4522 case ARM::VLD2LNd16Pseudo_UPD:
4523 case ARM::VLD2LNd32Pseudo_UPD:
4524 case ARM::VLD2LNq16Pseudo_UPD:
4525 case ARM::VLD2LNq32Pseudo_UPD:
4526 case ARM::VLD4LNd8Pseudo:
4527 case ARM::VLD4LNd16Pseudo:
4528 case ARM::VLD4LNd32Pseudo:
4529 case ARM::VLD4LNq16Pseudo:
4530 case ARM::VLD4LNq32Pseudo:
4531 case ARM::VLD4LNd8Pseudo_UPD:
4532 case ARM::VLD4LNd16Pseudo_UPD:
4533 case ARM::VLD4LNd32Pseudo_UPD:
4534 case ARM::VLD4LNq16Pseudo_UPD:
4535 case ARM::VLD4LNq32Pseudo_UPD:
4545unsigned ARMBaseInstrInfo::getPredicationCost(
const MachineInstr &
MI)
const {
4546 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4555 if (
MCID.isCall() || (
MCID.hasImplicitDefOfPhysReg(ARM::CPSR) &&
4556 !Subtarget.cheapPredicableCPSRDef())) {
4566 unsigned *PredCost)
const {
4567 if (
MI.isCopyLike() ||
MI.isInsertSubreg() ||
MI.isRegSequence() ||
4573 if (
MI.isBundle()) {
4577 while (++
I !=
E &&
I->isInsideBundle()) {
4578 if (
I->getOpcode() != ARM::t2IT)
4579 Latency += getInstrLatency(ItinData, *
I, PredCost);
4584 const MCInstrDesc &MCID =
MI.getDesc();
4586 !Subtarget.cheapPredicableCPSRDef()))) {
4594 return MI.mayLoad() ? 3 : 1;
4607 MI.hasOneMemOperand() ? (*
MI.memoperands_begin())->getAlign().value() : 0;
4609 if (Adj >= 0 || (
int)
Latency > -Adj) {
4617 if (!
Node->isMachineOpcode())
4620 if (!ItinData || ItinData->
isEmpty())
4623 unsigned Opcode =
Node->getMachineOpcode();
4633bool ARMBaseInstrInfo::hasHighOperandLatency(
const TargetSchedModel &SchedModel,
4638 unsigned UseIdx)
const {
4641 if (Subtarget.nonpipelinedVFP() &&
4656 unsigned DefIdx)
const {
4658 if (!ItinData || ItinData->
isEmpty())
4663 unsigned DefClass =
DefMI.getDesc().getSchedClass();
4664 std::optional<unsigned> DefCycle =
4666 return DefCycle && DefCycle <= 2U;
4674 ErrInfo =
"Pseudo flag setting opcodes only exist in Selection DAG";
4677 if (
MI.getOpcode() == ARM::tMOVr && !Subtarget.hasV6Ops()) {
4679 if (!ARM::hGPRRegClass.
contains(
MI.getOperand(0).getReg()) &&
4680 !ARM::hGPRRegClass.contains(
MI.getOperand(1).getReg())) {
4681 ErrInfo =
"Non-flag-setting Thumb1 mov is v6-only";
4685 if (
MI.getOpcode() == ARM::tPUSH ||
4686 MI.getOpcode() == ARM::tPOP ||
4687 MI.getOpcode() == ARM::tPOP_RET) {
4689 if (MO.isImplicit() || !MO.isReg())
4693 if (!(
MI.getOpcode() == ARM::tPUSH &&
Reg == ARM::LR) &&
4694 !(
MI.getOpcode() == ARM::tPOP_RET &&
Reg == ARM::PC)) {
4695 ErrInfo =
"Unsupported register in Thumb1 push/pop";
4701 if (
MI.getOpcode() == ARM::MVE_VMOV_q_rr) {
4702 assert(
MI.getOperand(4).isImm() &&
MI.getOperand(5).isImm());
4703 if ((
MI.getOperand(4).getImm() != 2 &&
MI.getOperand(4).getImm() != 3) ||
4704 MI.getOperand(4).getImm() !=
MI.getOperand(5).getImm() + 2) {
4705 ErrInfo =
"Incorrect array index for MVE_VMOV_q_rr";
4726 for (
auto Op :
MI.operands()) {
4733 ErrInfo =
"Incorrect AddrMode Imm for instruction";
4743 unsigned LoadImmOpc,
4744 unsigned LoadOpc)
const {
4745 assert(!Subtarget.isROPI() && !Subtarget.isRWPI() &&
4746 "ROPI/RWPI not currently supported with stack guard");
4754 if (LoadImmOpc == ARM::MRC || LoadImmOpc == ARM::t2MRC) {
4755 assert(!Subtarget.isReadTPSoft() &&
4756 "TLS stack protector requires hardware TLS register");
4766 Module &M = *
MBB.getParent()->getFunction().getParent();
4767 Offset = M.getStackProtectorGuardOffset();
4772 unsigned AddOpc = (LoadImmOpc == ARM::MRC) ? ARM::ADDri : ARM::t2ADDri;
4783 bool IsIndirect = Subtarget.isGVIndirectSymbol(GV);
4786 if (Subtarget.isTargetMachO()) {
4788 }
else if (Subtarget.isTargetCOFF()) {
4791 else if (IsIndirect)
4793 }
else if (IsIndirect) {
4797 if (LoadImmOpc == ARM::tMOVi32imm) {
4800 ARMSysReg::lookupMClassSysRegByName(
"apsr_nzcvq")->Encoding;
4836 unsigned &AddSubOpc,
4837 bool &NegAcc,
bool &HasLane)
const {
4839 if (
I == MLxEntryMap.end())
4843 MulOpc = Entry.MulOpc;
4844 AddSubOpc = Entry.AddSubOpc;
4845 NegAcc = Entry.NegAcc;
4846 HasLane = Entry.HasLane;
4870std::pair<uint16_t, uint16_t>
4874 if (Subtarget.hasNEON()) {
4883 (
MI.getOpcode() == ARM::VMOVRS ||
MI.getOpcode() == ARM::VMOVSR ||
4884 MI.getOpcode() == ARM::VMOVS))
4891 return std::make_pair(
ExeNEON, 0);
4896 return std::make_pair(
ExeNEON, 0);
4899 return std::make_pair(
ExeVFP, 0);
4905 unsigned SReg,
unsigned &Lane) {
4907 TRI->getMatchingSuperReg(SReg, ARM::ssub_0, &ARM::DPRRegClass);
4914 DReg =
TRI->getMatchingSuperReg(SReg, ARM::ssub_1, &ARM::DPRRegClass);
4916 assert(DReg &&
"S-register with no D super-register?");
4941 if (
MI.definesRegister(DReg,
TRI) ||
MI.readsRegister(DReg,
TRI)) {
4947 ImplicitSReg =
TRI->getSubReg(DReg,
4948 (Lane & 1) ? ARM::ssub_0 : ARM::ssub_1);
4950 MI.getParent()->computeRegisterLiveness(
TRI, ImplicitSReg,
MI);
4965 unsigned DstReg, SrcReg;
4970 switch (
MI.getOpcode()) {
4982 assert(Subtarget.hasNEON() &&
"VORRd requires NEON");
4985 DstReg =
MI.getOperand(0).getReg();
4986 SrcReg =
MI.getOperand(1).getReg();
4988 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
4989 MI.removeOperand(i - 1);
4992 MI.setDesc(
get(ARM::VORRd));
5004 DstReg =
MI.getOperand(0).getReg();
5005 SrcReg =
MI.getOperand(1).getReg();
5007 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5008 MI.removeOperand(i - 1);
5015 MI.setDesc(
get(ARM::VGETLNi32));
5031 DstReg =
MI.getOperand(0).getReg();
5032 SrcReg =
MI.getOperand(1).getReg();
5040 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5041 MI.removeOperand(i - 1);
5045 MI.setDesc(
get(ARM::VSETLNi32));
5064 DstReg =
MI.getOperand(0).getReg();
5065 SrcReg =
MI.getOperand(1).getReg();
5067 unsigned DstLane = 0, SrcLane = 0;
5076 for (
unsigned i =
MI.getDesc().getNumOperands(); i; --i)
5077 MI.removeOperand(i - 1);
5082 MI.setDesc(
get(ARM::VDUPLN32d));
5116 MCRegister CurReg = SrcLane == 1 && DstLane == 1 ? DSrc : DDst;
5117 bool CurUndef = !
MI.readsRegister(CurReg,
TRI);
5120 CurReg = SrcLane == 0 && DstLane == 0 ? DSrc : DDst;
5121 CurUndef = !
MI.readsRegister(CurReg,
TRI);
5126 if (SrcLane == DstLane)
5129 MI.setDesc(
get(ARM::VEXTd32));
5134 CurReg = SrcLane == 1 && DstLane == 0 ? DSrc : DDst;
5135 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5138 CurReg = SrcLane == 0 && DstLane == 1 ? DSrc : DDst;
5139 CurUndef = CurReg == DSrc && !
MI.readsRegister(CurReg,
TRI);
5144 if (SrcLane != DstLane)
5150 if (ImplicitSReg != 0)
5176 auto PartialUpdateClearance = Subtarget.getPartialUpdateClearance();
5177 if (!PartialUpdateClearance)
5188 switch (
MI.getOpcode()) {
5194 case ARM::VMOVv4i16:
5195 case ARM::VMOVv2i32:
5196 case ARM::VMOVv2f32:
5197 case ARM::VMOVv1i64:
5198 UseOp =
MI.findRegisterUseOperandIdx(Reg,
TRI,
false);
5202 case ARM::VLD1LNd32:
5211 if (UseOp != -1 &&
MI.getOperand(UseOp).readsReg())
5215 if (Reg.isVirtual()) {
5217 if (!MO.
getSubReg() ||
MI.readsVirtualRegister(Reg))
5219 }
else if (ARM::SPRRegClass.
contains(Reg)) {
5222 TRI->getMatchingSuperReg(Reg, ARM::ssub_0, &ARM::DPRRegClass);
5223 if (!DReg || !
MI.definesRegister(DReg,
TRI))
5229 return PartialUpdateClearance;
5236 assert(OpNum <
MI.getDesc().getNumDefs() &&
"OpNum is not a def");
5241 assert(Reg.isPhysical() &&
"Can't break virtual register dependencies.");
5242 unsigned DReg = Reg;
5245 if (ARM::SPRRegClass.
contains(Reg)) {
5246 DReg = ARM::D0 + (Reg - ARM::S0) / 2;
5247 assert(
TRI->isSuperRegister(Reg, DReg) &&
"Register enums broken");
5250 assert(ARM::DPRRegClass.
contains(DReg) &&
"Can only break D-reg deps");
5251 assert(
MI.definesRegister(DReg,
TRI) &&
"MI doesn't clobber full D-reg");
5264 MI.addRegisterKilled(DReg,
TRI,
true);
5268 return Subtarget.hasFeature(ARM::HasV6KOps);
5272 if (
MI->getNumOperands() < 4)
5274 unsigned ShOpVal =
MI->getOperand(3).getImm();
5278 ((ShImm == 1 || ShImm == 2) &&
5288 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5289 assert(
MI.isRegSequenceLike() &&
"Invalid kind of instruction");
5291 switch (
MI.getOpcode()) {
5303 MOReg = &
MI.getOperand(2);
5315 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5316 assert(
MI.isExtractSubregLike() &&
"Invalid kind of instruction");
5318 switch (
MI.getOpcode()) {
5329 InputReg.
SubIdx = DefIdx == 0 ? ARM::ssub_0 : ARM::ssub_1;
5338 assert(DefIdx <
MI.getDesc().getNumDefs() &&
"Invalid definition index");
5339 assert(
MI.isInsertSubregLike() &&
"Invalid kind of instruction");
5341 switch (
MI.getOpcode()) {
5342 case ARM::VSETLNi32:
5343 case ARM::MVE_VMOV_to_lane_32:
5351 BaseReg.Reg = MOBaseReg.
getReg();
5354 InsertedReg.
Reg = MOInsertedReg.
getReg();
5362std::pair<unsigned, unsigned>
5365 return std::make_pair(TF & Mask, TF & ~Mask);
5370 using namespace ARMII;
5372 static const std::pair<unsigned, const char *> TargetFlags[] = {
5373 {MO_LO16,
"arm-lo16"}, {MO_HI16,
"arm-hi16"},
5374 {MO_LO_0_7,
"arm-lo-0-7"}, {MO_HI_0_7,
"arm-hi-0-7"},
5375 {MO_LO_8_15,
"arm-lo-8-15"}, {MO_HI_8_15,
"arm-hi-8-15"},
5382 using namespace ARMII;
5384 static const std::pair<unsigned, const char *> TargetFlags[] = {
5385 {MO_COFFSTUB,
"arm-coffstub"},
5386 {MO_GOT,
"arm-got"},
5387 {MO_SBREL,
"arm-sbrel"},
5388 {MO_DLLIMPORT,
"arm-dllimport"},
5389 {MO_SECREL,
"arm-secrel"},
5390 {MO_NONLAZY,
"arm-nonlazy"}};
5394std::optional<RegImmPair>
5397 unsigned Opcode =
MI.getOpcode();
5404 return std::nullopt;
5407 if (Opcode == ARM::SUBri)
5409 else if (Opcode != ARM::ADDri)
5410 return std::nullopt;
5415 if (!
MI.getOperand(1).isReg() || !
MI.getOperand(2).isImm())
5416 return std::nullopt;
5418 Offset =
MI.getOperand(2).getImm() * Sign;
5426 for (
auto I = From;
I != To; ++
I)
5427 if (
I->modifiesRegister(Reg,
TRI))
5440 if (CmpMI->modifiesRegister(ARM::CPSR,
TRI))
5442 if (CmpMI->readsRegister(ARM::CPSR,
TRI))
5448 if (CmpMI->getOpcode() != ARM::tCMPi8 && CmpMI->getOpcode() != ARM::t2CMPri)
5450 Register Reg = CmpMI->getOperand(0).getReg();
5453 if (Pred !=
ARMCC::AL || CmpMI->getOperand(1).getImm() != 0)
5466 if (Subtarget->isThumb()) {
5468 return ForCodesize ? 2 : 1;
5469 if (Subtarget->hasV6T2Ops() && (Val <= 0xffff ||
5472 return ForCodesize ? 4 : 1;
5474 return ForCodesize ? 4 : 2;
5476 return ForCodesize ? 4 : 2;
5478 return ForCodesize ? 4 : 2;
5481 return ForCodesize ? 4 : 1;
5483 return ForCodesize ? 4 : 1;
5484 if (Subtarget->hasV6T2Ops() && Val <= 0xffff)
5485 return ForCodesize ? 4 : 1;
5487 return ForCodesize ? 8 : 2;
5489 return ForCodesize ? 8 : 2;
5492 return ForCodesize ? 8 : 2;
5493 return ForCodesize ? 8 : 3;
5657 MachineFunction *MF =
C.getMF();
5659 const ARMBaseRegisterInfo *ARI =
5660 static_cast<const ARMBaseRegisterInfo *
>(&
TRI);
5669 C.isAvailableAcrossAndOutOfSeq(
Reg,
TRI) &&
5670 C.isAvailableInsideSeq(
Reg,
TRI))
5684 for (;
I !=
E; ++
I) {
5688 if (
MI.modifiesRegister(ARM::LR, &
TRI))
5692 unsigned Opcode =
MI.getOpcode();
5693 if (Opcode == ARM::BX_RET || Opcode == ARM::MOVPCLR ||
5694 Opcode == ARM::SUBS_PC_LR || Opcode == ARM::tBX_RET ||
5695 Opcode == ARM::tBXNS_RET) {
5701 if (
MI.readsRegister(ARM::LR, &
TRI))
5707std::optional<std::unique_ptr<outliner::OutlinedFunction>>
5710 std::vector<outliner::Candidate> &RepeatedSequenceLocs,
5711 unsigned MinRepeats)
const {
5712 unsigned SequenceSize = 0;
5713 for (
auto &
MI : RepeatedSequenceLocs[0])
5717 unsigned FlagsSetInAll = 0xF;
5722 FlagsSetInAll &=
C.Flags;
5741 return C.isAnyUnavailableAcrossOrOutOfSeq({ARM::R12, ARM::CPSR},
TRI);
5749 llvm::erase_if(RepeatedSequenceLocs, CantGuaranteeValueAcrossCall);
5752 if (RepeatedSequenceLocs.size() < MinRepeats)
5753 return std::nullopt;
5772 if (std::distance(RepeatedSequenceLocs.begin(), NoBTI) >
5773 std::distance(NoBTI, RepeatedSequenceLocs.end()))
5774 RepeatedSequenceLocs.erase(NoBTI, RepeatedSequenceLocs.end());
5776 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoBTI);
5778 if (RepeatedSequenceLocs.size() < MinRepeats)
5779 return std::nullopt;
5789 if (std::distance(RepeatedSequenceLocs.begin(), NoPAC) >
5790 std::distance(NoPAC, RepeatedSequenceLocs.end()))
5791 RepeatedSequenceLocs.erase(NoPAC, RepeatedSequenceLocs.end());
5793 RepeatedSequenceLocs.erase(RepeatedSequenceLocs.begin(), NoPAC);
5795 if (RepeatedSequenceLocs.size() < MinRepeats)
5796 return std::nullopt;
5801 unsigned LastInstrOpcode = RepeatedSequenceLocs[0].back().getOpcode();
5804 auto SetCandidateCallInfo =
5805 [&RepeatedSequenceLocs](
unsigned CallID,
unsigned NumBytesForCall) {
5807 C.setCallInfo(CallID, NumBytesForCall);
5812 const auto &SomeMFI =
5815 if (SomeMFI.branchTargetEnforcement()) {
5824 if (SomeMFI.shouldSignReturnAddress(
true)) {
5834 if (RepeatedSequenceLocs[0].back().isTerminator()) {
5838 }
else if (LastInstrOpcode == ARM::BL || LastInstrOpcode == ARM::BLX ||
5839 LastInstrOpcode == ARM::BLX_noip || LastInstrOpcode == ARM::tBL ||
5840 LastInstrOpcode == ARM::tBLXr ||
5841 LastInstrOpcode == ARM::tBLXr_noip ||
5842 LastInstrOpcode == ARM::tBLXi) {
5850 unsigned NumBytesNoStackCalls = 0;
5851 std::vector<outliner::Candidate> CandidatesWithoutStackFixups;
5856 const auto Last =
C.getMBB()->rbegin();
5857 const bool LRIsAvailable =
5858 C.getMBB()->isReturnBlock() && !
Last->isCall()
5861 :
C.isAvailableAcrossAndOutOfSeq(ARM::LR,
TRI);
5862 if (LRIsAvailable) {
5866 CandidatesWithoutStackFixups.push_back(
C);
5871 else if (findRegisterToSaveLRTo(
C)) {
5875 CandidatesWithoutStackFixups.push_back(
C);
5880 else if (
C.isAvailableInsideSeq(ARM::SP,
TRI)) {
5883 CandidatesWithoutStackFixups.push_back(
C);
5889 NumBytesNoStackCalls += SequenceSize;
5895 if (NumBytesNoStackCalls <=
5896 RepeatedSequenceLocs.size() * Costs.
CallDefault) {
5897 RepeatedSequenceLocs = CandidatesWithoutStackFixups;
5899 if (RepeatedSequenceLocs.size() < MinRepeats)
5900 return std::nullopt;
5925 return std::make_unique<outliner::OutlinedFunction>(
5926 RepeatedSequenceLocs, SequenceSize, NumBytesToCreateFrame, FrameID);
5929bool ARMBaseInstrInfo::checkAndUpdateStackOffset(
MachineInstr *
MI,
5932 int SPIdx =
MI->findRegisterUseOperandIdx(ARM::SP,
nullptr);
5957 unsigned NumOps =
MI->getDesc().getNumOperands();
5958 unsigned ImmIdx =
NumOps - 3;
5962 int64_t OffVal =
Offset.getImm();
5968 unsigned NumBits = 0;
5997 assert((
Fixup & 3) == 0 &&
"Can't encode this offset!");
6017 assert(((OffVal * Scale +
Fixup) & (Scale - 1)) == 0 &&
6018 "Can't encode this offset!");
6019 OffVal +=
Fixup / Scale;
6021 unsigned Mask = (1 << NumBits) - 1;
6023 if (OffVal <= Mask) {
6025 MI->getOperand(ImmIdx).setImm(OffVal);
6033 Function &
F, std::vector<outliner::Candidate> &Candidates)
const {
6037 const Function &CFn =
C.getMF()->getFunction();
6044 ARMGenInstrInfo::mergeOutliningCandidateAttributes(
F, Candidates);
6052 if (!OutlineFromLinkOnceODRs &&
F.hasLinkOnceODRLinkage())
6071 unsigned &Flags)
const {
6074 assert(
MBB.getParent()->getRegInfo().tracksLiveness() &&
6075 "Suitable Machine Function for outlining must track liveness");
6083 bool R12AvailableInBlock = LRU.
available(ARM::R12);
6084 bool CPSRAvailableInBlock = LRU.
available(ARM::CPSR);
6088 if (R12AvailableInBlock && CPSRAvailableInBlock)
6096 if (R12AvailableInBlock && !LRU.
available(ARM::R12))
6098 if (CPSRAvailableInBlock && !LRU.
available(ARM::CPSR))
6108 bool LRIsAvailable =
6109 MBB.isReturnBlock() && !
MBB.back().isCall()
6121 unsigned Flags)
const {
6127 unsigned Opc =
MI.getOpcode();
6128 if (
Opc == ARM::tPICADD ||
Opc == ARM::PICADD ||
Opc == ARM::PICSTR ||
6129 Opc == ARM::PICSTRB ||
Opc == ARM::PICSTRH ||
Opc == ARM::PICLDR ||
6130 Opc == ARM::PICLDRB ||
Opc == ARM::PICLDRH ||
Opc == ARM::PICLDRSB ||
6131 Opc == ARM::PICLDRSH ||
Opc == ARM::t2LDRpci_pic ||
6132 Opc == ARM::t2MOVi16_ga_pcrel ||
Opc == ARM::t2MOVTi16_ga_pcrel ||
6133 Opc == ARM::t2MOV_ga_pcrel)
6137 if (
Opc == ARM::t2BF_LabelPseudo ||
Opc == ARM::t2DoLoopStart ||
6138 Opc == ARM::t2DoLoopStartTP ||
Opc == ARM::t2WhileLoopStart ||
6139 Opc == ARM::t2WhileLoopStartLR ||
Opc == ARM::t2WhileLoopStartTP ||
6140 Opc == ARM::t2LoopDec ||
Opc == ARM::t2LoopEnd ||
6141 Opc == ARM::t2LoopEndDec)
6150 if (
MI.isTerminator())
6156 if (
MI.readsRegister(ARM::LR,
TRI) ||
MI.readsRegister(ARM::PC,
TRI))
6164 if (MOP.isGlobal()) {
6173 (Callee->getName() ==
"\01__gnu_mcount_nc" ||
6174 Callee->getName() ==
"\01mcount" || Callee->getName() ==
"__mcount"))
6182 if (
Opc == ARM::BL ||
Opc == ARM::tBL ||
Opc == ARM::BLX ||
6183 Opc == ARM::BLX_noip ||
Opc == ARM::tBLXr ||
Opc == ARM::tBLXr_noip ||
6188 return UnknownCallOutlineType;
6196 return UnknownCallOutlineType;
6204 return UnknownCallOutlineType;
6212 if (
MI.modifiesRegister(ARM::LR,
TRI) ||
MI.modifiesRegister(ARM::PC,
TRI))
6216 if (
MI.modifiesRegister(ARM::SP,
TRI) ||
MI.readsRegister(ARM::SP,
TRI)) {
6229 bool MightNeedStackFixUp =
6233 if (!MightNeedStackFixUp)
6239 if (
MI.modifiesRegister(ARM::SP,
TRI))
6244 if (checkAndUpdateStackOffset(&
MI, Subtarget.getStackAlignment().value(),
6253 if (
MI.readsRegister(ARM::ITSTATE,
TRI) ||
6254 MI.modifiesRegister(ARM::ITSTATE,
TRI))
6258 if (
MI.isCFIInstruction())
6273 int Align = std::max(Subtarget.getStackAlignment().value(), uint64_t(8));
6275 assert(Align >= 8 && Align <= 256);
6277 assert(Subtarget.isThumb2());
6289 unsigned Opc = Subtarget.isThumb() ? ARM::t2STR_PRE : ARM::STR_PRE_IMM;
6303 CFIBuilder.buildDefCFAOffset(Align);
6308 CFIBuilder.buildOffset(ARM::LR, -LROffset);
6311 CFIBuilder.buildOffset(ARM::RA_AUTH_CODE, -Align);
6317 bool CFI,
bool Auth)
const {
6318 int Align = Subtarget.getStackAlignment().value();
6321 assert(Subtarget.isThumb2());
6333 unsigned Opc = Subtarget.isThumb() ? ARM::t2LDR_POST : ARM::LDR_POST_IMM;
6337 if (!Subtarget.isThumb())
6339 MIB.
addImm(Subtarget.getStackAlignment().value())
6347 CFIBuilder.buildDefCFAOffset(0);
6348 CFIBuilder.buildRestore(ARM::LR);
6350 CFIBuilder.buildUndefined(ARM::RA_AUTH_CODE);
6364 bool isThumb = Subtarget.isThumb();
6365 unsigned FuncOp =
isThumb ? 2 : 0;
6366 unsigned Opc =
Call->getOperand(FuncOp).isReg()
6367 ?
isThumb ? ARM::tTAILJMPr : ARM::TAILJMPr
6368 :
isThumb ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd
6372 .
add(
Call->getOperand(FuncOp));
6375 Call->eraseFromParent();
6380 return MI.isCall() && !
MI.isReturn();
6388 Et = std::prev(
MBB.end());
6393 if (!
MBB.isLiveIn(ARM::LR))
6394 MBB.addLiveIn(ARM::LR);
6398 saveLROnStack(
MBB, It,
true, Auth);
6403 "Can only fix up stack references once");
6404 fixupPostOutline(
MBB);
6407 restoreLRFromStack(
MBB, Et,
true, Auth);
6427 fixupPostOutline(
MBB);
6436 bool isThumb = Subtarget.isThumb();
6442 ? Subtarget.isTargetMachO() ? ARM::tTAILJMPd : ARM::tTAILJMPdND
6448 It =
MBB.insert(It, MIB);
6462 It =
MBB.insert(It, CallMIB);
6469 Register Reg = findRegisterToSaveLRTo(
C);
6470 assert(Reg != 0 &&
"No callee-saved register available?");
6477 CallPt =
MBB.insert(It, CallMIB);
6485 if (!
MBB.isLiveIn(ARM::LR))
6486 MBB.addLiveIn(ARM::LR);
6489 CallPt =
MBB.insert(It, CallMIB);
6500bool ARMBaseInstrInfo::isReMaterializableImpl(
6534 static int constexpr MAX_STAGES = 30;
6535 static int constexpr LAST_IS_USE = MAX_STAGES;
6536 static int constexpr SEEN_AS_LIVE = MAX_STAGES + 1;
6537 typedef std::bitset<MAX_STAGES + 2> IterNeed;
6538 typedef std::map<Register, IterNeed> IterNeeds;
6541 const IterNeeds &CIN);
6553 : EndLoop(EndLoop), LoopCount(LoopCount),
6555 TII(MF->getSubtarget().getInstrInfo()) {}
6557 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
6559 return MI == EndLoop ||
MI == LoopCount;
6562 bool shouldUseSchedule(SwingSchedulerDAG &SSD, SMSchedule &SMS)
override {
6563 if (tooMuchRegisterPressure(SSD, SMS))
6569 std::optional<bool> createTripCountGreaterCondition(
6570 int TC, MachineBasicBlock &
MBB,
6571 SmallVectorImpl<MachineOperand> &
Cond)
override {
6580 }
else if (EndLoop->
getOpcode() == ARM::t2LoopEnd) {
6583 MachineInstr *LoopDec =
nullptr;
6585 if (
I.getOpcode() == ARM::t2LoopDec)
6587 assert(LoopDec &&
"Unable to find copied LoopDec");
6593 .
addReg(ARM::NoRegister);
6601 void setPreheader(MachineBasicBlock *NewPreheader)
override {}
6603 void adjustTripCount(
int TripCountAdjust)
override {}
6607 const IterNeeds &CIN) {
6609 for (
const auto &
N : CIN) {
6610 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6611 for (
int I = 0;
I < Cnt; ++
I)
6616 for (
const auto &
N : CIN) {
6617 int Cnt =
N.second.count() -
N.second[SEEN_AS_LIVE] * 2;
6618 for (
int I = 0;
I < Cnt; ++
I)
6626 IterNeeds CrossIterationNeeds;
6631 for (
auto &SU : SSD.
SUnits) {
6634 for (
auto &S : SU.Succs)
6638 CrossIterationNeeds[
Reg.
id()].set(0);
6639 }
else if (S.isAssignedRegDep()) {
6641 if (OStg >= 0 && OStg != Stg) {
6644 CrossIterationNeeds[
Reg.
id()] |= ((1 << (OStg - Stg)) - 1);
6653 std::vector<SUnit *> ProposedSchedule;
6657 std::deque<SUnit *> Instrs =
6659 std::sort(Instrs.begin(), Instrs.end(),
6660 [](
SUnit *
A,
SUnit *
B) { return A->NodeNum > B->NodeNum; });
6667 for (
auto *SU : ProposedSchedule)
6671 if (!MO.isReg() || !MO.getReg())
6674 auto CIter = CrossIterationNeeds.find(
Reg.
id());
6675 if (CIter == CrossIterationNeeds.end() || CIter->second[LAST_IS_USE] ||
6676 CIter->second[SEEN_AS_LIVE])
6678 if (MO.isDef() && !MO.isDead())
6679 CIter->second.set(SEEN_AS_LIVE);
6680 else if (MO.isUse())
6681 CIter->second.set(LAST_IS_USE);
6683 for (
auto &CI : CrossIterationNeeds)
6684 CI.second.reset(LAST_IS_USE);
6690 RPTracker.init(MF, &RegClassInfo,
nullptr, EndLoop->
getParent(),
6693 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6695 for (
auto *SU : ProposedSchedule) {
6697 RPTracker.setPos(std::next(CurInstI));
6703 if (!MO.isReg() || !MO.getReg())
6706 if (MO.isDef() && !MO.isDead()) {
6707 auto CIter = CrossIterationNeeds.find(
Reg.
id());
6708 if (CIter != CrossIterationNeeds.end()) {
6709 CIter->second.reset(0);
6710 CIter->second.reset(SEEN_AS_LIVE);
6714 for (
auto &S : SU->Preds) {
6716 if (S.isAssignedRegDep()) {
6718 auto CIter = CrossIterationNeeds.find(
Reg.
id());
6719 if (CIter != CrossIterationNeeds.end()) {
6721 assert(Stg2 <= Stg &&
"Data dependence upon earlier stage");
6722 if (Stg - Stg2 < MAX_STAGES)
6723 CIter->second.set(Stg - Stg2);
6724 CIter->second.set(SEEN_AS_LIVE);
6729 bumpCrossIterationPressure(RPTracker, CrossIterationNeeds);
6732 auto &
P = RPTracker.getPressure().MaxSetPressure;
6733 for (
unsigned I = 0,
E =
P.size();
I <
E; ++
I) {
6735 if (
I == ARM::DQuad_with_ssub_0 ||
I == ARM::DTripleSpc_with_ssub_0 ||
6736 I == ARM::DTriple_with_qsub_0_in_QPR)
6748std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
6752 if (Preheader == LoopBB)
6753 Preheader = *std::next(LoopBB->
pred_begin());
6755 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2Bcc) {
6761 for (
auto &L : LoopBB->
instrs()) {
6768 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, CCSetter);
6782 if (
I != LoopBB->
end() &&
I->getOpcode() == ARM::t2LoopEnd) {
6783 for (
auto &L : LoopBB->
instrs())
6788 Register LoopDecResult =
I->getOperand(0).getReg();
6791 if (!LoopDec || LoopDec->
getOpcode() != ARM::t2LoopDec)
6794 for (
auto &J : Preheader->
instrs())
6795 if (J.getOpcode() == ARM::t2DoLoopStart)
6799 return std::make_unique<ARMPipelinerLoopInfo>(&*
I, LoopDec);
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineOutlinerClass
Constants defining how certain sequences should be outlined.
@ MachineOutlinerTailCall
Emit a save, restore, call, and return.
@ MachineOutlinerRegSave
Emit a call and tail-call.
@ MachineOutlinerNoLRSave
Only emit a branch.
@ MachineOutlinerThunk
Emit a call and return.
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
static bool isThumb(const MCSubtargetInfo &STI)
static bool getImplicitSPRUseForDPRUse(const TargetRegisterInfo *TRI, MachineInstr &MI, MCRegister DReg, unsigned Lane, MCRegister &ImplicitSReg)
getImplicitSPRUseForDPRUse - Given a use of a DPR register and lane, set ImplicitSReg to a register n...
static const MachineInstr * getBundledUseMI(const TargetRegisterInfo *TRI, const MachineInstr &MI, unsigned Reg, unsigned &UseIdx, unsigned &Dist)
static unsigned duplicateCPV(MachineFunction &MF, unsigned &CPI)
Create a copy of a const pool value.
static bool isSuitableForMask(MachineInstr *&MI, Register SrcReg, int CmpMask, bool CommonUse)
isSuitableForMask - Identify a suitable 'and' instruction that operates on the given source register ...
static int adjustDefLatency(const ARMSubtarget &Subtarget, const MachineInstr &DefMI, const MCInstrDesc &DefMCID, unsigned DefAlign)
Return the number of cycles to add to (or subtract from) the static itinerary based on the def opcode...
static unsigned getNumMicroOpsSwiftLdSt(const InstrItineraryData *ItinData, const MachineInstr &MI)
static MCRegister getCorrespondingDRegAndLane(const TargetRegisterInfo *TRI, unsigned SReg, unsigned &Lane)
static const AddSubFlagsOpcodePair AddSubFlagsOpcodeMap[]
static bool isEligibleForITBlock(const MachineInstr *MI)
static ARMCC::CondCodes getCmpToAddCondition(ARMCC::CondCodes CC)
getCmpToAddCondition - assume the flags are set by CMP(a,b), return the condition code if we modify t...
static bool isOptimizeCompareCandidate(MachineInstr *MI, bool &IsThumb1)
static bool isLRAvailable(const TargetRegisterInfo &TRI, MachineBasicBlock::reverse_iterator I, MachineBasicBlock::reverse_iterator E)
static const ARM_MLxEntry ARM_MLxTable[]
static bool isRedundantFlagInstr(const MachineInstr *CmpI, Register SrcReg, Register SrcReg2, int64_t ImmValue, const MachineInstr *OI, bool &IsThumb1)
isRedundantFlagInstr - check whether the first instruction, whose only purpose is to update flags,...
static unsigned getNumMicroOpsSingleIssuePlusExtras(unsigned Opc, unsigned NumRegs)
static const MachineInstr * getBundledDefMI(const TargetRegisterInfo *TRI, const MachineInstr *MI, unsigned Reg, unsigned &DefIdx, unsigned &Dist)
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
This file contains the simple types necessary to represent the attributes associated with functions a...
static const Function * getParent(const Value *V)
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
DXIL Forward Handle Accesses
This file defines the DenseMap class.
const HexagonInstrInfo * TII
Module.h This file contains the declarations for the Module class.
const size_t AbstractManglingParser< Derived, Alloc >::NumOps
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
TargetInstrInfo::RegSubRegPair RegSubRegPair
Register const TargetRegisterInfo * TRI
Promote Memory to Register
static MCRegister getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
MachineInstr unsigned OpIdx
uint64_t IntrinsicInst * II
PowerPC TLS Dynamic Call Fixup
TargetInstrInfo::RegSubRegPairAndIdx RegSubRegPairAndIdx
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
This file defines the SmallSet class.
This file defines the SmallVector class.
static X86::CondCode getSwappedCondition(X86::CondCode CC)
Assuming the flags are set by MI(a,b), return the condition code if we modify the instructions such t...
static bool isCPSRDefined(const MachineInstr &MI)
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t CmpMask, int64_t CmpValue, const MachineRegisterInfo *MRI) const override
optimizeCompareInstr - Convert the instruction to set the zero flag so that we can remove a "comparis...
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, Register DestReg, Register SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
foldImmediate - 'Reg' is known to be defined by a move immediate instruction, try to fold the immedia...
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, Register VReg, unsigned SubReg=0, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
void copyFromCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MCRegister DestReg, bool KillSrc, const ARMSubtarget &Subtarget) const
unsigned getNumMicroOps(const InstrItineraryData *ItinData, const MachineInstr &MI) const override
std::optional< RegImmPair > isAddImmediate(const MachineInstr &MI, Register Reg) const override
unsigned getPartialRegUpdateClearance(const MachineInstr &, unsigned, const TargetRegisterInfo *) const override
unsigned getNumLDMAddresses(const MachineInstr &MI) const
Get the number of addresses by LDM or VLDM or zero for unknown.
MachineInstr * optimizeSelect(MachineInstr &MI, SmallPtrSetImpl< MachineInstr * > &SeenMIs, bool) const override
bool produceSameValue(const MachineInstr &MI0, const MachineInstr &MI1, const MachineRegisterInfo *MRI) const override
void setExecutionDomain(MachineInstr &MI, unsigned Domain) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const override
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Returns the size of the specified MachineInstr.
void copyToCPSR(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, MCRegister SrcReg, bool KillSrc, const ARMSubtarget &Subtarget) const
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
void mergeOutliningCandidateAttributes(Function &F, std::vector< outliner::Candidate > &Candidates) const override
const MachineInstrBuilder & AddDReg(MachineInstrBuilder &MIB, unsigned Reg, unsigned SubIdx, RegState State) const
bool isFunctionSafeToOutlineFrom(MachineFunction &MF, bool OutlineFromLinkOnceODRs) const override
ARM supports the MachineOutliner.
bool shouldOutlineFromFunctionByDefault(MachineFunction &MF) const override
Enable outlining by default at -Oz.
std::optional< DestSourcePair > isCopyInstrImpl(const MachineInstr &MI) const override
If the specific machine instruction is an instruction that moves/copies value from one register to an...
MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const override
ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *II, const ScheduleDAGMI *DAG) const override
MachineBasicBlock::iterator insertOutlinedCall(Module &M, MachineBasicBlock &MBB, MachineBasicBlock::iterator &It, MachineFunction &MF, outliner::Candidate &C) const override
std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const override
bool isPredicated(const MachineInstr &MI) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
void expandLoadStackGuardBase(MachineBasicBlock::iterator MI, unsigned LoadImmOpc, unsigned LoadOpc) const
bool isPredicable(const MachineInstr &MI) const override
isPredicable - Return true if the specified instruction can be predicated.
Register isLoadFromStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const override
Specialization of TargetInstrInfo::describeLoadedValue, used to enhance debug entry value description...
std::optional< std::unique_ptr< outliner::OutlinedFunction > > getOutliningCandidateInfo(const MachineModuleInfo &MMI, std::vector< outliner::Candidate > &RepeatedSequenceLocs, unsigned MinRepeats) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
unsigned extraSizeToPredicateInstructions(const MachineFunction &MF, unsigned NumInsts) const override
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
const ARMBaseRegisterInfo & getRegisterInfo() const
void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, Register DestReg, unsigned SubIdx, const MachineInstr &Orig) const override
bool areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, int64_t &Offset1, int64_t &Offset2) const override
areLoadsFromSameBasePtr - This is used by the pre-regalloc scheduler to determine if two loads are lo...
std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
bool getRegSequenceLikeInputs(const MachineInstr &MI, unsigned DefIdx, SmallVectorImpl< RegSubRegPairAndIdx > &InputRegs) const override
Build the equivalent inputs of a REG_SEQUENCE for the given MI and DefIdx.
unsigned predictBranchSizeForIfCvt(MachineInstr &MI) const override
bool getInsertSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPair &BaseReg, RegSubRegPairAndIdx &InsertedReg) const override
Build the equivalent inputs of a INSERT_SUBREG for the given MI and DefIdx.
bool expandPostRAPseudo(MachineInstr &MI) const override
outliner::InstrType getOutliningTypeImpl(const MachineModuleInfo &MMI, MachineBasicBlock::iterator &MIT, unsigned Flags) const override
bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const override
bool shouldScheduleLoadsNear(SDNode *Load1, SDNode *Load2, int64_t Offset1, int64_t Offset2, unsigned NumLoads) const override
shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to determine (in conjunction w...
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const override
std::pair< uint16_t, uint16_t > getExecutionDomain(const MachineInstr &MI) const override
VFP/NEON execution domains.
bool isProfitableToUnpredicate(MachineBasicBlock &TMBB, MachineBasicBlock &FMBB) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isFpMLxInstruction(unsigned Opcode) const
isFpMLxInstruction - Return true if the specified opcode is a fp MLA / MLS instruction.
bool isSwiftFastImmShift(const MachineInstr *MI) const
Returns true if the instruction has a shift by immediate that can be executed in one cycle less.
ARMBaseInstrInfo(const ARMSubtarget &STI, const ARMBaseRegisterInfo &TRI)
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
Register isStoreToStackSlotPostFE(const MachineInstr &MI, int &FrameIndex) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &CmpMask, int64_t &CmpValue) const override
analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2 if h...
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
void breakPartialRegDependency(MachineInstr &, unsigned, const TargetRegisterInfo *TRI) const override
bool isMBBSafeToOutlineFrom(MachineBasicBlock &MBB, unsigned &Flags) const override
void buildOutlinedFrame(MachineBasicBlock &MBB, MachineFunction &MF, const outliner::OutlinedFunction &OF) const override
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
const ARMSubtarget & getSubtarget() const
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Commutes the operands in the given instruction.
bool getExtractSubregLikeInputs(const MachineInstr &MI, unsigned DefIdx, RegSubRegPairAndIdx &InputReg) const override
Build the equivalent inputs of a EXTRACT_SUBREG for the given MI and DefIdx.
bool shouldSink(const MachineInstr &MI) const override
BitVector getReservedRegs(const MachineFunction &MF) const override
static ARMConstantPoolConstant * Create(const Constant *C, unsigned ID)
static ARMConstantPoolMBB * Create(LLVMContext &C, const MachineBasicBlock *mbb, unsigned ID, unsigned char PCAdj)
static ARMConstantPoolSymbol * Create(LLVMContext &C, StringRef s, unsigned ID, unsigned char PCAdj)
ARMConstantPoolValue - ARM specific constantpool value.
bool isMachineBasicBlock() const
bool isGlobalValue() const
ARMCP::ARMCPModifier getModifier() const
bool mustAddCurrentAddress() const
virtual bool hasSameValue(ARMConstantPoolValue *ACPV)
hasSameValue - Return true if this ARM constpool value can share the same constantpool entry as anoth...
bool isBlockAddress() const
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
bool isThumb2Function() const
bool branchTargetEnforcement() const
unsigned createPICLabelUId()
bool isThumb1OnlyFunction() const
bool isThumbFunction() const
bool shouldSignReturnAddress() const
const ARMBaseInstrInfo * getInstrInfo() const override
bool isThumb1Only() const
Align getStackAlignment() const
getStackAlignment - Returns the minimum alignment known to hold of the stack frame on entry to the fu...
bool enableMachinePipeliner() const override
Returns true if machine pipeliner should be enabled.
@ DoubleIssueCheckUnalignedAccess
Can load/store 2 registers/cycle, but needs an extra cycle if the access is not 64-bit aligned.
@ SingleIssue
Can load/store 1 register/cycle.
@ DoubleIssue
Can load/store 2 registers/cycle.
@ SingleIssuePlusExtras
Can load/store 1 register/cycle, but needs an extra cycle for address computation and potentially als...
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
size_t size() const
size - Get the array size.
bool test(unsigned Idx) const
size_type size() const
size - Returns the number of bits in this bitvector.
LLVM_ABI uint64_t scale(uint64_t Num) const
Scale a large integer.
BranchProbability getCompl() const
Helper class for creating CFI instructions and inserting them into MIR.
void buildRegister(MCRegister Reg1, MCRegister Reg2) const
void buildRestore(MCRegister Reg) const
ConstMIBundleOperands - Iterate over all operands in a const bundle of machine instructions.
DenseMapIterator< KeyT, ValueT, KeyInfoT, BucketT, true > const_iterator
bool hasOptSize() const
Optimize this function for size (-Os) or minimum size (-Oz).
Attribute getFnAttribute(Attribute::AttrKind Kind) const
Return the attribute for the given attribute kind.
bool hasMinSize() const
Optimize this function for minimum size (-Oz).
LLVMContext & getContext() const
getContext - Return a reference to the LLVMContext associated with this function.
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
bool hasDLLImportStorageClass() const
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
Reverses the branch condition of the specified condition list, returning false on success and true if...
Itinerary data supplied by a subtarget to be used by a target.
int getNumMicroOps(unsigned ItinClassIndx) const
Return the number of micro-ops that the given class decodes to.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
unsigned getStageLatency(unsigned ItinClassIndx) const
Return the total stage latency of the given class.
std::optional< unsigned > getOperandLatency(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Compute and return the use operand latency of a given itinerary class and operand index if the value ...
bool hasPipelineForwarding(unsigned DefClass, unsigned DefIdx, unsigned UseClass, unsigned UseIdx) const
Return true if there is a pipeline forwarding between instructions of itinerary classes DefClass and ...
bool isEmpty() const
Returns true if there are no itineraries.
A set of register units used to track register liveness.
bool available(MCRegister Reg) const
Returns true if no part of physical register Reg is live.
LLVM_ABI void addLiveOuts(const MachineBasicBlock &MBB)
Adds registers living out of block MBB.
LLVM_ABI void accumulate(const MachineInstr &MI)
Adds all register units used, defined or clobbered in MI.
This class is intended to be used as a base class for asm properties and features specific to the tar...
Describe properties that are true of each instruction in the target description file.
unsigned getSchedClass() const
Return the scheduling class for this instruction.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
bool mayLoad() const
Return true if this instruction could possibly read memory.
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
unsigned getNumDefs() const
Return the number of MachineOperands that are register definitions.
bool isCall() const
Return true if the instruction is a call.
unsigned getOpcode() const
Return the opcode number for this descriptor.
LLVM_ABI bool hasImplicitDefOfPhysReg(MCRegister Reg, const MCRegisterInfo *MRI=nullptr) const
Return true if this instruction implicitly defines the specified physical register.
Wrapper class representing physical registers. Should be passed by value.
bool isValid() const
isValid - Returns true until all the operands have been visited.
unsigned pred_size() const
MachineInstrBundleIterator< const MachineInstr > const_iterator
LLVM_ABI instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LLVM_ABI iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
Instructions::iterator instr_iterator
pred_iterator pred_begin()
MachineInstrBundleIterator< MachineInstr, true > reverse_iterator
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
LLVM_ABI instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
MachineInstrBundleIterator< MachineInstr > iterator
LivenessQueryResult
Possible outcome of a register liveness query to computeRegisterLiveness()
@ LQR_Dead
Register is known to be fully dead.
@ LQR_Live
Register is known to be (at least partially) live.
@ LQR_Unknown
Register liveness not decidable from local neighborhood.
This class is a data container for one entry in a MachineConstantPool.
union llvm::MachineConstantPoolEntry::@004270020304201266316354007027341142157160323045 Val
The constant itself.
bool isMachineConstantPoolEntry() const
isMachineConstantPoolEntry - Return true if the MachineConstantPoolEntry is indeed a target specific ...
MachineConstantPoolValue * MachineCPVal
const Constant * ConstVal
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
const std::vector< MachineConstantPoolEntry > & getConstants() const
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getStackSize() const
Return the number of bytes that must be allocated to hold all of the fixed size frame objects.
bool isCalleeSavedInfoValid() const
Has the callee saved info been calculated yet?
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
unsigned getNumObjects() const
Return the number of objects.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
StringRef getName() const
getName - Return the name of the corresponding LLVM function.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, LLT MemTy, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addReg(Register RegNo, RegState Flags={}, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
ArrayRef< MachineMemOperand * >::iterator mmo_iterator
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool isImplicitDef() const
const MachineBasicBlock * getParent() const
bool isCopyLike() const
Return true if the instruction behaves like a copy.
bool isCall(QueryType Type=AnyInBundle) const
unsigned getNumOperands() const
Retuns the total number of operands.
LLVM_ABI int findFirstPredOperandIdx() const
Find the index of the first operand in the operand list that is used to represent the predicate.
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
bool isRegSequence() const
bool isInsertSubreg() const
LLVM_ABI void tieOperands(unsigned DefIdx, unsigned UseIdx)
Add a tie between the register operands at DefIdx and UseIdx.
LLVM_ABI bool isIdenticalTo(const MachineInstr &Other, MICheckType Check=CheckDefs) const
Return true if this instruction is identical to Other.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
LLVM_ABI void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
LLVM_ABI bool addRegisterKilled(Register IncomingReg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI kills a register.
bool hasOptionalDef(QueryType Type=IgnoreBundle) const
Set if this instruction has an optional definition, e.g.
LLVM_ABI void addRegisterDefined(Register Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
const MachineOperand & getOperand(unsigned i) const
LLVM_ABI void clearKillInfo()
Clears kill flags on all operands.
A description of a memory reference used in the backend.
@ MODereferenceable
The memory access is dereferenceable (i.e., doesn't trap).
@ MOLoad
The memory access reads data.
@ MOInvariant
The memory access always returns the same value (or traps).
@ MOStore
The memory access writes data.
This class contains meta information specific to a module.
LLVM_ABI MachineFunction * getMachineFunction(const Function &F) const
Returns the MachineFunction associated to IR function F if there is one, otherwise nullptr.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
const GlobalValue * getGlobal() const
void setImplicit(bool Val=true)
void setImm(int64_t immVal)
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isRegMask() const
isRegMask - Tests if this is a MO_RegisterMask operand.
MachineBasicBlock * getMBB() const
LLVM_ABI void ChangeToImmediate(int64_t ImmVal, unsigned TargetFlags=0)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value.
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
LLVM_ABI bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static bool clobbersPhysReg(const uint32_t *RegMask, MCRegister PhysReg)
clobbersPhysReg - Returns true if this RegMask clobbers PhysReg.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
defusechain_instr_iterator< true, false, false, true > use_instr_iterator
use_instr_iterator/use_instr_begin/use_instr_end - Walk all uses of the specified register,...
const TargetRegisterInfo * getTargetRegisterInfo() const
A Module instance is used to store all the information related to an LLVM module.
void AddHazardRecognizer(std::unique_ptr< ScheduleHazardRecognizer > &&)
Track the current register pressure at some position in the instruction stream, and remember the high water mark.
LLVM_ABI void increaseRegPressure(VirtRegOrUnit VRegOrUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
LLVM_ABI void decreaseRegPressure(VirtRegOrUnit VRegOrUnit, LaneBitmask PreviousMask, LaneBitmask NewMask)
unsigned getRegPressureSetLimit(unsigned Idx) const
Get the register unit limit for the given pressure set index.
LLVM_ABI void runOnMachineFunction(const MachineFunction &MF, bool Rev=false)
runOnFunction - Prepare to answer questions about MF.
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
constexpr unsigned id() const
constexpr bool isPhysical() const
Return true if the specified register number is in the physical register namespace.
Represents one node in the SelectionDAG.
bool isMachineOpcode() const
Test if this node has a post-isel opcode, directly corresponding to a MachineInstr opcode.
unsigned getMachineOpcode() const
This may only be called if isMachineOpcode returns true.
const SDValue & getOperand(unsigned Num) const
uint64_t getConstantOperandVal(unsigned Num) const
Helper method returns the integer value of a ConstantSDNode operand.
@ Anti
A register anti-dependence (aka WAR).
This class represents the scheduled code.
unsigned getMaxStageCount()
Return the maximum stage count needed for this schedule.
int stageScheduled(SUnit *SU) const
Return the stage for a scheduled instruction.
int getInitiationInterval() const
Return the initiation interval for this schedule.
std::deque< SUnit * > & getInstructions(int cycle)
Return the instructions that are scheduled at the specified cycle.
int getFirstCycle() const
Return the first cycle in the completed schedule.
int getFinalCycle() const
Return the last cycle in the finalized schedule.
Scheduling unit. This is a node in the scheduling DAG.
ScheduleDAGMI is an implementation of ScheduleDAGInstrs that simply schedules machine instructions ac...
virtual bool hasVRegLiveness() const
Return true if this DAG supports VReg liveness and RegPressure.
std::vector< SUnit > SUnits
The scheduling units.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
A templated base class for SmallPtrSet which provides the typesafe interface that is common across all SmallPtrSet instances.
bool erase(PtrType Ptr)
Remove pointer from the set.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less than N).
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StringRef - Represent a constant reference to a string, i.e.
This class builds the dependence graph for the instructions in a loop, and attempts to schedule the i...
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual ScheduleHazardRecognizer * CreateTargetMIHazardRecognizer(const InstrItineraryData *, const ScheduleDAGMI *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual std::optional< ParamLoadedValue > describeLoadedValue(const MachineInstr &MI, Register Reg) const
Produce the expression describing the MI loading a value into the physical register Reg.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual bool isReMaterializableImpl(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual MachineInstr & duplicate(MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertBefore, const MachineInstr &Orig) const
Clones instruction or the whole instruction bundle Orig and insert into MBB before InsertBefore.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
virtual std::string createMIROperandComment(const MachineInstr &MI, const MachineOperand &Op, unsigned OpIdx, const TargetRegisterInfo *TRI) const
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
MCRegister getRegister(unsigned i) const
Return the specified register in the class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
Provide an instruction scheduling machine model to CodeGen passes.
LLVM_ABI unsigned computeOperandLatency(const MachineInstr *DefMI, unsigned DefOperIdx, const MachineInstr *UseMI, unsigned UseOperIdx) const
Compute operand latency based on the available machine model.
const InstrItineraryData * getInstrItineraries() const
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetRegisterInfo * getRegisterInfo() const =0
Return the target's register information.
Wrapper class representing a virtual register or register unit.
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr char Align[]
Key for Kernel::Arg::Metadata::mAlign.
static CondCodes getOppositeCondition(CondCodes CC)
ARMII - This namespace holds all of the target specific flags that instruction info tracks.
@ MO_OPTION_MASK
MO_OPTION_MASK - Most flags are mutually exclusive; this mask selects just that part of the flag set.
@ MO_NONLAZY
MO_NONLAZY - This is an independent flag, on a symbol operand "FOO" it represents a symbol which,...
@ MO_DLLIMPORT
MO_DLLIMPORT - On a symbol operand, this represents that the reference to the symbol is for an import...
@ MO_GOT
MO_GOT - On a symbol operand, this represents a GOT relative relocation.
@ MO_COFFSTUB
MO_COFFSTUB - On a symbol operand "FOO", this indicates that the reference is actually to the ".refptr.FOO" symbol.
AddrMode
ARM Addressing Modes.
unsigned char getAM3Offset(unsigned AM3Opc)
unsigned char getAM5FP16Offset(unsigned AM5Opc)
unsigned getSORegOffset(unsigned Op)
int getSOImmVal(unsigned Arg)
getSOImmVal - Given a 32-bit immediate, if it is something that can fit into an shifter_operand immed...
ShiftOpc getAM2ShiftOpc(unsigned AM2Opc)
unsigned getAM2Offset(unsigned AM2Opc)
unsigned getSOImmValRotate(unsigned Imm)
getSOImmValRotate - Try to handle Imm with an immediate shifter operand, computing the rotate amount ...
bool isThumbImmShiftedVal(unsigned V)
isThumbImmShiftedVal - Return true if the specified value can be obtained by left shifting a 8-bit im...
int getT2SOImmVal(unsigned Arg)
getT2SOImmVal - Given a 32-bit immediate, if it is something that can fit into a Thumb-2 shifter_oper...
ShiftOpc getSORegShOp(unsigned Op)
AddrOpc getAM5Op(unsigned AM5Opc)
bool isSOImmTwoPartValNeg(unsigned V)
isSOImmTwoPartValNeg - Return true if the specified value can be obtained by two SOImmVal,...
unsigned getSOImmTwoPartSecond(unsigned V)
getSOImmTwoPartSecond - If V is a value that satisfies isSOImmTwoPartVal, return the second chunk of ...
bool isSOImmTwoPartVal(unsigned V)
isSOImmTwoPartVal - Return true if the specified value can be obtained by or'ing together two SOImmVa...
AddrOpc getAM5FP16Op(unsigned AM5Opc)
unsigned getT2SOImmTwoPartSecond(unsigned Imm)
unsigned getT2SOImmTwoPartFirst(unsigned Imm)
bool isT2SOImmTwoPartVal(unsigned Imm)
unsigned char getAM5Offset(unsigned AM5Opc)
unsigned getSOImmTwoPartFirst(unsigned V)
getSOImmTwoPartFirst - If V is a value that satisfies isSOImmTwoPartVal, return the first chunk of it...
AddrOpc getAM2Op(unsigned AM2Opc)
AddrOpc getAM3Op(unsigned AM3Opc)
Define some predicates that are used for node matching.
@ C
The default llvm calling convention, compatible with C.
InstrType
Represents how an instruction should be mapped by the outliner.
NodeAddr< NodeBase * > Node
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
constexpr T rotr(T V, int R)
static bool isIndirectCall(const MachineInstr &MI)
MachineInstr * findCMPToFoldIntoCBZ(MachineInstr *Br, const TargetRegisterInfo *TRI)
Search backwards from a tBcc to find a tCMPi8 against 0, meaning we can convert them to a tCBZ or tCBNZ.
static bool isCondBranchOpcode(int Opc)
bool HasLowerConstantMaterializationCost(unsigned Val1, unsigned Val2, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns true if Val1 has a lower Constant Materialization Cost than Val2.
static bool isPushOpcode(int Opc)
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
void addPredicatedMveVpredNOp(MachineInstrBuilder &MIB, unsigned Cond)
static bool isVCTP(const MachineInstr *MI)
RegState
Flags to represent properties of register accesses.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
@ Define
Register definition.
bool IsCPSRDead< MachineInstr >(const MachineInstr *MI)
constexpr RegState getKillRegState(bool B)
unsigned getBLXpredOpcode(const MachineFunction &MF)
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
static bool isARMLowRegister(MCRegister Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
static bool isIndirectBranchOpcode(int Opc)
bool isLegalAddressImm(unsigned Opcode, int Imm, const TargetInstrInfo *TII)
void append_range(Container &C, Range &&R)
Wrapper function to append range R to container C.
bool registerDefinedBetween(unsigned Reg, MachineBasicBlock::iterator From, MachineBasicBlock::iterator To, const TargetRegisterInfo *TRI)
Return true if Reg is defd between From and To.
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
static bool isSEHInstruction(const MachineInstr &MI)
static bool isCalleeSavedRegister(MCRegister Reg, const MCPhysReg *CSRegs)
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
bool tryFoldSPUpdateIntoPushPop(const ARMSubtarget &Subtarget, MachineFunction &MF, MachineInstr *MI, unsigned NumBytes)
Tries to add registers to the reglist of a given base-updating push/pop instruction to adjust the sta...
auto reverse(ContainerTy &&C)
static bool isJumpTableBranchOpcode(int Opc)
MachineInstr * getImm(const MachineOperand &MO, const MachineRegisterInfo *MRI)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
void sort(IteratorTy Start, IteratorTy End)
LLVM_ABI raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
static bool isPopOpcode(int Opc)
class LLVM_GSL_OWNER SmallVector
Forward declaration of SmallVector so that calculateSmallVectorDefaultInlinedElements can reference s...
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
void addPredicatedMveVpredROp(MachineInstrBuilder &MIB, unsigned Cond, unsigned Inactive)
auto drop_end(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the last N elements excluded.
constexpr T divideCeil(U Numerator, V Denominator)
Returns the integer ceil(Numerator / Denominator).
void addUnpredicatedMveVpredROp(MachineInstrBuilder &MIB, Register DestReg)
unsigned ConstantMaterializationCost(unsigned Val, const ARMSubtarget *Subtarget, bool ForCodesize=false)
Returns the number of instructions required to materialize the given constant in a register,...
uint16_t MCPhysReg
An unsigned integer type large enough to represent all physical registers, but not necessarily virtual registers.
bool rewriteARMFrameIndex(MachineInstr &MI, unsigned FrameRegIdx, Register FrameReg, int &Offset, const ARMBaseInstrInfo &TII)
rewriteARMFrameIndex / rewriteT2FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
static bool isIndirectControlFlowNotComingBack(const MachineInstr &MI)
uint64_t alignTo(uint64_t Size, Align A)
Returns a multiple of A needed to store Size bytes.
FunctionAddr VTableAddr Next
ARMCC::CondCodes getInstrPredicate(const MachineInstr &MI, Register &PredReg)
getInstrPredicate - If instruction is predicated, returns its predicate condition,...
unsigned getMatchingCondBranchOpcode(unsigned Opc)
DWARFExpression::Operation Op
ArrayRef(const T &OneElt) -> ArrayRef< T >
static bool isUncondBranchOpcode(int Opc)
auto partition(R &&Range, UnaryPredicate P)
Provide wrappers to std::partition which take ranges instead of having to pass begin/end explicitly.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
static const char * ARMCondCodeToString(ARMCC::CondCodes CC)
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
unsigned gettBLXrOpcode(const MachineFunction &MF)
static bool isSpeculationBarrierEndBBOpcode(int Opc)
unsigned getBLXOpcode(const MachineFunction &MF)
void addUnpredicatedMveVpredNOp(MachineInstrBuilder &MIB)
bool isV8EligibleForIT(const InstrType *Instr)
void emitARMRegPlusImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, const DebugLoc &dl, Register DestReg, Register BaseReg, int NumBytes, ARMCC::CondCodes Pred, Register PredReg, const ARMBaseInstrInfo &TII, unsigned MIFlags=0)
emitARMRegPlusImmediate / emitT2RegPlusImmediate - Emits a series of instructions to materializea des...
constexpr RegState getUndefRegState(bool B)
unsigned convertAddSubFlagsOpcode(unsigned OldOpc)
Map pseudo instructions that imply an 'S' bit onto real opcodes.
ARM_MLxEntry - Record information about MLA / MLS instructions.
Map pseudo instructions that imply an 'S' bit onto real opcodes.
OutlinerCosts(const ARMSubtarget &target)
This struct is a compact representation of a valid (non-zero power of two) alignment.
constexpr uint64_t value() const
This is a hole in the type system and should not be abused.
static constexpr LaneBitmask getAll()
static constexpr LaneBitmask getNone()
static LLVM_ABI MachinePointerInfo getGOT(MachineFunction &MF)
Return a MachinePointerInfo record that refers to a GOT entry.
static LLVM_ABI MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Used to describe a register and immediate addition.
RegisterPressure computed within a region of instructions delimited by TopPos and BottomPos.
An individual sequence of instructions to be replaced with a call to an outlined function.
The information necessary to create an outlined function for some class of candidate.