// Tag used by the LLVM_DEBUG/STATISTIC machinery to group this pass's output.
#define DEBUG_TYPE "arm-ldst-opt"
76STATISTIC(NumLDMGened ,
"Number of ldm instructions generated");
77STATISTIC(NumSTMGened ,
"Number of stm instructions generated");
78STATISTIC(NumVLDMGened,
"Number of vldm instructions generated");
79STATISTIC(NumVSTMGened,
"Number of vstm instructions generated");
80STATISTIC(NumLdStMoved,
"Number of load / store instructions moved");
81STATISTIC(NumLDRDFormed,
"Number of ldrd created before allocation");
82STATISTIC(NumSTRDFormed,
"Number of strd created before allocation");
83STATISTIC(NumLDRD2LDM,
"Number of ldrd instructions turned back into ldm");
84STATISTIC(NumSTRD2STM,
"Number of strd instructions turned back into stm");
85STATISTIC(NumLDRD2LDR,
"Number of ldrd instructions turned back into ldr's");
86STATISTIC(NumSTRD2STR,
"Number of strd instructions turned back into str's");
/// Human-readable name registered for the post-RA load/store optimization pass.
#define ARM_LOAD_STORE_OPT_NAME "ARM load / store optimization pass"
116 bool RegClassInfoValid;
117 bool isThumb1, isThumb2;
125 MachineFunctionProperties::Property::NoVRegs);
133 struct MemOpQueueEntry {
139 :
MI(&
MI), Offset(Offset), Position(Position) {}
145 struct MergeCandidate {
150 unsigned LatestMIIdx;
153 unsigned EarliestMIIdx;
160 bool CanMergeToLSMulti;
163 bool CanMergeToLSDouble;
174 unsigned Base,
unsigned WordOffset,
178 int Offset,
unsigned Base,
bool BaseKill,
unsigned Opcode,
180 ArrayRef<std::pair<unsigned, bool>> Regs,
184 int Offset,
unsigned Base,
bool BaseKill,
unsigned Opcode,
186 ArrayRef<std::pair<unsigned, bool>> Regs,
188 void FormCandidates(
const MemOpQueue &MemOps);
189 MachineInstr *MergeOpsUpdate(
const MergeCandidate &Cand);
202char ARMLoadStoreOpt::ID = 0;
208 for (
const auto &MO :
MI.operands()) {
211 if (MO.isDef() && MO.getReg() == ARM::CPSR && !MO.isDead())
221 unsigned Opcode =
MI.getOpcode();
222 bool isAM3 = Opcode == ARM::LDRD || Opcode == ARM::STRD;
223 unsigned NumOperands =
MI.getDesc().getNumOperands();
224 unsigned OffField =
MI.getOperand(NumOperands - 3).getImm();
226 if (Opcode == ARM::t2LDRi12 || Opcode == ARM::t2LDRi8 ||
227 Opcode == ARM::t2STRi12 || Opcode == ARM::t2STRi8 ||
228 Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8 ||
229 Opcode == ARM::LDRi12 || Opcode == ARM::STRi12)
233 if (Opcode == ARM::tLDRi || Opcode == ARM::tSTRi ||
234 Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi)
249 return MI.getOperand(1);
253 return MI.getOperand(0);
350 case ARM::tLDMIA_UPD:
351 case ARM::tSTMIA_UPD:
352 case ARM::t2LDMIA_RET:
354 case ARM::t2LDMIA_UPD:
356 case ARM::t2STMIA_UPD:
358 case ARM::VLDMSIA_UPD:
360 case ARM::VSTMSIA_UPD:
362 case ARM::VLDMDIA_UPD:
364 case ARM::VSTMDIA_UPD:
378 case ARM::t2LDMDB_UPD:
380 case ARM::t2STMDB_UPD:
381 case ARM::VLDMSDB_UPD:
382 case ARM::VSTMSDB_UPD:
383 case ARM::VLDMDDB_UPD:
384 case ARM::VSTMDDB_UPD:
396 return Opc == ARM::tLDRi || Opc == ARM::tLDRspi;
400 return Opc == ARM::t2LDRi12 || Opc == ARM::t2LDRi8;
408 return Opc == ARM::tSTRi || Opc == ARM::tSTRspi;
412 return Opc == ARM::t2STRi12 || Opc == ARM::t2STRi8;
420 return isi32Load(Opc) || Opc == ARM::VLDRS || Opc == ARM::VLDRD;
441 switch (
MI->getOpcode()) {
468 case ARM::tLDMIA_UPD:
469 case ARM::tSTMIA_UPD:
476 return (
MI->getNumOperands() -
MI->getDesc().getNumOperands() + 1) * 4;
479 return (
MI->getNumOperands() -
MI->getDesc().getNumOperands() + 1) * 8;
491 assert(isThumb1 &&
"Can only update base register uses for Thumb1!");
495 bool InsertSub =
false;
496 unsigned Opc =
MBBI->getOpcode();
501 Opc == ARM::tLDRi || Opc == ARM::tLDRHi || Opc == ARM::tLDRBi;
503 Opc == ARM::tSTRi || Opc == ARM::tSTRHi || Opc == ARM::tSTRBi;
505 if (IsLoad || IsStore) {
511 MBBI->getOperand(
MBBI->getDesc().getNumOperands() - 3);
518 if (
Offset >= 0 && !(IsStore && InstrSrcReg ==
Base))
522 }
else if ((Opc == ARM::tSUBi8 || Opc == ARM::tADDi8) &&
523 !definesCPSR(*
MBBI)) {
528 MBBI->getOperand(
MBBI->getDesc().getNumOperands() - 3);
529 Offset = (Opc == ARM::tSUBi8) ?
530 MO.
getImm() + WordOffset * 4 :
531 MO.
getImm() - WordOffset * 4 ;
545 }
else if (definesCPSR(*
MBBI) ||
MBBI->isCall() ||
MBBI->isBranch()) {
586 if (!RegClassInfoValid) {
587 RegClassInfo.runOnMachineFunction(*MF);
588 RegClassInfoValid =
true;
591 for (
unsigned Reg : RegClassInfo.getOrder(&RegClass))
592 if (LiveRegs.available(Reg) && !MF->getRegInfo().isReserved(Reg))
603 if (!LiveRegsValid) {
605 LiveRegs.addLiveOuts(
MBB);
607 LiveRegsValid =
true;
610 while (LiveRegPos !=
Before) {
612 LiveRegs.stepBackward(*LiveRegPos);
618 for (
const std::pair<unsigned, bool> &R : Regs)
629 int Offset,
unsigned Base,
bool BaseKill,
unsigned Opcode,
631 ArrayRef<std::pair<unsigned, bool>> Regs,
633 unsigned NumRegs = Regs.size();
638 bool SafeToClobberCPSR = !isThumb1 ||
642 bool Writeback = isThumb1;
648 assert(
Base != ARM::SP &&
"Thumb1 does not allow SP in register list");
649 if (Opcode == ARM::tLDRi)
651 else if (Opcode == ARM::tSTRi)
658 bool haveIBAndDA = isNotVFP && !isThumb2 && !isThumb1;
660 if (
Offset == 4 && haveIBAndDA) {
662 }
else if (
Offset == -4 * (
int)NumRegs + 4 && haveIBAndDA) {
664 }
else if (
Offset == -4 * (
int)NumRegs && isNotVFP && !isThumb1) {
667 }
else if (
Offset != 0 || Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi) {
680 if (!SafeToClobberCPSR)
687 NewBase = Regs[NumRegs-1].first;
691 moveLiveRegsBefore(
MBB, InsertBefore);
695 for (
const std::pair<unsigned, bool> &R : Regs)
696 LiveRegs.addReg(
R.first);
698 NewBase = findFreeReg(isThumb1 ? ARM::tGPRRegClass : ARM::GPRRegClass);
703 int BaseOpc = isThumb2 ? (BaseKill &&
Base == ARM::SP ? ARM::t2ADDspImm
705 : (isThumb1 &&
Base == ARM::SP)
707 : (isThumb1 &&
Offset < 8)
709 : isThumb1 ? ARM::tADDi8 : ARM::ADDri;
715 BaseOpc = isThumb2 ? (BaseKill &&
Base == ARM::SP ? ARM::t2SUBspImm
719 : isThumb1 ? ARM::tSUBi8 : ARM::SUBri;
722 if (!TL->isLegalAddImmediate(
Offset))
728 bool KillOldBase = BaseKill &&
737 if (
Base != NewBase &&
738 (BaseOpc == ARM::tADDi8 || BaseOpc == ARM::tSUBi8)) {
756 if (BaseOpc == ARM::tADDrSPi) {
757 assert(
Offset % 4 == 0 &&
"tADDrSPi offset is scaled by 4");
795 if (isThumb1 && !SafeToClobberCPSR && Writeback && !BaseKill)
801 assert(isThumb1 &&
"expected Writeback only inThumb1");
802 if (Opcode == ARM::tLDMIA) {
805 Opcode = ARM::tLDMIA_UPD;
817 UpdateBaseRegUses(
MBB, InsertBefore,
DL,
Base, NumRegs, Pred, PredReg);
826 for (
const std::pair<unsigned, bool> &R : Regs)
836 int Offset,
unsigned Base,
bool BaseKill,
unsigned Opcode,
838 ArrayRef<std::pair<unsigned, bool>> Regs,
841 assert((IsLoad ||
isi32Store(Opcode)) &&
"Must have integer load or store");
842 unsigned LoadStoreOpcode = IsLoad ? ARM::t2LDRDi8 : ARM::t2STRDi8;
846 TII->get(LoadStoreOpcode));
860MachineInstr *ARMLoadStoreOpt::MergeOpsUpdate(
const MergeCandidate &Cand) {
862 unsigned Opcode =
First->getOpcode();
872 bool IsKill = MO.
isKill();
875 Regs.
push_back(std::make_pair(Reg, IsKill));
891 if (
MI->readsRegister(DefReg))
902 iterator InsertBefore = std::next(iterator(LatestMI));
911 if (Cand.CanMergeToLSDouble)
912 Merged = CreateLoadStoreDouble(
MBB, InsertBefore,
Offset,
Base, BaseKill,
913 Opcode, Pred, PredReg,
DL, Regs,
915 if (!Merged && Cand.CanMergeToLSMulti)
916 Merged = CreateLoadStoreMulti(
MBB, InsertBefore,
Offset,
Base, BaseKill,
917 Opcode, Pred, PredReg,
DL, Regs, Cand.Instrs);
923 iterator EarliestI(Cand.Instrs[Cand.EarliestMIIdx]);
924 bool EarliestAtBegin =
false;
926 EarliestAtBegin =
true;
928 EarliestI = std::prev(EarliestI);
939 EarliestI = std::next(EarliestI);
940 auto FixupRange =
make_range(EarliestI, iterator(Merged));
946 for (
unsigned &ImpDefReg : ImpDefs) {
959 for (
unsigned ImpDef : ImpDefs)
991 unsigned Opcode =
MI.getOpcode();
1004void ARMLoadStoreOpt::FormCandidates(
const MemOpQueue &MemOps) {
1010 unsigned SIndex = 0;
1011 unsigned EIndex = MemOps.size();
1015 int Offset = MemOps[SIndex].Offset;
1018 unsigned PRegNum = PMO.
isUndef() ? std::numeric_limits<unsigned>::max()
1019 :
TRI->getEncodingValue(PReg);
1020 unsigned Latest = SIndex;
1021 unsigned Earliest = SIndex;
1023 bool CanMergeToLSDouble =
1027 if (STI->isCortexM3() &&
isi32Load(Opcode) &&
1029 CanMergeToLSDouble =
false;
1031 bool CanMergeToLSMulti =
true;
1034 if (STI->hasSlowOddRegister() && !isNotVFP && (PRegNum % 2) == 1)
1035 CanMergeToLSMulti =
false;
1039 if (PReg == ARM::SP || PReg == ARM::PC)
1040 CanMergeToLSMulti = CanMergeToLSDouble =
false;
1044 CanMergeToLSMulti = CanMergeToLSDouble =
false;
1059 for (
unsigned I = SIndex+1;
I < EIndex; ++
I, ++Count) {
1060 int NewOffset = MemOps[
I].Offset;
1065 if (Reg == ARM::SP || Reg == ARM::PC)
1071 unsigned RegNum = MO.
isUndef() ? std::numeric_limits<unsigned>::max()
1072 :
TRI->getEncodingValue(
Reg);
1073 bool PartOfLSMulti = CanMergeToLSMulti;
1074 if (PartOfLSMulti) {
1076 if (RegNum <= PRegNum)
1077 PartOfLSMulti =
false;
1081 else if (!isNotVFP && RegNum != PRegNum+1)
1082 PartOfLSMulti =
false;
1085 bool PartOfLSDouble = CanMergeToLSDouble && Count <= 1;
1087 if (!PartOfLSMulti && !PartOfLSDouble)
1089 CanMergeToLSMulti &= PartOfLSMulti;
1090 CanMergeToLSDouble &= PartOfLSDouble;
1093 unsigned Position = MemOps[
I].Position;
1094 if (Position < MemOps[Latest].Position)
1096 else if (Position > MemOps[Earliest].Position)
1104 MergeCandidate *Candidate =
new(
Allocator.Allocate()) MergeCandidate;
1105 for (
unsigned C = SIndex, CE = SIndex + Count;
C <
CE; ++
C)
1106 Candidate->Instrs.push_back(MemOps[
C].MI);
1107 Candidate->LatestMIIdx = Latest - SIndex;
1108 Candidate->EarliestMIIdx = Earliest - SIndex;
1109 Candidate->InsertPos = MemOps[Latest].Position;
1111 CanMergeToLSMulti = CanMergeToLSDouble =
false;
1112 Candidate->CanMergeToLSMulti = CanMergeToLSMulti;
1113 Candidate->CanMergeToLSDouble = CanMergeToLSDouble;
1114 Candidates.push_back(Candidate);
1117 }
while (SIndex < EIndex);
1194 switch (
MI.getOpcode()) {
1195 case ARM::tADDi8: Scale = 4; CheckCPSRDef =
true;
break;
1196 case ARM::tSUBi8: Scale = -4; CheckCPSRDef =
true;
break;
1198 case ARM::t2SUBspImm:
1199 case ARM::SUBri: Scale = -1; CheckCPSRDef =
true;
break;
1201 case ARM::t2ADDspImm:
1202 case ARM::ADDri: Scale = 1; CheckCPSRDef =
true;
break;
1203 case ARM::tADDspi: Scale = 4; CheckCPSRDef =
false;
break;
1204 case ARM::tSUBspi: Scale = -4; CheckCPSRDef =
false;
break;
1209 if (
MI.getOperand(0).getReg() != Reg ||
1210 MI.getOperand(1).getReg() != Reg ||
1212 MIPredReg != PredReg)
1215 if (CheckCPSRDef && definesCPSR(
MI))
1217 return MI.getOperand(2).getImm() * Scale;
1228 if (
MBBI == BeginMBBI)
1233 while (PrevMBBI->isDebugInstr() && PrevMBBI != BeginMBBI)
1237 return Offset == 0 ? EndMBBI : PrevMBBI;
1249 while (NextMBBI != EndMBBI) {
1251 while (NextMBBI != EndMBBI && NextMBBI->isDebugInstr())
1253 if (NextMBBI == EndMBBI)
1267 if (Reg == ARM::SP || NextMBBI->readsRegister(Reg,
TRI) ||
1268 NextMBBI->definesRegister(Reg,
TRI))
1288bool ARMLoadStoreOpt::MergeBaseUpdateLSMultiple(
MachineInstr *
MI) {
1290 if (isThumb1)
return false;
1295 bool BaseKill = BaseOP.
isKill();
1298 unsigned Opcode =
MI->getOpcode();
1327 if (!STI->hasMinSize() || !BaseKill)
1330 bool HighRegsUsed =
false;
1332 if (MO.
getReg() >= ARM::R8) {
1333 HighRegsUsed =
true;
1343 if (MergeInstr !=
MBB.
end()) {
1370 return ARM::LDR_PRE_IMM;
1372 return ARM::STR_PRE_IMM;
1374 return Mode ==
ARM_AM::add ? ARM::VLDMSIA_UPD : ARM::VLDMSDB_UPD;
1376 return Mode ==
ARM_AM::add ? ARM::VLDMDIA_UPD : ARM::VLDMDDB_UPD;
1378 return Mode ==
ARM_AM::add ? ARM::VSTMSIA_UPD : ARM::VSTMSDB_UPD;
1380 return Mode ==
ARM_AM::add ? ARM::VSTMDIA_UPD : ARM::VSTMDDB_UPD;
1383 return ARM::t2LDR_PRE;
1386 return ARM::t2STR_PRE;
1395 return ARM::LDR_POST_IMM;
1397 return ARM::STR_POST_IMM;
1399 return Mode ==
ARM_AM::add ? ARM::VLDMSIA_UPD : ARM::VLDMSDB_UPD;
1401 return Mode ==
ARM_AM::add ? ARM::VLDMDIA_UPD : ARM::VLDMDDB_UPD;
1403 return Mode ==
ARM_AM::add ? ARM::VSTMSIA_UPD : ARM::VSTMSDB_UPD;
1405 return Mode ==
ARM_AM::add ? ARM::VSTMDIA_UPD : ARM::VSTMDDB_UPD;
1408 return ARM::t2LDR_POST;
1410 case ARM::t2LDRBi12:
1411 return ARM::t2LDRB_POST;
1412 case ARM::t2LDRSBi8:
1413 case ARM::t2LDRSBi12:
1414 return ARM::t2LDRSB_POST;
1416 case ARM::t2LDRHi12:
1417 return ARM::t2LDRH_POST;
1418 case ARM::t2LDRSHi8:
1419 case ARM::t2LDRSHi12:
1420 return ARM::t2LDRSH_POST;
1423 return ARM::t2STR_POST;
1425 case ARM::t2STRBi12:
1426 return ARM::t2STRB_POST;
1428 case ARM::t2STRHi12:
1429 return ARM::t2STRH_POST;
1431 case ARM::MVE_VLDRBS16:
1432 return ARM::MVE_VLDRBS16_post;
1433 case ARM::MVE_VLDRBS32:
1434 return ARM::MVE_VLDRBS32_post;
1435 case ARM::MVE_VLDRBU16:
1436 return ARM::MVE_VLDRBU16_post;
1437 case ARM::MVE_VLDRBU32:
1438 return ARM::MVE_VLDRBU32_post;
1439 case ARM::MVE_VLDRHS32:
1440 return ARM::MVE_VLDRHS32_post;
1441 case ARM::MVE_VLDRHU32:
1442 return ARM::MVE_VLDRHU32_post;
1443 case ARM::MVE_VLDRBU8:
1444 return ARM::MVE_VLDRBU8_post;
1445 case ARM::MVE_VLDRHU16:
1446 return ARM::MVE_VLDRHU16_post;
1447 case ARM::MVE_VLDRWU32:
1448 return ARM::MVE_VLDRWU32_post;
1449 case ARM::MVE_VSTRB16:
1450 return ARM::MVE_VSTRB16_post;
1451 case ARM::MVE_VSTRB32:
1452 return ARM::MVE_VSTRB32_post;
1453 case ARM::MVE_VSTRH32:
1454 return ARM::MVE_VSTRH32_post;
1455 case ARM::MVE_VSTRBU8:
1456 return ARM::MVE_VSTRBU8_post;
1457 case ARM::MVE_VSTRHU16:
1458 return ARM::MVE_VSTRHU16_post;
1459 case ARM::MVE_VSTRWU32:
1460 return ARM::MVE_VSTRWU32_post;
1468bool ARMLoadStoreOpt::MergeBaseUpdateLoadStore(
MachineInstr *
MI) {
1471 if (isThumb1)
return false;
1476 unsigned Opcode =
MI->getOpcode();
1478 bool isAM5 = (Opcode == ARM::VLDRD || Opcode == ARM::VLDRS ||
1479 Opcode == ARM::VSTRD || Opcode == ARM::VSTRS);
1480 bool isAM2 = (Opcode == ARM::LDRi12 || Opcode == ARM::STRi12);
1482 if (
MI->getOperand(2).getImm() != 0)
1489 if (
MI->getOperand(0).getReg() ==
Base)
1501 if (!isAM5 &&
Offset == Bytes) {
1503 }
else if (
Offset == -Bytes) {
1507 if (MergeInstr ==
MBB.
end())
1511 if ((isAM5 &&
Offset != Bytes) ||
1543 if (NewOpc == ARM::LDR_PRE_IMM || NewOpc == ARM::LDRB_PRE_IMM) {
1584 if (isAM2 && NewOpc == ARM::STR_POST_IMM) {
1613bool ARMLoadStoreOpt::MergeBaseUpdateLSDouble(
MachineInstr &
MI)
const {
1614 unsigned Opcode =
MI.getOpcode();
1615 assert((Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8) &&
1616 "Must have t2STRDi8 or t2LDRDi8");
1617 if (
MI.getOperand(3).getImm() != 0)
1639 NewOpc = Opcode == ARM::t2LDRDi8 ? ARM::t2LDRD_PRE : ARM::t2STRD_PRE;
1642 if (MergeInstr ==
MBB.
end())
1644 NewOpc = Opcode == ARM::t2LDRDi8 ? ARM::t2LDRD_POST : ARM::t2STRD_POST;
1653 if (NewOpc == ARM::t2LDRD_PRE || NewOpc == ARM::t2LDRD_POST) {
1656 assert(NewOpc == ARM::t2STRD_PRE || NewOpc == ARM::t2STRD_POST);
1661 assert(
TII->get(Opcode).getNumOperands() == 6 &&
1662 TII->get(NewOpc).getNumOperands() == 7 &&
1663 "Unexpected number of operands in Opcode specification.");
1678 unsigned Opcode =
MI.getOpcode();
1698 if (!
MI.getOperand(1).isReg())
1703 if (!
MI.hasOneMemOperand())
1722 if (
MI.getOperand(0).isReg() &&
MI.getOperand(0).isUndef())
1726 if (
MI.getOperand(1).isUndef())
1734 bool isDef,
unsigned NewOpc,
unsigned Reg,
1735 bool RegDeadKill,
bool RegUndef,
unsigned BaseReg,
1763 unsigned Opcode =
MI->getOpcode();
1766 if (Opcode != ARM::LDRD && Opcode != ARM::STRD && Opcode != ARM::t2LDRDi8)
1771 Register EvenReg =
MI->getOperand(0).getReg();
1772 Register OddReg =
MI->getOperand(1).getReg();
1773 unsigned EvenRegNum =
TRI->getDwarfRegNum(EvenReg,
false);
1774 unsigned OddRegNum =
TRI->getDwarfRegNum(OddReg,
false);
1778 bool Errata602117 = EvenReg == BaseReg &&
1779 (Opcode == ARM::LDRD || Opcode == ARM::t2LDRDi8) && STI->isCortexM3();
1781 bool NonConsecutiveRegs = (Opcode == ARM::LDRD || Opcode == ARM::STRD) &&
1782 (EvenRegNum % 2 != 0 || EvenRegNum + 1 != OddRegNum);
1784 if (!Errata602117 && !NonConsecutiveRegs)
1787 bool isT2 = Opcode == ARM::t2LDRDi8 || Opcode == ARM::t2STRDi8;
1788 bool isLd = Opcode == ARM::LDRD || Opcode == ARM::t2LDRDi8;
1789 bool EvenDeadKill = isLd ?
1790 MI->getOperand(0).isDead() :
MI->getOperand(0).isKill();
1791 bool EvenUndef =
MI->getOperand(0).isUndef();
1792 bool OddDeadKill = isLd ?
1793 MI->getOperand(1).isDead() :
MI->getOperand(1).isKill();
1794 bool OddUndef =
MI->getOperand(1).isUndef();
1795 bool BaseKill = BaseOp.
isKill();
1796 bool BaseUndef = BaseOp.
isUndef();
1797 assert((isT2 ||
MI->getOperand(3).getReg() == ARM::NoRegister) &&
1798 "register offset not handled below");
1803 if (OddRegNum > EvenRegNum && OffImm == 0) {
1806 unsigned NewOpc = (isLd)
1807 ? (isT2 ? ARM::t2LDMIA : ARM::LDMIA)
1808 : (isT2 ? ARM::t2STMIA : ARM::STMIA);
1830 unsigned NewOpc = (isLd)
1831 ? (isT2 ? (OffImm < 0 ? ARM::t2LDRi8 : ARM::t2LDRi12) : ARM::LDRi12)
1832 : (isT2 ? (OffImm < 0 ? ARM::t2STRi8 : ARM::t2STRi12) : ARM::STRi12);
1835 unsigned NewOpc2 = (isLd)
1836 ? (isT2 ? (OffImm+4 < 0 ? ARM::t2LDRi8 : ARM::t2LDRi12) : ARM::LDRi12)
1837 : (isT2 ? (OffImm+4 < 0 ? ARM::t2STRi8 : ARM::t2STRi12) : ARM::STRi12);
1840 if (isLd &&
TRI->regsOverlap(EvenReg, BaseReg)) {
1841 assert(!
TRI->regsOverlap(OddReg, BaseReg));
1843 false, BaseReg,
false, BaseUndef, Pred, PredReg,
TII,
MI);
1845 false, BaseReg, BaseKill, BaseUndef, Pred, PredReg,
TII,
1848 if (OddReg == EvenReg && EvenDeadKill) {
1852 EvenDeadKill =
false;
1856 if (EvenReg == BaseReg)
1857 EvenDeadKill =
false;
1859 EvenUndef, BaseReg,
false, BaseUndef, Pred, PredReg,
TII,
1862 OddUndef, BaseReg, BaseKill, BaseUndef, Pred, PredReg,
TII,
1879 unsigned CurrBase = 0;
1880 unsigned CurrOpc = ~0
u;
1882 unsigned Position = 0;
1883 assert(Candidates.size() == 0);
1884 assert(MergeBaseCandidates.size() == 0);
1885 LiveRegsValid =
false;
1890 MBBI = std::prev(
I);
1891 if (FixInvalidRegPairOp(
MBB,
MBBI))
1896 unsigned Opcode =
MBBI->getOpcode();
1903 if (CurrBase == 0) {
1908 MemOps.push_back(MemOpQueueEntry(*
MBBI,
Offset, Position));
1912 if (CurrOpc == Opcode && CurrBase ==
Base && CurrPred == Pred) {
1920 bool Overlap =
false;
1924 for (
const MemOpQueueEntry &E : MemOps) {
1925 if (
TRI->regsOverlap(Reg, E.MI->getOperand(0).getReg())) {
1935 if (
Offset > MemOps.back().Offset) {
1936 MemOps.push_back(MemOpQueueEntry(*
MBBI,
Offset, Position));
1939 MemOpQueue::iterator
MI, ME;
1940 for (
MI = MemOps.begin(), ME = MemOps.end();
MI != ME; ++
MI) {
1941 if (Offset < MI->
Offset) {
1951 if (
MI != MemOps.end()) {
1952 MemOps.insert(
MI, MemOpQueueEntry(*
MBBI,
Offset, Position));
1963 }
else if (
MBBI->isDebugInstr()) {
1965 }
else if (
MBBI->getOpcode() == ARM::t2LDRDi8 ||
1966 MBBI->getOpcode() == ARM::t2STRDi8) {
1969 MergeBaseCandidates.push_back(&*
MBBI);
1973 if (MemOps.size() > 0) {
1974 FormCandidates(MemOps);
1982 if (MemOps.size() > 0)
1983 FormCandidates(MemOps);
1987 auto LessThan = [](
const MergeCandidate*
M0,
const MergeCandidate *
M1) {
1988 return M0->InsertPos <
M1->InsertPos;
1993 bool Changed =
false;
1994 for (
const MergeCandidate *Candidate : Candidates) {
1995 if (Candidate->CanMergeToLSMulti || Candidate->CanMergeToLSDouble) {
2001 if (Opcode == ARM::t2STRDi8 || Opcode == ARM::t2LDRDi8)
2002 MergeBaseUpdateLSDouble(*Merged);
2004 MergeBaseUpdateLSMultiple(Merged);
2007 if (MergeBaseUpdateLoadStore(
MI))
2012 assert(Candidate->Instrs.size() == 1);
2013 if (MergeBaseUpdateLoadStore(Candidate->Instrs.front()))
2020 MergeBaseUpdateLSDouble(*
MI);
2021 MergeBaseCandidates.clear();
2038 if (isThumb1)
return false;
2043 (
MBBI->getOpcode() == ARM::BX_RET ||
2044 MBBI->getOpcode() == ARM::tBX_RET ||
2045 MBBI->getOpcode() == ARM::MOVPCLR)) {
2048 while (PrevI->isDebugInstr() && PrevI !=
MBB.
begin())
2052 if (Opcode == ARM::LDMIA_UPD || Opcode == ARM::LDMDA_UPD ||
2053 Opcode == ARM::LDMDB_UPD || Opcode == ARM::LDMIB_UPD ||
2054 Opcode == ARM::t2LDMIA_UPD || Opcode == ARM::t2LDMDB_UPD) {
2056 if (MO.
getReg() != ARM::LR)
2058 unsigned NewOpc = (isThumb2 ? ARM::t2LDMIA_RET : ARM::LDMIA_RET);
2059 assert(((isThumb2 && Opcode == ARM::t2LDMIA_UPD) ||
2060 Opcode == ARM::LDMIA_UPD) &&
"Unsupported multiple load-return!");
2074 MBBI->getOpcode() != ARM::tBX_RET)
2079 if (Prev->getOpcode() != ARM::tMOVr || !Prev->definesRegister(ARM::LR))
2082 for (
auto Use : Prev->uses())
2084 assert(STI->hasV4TOps());
2105 TII = STI->getInstrInfo();
2106 TRI = STI->getRegisterInfo();
2108 RegClassInfoValid =
false;
2109 isThumb2 = AFI->isThumb2Function();
2110 isThumb1 = AFI->isThumbFunction() && !isThumb2;
2112 bool Modified =
false, ModifiedLDMReturn =
false;
2115 if (STI->hasV5TOps() && !AFI->shouldSignReturnAddress())
2116 ModifiedLDMReturn |= MergeReturnIntoLDM(
MBB);
2126 if (ModifiedLDMReturn)
/// Human-readable name registered for the pre-RA load/store optimization pass.
#define ARM_PREALLOC_LOAD_STORE_OPT_NAME \
  "ARM pre- register allocation load / store optimization pass"
2177 bool DistributeIncrements();
2183char ARMPreAllocLoadStoreOpt::ID = 0;
2194 cl::init(8), cl::Hidden);
2196bool ARMPreAllocLoadStoreOpt::runOnMachineFunction(
MachineFunction &Fn) {
2200 TD = &Fn.getDataLayout();
2202 TII = STI->getInstrInfo();
2203 TRI = STI->getRegisterInfo();
2204 MRI = &Fn.getRegInfo();
2205 DT = &getAnalysis<MachineDominatorTree>();
2207 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
2209 bool Modified = DistributeIncrements();
2211 Modified |= RescheduleLoadStoreInstrs(&MFI);
2226 if (
I->isDebugInstr() || MemOps.
count(&*
I))
2228 if (
I->isCall() ||
I->isTerminator() ||
I->hasUnmodeledSideEffects())
2230 if (
I->mayStore() || (!isLd &&
I->mayLoad()))
2232 if (
I->mayAlias(AA, *
MemOp,
false))
2234 for (
unsigned j = 0, NumOps =
I->getNumOperands(); j != NumOps; ++j) {
2242 AddedRegPressure.
insert(Reg);
2247 if (MemRegs.
size() <= 4)
2250 return AddedRegPressure.
size() <= MemRegs.
size() * 2;
2253bool ARMPreAllocLoadStoreOpt::CanFormLdStDWord(
2258 if (!STI->hasV5TEOps())
2264 if (Opcode == ARM::LDRi12) {
2266 }
else if (Opcode == ARM::STRi12) {
2268 }
else if (Opcode == ARM::t2LDRi8 || Opcode == ARM::t2LDRi12) {
2269 NewOpc = ARM::t2LDRDi8;
2272 }
else if (Opcode == ARM::t2STRi8 || Opcode == ARM::t2STRi12) {
2273 NewOpc = ARM::t2STRDi8;
2289 Align ReqAlign = STI->getDualLoadStoreAlignment();
2290 if (Alignment < ReqAlign)
2296 int Limit = (1 << 8) * Scale;
2297 if (OffImm >= Limit || (OffImm <= -Limit) || (OffImm & (Scale-1)))
2306 int Limit = (1 << 8) * Scale;
2307 if (OffImm >= Limit || (OffImm & (Scale-1)))
2313 if (FirstReg == SecondReg)
2321bool ARMPreAllocLoadStoreOpt::RescheduleOps(
2325 bool RetVal =
false;
2331 assert(LHS == RHS || LOffset != ROffset);
2332 return LOffset > ROffset;
2339 while (Ops.
size() > 1) {
2340 unsigned FirstLoc = ~0
U;
2341 unsigned LastLoc = 0;
2345 unsigned LastOpcode = 0;
2346 unsigned LastBytes = 0;
2347 unsigned NumMove = 0;
2352 if (LastOpcode && LSMOpcode != LastOpcode)
2359 if (Bytes != LastBytes ||
Offset != (LastOffset + (
int)Bytes))
2371 LastOpcode = LSMOpcode;
2373 unsigned Loc = MI2LocMap[
Op];
2374 if (Loc <= FirstLoc) {
2378 if (Loc >= LastLoc) {
2389 for (
size_t i = Ops.
size() - NumMove, e = Ops.
size(); i != e; ++i) {
2396 bool DoMove = (LastLoc - FirstLoc) <= NumMove*4;
2399 MemOps, MemRegs,
TRI, AA);
2401 for (
unsigned i = 0; i != NumMove; ++i)
2406 while (InsertPos !=
MBB->
end() &&
2407 (MemOps.
count(&*InsertPos) || InsertPos->isDebugInstr()))
2418 unsigned NewOpc = 0;
2421 if (NumMove == 2 && CanFormLdStDWord(Op0, Op1, dl, NewOpc,
2422 FirstReg, SecondReg, BaseReg,
2423 Offset, PredReg, Pred, isT2)) {
2429 MRI->constrainRegClass(FirstReg, TRC);
2430 MRI->constrainRegClass(SecondReg, TRC);
2471 for (
unsigned i = 0; i != NumMove; ++i) {
2483 NumLdStMoved += NumMove;
2494 if (
MI->isNonListDebugValue()) {
2495 auto &
Op =
MI->getOperand(0);
2499 for (
unsigned I = 2;
I <
MI->getNumOperands();
I++) {
2500 auto &
Op =
MI->getOperand(
I);
2514 auto RegIt = RegisterMap.find(
Op.getReg());
2515 if (RegIt == RegisterMap.end())
2517 auto &InstrVec = RegIt->getSecond();
2518 for (
unsigned I = 0;
I < InstrVec.size();
I++)
2519 if (InstrVec[
I] == InstrToReplace)
2520 InstrVec[
I] = DbgValueListInstr;
2526 MI->getDebugLoc()->getInlinedAt());
2532 bool RetVal =
false;
2538 Base2InstMap Base2LdsMap;
2539 Base2InstMap Base2StsMap;
2553 if (
MI.isCall() ||
MI.isTerminator()) {
2559 if (!
MI.isDebugInstr())
2560 MI2LocMap[&
MI] = ++Loc;
2568 int Opc =
MI.getOpcode();
2572 bool StopHere =
false;
2573 auto FindBases = [&] (Base2InstMap &Base2Ops, BaseVec &Bases) {
2574 MapIt BI = Base2Ops.find(
Base);
2575 if (BI == Base2Ops.end()) {
2576 Base2Ops[
Base].push_back(&
MI);
2577 Bases.push_back(
Base);
2580 for (
unsigned i = 0, e = BI->second.size(); i !=
e; ++i) {
2587 BI->second.push_back(&
MI);
2591 FindBases(Base2LdsMap, LdBases);
2593 FindBases(Base2StsMap, StBases);
2604 for (
unsigned Base : LdBases) {
2607 RetVal |= RescheduleOps(
MBB, Lds,
Base,
true, MI2LocMap, RegisterMap);
2611 for (
unsigned Base : StBases) {
2614 RetVal |= RescheduleOps(
MBB, Sts,
Base,
false, MI2LocMap, RegisterMap);
2618 Base2LdsMap.clear();
2619 Base2StsMap.clear();
2782 auto PopulateRegisterAndInstrMapForDebugInstr = [&](
Register Reg) {
2783 auto RegIt = RegisterMap.
find(Reg);
2784 if (RegIt == RegisterMap.
end())
2786 auto &InstrVec = RegIt->getSecond();
2787 InstrVec.push_back(&
MI);
2788 InstrMap[&
MI].push_back(Reg);
2791 if (
MI.isDebugValue()) {
2793 "DBG_VALUE or DBG_VALUE_LIST must contain a DILocalVariable");
2801 PopulateRegisterAndInstrMapForDebugInstr(
Op.getReg());
2809 auto InstrIt = DbgValueSinkCandidates.
find(DbgVar);
2810 if (InstrIt != DbgValueSinkCandidates.
end()) {
2811 auto *
Instr = InstrIt->getSecond();
2812 auto RegIt = InstrMap.
find(Instr);
2813 if (RegIt != InstrMap.
end()) {
2814 const auto &RegVec = RegIt->getSecond();
2817 for (
auto &Reg : RegVec) {
2818 auto RegIt = RegisterMap.
find(Reg);
2819 if (RegIt == RegisterMap.
end())
2821 auto &InstrVec = RegIt->getSecond();
2824 return Var == DbgVar;
2833 DbgValueSinkCandidates[DbgVar] = &
MI;
2837 auto Opc =
MI.getOpcode();
2840 auto Reg =
MI.getOperand(0).getReg();
2841 auto RegIt = RegisterMap.
find(Reg);
2842 if (RegIt == RegisterMap.
end())
2844 auto &DbgInstrVec = RegIt->getSecond();
2845 if (!DbgInstrVec.size())
2847 for (
auto *DbgInstr : DbgInstrVec) {
2849 auto *ClonedMI =
MI.getMF()->CloneMachineInstr(DbgInstr);
2855 auto DbgIt = DbgValueSinkCandidates.
find(DbgVar);
2859 if (DbgIt != DbgValueSinkCandidates.
end())
2860 DbgValueSinkCandidates.
erase(DbgIt);
2866 if (DbgInstr->isDebugValueList())
2880 switch (
MI.getOpcode()) {
2881 case ARM::MVE_VLDRBS16:
2882 case ARM::MVE_VLDRBS32:
2883 case ARM::MVE_VLDRBU16:
2884 case ARM::MVE_VLDRBU32:
2885 case ARM::MVE_VLDRHS32:
2886 case ARM::MVE_VLDRHU32:
2887 case ARM::MVE_VLDRBU8:
2888 case ARM::MVE_VLDRHU16:
2889 case ARM::MVE_VLDRWU32:
2890 case ARM::MVE_VSTRB16:
2891 case ARM::MVE_VSTRB32:
2892 case ARM::MVE_VSTRH32:
2893 case ARM::MVE_VSTRBU8:
2894 case ARM::MVE_VSTRHU16:
2895 case ARM::MVE_VSTRWU32:
2897 case ARM::t2LDRHi12:
2898 case ARM::t2LDRSHi8:
2899 case ARM::t2LDRSHi12:
2901 case ARM::t2LDRBi12:
2902 case ARM::t2LDRSBi8:
2903 case ARM::t2LDRSBi12:
2905 case ARM::t2STRBi12:
2907 case ARM::t2STRHi12:
2909 case ARM::MVE_VLDRBS16_post:
2910 case ARM::MVE_VLDRBS32_post:
2911 case ARM::MVE_VLDRBU16_post:
2912 case ARM::MVE_VLDRBU32_post:
2913 case ARM::MVE_VLDRHS32_post:
2914 case ARM::MVE_VLDRHU32_post:
2915 case ARM::MVE_VLDRBU8_post:
2916 case ARM::MVE_VLDRHU16_post:
2917 case ARM::MVE_VLDRWU32_post:
2918 case ARM::MVE_VSTRB16_post:
2919 case ARM::MVE_VSTRB32_post:
2920 case ARM::MVE_VSTRH32_post:
2921 case ARM::MVE_VSTRBU8_post:
2922 case ARM::MVE_VSTRHU16_post:
2923 case ARM::MVE_VSTRWU32_post:
2924 case ARM::MVE_VLDRBS16_pre:
2925 case ARM::MVE_VLDRBS32_pre:
2926 case ARM::MVE_VLDRBU16_pre:
2927 case ARM::MVE_VLDRBU32_pre:
2928 case ARM::MVE_VLDRHS32_pre:
2929 case ARM::MVE_VLDRHU32_pre:
2930 case ARM::MVE_VLDRBU8_pre:
2931 case ARM::MVE_VLDRHU16_pre:
2932 case ARM::MVE_VLDRWU32_pre:
2933 case ARM::MVE_VSTRB16_pre:
2934 case ARM::MVE_VSTRB32_pre:
2935 case ARM::MVE_VSTRH32_pre:
2936 case ARM::MVE_VSTRBU8_pre:
2937 case ARM::MVE_VSTRHU16_pre:
2938 case ARM::MVE_VSTRWU32_pre:
2945 switch (
MI.getOpcode()) {
2946 case ARM::MVE_VLDRBS16_post:
2947 case ARM::MVE_VLDRBS32_post:
2948 case ARM::MVE_VLDRBU16_post:
2949 case ARM::MVE_VLDRBU32_post:
2950 case ARM::MVE_VLDRHS32_post:
2951 case ARM::MVE_VLDRHU32_post:
2952 case ARM::MVE_VLDRBU8_post:
2953 case ARM::MVE_VLDRHU16_post:
2954 case ARM::MVE_VLDRWU32_post:
2955 case ARM::MVE_VSTRB16_post:
2956 case ARM::MVE_VSTRB32_post:
2957 case ARM::MVE_VSTRH32_post:
2958 case ARM::MVE_VSTRBU8_post:
2959 case ARM::MVE_VSTRHU16_post:
2960 case ARM::MVE_VSTRWU32_post:
2967 switch (
MI.getOpcode()) {
2968 case ARM::MVE_VLDRBS16_pre:
2969 case ARM::MVE_VLDRBS32_pre:
2970 case ARM::MVE_VLDRBU16_pre:
2971 case ARM::MVE_VLDRBU32_pre:
2972 case ARM::MVE_VLDRHS32_pre:
2973 case ARM::MVE_VLDRHU32_pre:
2974 case ARM::MVE_VLDRBU8_pre:
2975 case ARM::MVE_VLDRHU16_pre:
2976 case ARM::MVE_VLDRWU32_pre:
2977 case ARM::MVE_VSTRB16_pre:
2978 case ARM::MVE_VSTRB32_pre:
2979 case ARM::MVE_VSTRH32_pre:
2980 case ARM::MVE_VSTRBU8_pre:
2981 case ARM::MVE_VSTRHU16_pre:
2982 case ARM::MVE_VSTRWU32_pre:
2995 int &CodesizeEstimate) {
3004 CodesizeEstimate += 1;
3005 return Imm < 0 && -Imm < ((1 << 8) * 1);
3018 MI->getOperand(BaseOp).setReg(NewBaseReg);
3024 MRI.constrainRegClass(NewBaseReg, TRC);
3026 int OldOffset =
MI->getOperand(BaseOp + 1).getImm();
3028 MI->getOperand(BaseOp + 1).setImm(OldOffset -
Offset);
3030 unsigned ConvOpcode;
3031 switch (
MI->getOpcode()) {
3032 case ARM::t2LDRHi12:
3033 ConvOpcode = ARM::t2LDRHi8;
3035 case ARM::t2LDRSHi12:
3036 ConvOpcode = ARM::t2LDRSHi8;
3038 case ARM::t2LDRBi12:
3039 ConvOpcode = ARM::t2LDRBi8;
3041 case ARM::t2LDRSBi12:
3042 ConvOpcode = ARM::t2LDRSBi8;
3044 case ARM::t2STRHi12:
3045 ConvOpcode = ARM::t2STRHi8;
3047 case ARM::t2STRBi12:
3048 ConvOpcode = ARM::t2STRBi8;
3054 "Illegal Address Immediate after convert!");
3058 .
add(
MI->getOperand(0))
3059 .
add(
MI->getOperand(1))
3061 .
add(
MI->getOperand(3))
3062 .
add(
MI->getOperand(4))
3064 MI->eraseFromParent();
3081 MRI.constrainRegClass(NewReg, TRC);
3083 TRC =
TII->getRegClass(MCID, 2,
TRI, *MF);
3084 MRI.constrainRegClass(
MI->getOperand(1).getReg(), TRC);
3092 return BuildMI(*
MI->getParent(),
MI,
MI->getDebugLoc(), MCID)
3094 .
add(
MI->getOperand(0))
3095 .
add(
MI->getOperand(1))
3097 .
add(
MI->getOperand(3))
3098 .
add(
MI->getOperand(4))
3099 .
add(
MI->getOperand(5))
3102 if (
MI->mayLoad()) {
3103 return BuildMI(*
MI->getParent(),
MI,
MI->getDebugLoc(), MCID)
3104 .
add(
MI->getOperand(0))
3106 .
add(
MI->getOperand(1))
3108 .
add(
MI->getOperand(3))
3109 .
add(
MI->getOperand(4))
3112 return BuildMI(*
MI->getParent(),
MI,
MI->getDebugLoc(), MCID)
3114 .
add(
MI->getOperand(0))
3115 .
add(
MI->getOperand(1))
3117 .
add(
MI->getOperand(3))
3118 .
add(
MI->getOperand(4))
3142bool ARMPreAllocLoadStoreOpt::DistributeIncrements(
Register Base) {
3152 for (
auto &
Use :
MRI->use_nodbg_instructions(
Base)) {
3162 if (!
Use.getOperand(BaseOp).isReg() ||
3163 Use.getOperand(BaseOp).getReg() !=
Base)
3167 else if (
Use.getOperand(BaseOp + 1).getImm() == 0)
3173 int IncrementOffset;
3175 if (BaseAccess && Increment) {
3179 if (
Increment->definesRegister(ARM::CPSR) ||
3183 LLVM_DEBUG(
dbgs() <<
"\nAttempting to distribute increments on VirtualReg "
3184 <<
Base.virtRegIndex() <<
"\n");
3189 MRI->use_nodbg_instructions(
Increment->getOperand(0).getReg())) {
3190 if (&
Use == BaseAccess || (
Use.getOpcode() != TargetOpcode::PHI &&
3191 !DT->dominates(BaseAccess, &
Use))) {
3192 LLVM_DEBUG(
dbgs() <<
" BaseAccess doesn't dominate use of increment\n");
3202 LLVM_DEBUG(
dbgs() <<
" Illegal addressing mode immediate on postinc\n");
3206 else if (PrePostInc) {
3214 LLVM_DEBUG(
dbgs() <<
"\nAttempting to distribute increments on already "
3215 <<
"indexed VirtualReg " <<
Base.virtRegIndex() <<
"\n");
3218 BaseAccess = PrePostInc;
3233 int CodesizeEstimate = -1;
3234 for (
auto *
Use : OtherAccesses) {
3235 if (DT->dominates(BaseAccess,
Use)) {
3239 Use->getOperand(BaseOp + 1).getImm() -
3241 TII, CodesizeEstimate)) {
3242 LLVM_DEBUG(
dbgs() <<
" Illegal addressing mode immediate on use\n");
3245 }
else if (!DT->dominates(
Use, BaseAccess)) {
3247 dbgs() <<
" Unknown dominance relation between Base and Use\n");
3251 if (STI->hasMinSize() && CodesizeEstimate > 0) {
3252 LLVM_DEBUG(
dbgs() <<
" Expected to grow instructions under minsize\n");
3260 NewBaseReg =
Increment->getOperand(0).getReg();
3265 (void)BaseAccessPost;
3269 for (
auto *
Use : SuccessorAccesses) {
3278 Op.setIsKill(
false);
3282bool ARMPreAllocLoadStoreOpt::DistributeIncrements() {
3283 bool Changed =
false;
3285 for (
auto &
MBB : *MF) {
3286 for (
auto &
MI :
MBB) {
3288 if (BaseOp == -1 || !
MI.getOperand(BaseOp).isReg())
3299 for (
auto Base : Visited)
3300 Changed |= DistributeIncrements(
Base);
3308 return new ARMPreAllocLoadStoreOpt();
3309 return new ARMLoadStoreOpt();
unsigned const MachineRegisterInfo * MRI
for(const MachineOperand &MO :llvm::drop_begin(OldMI.operands(), Desc.getNumOperands()))
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static bool isLoadSingle(unsigned Opc)
static unsigned getPostIndexedLoadStoreOpcode(unsigned Opc, ARM_AM::AddrOpc Mode)
static bool IsSafeAndProfitableToMove(bool isLd, unsigned Base, MachineBasicBlock::iterator I, MachineBasicBlock::iterator E, SmallPtrSetImpl< MachineInstr * > &MemOps, SmallSet< unsigned, 4 > &MemRegs, const TargetRegisterInfo *TRI, AliasAnalysis *AA)
static bool isPreIndex(MachineInstr &MI)
static void forEachDbgRegOperand(MachineInstr *MI, std::function< void(MachineOperand &)> Fn)
static bool isPostIndex(MachineInstr &MI)
static int getLoadStoreMultipleOpcode(unsigned Opcode, ARM_AM::AMSubMode Mode)
static bool isMemoryOp(const MachineInstr &MI)
Returns true if instruction is a memory operation that this pass is capable of operating on.
static unsigned getLSMultipleTransferSize(const MachineInstr *MI)
static ARM_AM::AMSubMode getLoadStoreMultipleSubMode(unsigned Opcode)
static bool isT1i32Load(unsigned Opc)
static bool ContainsReg(const ArrayRef< std::pair< unsigned, bool > > &Regs, unsigned Reg)
static void AdjustBaseAndOffset(MachineInstr *MI, Register NewBaseReg, int Offset, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI)
static unsigned getPreIndexedLoadStoreOpcode(unsigned Opc, ARM_AM::AddrOpc Mode)
static MachineInstr * createPostIncLoadStore(MachineInstr *MI, int Offset, Register NewReg, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI)
static bool isi32Store(unsigned Opc)
static MachineBasicBlock::iterator findIncDecAfter(MachineBasicBlock::iterator MBBI, Register Reg, ARMCC::CondCodes Pred, Register PredReg, int &Offset, const TargetRegisterInfo *TRI)
Searches for a increment or decrement of Reg after MBBI.
static MachineBasicBlock::iterator findIncDecBefore(MachineBasicBlock::iterator MBBI, Register Reg, ARMCC::CondCodes Pred, Register PredReg, int &Offset)
Searches for an increment or decrement of Reg before MBBI.
static int getMemoryOpOffset(const MachineInstr &MI)
static const MachineOperand & getLoadStoreBaseOp(const MachineInstr &MI)
static void updateRegisterMapForDbgValueListAfterMove(SmallDenseMap< Register, SmallVector< MachineInstr * >, 8 > &RegisterMap, MachineInstr *DbgValueListInstr, MachineInstr *InstrToReplace)
arm prera ldst static false cl::opt< unsigned > InstReorderLimit("arm-prera-ldst-opt-reorder-limit", cl::init(8), cl::Hidden)
static void InsertLDR_STR(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI, int Offset, bool isDef, unsigned NewOpc, unsigned Reg, bool RegDeadKill, bool RegUndef, unsigned BaseReg, bool BaseKill, bool BaseUndef, ARMCC::CondCodes Pred, unsigned PredReg, const TargetInstrInfo *TII, MachineInstr *MI)
static int isIncrementOrDecrement(const MachineInstr &MI, Register Reg, ARMCC::CondCodes Pred, Register PredReg)
Check if the given instruction increments or decrements a register and return the amount it is increm...
static bool isT2i32Store(unsigned Opc)
static bool isLegalOrConvertableAddressImm(unsigned Opcode, int Imm, const TargetInstrInfo *TII, int &CodesizeEstimate)
static bool mayCombineMisaligned(const TargetSubtargetInfo &STI, const MachineInstr &MI)
Return true for loads/stores that can be combined to a double/multi operation without increasing the ...
static int getBaseOperandIndex(MachineInstr &MI)
static bool isT2i32Load(unsigned Opc)
static bool isi32Load(unsigned Opc)
static unsigned getImmScale(unsigned Opc)
static bool isT1i32Store(unsigned Opc)
#define ARM_PREALLOC_LOAD_STORE_OPT_NAME
#define ARM_LOAD_STORE_OPT_NAME
static unsigned getUpdatingLSMultipleOpcode(unsigned Opc, ARM_AM::AMSubMode Mode)
static const MachineOperand & getLoadStoreRegOp(const MachineInstr &MI)
static bool isValidLSDoubleOffset(int Offset)
static DebugVariable createDebugVariableFromMachineInstr(MachineInstr *MI)
static cl::opt< bool > AssumeMisalignedLoadStores("arm-assume-misaligned-load-store", cl::Hidden, cl::init(false), cl::desc("Be more conservative in ARM load/store opt"))
This switch disables formation of double/multi instructions that could potentially lead to (new) alig...
This file defines the BumpPtrAllocator interface.
This file defines the DenseMap class.
This file defines the DenseSet and SmallDenseSet classes.
const HexagonInstrInfo * TII
static MaybeAlign getAlign(Value *Ptr)
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
#define INITIALIZE_PASS_DEPENDENCY(depName)
#define INITIALIZE_PASS_END(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS_BEGIN(passName, arg, name, cfg, analysis)
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file implements a set that has insertion order iteration characteristics.
This file defines the SmallPtrSet class.
This file defines the SmallSet class.
This file defines the SmallVector class.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
This file describes how to lower LLVM code to machine code.
A wrapper pass to provide the legacy pass manager access to a suitably prepared AAResults object.
static void updateLRRestored(MachineFunction &MF)
Update the IsRestored flag on LR if it is spilled, based on the return instructions.
ARMFunctionInfo - This class is derived from MachineFunctionInfo and contains private ARM-specific in...
const ARMTargetLowering * getTargetLowering() const override
Represent the analysis usage information of a pass.
AnalysisUsage & addRequired()
AnalysisUsage & addPreserved()
Add the specified Pass class to the set of analyses preserved by this pass.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Identifies a unique instance of a variable.
iterator find(const_arg_type_t< KeyT > Val)
bool erase(const KeyT &Val)
Implements a dense probed hash-table based set.
FunctionPass class - This class is used to implement most global optimizations.
A set of register units used to track register liveness.
Describe properties that are true of each instruction in the target description file.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
LivenessQueryResult computeRegisterLiveness(const TargetRegisterInfo *TRI, MCRegister Reg, const_iterator Before, unsigned Neighborhood=10) const
Return whether (physical) register Reg has been defined and not killed as of just before Before.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
MachineInstrBundleIterator< MachineInstr > iterator
@ LQR_Dead
Register is known to be fully dead.
DominatorTree Class - Concrete subclass of DominatorTreeBase that is used to compute a normal dominat...
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
void getAnalysisUsage(AnalysisUsage &AU) const override
getAnalysisUsage - Subclasses that override getAnalysisUsage must call this.
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
virtual MachineFunctionProperties getRequiredProperties() const
Properties which a MachineFunction may have at a given point in time.
MachineFunctionProperties & set(Property P)
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
Function & getFunction()
Return the LLVM function that this machine code represents.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
const MachineInstrBuilder & cloneMergedMemRefs(ArrayRef< const MachineInstr * > OtherMIs) const
const MachineInstrBuilder & setMemRefs(ArrayRef< MachineMemOperand * > MMOs) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & copyImplicitOps(const MachineInstr &OtherMI) const
Copy all the implicit operands from OtherMI onto this one.
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
Representation of each machine instruction.
bool killsRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr kills the specified register.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
const MachineBasicBlock * getParent() const
unsigned getNumOperands() const
Retuns the total number of operands.
void copyImplicitOps(MachineFunction &MF, const MachineInstr &MI)
Copy implicit register operands from specified instruction to this instruction.
void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const MachineOperand & getOperand(unsigned i) const
A description of a memory reference used in the backend.
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher,...
Align getAlign() const
Return the minimum known alignment in bytes of the actual memory reference.
MachineOperand class - Representation of each machine instruction operand.
void setImm(int64_t immVal)
bool readsReg() const
readsReg - Returns true if this operand reads the previous value of its register.
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void setReg(Register Reg)
Change the register this operand corresponds to.
void setIsKill(bool Val=true)
void setIsUndef(bool Val=true)
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
virtual StringRef getPassName() const
getPassName - Return a nice clean name for a pass.
Wrapper class representing virtual and physical registers.
size_type count(const key_type &key) const
Count the number of elements of a given key in the SetVector.
bool insert(const value_type &X)
Insert a new element into the SetVector.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
size_type count(ConstPtrType Ptr) const
count - Return 1 if the specified pointer is in the set, 0 otherwise.
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
A BumpPtrAllocator that allows only elements of a specific type to be allocated.
StringRef - Represent a constant reference to a string, i.e.
Align getTransientStackAlign() const
getTransientStackAlignment - This method returns the number of bytes to which the stack pointer must ...
TargetInstrInfo - Interface to description of machine instruction set.
This class defines information used to lower LLVM code to legal SelectionDAG operators that the targe...
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
virtual const TargetFrameLowering * getFrameLowering() const
A Use represents the edge between a Value definition and its users.
LLVM Value Representation.
void dump() const
Support for debugging, callable in GDB: V->dump()
std::pair< iterator, bool > insert(const ValueT &V)
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
This provides a very simple, boring adaptor for a begin and end iterator into a range type.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned char getAM3Offset(unsigned AM3Opc)
unsigned getAM2Opc(AddrOpc Opc, unsigned Imm12, ShiftOpc SO, unsigned IdxMode=0)
AddrOpc getAM5Op(unsigned AM5Opc)
unsigned getAM3Opc(AddrOpc Opc, unsigned char Offset, unsigned IdxMode=0)
getAM3Opc - This function encodes the addrmode3 opc field.
unsigned char getAM5Offset(unsigned AM5Opc)
AddrOpc getAM3Op(unsigned AM3Opc)
unsigned ID
LLVM IR allows to use arbitrary numbers as calling convention identifiers.
@ C
The default llvm calling convention, compatible with C.
@ Define
Register definition.
@ Kill
The last use of a register.
@ CE
Windows NT (Windows on ARM)
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
NodeAddr< InstrNode * > Instr
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
APFloat abs(APFloat X)
Returns the absolute value of the argument.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
bool isLegalAddressImm(unsigned Opcode, int Imm, const TargetInstrInfo *TII)
unsigned getDeadRegState(bool B)
static std::array< MachineOperand, 2 > predOps(ARMCC::CondCodes Pred, unsigned PredReg=0)
Get the operands corresponding to the given Pred value.
FunctionPass * createARMLoadStoreOptimizationPass(bool PreAlloc=false)
Returns an instance of the load / store optimization pass.
unsigned M1(unsigned Val)
static bool isARMLowRegister(unsigned Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
auto reverse(ContainerTy &&C)
void sort(IteratorTy Start, IteratorTy End)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
@ First
Helpers to iterate all locations in the MemoryEffectsBase class.
unsigned getUndefRegState(bool B)
unsigned getDefRegState(bool B)
unsigned getKillRegState(bool B)
ARMCC::CondCodes getInstrPredicate(const MachineInstr &MI, Register &PredReg)
getInstrPredicate - If instruction is predicated, returns its predicate condition,...
DWARFExpression::Operation Op
unsigned M0(unsigned Val)
static MachineOperand t1CondCodeOp(bool isDead=false)
Get the operand corresponding to the conditional code result for Thumb1.
void erase_if(Container &C, UnaryPredicate P)
Provide a container algorithm similar to C++ Library Fundamentals v2's erase_if which is equivalent t...
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
static MachineOperand condCodeOp(unsigned CCReg=0)
Get the operand corresponding to the conditional code result.
int getAddSubImmediate(MachineInstr &MI)
This struct is a compact representation of a valid (non-zero power of two) alignment.
Description of the encoding of one expression Op.