23#include <initializer_list>
// Per-opcode scheduling attributes consulted by the ARM latency-override
// (bypass) mutations below.  One IInfo record is kept per opcode in Info[].
// NOTE(review): this excerpt elides interior lines; the leading integers are
// original-file line numbers fused into the text by the extraction tool.
32class InstructionInformation {
// Bitfield flags classifying an opcode (fragment; more flags are elided).
36 bool HasBRegAddrShift : 1;
38 bool IsInlineShiftALU : 1;
41 bool IsNonSubwordLoad : 1;
// For MVE integer MACs: opcode that pairs with this one for accumulator
// forwarding (0 = none) -- presumed from isMVEIntMACMatched() below; TODO
// confirm against the full source.
50 unsigned MVEIntMACMatched;
// Bitmask of operand indices that form the address of a memory op (see the
// AddressOpMask uses in the constructor and CortexM7 override below).
51 unsigned AddressOpMask;
58 MVEIntMACMatched(0), AddressOpMask(0) {}
// One record per opcode, indexed by opcode number.
60 typedef std::array<IInfo, ARM::INSTRUCTION_LIST_END> IInfoArray;
// Trivial per-opcode accessors over Info[].
65 unsigned getAddressOpMask(
unsigned Op) {
return Info[
Op].AddressOpMask; }
66 bool hasBRegAddr(
unsigned Op) {
return Info[
Op].HasBRegAddr; }
67 bool hasBRegAddrShift(
unsigned Op) {
return Info[
Op].HasBRegAddrShift; }
68 bool isDivide(
unsigned Op) {
return Info[
Op].IsDivide; }
69 bool isInlineShiftALU(
unsigned Op) {
return Info[
Op].IsInlineShiftALU; }
70 bool isMultiply(
unsigned Op) {
return Info[
Op].IsMultiply; }
71 bool isMVEIntMAC(
unsigned Op) {
return Info[
Op].IsMVEIntMAC; }
72 bool isNonSubwordLoad(
unsigned Op) {
return Info[
Op].IsNonSubwordLoad; }
73 bool isRev(
unsigned Op) {
return Info[
Op].IsRev; }
74 bool isShift(
unsigned Op) {
return Info[
Op].IsShift; }
// Floating-point width producer/consumer queries.  QP/DP/SP presumably mean
// quad- (128-bit), double- (64-bit) and single- (32-bit) precision register
// widths, per the register classes in markDPProducersConsumers() -- confirm.
77 bool producesQP(
unsigned Op) {
return Info[
Op].ProducesQP; }
78 bool producesDP(
unsigned Op) {
return Info[
Op].ProducesDP; }
79 bool producesSP(
unsigned Op) {
return Info[
Op].ProducesSP; }
80 bool consumesQP(
unsigned Op) {
return Info[
Op].ConsumesQP; }
81 bool consumesDP(
unsigned Op) {
return Info[
Op].ConsumesDP; }
82 bool consumesSP(
unsigned Op) {
return Info[
Op].ConsumesSP; }
// True when the Src/Dst MAC opcodes match for accumulator forwarding: same
// opcode, or Dst's recorded matched-opcode equals Src.
84 bool isMVEIntMACMatched(
unsigned SrcOp,
unsigned DstOp) {
85 return SrcOp == DstOp ||
Info[DstOp].MVEIntMACMatched == SrcOp;
// Constructor fills the per-opcode tables (defined below).
88 InstructionInformation(
const ARMBaseInstrInfo *
TII);
// Derives ProducesQP/DP/SP and ConsumesQP/DP/SP from each opcode's operand
// register classes (defined below).
91 void markDPProducersConsumers(
const ARMBaseInstrInfo *
TII);
// Builds the per-opcode attribute tables by enumerating known Thumb2/Thumb1
// and MVE opcodes into initializer_lists and setting the matching flags.
// NOTE(review): interior lines are elided in this excerpt (loop bodies'
// closing braces, several list tails, and the entire contents of
// Address1List/Address2List/Address3List at original lines ~182-412).
94InstructionInformation::InstructionInformation(
const ARMBaseInstrInfo *
TII) {
// Loads/stores whose address uses a base register plus register offset.
97 std::initializer_list<unsigned> hasBRegAddrList = {
98 t2LDRs, t2LDRBs, t2LDRHs, t2STRs, t2STRBs, t2STRHs,
99 tLDRr, tLDRBr, tLDRHr, tSTRr, tSTRBr, tSTRHr,
101 for (
auto op : hasBRegAddrList) {
102 Info[
op].HasBRegAddr =
true;
// Subset with a shifted register offset (Thumb2 "s" forms only).
105 std::initializer_list<unsigned> hasBRegAddrShiftList = {
106 t2LDRs, t2LDRBs, t2LDRHs, t2STRs, t2STRBs, t2STRHs,
108 for (
auto op : hasBRegAddrShiftList) {
109 Info[
op].HasBRegAddrShift =
true;
112 Info[t2SDIV].IsDivide =
Info[t2UDIV].IsDivide =
true;
// ALU ops with an inline-shifted register operand ("rs" forms).
114 std::initializer_list<unsigned> isInlineShiftALUList = {
115 t2ADCrs, t2ADDSrs, t2ADDrs, t2BICrs, t2EORrs,
116 t2ORNrs, t2RSBSrs, t2RSBrs, t2SBCrs, t2SUBrs,
117 t2SUBSrs, t2CMPrs, t2CMNzrs, t2TEQrs, t2TSTrs,
119 for (
auto op : isInlineShiftALUList) {
120 Info[
op].IsInlineShiftALU =
true;
// NOTE(review): this repeats the IsDivide assignment from original line 112
// above -- redundant but harmless; verify against the full source.
123 Info[t2SDIV].IsDivide =
Info[t2UDIV].IsDivide =
true;
// Integer multiply / multiply-accumulate opcodes.
125 std::initializer_list<unsigned> isMultiplyList = {
126 t2MUL, t2MLA, t2MLS, t2SMLABB, t2SMLABT, t2SMLAD, t2SMLADX,
127 t2SMLAL, t2SMLALBB, t2SMLALBT, t2SMLALD, t2SMLALDX, t2SMLALTB, t2SMLALTT,
128 t2SMLATB, t2SMLATT, t2SMLAWT, t2SMLSD, t2SMLSDX, t2SMLSLD, t2SMLSLDX,
129 t2SMMLA, t2SMMLAR, t2SMMLS, t2SMMLSR, t2SMMUL, t2SMMULR, t2SMUAD,
130 t2SMUADX, t2SMULBB, t2SMULBT, t2SMULL, t2SMULTB, t2SMULTT, t2SMULWT,
131 t2SMUSD, t2SMUSDX, t2UMAAL, t2UMLAL, t2UMULL, tMUL,
133 for (
auto op : isMultiplyList) {
134 Info[
op].IsMultiply =
true;
// MVE integer MAC opcodes (scalar-times-vector and doubling variants).
137 std::initializer_list<unsigned> isMVEIntMACList = {
138 MVE_VMLAS_qr_i16, MVE_VMLAS_qr_i32, MVE_VMLAS_qr_i8,
139 MVE_VMLA_qr_i16, MVE_VMLA_qr_i32, MVE_VMLA_qr_i8,
140 MVE_VQDMLAH_qrs16, MVE_VQDMLAH_qrs32, MVE_VQDMLAH_qrs8,
141 MVE_VQDMLASH_qrs16, MVE_VQDMLASH_qrs32, MVE_VQDMLASH_qrs8,
142 MVE_VQRDMLAH_qrs16, MVE_VQRDMLAH_qrs32, MVE_VQRDMLAH_qrs8,
143 MVE_VQRDMLASH_qrs16, MVE_VQRDMLASH_qrs32, MVE_VQRDMLASH_qrs8,
144 MVE_VQDMLADHXs16, MVE_VQDMLADHXs32, MVE_VQDMLADHXs8,
145 MVE_VQDMLADHs16, MVE_VQDMLADHs32, MVE_VQDMLADHs8,
146 MVE_VQDMLSDHXs16, MVE_VQDMLSDHXs32, MVE_VQDMLSDHXs8,
147 MVE_VQDMLSDHs16, MVE_VQDMLSDHs32, MVE_VQDMLSDHs8,
148 MVE_VQRDMLADHXs16, MVE_VQRDMLADHXs32, MVE_VQRDMLADHXs8,
149 MVE_VQRDMLADHs16, MVE_VQRDMLADHs32, MVE_VQRDMLADHs8,
150 MVE_VQRDMLSDHXs16, MVE_VQRDMLSDHXs32, MVE_VQRDMLSDHXs8,
151 MVE_VQRDMLSDHs16, MVE_VQRDMLSDHs32, MVE_VQRDMLSDHs8,
153 for (
auto op : isMVEIntMACList) {
154 Info[
op].IsMVEIntMAC =
true;
// Loads of word size or larger (i.e. not byte/halfword loads).
157 std::initializer_list<unsigned> isNonSubwordLoadList = {
158 t2LDRi12, t2LDRi8, t2LDR_POST, t2LDR_PRE, t2LDRpci,
159 t2LDRs, t2LDRDi8, t2LDRD_POST, t2LDRD_PRE, tLDRi,
160 tLDRpci, tLDRr, tLDRspi,
162 for (
auto op : isNonSubwordLoadList) {
163 Info[
op].IsNonSubwordLoad =
true;
// Byte-reverse / bit-reverse opcodes.
166 std::initializer_list<unsigned> isRevList = {
167 t2REV, t2REV16, t2REVSH, t2RBIT, tREV, tREV16, tREVSH,
169 for (
auto op : isRevList) {
// Standalone shift/rotate opcodes.
173 std::initializer_list<unsigned> isShiftList = {
174 t2ASRri, t2ASRrr, t2LSLri, t2LSLrr, t2LSRri, t2LSRrr, t2RORri, t2RORrr,
175 tASRri, tASRrr, tLSLSri, tLSLri, tLSLrr, tLSRri, tLSRrr, tROR,
// Address-operand position groups; list contents are elided from this
// excerpt (original lines 182-315, 317-405 and 407-412).
177 for (
auto op : isShiftList) {
181 std::initializer_list<unsigned> Address1List = {
316 std::initializer_list<unsigned> Address2List = {
406 std::initializer_list<unsigned> Address3List = {
// Record which operand indices carry the address: mask bit i set means
// operand i is part of the address (0x6 = ops 1-2, 0xc = ops 2-3,
// 0x18 = ops 3-4).
413 for (
auto &
op : Address1List) {
414 Info[
op].AddressOpMask = 0x6;
416 for (
auto &
op : Address2List) {
417 Info[
op].AddressOpMask = 0xc;
419 for (
auto &
op : Address3List) {
420 Info[
op].AddressOpMask = 0x18;
// Shifted-register address forms additionally use operand 3.
422 for (
auto &
op : hasBRegAddrShiftList) {
423 Info[
op].AddressOpMask |= 0x8;
// Walks every opcode's operand list and flags it as a producer (def operand)
// or consumer (use operand) of QP/DP/SP floating-point register classes.
// NOTE(review): the switch header over the operand's register class and the
// per-case Mark* assignments are elided from this excerpt (original lines
// 435, 445-463 etc.); only the case labels and the final marking remain.
427void InstructionInformation::markDPProducersConsumers(
428 const ARMBaseInstrInfo *
TII) {
// Iterate over all opcodes known to the target.
430 for (
unsigned MI = 0;
MI < ARM::INSTRUCTION_LIST_END; ++
MI) {
431 const MCInstrDesc &MID =
TII->get(
MI);
// Examine every operand of the opcode's MC description.
433 for (
unsigned OI = 0, OIE = MID.getNumOperands(); OI != OIE; ++OI) {
434 bool MarkQP =
false, MarkDP =
false, MarkSP =
false;
// Register classes grouped by width: D-register (64-bit) family...
436 case ARM::MQPRRegClassID:
437 case ARM::DPRRegClassID:
438 case ARM::DPR_8RegClassID:
439 case ARM::DPR_VFP2RegClassID:
440 case ARM::DPairRegClassID:
441 case ARM::DPairSpcRegClassID:
442 case ARM::DQuadRegClassID:
443 case ARM::DQuadSpcRegClassID:
444 case ARM::DTripleRegClassID:
445 case ARM::DTripleSpcRegClassID:
// ...Q-register (128-bit) family...
448 case ARM::QPRRegClassID:
449 case ARM::QPR_8RegClassID:
450 case ARM::QPR_VFP2RegClassID:
451 case ARM::QQPRRegClassID:
452 case ARM::QQQQPRRegClassID:
// ...and S-register (32-bit) family.
455 case ARM::SPRRegClassID:
456 case ARM::SPR_8RegClassID:
457 case ARM::FPWithVPRRegClassID:
// Def operands (index < NumDefs) mark producers; others mark consumers.
464 if (OI < MID.getNumDefs())
465 Info[
MI].ProducesQP =
true;
467 Info[
MI].ConsumesQP =
true;
470 if (OI < MID.getNumDefs())
471 Info[
MI].ProducesDP =
true;
473 Info[
MI].ConsumesDP =
true;
476 if (OI < MID.getNumDefs())
477 Info[
MI].ProducesSP =
true;
479 Info[
MI].ConsumesSP =
true;
// NOTE(review): orphan fragments of several elided helpers follow; their
// headers are missing from this excerpt.  Attribution below is inferred from
// the API summary text at the end of the file -- confirm against full source.
// Presumably hasImplicitCPSRUse(): true if MI implicitly reads CPSR (flags).
488 return MI->getDesc().hasImplicitUseOfPhysReg(ARM::CPSR);
// Presumably from setBidirLatencies(): update one direction of the edge.
497 PDep.setLatency(latency);
// Presumably mismatchedPred(): condition codes differ ignoring the low bit
// (i.e. they are not the same condition or its inverse pair).
507 return (a & 0xe) != (b & 0xe);
// Presumably memoryRAWHazard(): only a store feeding a load can be a RAW
// memory hazard.
559 if (!SrcInst.mayStore() || !DstInst.mayLoad())
563 auto DstMO = *DstInst.memoperands().begin();
564 auto SrcVal = SrcMO->getValue();
565 auto DstVal = DstMO->getValue();
566 auto SrcPseudoVal = SrcMO->getPseudoValue();
567 auto DstPseudoVal = DstMO->getPseudoValue();
// Same IR value at the same offset => definitely the same location.
569 SrcMO->getOffset() == DstMO->getOffset()) {
572 }
// Otherwise compare pseudo source values (e.g. fixed stack slots).
else if (SrcPseudoVal && DstPseudoVal &&
573 SrcPseudoVal->kind() == DstPseudoVal->kind() &&
576 auto FS0 = cast<FixedStackPseudoSourceValue>(SrcPseudoVal);
577 auto FS1 = cast<FixedStackPseudoSourceValue>(DstPseudoVal);
// Shared per-subtarget instruction-information table, reset by each
// *Overrides constructor below.
588std::unique_ptr<InstructionInformation>
II;
// Cortex-M7 flavour: uses the base tables unchanged (no DP producer/consumer
// marking, unlike the M85 variant below).
590class CortexM7InstructionInformation :
public InstructionInformation {
593 : InstructionInformation(
TII) {}
// Latency-bypass overrides for Cortex-M7; installs its instruction table
// into the shared II pointer on construction.
596class CortexM7Overrides :
public ARMOverrideBypasses {
598 CortexM7Overrides(
const ARMBaseInstrInfo *
TII, AAResults *AA)
599 : ARMOverrideBypasses(
TII, AA) {
601 II.reset(
new CortexM7InstructionInformation(
TII));
604 void modifyBypasses(SUnit &)
override;
// Adjusts successor-edge latencies of ISU according to Cortex-M7 forwarding
// behaviour.  NOTE(review): several guard lines (continue statements, a few
// conditions) are elided from this excerpt.
607void CortexM7Overrides::modifyBypasses(SUnit &ISU) {
608 const MachineInstr *SrcMI = ISU.getInstr();
609 unsigned SrcOpcode = SrcMI->getOpcode();
610 bool isNSWload =
II->isNonSubwordLoad(SrcOpcode);
613 for (SDep &Dep : ISU.Succs) {
// Generic edge rewrites handled by the base class first.
617 if (zeroOutputDependences(ISU, Dep))
620 if (memoryRAWHazard(ISU, Dep, 4))
627 SUnit &DepSU = *Dep.getSUnit();
628 if (DepSU.isBoundaryNode())
631 if (makeBundleAssumptions(ISU, Dep) == 1)
634 const MachineInstr *DstMI = DepSU.getInstr();
635 unsigned DstOpcode = DstMI->getOpcode();
// A word-or-larger load feeding a multiply or divide costs one extra cycle.
642 if (isNSWload && (
II->isMultiply(DstOpcode) ||
II->isDivide(DstOpcode)))
643 setBidirLatencies(ISU, Dep, Dep.getLatency() + 1);
// A load feeding the base register (operand 2) of a register-offset
// load/store also costs one extra cycle.
648 if (isNSWload &&
II->hasBRegAddr(DstOpcode) &&
649 DstMI->getOperand(2).getReg() == Dep.getReg())
650 setBidirLatencies(ISU, Dep, Dep.getLatency() + 1);
// A multiply feeding any address operand of a memory op: fixed latency 4.
// AddressOpMask bit i (after >>1) selects operand index i starting at 1.
654 if (
II->isMultiply(SrcOpcode)) {
655 unsigned OpMask =
II->getAddressOpMask(DstOpcode) >> 1;
656 for (
unsigned i = 1; OpMask; ++i, OpMask >>= 1) {
657 if ((OpMask & 1) && DstMI->getOperand(i).isReg() &&
658 DstMI->getOperand(i).getReg() == Dep.getReg()) {
659 setBidirLatencies(ISU, Dep, 4);
// Bundle/predication-related adjustment (condition partially elided).
668 (SrcOpcode == ARM::BUNDLE ||
670 TII->getPredicate(*DstMI)))) {
// Inline-shift ALU consuming the dep on its shifted operand (operand 1,
// with a nonzero shift amount in operand 3) caps the latency at 3.
673 if (
II->isInlineShiftALU(DstOpcode) && DstMI->getOperand(3).getImm() &&
674 DstMI->getOperand(1).getReg() == Dep.getReg())
676 Lat = std::min(3u, Dep.getLatency() + Lat);
677 setBidirLatencies(ISU, Dep, std::max(Dep.getLatency(), Lat));
// Flag-setting to flag-consuming forwarding: CPSR edges are one cycle.
684 if (Dep.isAssignedRegDep() && Dep.getReg() == ARM::CPSR &&
686 setBidirLatencies(ISU, Dep, 1);
// REV/RBIT results forward cheaply into shifts and inline-shift ALU ops.
691 if (
II->isRev(SrcOpcode)) {
692 if (
II->isInlineShiftALU(DstOpcode))
693 setBidirLatencies(ISU, Dep, 2);
694 else if (
II->isShift(DstOpcode))
695 setBidirLatencies(ISU, Dep, 1);
// Cortex-M85 flavour: additionally derives the FP width producer/consumer
// flags used by modifyMixedWidthFP().
700class M85InstructionInformation :
public InstructionInformation {
702 M85InstructionInformation(
const ARMBaseInstrInfo *t)
703 : InstructionInformation(t) {
704 markDPProducersConsumers(t);
// Latency-bypass overrides for Cortex-M85; installs its instruction table
// into the shared II pointer on construction.
708class M85Overrides :
public ARMOverrideBypasses {
710 M85Overrides(
const ARMBaseInstrInfo *t, AAResults *a)
711 : ARMOverrideBypasses(t, a) {
713 II.reset(
new M85InstructionInformation(t));
716 void modifyBypasses(SUnit &)
override;
// Helpers for mixed-width FP forwarding adjustments (defined below).
719 unsigned computeBypassStage(
const MCSchedClassDesc *SCD);
// Returns a signed latency delta for a SrcMI->DstMI edge on RegID when the
// producer and consumer operate at different FP register widths.
720 signed modifyMixedWidthFP(
const MachineInstr *SrcMI,
721 const MachineInstr *DstMI,
unsigned RegID,
722 const MCSchedClassDesc *SCD);
// Derives the pipeline stage a result becomes available in, from the sched
// model's write-latency entry.  NOTE(review): the tail of this function
// (DefIdx setup and the final return) is elided from this excerpt.
725unsigned M85Overrides::computeBypassStage(
const MCSchedClassDesc *SCDesc) {
726 auto SM = DAG->getSchedModel();
728 if (DefIdx < SCDesc->NumWriteLatencyEntries) {
730 const MCWriteLatencyEntry *WLEntry =
731 SM->getSubtargetInfo()->getWriteLatencyEntry(SCDesc, DefIdx);
// Negative Cycles means "invalid" in MCWriteLatencyEntry; treat as huge.
732 unsigned Latency = WLEntry->Cycles >= 0 ? WLEntry->Cycles : 1000;
// Computes a signed latency adjustment for an FP dependence whose producer
// and consumer use different register widths (SP vs DP vs QP), based on
// which sub-register lane carries the value.  The first group of cases
// matches virtual registers via sub-register indices (ssub_1/ssub_3 = odd
// lanes); the second group repeats the logic for physical S/D registers via
// register-number parity.  NOTE(review): several return expressions and the
// virtual/physical dispatch lines are elided from this excerpt.
749signed M85Overrides::modifyMixedWidthFP(
const MachineInstr *SrcMI,
750 const MachineInstr *DstMI,
752 const MCSchedClassDesc *SCD) {
// Fast exit: source must produce an FP value of some width.
754 if (!
II->producesSP(SrcMI->getOpcode()) &&
755 !
II->producesDP(SrcMI->getOpcode()) &&
756 !
II->producesQP(SrcMI->getOpcode()))
// --- Virtual-register cases (matched by sub-register index) ---
// SP producer feeding a DP consumer: odd S lane (ssub_1) pays the bypass.
760 if (
II->producesSP(SrcMI->getOpcode()) &&
761 II->consumesDP(DstMI->getOpcode())) {
762 for (
auto &
OP : SrcMI->operands())
763 if (
OP.isReg() &&
OP.isDef() &&
OP.getReg() == RegID &&
764 OP.getSubReg() == ARM::ssub_1)
765 return 5 - computeBypassStage(SCD);
766 }
// SP producer feeding a QP consumer: odd S lanes, with an extra penalty for
// the upper D half (ssub_2/ssub_3).
else if (
II->producesSP(SrcMI->getOpcode()) &&
767 II->consumesQP(DstMI->getOpcode())) {
768 for (
auto &
OP : SrcMI->operands())
769 if (
OP.isReg() &&
OP.isDef() &&
OP.getReg() == RegID &&
770 (
OP.getSubReg() == ARM::ssub_1 ||
OP.getSubReg() == ARM::ssub_3))
771 return 5 - computeBypassStage(SCD) -
772 ((
OP.getSubReg() == ARM::ssub_2 ||
773 OP.getSubReg() == ARM::ssub_3)
776 }
// DP producer feeding a QP consumer (return expression elided).
else if (
II->producesDP(SrcMI->getOpcode()) &&
777 II->consumesQP(DstMI->getOpcode())) {
778 for (
auto &
OP : SrcMI->operands())
779 if (
OP.isReg() &&
OP.isDef() &&
OP.getReg() == RegID &&
780 OP.getSubReg() == ARM::ssub_1)
782 }
// DP producer feeding an SP consumer: check the *consumer's* use operand.
else if (
II->producesDP(SrcMI->getOpcode()) &&
783 II->consumesSP(DstMI->getOpcode())) {
784 for (
auto &
OP : DstMI->operands())
785 if (
OP.isReg() &&
OP.isUse() &&
OP.getReg() == RegID &&
786 OP.getSubReg() == ARM::ssub_1)
787 return 5 - computeBypassStage(SCD);
788 }
// QP producer feeding an SP consumer: odd lanes, plus upper-half bonus.
else if (
II->producesQP(SrcMI->getOpcode()) &&
789 II->consumesSP(DstMI->getOpcode())) {
790 for (
auto &
OP : DstMI->operands())
791 if (
OP.isReg() &&
OP.isUse() &&
OP.getReg() == RegID &&
792 (
OP.getSubReg() == ARM::ssub_1 ||
OP.getSubReg() == ARM::ssub_3))
793 return 5 - computeBypassStage(SCD) +
794 ((
OP.getSubReg() == ARM::ssub_2 ||
795 OP.getSubReg() == ARM::ssub_3)
798 }
// QP producer feeding a DP consumer (return expression elided).
else if (
II->producesQP(SrcMI->getOpcode()) &&
799 II->consumesDP(DstMI->getOpcode())) {
800 for (
auto &
OP : DstMI->operands())
801 if (
OP.isReg() &&
OP.isUse() &&
OP.getReg() == RegID &&
802 OP.getSubReg() == ARM::ssub_1)
// --- Physical-register cases (matched by S/D register number parity) ---
// Odd physical S register written by an SP producer, consumed as DP: the
// RegID may name the S reg itself or its containing D/Q register.
812 if (
II->producesSP(SrcMI->getOpcode()) &&
813 II->consumesDP(DstMI->getOpcode())) {
814 for (
auto &
OP : SrcMI->operands())
815 if (
OP.isReg() &&
OP.isDef() &&
OP.getReg() >= ARM::S1 &&
816 OP.getReg() <= ARM::S31 && (
OP.getReg() - ARM::S0) % 2 &&
817 (
OP.getReg() == RegID ||
818 (
OP.getReg() - ARM::S0) / 2 + ARM::D0 == RegID ||
819 (
OP.getReg() - ARM::S0) / 4 + ARM::Q0 == RegID))
820 return 5 - computeBypassStage(SCD);
821 }
// SP -> QP: as above, with the odd-D-half penalty.
else if (
II->producesSP(SrcMI->getOpcode()) &&
822 II->consumesQP(DstMI->getOpcode())) {
823 for (
auto &
OP : SrcMI->operands())
824 if (
OP.isReg() &&
OP.isDef() &&
OP.getReg() >= ARM::S1 &&
825 OP.getReg() <= ARM::S31 && (
OP.getReg() - ARM::S0) % 2 &&
826 (
OP.getReg() == RegID ||
827 (
OP.getReg() - ARM::S0) / 2 + ARM::D0 == RegID ||
828 (
OP.getReg() - ARM::S0) / 4 + ARM::Q0 == RegID))
829 return 5 - computeBypassStage(SCD) -
830 (((
OP.getReg() - ARM::S0) / 2) % 2 ? 1 : 0);
831 }
// DP -> QP: odd physical D register (return expression elided).
else if (
II->producesDP(SrcMI->getOpcode()) &&
832 II->consumesQP(DstMI->getOpcode())) {
833 for (
auto &
OP : SrcMI->operands())
834 if (
OP.isReg() &&
OP.isDef() &&
OP.getReg() >= ARM::D0 &&
835 OP.getReg() <= ARM::D15 && (
OP.getReg() - ARM::D0) % 2 &&
836 (
OP.getReg() == RegID ||
837 (
OP.getReg() - ARM::D0) / 2 + ARM::Q0 == RegID))
839 }
// DP -> SP and QP -> SP: decided directly from the edge's RegID parity.
else if (
II->producesDP(SrcMI->getOpcode()) &&
840 II->consumesSP(DstMI->getOpcode())) {
841 if (RegID >= ARM::S1 && RegID <= ARM::S31 && (RegID - ARM::S0) % 2)
842 return 5 - computeBypassStage(SCD);
843 }
else if (
II->producesQP(SrcMI->getOpcode()) &&
844 II->consumesSP(DstMI->getOpcode())) {
845 if (RegID >= ARM::S1 && RegID <= ARM::S31 && (RegID - ARM::S0) % 2)
846 return 5 - computeBypassStage(SCD) +
847 (((RegID - ARM::S0) / 2) % 2 ? 1 : 0);
848 }
// QP -> DP: odd physical D register (return expression elided).
else if (
II->producesQP(SrcMI->getOpcode()) &&
849 II->consumesDP(DstMI->getOpcode())) {
850 if (RegID >= ARM::D1 && RegID <= ARM::D15 && (RegID - ARM::D0) % 2)
// Adjusts successor-edge latencies of ISU according to Cortex-M85 forwarding
// behaviour.  NOTE(review): several guard lines (continue statements, some
// conditions, e.g. the one guarding original line 897) are elided.
857void M85Overrides::modifyBypasses(SUnit &ISU) {
858 const MachineInstr *SrcMI = ISU.getInstr();
859 unsigned SrcOpcode = SrcMI->getOpcode();
860 bool isNSWload =
II->isNonSubwordLoad(SrcOpcode);
863 for (SDep &Dep : ISU.Succs) {
// Generic edge rewrites handled by the base class first.
867 if (zeroOutputDependences(ISU, Dep))
870 if (memoryRAWHazard(ISU, Dep, 3))
877 SUnit &DepSU = *Dep.getSUnit();
878 if (DepSU.isBoundaryNode())
881 if (makeBundleAssumptions(ISU, Dep) == 1)
884 const MachineInstr *DstMI = DepSU.getInstr();
885 unsigned DstOpcode = DstMI->getOpcode();
// A load feeding the shifted base register of a shifted-offset memory op
// (nonzero shift imm in operand 3, base in operand 2) costs +1 cycle.
891 if (isNSWload &&
II->hasBRegAddrShift(DstOpcode) &&
892 DstMI->getOperand(3).getImm() != 0 &&
893 DstMI->getOperand(2).getReg() == Dep.getReg())
894 setBidirLatencies(ISU, Dep, Dep.getLatency() + 1);
// (Condition elided) another +1-cycle case.
897 setBidirLatencies(ISU, Dep, Dep.getLatency() + 1);
// Matched MVE integer MAC accumulator chains forward one cycle earlier.
900 if (
II->isMVEIntMAC(DstOpcode) &&
901 II->isMVEIntMACMatched(SrcOpcode, DstOpcode) &&
902 DstMI->getOperand(0).isReg() &&
903 DstMI->getOperand(0).getReg() == Dep.getReg())
904 setBidirLatencies(ISU, Dep, Dep.getLatency() - 1);
// Flag-setting to flag-consuming forwarding: CPSR edges are free.
908 if (Dep.isAssignedRegDep() && Dep.getReg() == ARM::CPSR &&
910 setBidirLatencies(ISU, Dep, 0);
// Mixed-width FP forwarding adjustment (may be negative), floored at 0.
912 if (
signed ALat = modifyMixedWidthFP(SrcMI, DstMI, Dep.getReg(),
913 DAG->getSchedClass(&ISU)))
914 setBidirLatencies(ISU, Dep, std::max(0,
signed(Dep.getLatency()) + ALat));
// REV/RBIT results forward in one cycle into shifts and shift-ALU ops.
916 if (
II->isRev(SrcOpcode)) {
917 if (
II->isInlineShiftALU(DstOpcode))
918 setBidirLatencies(ISU, Dep, 1);
919 else if (
II->isShift(DstOpcode))
920 setBidirLatencies(ISU, Dep, 1);
// Latency-bypass overrides for Cortex-M55; uses the base
// InstructionInformation (no per-core table installed).  The single visible
// rule sets a fixed latency of 3 on the matching edge (its guard condition,
// original lines 944-945, is elided from this excerpt).
927class CortexM55Overrides :
public ARMOverrideBypasses {
929 CortexM55Overrides(
const ARMBaseInstrInfo *
TII, AAResults *AA)
930 : ARMOverrideBypasses(
TII, AA) {}
932 void modifyBypasses(SUnit &SU)
override {
933 MachineInstr *SrcMI = SU.getInstr();
937 for (SDep &Dep : SU.Succs) {
940 SUnit &DepSU = *Dep.getSUnit();
941 if (DepSU.isBoundaryNode())
943 MachineInstr *DstMI = DepSU.getInstr();
946 setBidirLatencies(SU, Dep, 3);
// Fragment of apply() (header elided): also runs the overrides against the
// region's exit node so edges into ExitSU get adjusted too.
961 modifyBypasses(DAGInstrs->
ExitSU);
// Factory: selects the per-core override mutation for the given subtarget.
// NOTE(review): the function signature line and the fall-through return for
// unsupported cores (presumably nullptr) are elided from this excerpt.
964std::unique_ptr<ScheduleDAGMutation>
966 if (ST.isCortexM85())
967 return std::make_unique<M85Overrides>(ST.getInstrInfo(), AA);
968 else if (ST.isCortexM7())
969 return std::make_unique<CortexM7Overrides>(ST.getInstrInfo(), AA);
970 else if (ST.isCortexM55())
971 return std::make_unique<CortexM55Overrides>(ST.getInstrInfo(), AA);
Analysis containing CSE Info
const HexagonInstrInfo * TII
mir Rename Register Operands
uint64_t IntrinsicInst * II
AliasResult alias(const MemoryLocation &LocA, const MemoryLocation &LocB)
The main low level interface to the alias analysis implementation.
bool isPredicated(const MachineInstr &MI) const override
bool memoryRAWHazard(SUnit &ISU, SDep &Dep, unsigned latency)
static void setBidirLatencies(SUnit &SrcSU, SDep &SrcDep, unsigned latency)
static bool zeroOutputDependences(SUnit &ISU, SDep &Dep)
void apply(ScheduleDAGInstrs *DAGInstrs) override
unsigned makeBundleAssumptions(SUnit &ISU, SDep &Dep)
const ARMBaseInstrInfo * TII
@ MustAlias
The two locations precisely alias each other.
bool isPredicated(const MachineInstr &MI) const override
Returns true if the instruction is already predicated.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
static constexpr bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
Kind getKind() const
Returns an enum value representing the kind of the dependence.
@ Output
A register output-dependence (aka WAW).
@ Data
Regular data dependence (aka true-dependence).
void setLatency(unsigned Lat)
Sets the latency for this edge.
bool isAssignedRegDep() const
Tests if this is a Data dependence that is associated with a register.
bool isNormalMemory() const
Tests if this is an Order dependence between two memory accesses where both sides of the dependence access memory in non-volatile and fully modeled ways.
unsigned getReg() const
Returns the register associated with this edge.
Scheduling unit. This is a node in the scheduling DAG.
void setHeightDirty()
Sets a flag in this node to indicate that its stored Height value will require recomputation the next time getHeight() is called.
bool isBoundaryNode() const
Boundary nodes are placeholders for the boundary of the scheduling region.
void setDepthDirty()
Sets a flag in this node to indicate that its stored Depth value will require recomputation the next time getDepth() is called.
SmallVector< SDep, 4 > Preds
All sunit predecessors.
MachineInstr * getInstr() const
Returns the representative MachineInstr for this SUnit.
A ScheduleDAG for scheduling lists of MachineInstr.
std::vector< SUnit > SUnits
The scheduling units.
SUnit ExitSU
Special node for the region exit.
This is an optimization pass for GlobalISel generic memory operations.
std::unique_ptr< ScheduleDAGMutation > createARMLatencyMutations(const ARMSubtarget &ST, AAResults *AA)
bool isMVEVectorInstruction(const MachineInstr *MI)
static bool hasImplicitCPSRUse(const MachineInstr *MI)
DWARFExpression::Operation Op
static bool mismatchedPred(ARMCC::CondCodes a, ARMCC::CondCodes b)