23#include <initializer_list>
32class InstructionInformation {
36 bool HasBRegAddrShift : 1;
38 bool IsInlineShiftALU : 1;
41 bool IsNonSubwordLoad : 1;
50 unsigned MVEIntMACMatched;
51 unsigned AddressOpMask;
58 MVEIntMACMatched(0), AddressOpMask(0) {}
60 typedef std::array<IInfo, ARM::INSTRUCTION_LIST_END> IInfoArray;
65 unsigned getAddressOpMask(
unsigned Op) {
return Info[
Op].AddressOpMask; }
66 bool hasBRegAddr(
unsigned Op) {
return Info[
Op].HasBRegAddr; }
67 bool hasBRegAddrShift(
unsigned Op) {
return Info[
Op].HasBRegAddrShift; }
68 bool isDivide(
unsigned Op) {
return Info[
Op].IsDivide; }
69 bool isInlineShiftALU(
unsigned Op) {
return Info[
Op].IsInlineShiftALU; }
70 bool isMultiply(
unsigned Op) {
return Info[
Op].IsMultiply; }
71 bool isMVEIntMAC(
unsigned Op) {
return Info[
Op].IsMVEIntMAC; }
72 bool isNonSubwordLoad(
unsigned Op) {
return Info[
Op].IsNonSubwordLoad; }
73 bool isRev(
unsigned Op) {
return Info[
Op].IsRev; }
74 bool isShift(
unsigned Op) {
return Info[
Op].IsShift; }
77 bool producesQP(
unsigned Op) {
return Info[
Op].ProducesQP; }
78 bool producesDP(
unsigned Op) {
return Info[
Op].ProducesDP; }
79 bool producesSP(
unsigned Op) {
return Info[
Op].ProducesSP; }
80 bool consumesQP(
unsigned Op) {
return Info[
Op].ConsumesQP; }
81 bool consumesDP(
unsigned Op) {
return Info[
Op].ConsumesDP; }
82 bool consumesSP(
unsigned Op) {
return Info[
Op].ConsumesSP; }
84 bool isMVEIntMACMatched(
unsigned SrcOp,
unsigned DstOp) {
85 return SrcOp == DstOp || Info[DstOp].MVEIntMACMatched == SrcOp;
88 InstructionInformation(
const ARMBaseInstrInfo *
TII);
91 void markDPProducersConsumers(
const ARMBaseInstrInfo *
TII);
97 std::initializer_list<unsigned> hasBRegAddrList = {
98 t2LDRs, t2LDRBs, t2LDRHs, t2STRs, t2STRBs, t2STRHs,
99 tLDRr, tLDRBr, tLDRHr, tSTRr, tSTRBr, tSTRHr,
101 for (
auto op : hasBRegAddrList) {
102 Info[
op].HasBRegAddr =
true;
105 std::initializer_list<unsigned> hasBRegAddrShiftList = {
106 t2LDRs, t2LDRBs, t2LDRHs, t2STRs, t2STRBs, t2STRHs,
108 for (
auto op : hasBRegAddrShiftList) {
109 Info[
op].HasBRegAddrShift =
true;
112 Info[t2SDIV].IsDivide = Info[t2UDIV].IsDivide =
true;
114 std::initializer_list<unsigned> isInlineShiftALUList = {
115 t2ADCrs, t2ADDSrs, t2ADDrs, t2BICrs, t2EORrs, t2ORNrs, t2RSBSrs, t2RSBrs,
116 t2SBCrs, t2SUBrs, t2SUBSrs, t2CMPrs, t2CMNrs, t2TEQrs, t2TSTrs,
118 for (
auto op : isInlineShiftALUList) {
119 Info[
op].IsInlineShiftALU =
true;
122 Info[t2SDIV].IsDivide = Info[t2UDIV].IsDivide =
true;
124 std::initializer_list<unsigned> isMultiplyList = {
125 t2MUL, t2MLA, t2MLS, t2SMLABB, t2SMLABT, t2SMLAD, t2SMLADX,
126 t2SMLAL, t2SMLALBB, t2SMLALBT, t2SMLALD, t2SMLALDX, t2SMLALTB, t2SMLALTT,
127 t2SMLATB, t2SMLATT, t2SMLAWT, t2SMLSD, t2SMLSDX, t2SMLSLD, t2SMLSLDX,
128 t2SMMLA, t2SMMLAR, t2SMMLS, t2SMMLSR, t2SMMUL, t2SMMULR, t2SMUAD,
129 t2SMUADX, t2SMULBB, t2SMULBT, t2SMULL, t2SMULTB, t2SMULTT, t2SMULWT,
130 t2SMUSD, t2SMUSDX, t2UMAAL, t2UMLAL, t2UMULL, tMUL,
132 for (
auto op : isMultiplyList) {
133 Info[
op].IsMultiply =
true;
136 std::initializer_list<unsigned> isMVEIntMACList = {
137 MVE_VMLAS_qr_i16, MVE_VMLAS_qr_i32, MVE_VMLAS_qr_i8,
138 MVE_VMLA_qr_i16, MVE_VMLA_qr_i32, MVE_VMLA_qr_i8,
139 MVE_VQDMLAH_qrs16, MVE_VQDMLAH_qrs32, MVE_VQDMLAH_qrs8,
140 MVE_VQDMLASH_qrs16, MVE_VQDMLASH_qrs32, MVE_VQDMLASH_qrs8,
141 MVE_VQRDMLAH_qrs16, MVE_VQRDMLAH_qrs32, MVE_VQRDMLAH_qrs8,
142 MVE_VQRDMLASH_qrs16, MVE_VQRDMLASH_qrs32, MVE_VQRDMLASH_qrs8,
143 MVE_VQDMLADHXs16, MVE_VQDMLADHXs32, MVE_VQDMLADHXs8,
144 MVE_VQDMLADHs16, MVE_VQDMLADHs32, MVE_VQDMLADHs8,
145 MVE_VQDMLSDHXs16, MVE_VQDMLSDHXs32, MVE_VQDMLSDHXs8,
146 MVE_VQDMLSDHs16, MVE_VQDMLSDHs32, MVE_VQDMLSDHs8,
147 MVE_VQRDMLADHXs16, MVE_VQRDMLADHXs32, MVE_VQRDMLADHXs8,
148 MVE_VQRDMLADHs16, MVE_VQRDMLADHs32, MVE_VQRDMLADHs8,
149 MVE_VQRDMLSDHXs16, MVE_VQRDMLSDHXs32, MVE_VQRDMLSDHXs8,
150 MVE_VQRDMLSDHs16, MVE_VQRDMLSDHs32, MVE_VQRDMLSDHs8,
152 for (
auto op : isMVEIntMACList) {
153 Info[
op].IsMVEIntMAC =
true;
156 std::initializer_list<unsigned> isNonSubwordLoadList = {
157 t2LDRi12, t2LDRi8, t2LDR_POST, t2LDR_PRE, t2LDRpci,
158 t2LDRs, t2LDRDi8, t2LDRD_POST, t2LDRD_PRE, tLDRi,
159 tLDRpci, tLDRr, tLDRspi,
161 for (
auto op : isNonSubwordLoadList) {
162 Info[
op].IsNonSubwordLoad =
true;
165 std::initializer_list<unsigned> isRevList = {
166 t2REV, t2REV16, t2REVSH, t2RBIT, tREV, tREV16, tREVSH,
168 for (
auto op : isRevList) {
169 Info[
op].IsRev =
true;
172 std::initializer_list<unsigned> isShiftList = {
173 t2ASRri, t2ASRrr, t2LSLri, t2LSLrr, t2LSRri, t2LSRrr, t2RORri, t2RORrr,
174 tASRri, tASRrr, tLSLSri, tLSLri, tLSLrr, tLSRri, tLSRrr, tROR,
176 for (
auto op : isShiftList) {
177 Info[
op].IsShift =
true;
180 std::initializer_list<unsigned> Address1List = {
315 std::initializer_list<unsigned> Address2List = {
405 std::initializer_list<unsigned> Address3List = {
412 for (
auto &
op : Address1List) {
413 Info[
op].AddressOpMask = 0x6;
415 for (
auto &
op : Address2List) {
416 Info[
op].AddressOpMask = 0xc;
418 for (
auto &
op : Address3List) {
419 Info[
op].AddressOpMask = 0x18;
421 for (
auto &
op : hasBRegAddrShiftList) {
422 Info[
op].AddressOpMask |= 0x8;
426void InstructionInformation::markDPProducersConsumers(
427 const ARMBaseInstrInfo *
TII) {
429 for (
unsigned MI = 0;
MI < ARM::INSTRUCTION_LIST_END; ++
MI) {
430 const MCInstrDesc &MID =
TII->get(
MI);
431 auto Operands = MID.operands();
432 for (
unsigned OI = 0, OIE = MID.getNumOperands(); OI != OIE; ++OI) {
433 bool MarkQP =
false, MarkDP =
false, MarkSP =
false;
434 switch (Operands[OI].RegClass) {
435 case ARM::MQPRRegClassID:
436 case ARM::DPRRegClassID:
437 case ARM::DPR_8RegClassID:
438 case ARM::DPR_VFP2RegClassID:
439 case ARM::DPairRegClassID:
440 case ARM::DPairSpcRegClassID:
441 case ARM::DQuadRegClassID:
442 case ARM::DQuadSpcRegClassID:
443 case ARM::DTripleRegClassID:
444 case ARM::DTripleSpcRegClassID:
447 case ARM::QPRRegClassID:
448 case ARM::QPR_8RegClassID:
449 case ARM::QPR_VFP2RegClassID:
450 case ARM::QQPRRegClassID:
451 case ARM::QQQQPRRegClassID:
454 case ARM::SPRRegClassID:
455 case ARM::SPR_8RegClassID:
456 case ARM::FPWithVPRRegClassID:
463 if (OI < MID.getNumDefs())
464 Info[
MI].ProducesQP =
true;
466 Info[
MI].ConsumesQP =
true;
469 if (OI < MID.getNumDefs())
470 Info[
MI].ProducesDP =
true;
472 Info[
MI].ConsumesDP =
true;
475 if (OI < MID.getNumDefs())
476 Info[
MI].ProducesSP =
true;
478 Info[
MI].ConsumesSP =
true;
487 return MI->getDesc().hasImplicitUseOfPhysReg(ARM::CPSR);
496 PDep.setLatency(latency);
506 return (a & 0xe) != (b & 0xe);
536 if (DstOpcode == ARM::BUNDLE &&
TII->isPredicated(*DstMI)) {
542 if (SrcOpcode == ARM::BUNDLE &&
TII->isPredicated(*SrcMI) &&
558 if (!SrcInst.mayStore() || !DstInst.mayLoad())
562 auto DstMO = *DstInst.memoperands().begin();
563 auto SrcVal = SrcMO->getValue();
564 auto DstVal = DstMO->getValue();
565 auto SrcPseudoVal = SrcMO->getPseudoValue();
566 auto DstPseudoVal = DstMO->getPseudoValue();
568 SrcMO->getOffset() == DstMO->getOffset()) {
571 }
else if (SrcPseudoVal && DstPseudoVal &&
572 SrcPseudoVal->kind() == DstPseudoVal->kind() &&
587std::unique_ptr<InstructionInformation>
II;
589class CortexM7InstructionInformation :
public InstructionInformation {
592 : InstructionInformation(
TII) {}
595class CortexM7Overrides :
public ARMOverrideBypasses {
597 CortexM7Overrides(
const ARMBaseInstrInfo *
TII, AAResults *AA)
598 : ARMOverrideBypasses(
TII, AA) {
600 II.reset(
new CortexM7InstructionInformation(
TII));
603 void modifyBypasses(SUnit &)
override;
606void CortexM7Overrides::modifyBypasses(SUnit &ISU) {
607 const MachineInstr *SrcMI = ISU.getInstr();
608 unsigned SrcOpcode = SrcMI->getOpcode();
609 bool isNSWload =
II->isNonSubwordLoad(SrcOpcode);
612 for (SDep &Dep : ISU.Succs) {
616 if (zeroOutputDependences(ISU, Dep))
619 if (memoryRAWHazard(ISU, Dep, 4))
623 if (Dep.getKind() != SDep::Data)
626 SUnit &DepSU = *Dep.getSUnit();
627 if (DepSU.isBoundaryNode())
630 if (makeBundleAssumptions(ISU, Dep) == 1)
633 const MachineInstr *DstMI = DepSU.getInstr();
634 unsigned DstOpcode = DstMI->getOpcode();
641 if (isNSWload && (
II->isMultiply(DstOpcode) ||
II->isDivide(DstOpcode)))
642 setBidirLatencies(ISU, Dep, Dep.getLatency() + 1);
647 if (isNSWload &&
II->hasBRegAddr(DstOpcode) &&
648 DstMI->getOperand(2).getReg() == Dep.getReg())
649 setBidirLatencies(ISU, Dep, Dep.getLatency() + 1);
653 if (
II->isMultiply(SrcOpcode)) {
654 unsigned OpMask =
II->getAddressOpMask(DstOpcode) >> 1;
655 for (
unsigned i = 1; OpMask; ++i, OpMask >>= 1) {
656 if ((OpMask & 1) && DstMI->getOperand(i).isReg() &&
657 DstMI->getOperand(i).getReg() == Dep.getReg()) {
658 setBidirLatencies(ISU, Dep, 4);
666 if (
TII->isPredicated(*SrcMI) && Dep.isAssignedRegDep() &&
667 (SrcOpcode == ARM::BUNDLE ||
669 TII->getPredicate(*DstMI)))) {
672 if (
II->isInlineShiftALU(DstOpcode) && DstMI->getOperand(3).getImm() &&
673 DstMI->getOperand(1).getReg() == Dep.getReg())
675 Lat = std::min(3u, Dep.getLatency() + Lat);
676 setBidirLatencies(ISU, Dep, std::max(Dep.getLatency(), Lat));
683 if (Dep.isAssignedRegDep() && Dep.getReg() == ARM::CPSR &&
685 setBidirLatencies(ISU, Dep, 1);
690 if (
II->isRev(SrcOpcode)) {
691 if (
II->isInlineShiftALU(DstOpcode))
692 setBidirLatencies(ISU, Dep, 2);
693 else if (
II->isShift(DstOpcode))
694 setBidirLatencies(ISU, Dep, 1);
699class M85InstructionInformation :
public InstructionInformation {
701 M85InstructionInformation(
const ARMBaseInstrInfo *t)
702 : InstructionInformation(t) {
703 markDPProducersConsumers(t);
707class M85Overrides :
public ARMOverrideBypasses {
709 M85Overrides(
const ARMBaseInstrInfo *t, AAResults *a)
710 : ARMOverrideBypasses(t, a) {
712 II.reset(
new M85InstructionInformation(t));
715 void modifyBypasses(SUnit &)
override;
718 unsigned computeBypassStage(
const MCSchedClassDesc *SCD);
719 signed modifyMixedWidthFP(
const MachineInstr *SrcMI,
720 const MachineInstr *DstMI,
unsigned RegID,
721 const MCSchedClassDesc *SCD);
724unsigned M85Overrides::computeBypassStage(
const MCSchedClassDesc *SCDesc) {
725 auto SM = DAG->getSchedModel();
727 if (DefIdx < SCDesc->NumWriteLatencyEntries) {
729 const MCWriteLatencyEntry *WLEntry =
730 SM->getSubtargetInfo()->getWriteLatencyEntry(SCDesc, DefIdx);
731 unsigned Latency = WLEntry->Cycles >= 0 ? WLEntry->Cycles : 1000;
748signed M85Overrides::modifyMixedWidthFP(
const MachineInstr *SrcMI,
749 const MachineInstr *DstMI,
751 const MCSchedClassDesc *SCD) {
753 if (!
II->producesSP(SrcMI->getOpcode()) &&
754 !
II->producesDP(SrcMI->getOpcode()) &&
755 !
II->producesQP(SrcMI->getOpcode()))
758 if (Register::isVirtualRegister(RegID)) {
759 if (
II->producesSP(SrcMI->getOpcode()) &&
760 II->consumesDP(DstMI->getOpcode())) {
761 for (
auto &
OP : SrcMI->operands())
762 if (
OP.isReg() &&
OP.isDef() &&
OP.getReg() == RegID &&
763 OP.getSubReg() == ARM::ssub_1)
764 return 5 - computeBypassStage(SCD);
765 }
else if (
II->producesSP(SrcMI->getOpcode()) &&
766 II->consumesQP(DstMI->getOpcode())) {
767 for (
auto &
OP : SrcMI->operands())
768 if (
OP.isReg() &&
OP.isDef() &&
OP.getReg() == RegID &&
769 (
OP.getSubReg() == ARM::ssub_1 ||
OP.getSubReg() == ARM::ssub_3))
770 return 5 - computeBypassStage(SCD) -
771 ((
OP.getSubReg() == ARM::ssub_2 ||
772 OP.getSubReg() == ARM::ssub_3)
775 }
else if (
II->producesDP(SrcMI->getOpcode()) &&
776 II->consumesQP(DstMI->getOpcode())) {
777 for (
auto &
OP : SrcMI->operands())
778 if (
OP.isReg() &&
OP.isDef() &&
OP.getReg() == RegID &&
779 OP.getSubReg() == ARM::ssub_1)
781 }
else if (
II->producesDP(SrcMI->getOpcode()) &&
782 II->consumesSP(DstMI->getOpcode())) {
783 for (
auto &
OP : DstMI->operands())
784 if (
OP.isReg() &&
OP.isUse() &&
OP.getReg() == RegID &&
785 OP.getSubReg() == ARM::ssub_1)
786 return 5 - computeBypassStage(SCD);
787 }
else if (
II->producesQP(SrcMI->getOpcode()) &&
788 II->consumesSP(DstMI->getOpcode())) {
789 for (
auto &
OP : DstMI->operands())
790 if (
OP.isReg() &&
OP.isUse() &&
OP.getReg() == RegID &&
791 (
OP.getSubReg() == ARM::ssub_1 ||
OP.getSubReg() == ARM::ssub_3))
792 return 5 - computeBypassStage(SCD) +
793 ((
OP.getSubReg() == ARM::ssub_2 ||
794 OP.getSubReg() == ARM::ssub_3)
797 }
else if (
II->producesQP(SrcMI->getOpcode()) &&
798 II->consumesDP(DstMI->getOpcode())) {
799 for (
auto &
OP : DstMI->operands())
800 if (
OP.isReg() &&
OP.isUse() &&
OP.getReg() == RegID &&
801 OP.getSubReg() == ARM::ssub_1)
804 }
else if (Register::isPhysicalRegister(RegID)) {
811 if (
II->producesSP(SrcMI->getOpcode()) &&
812 II->consumesDP(DstMI->getOpcode())) {
813 for (
auto &
OP : SrcMI->operands())
814 if (
OP.isReg() &&
OP.isDef() &&
OP.getReg() >= ARM::S1 &&
815 OP.getReg() <= ARM::S31 && (
OP.getReg() - ARM::S0) % 2 &&
816 (
OP.getReg() == RegID ||
817 (
OP.getReg() - ARM::S0) / 2 + ARM::D0 == RegID ||
818 (
OP.getReg() - ARM::S0) / 4 + ARM::Q0 == RegID))
819 return 5 - computeBypassStage(SCD);
820 }
else if (
II->producesSP(SrcMI->getOpcode()) &&
821 II->consumesQP(DstMI->getOpcode())) {
822 for (
auto &
OP : SrcMI->operands())
823 if (
OP.isReg() &&
OP.isDef() &&
OP.getReg() >= ARM::S1 &&
824 OP.getReg() <= ARM::S31 && (
OP.getReg() - ARM::S0) % 2 &&
825 (
OP.getReg() == RegID ||
826 (
OP.getReg() - ARM::S0) / 2 + ARM::D0 == RegID ||
827 (
OP.getReg() - ARM::S0) / 4 + ARM::Q0 == RegID))
828 return 5 - computeBypassStage(SCD) -
829 (((
OP.getReg() - ARM::S0) / 2) % 2 ? 1 : 0);
830 }
else if (
II->producesDP(SrcMI->getOpcode()) &&
831 II->consumesQP(DstMI->getOpcode())) {
832 for (
auto &
OP : SrcMI->operands())
833 if (
OP.isReg() &&
OP.isDef() &&
OP.getReg() >= ARM::D0 &&
834 OP.getReg() <= ARM::D15 && (
OP.getReg() - ARM::D0) % 2 &&
835 (
OP.getReg() == RegID ||
836 (
OP.getReg() - ARM::D0) / 2 + ARM::Q0 == RegID))
838 }
else if (
II->producesDP(SrcMI->getOpcode()) &&
839 II->consumesSP(DstMI->getOpcode())) {
840 if (RegID >= ARM::S1 && RegID <= ARM::S31 && (RegID - ARM::S0) % 2)
841 return 5 - computeBypassStage(SCD);
842 }
else if (
II->producesQP(SrcMI->getOpcode()) &&
843 II->consumesSP(DstMI->getOpcode())) {
844 if (RegID >= ARM::S1 && RegID <= ARM::S31 && (RegID - ARM::S0) % 2)
845 return 5 - computeBypassStage(SCD) +
846 (((RegID - ARM::S0) / 2) % 2 ? 1 : 0);
847 }
else if (
II->producesQP(SrcMI->getOpcode()) &&
848 II->consumesDP(DstMI->getOpcode())) {
849 if (RegID >= ARM::D1 && RegID <= ARM::D15 && (RegID - ARM::D0) % 2)
856void M85Overrides::modifyBypasses(SUnit &ISU) {
857 const MachineInstr *SrcMI = ISU.getInstr();
858 unsigned SrcOpcode = SrcMI->getOpcode();
859 bool isNSWload =
II->isNonSubwordLoad(SrcOpcode);
862 for (SDep &Dep : ISU.Succs) {
866 if (zeroOutputDependences(ISU, Dep))
869 if (memoryRAWHazard(ISU, Dep, 3))
873 if (Dep.getKind() != SDep::Data)
876 SUnit &DepSU = *Dep.getSUnit();
877 if (DepSU.isBoundaryNode())
880 if (makeBundleAssumptions(ISU, Dep) == 1)
883 const MachineInstr *DstMI = DepSU.getInstr();
884 unsigned DstOpcode = DstMI->getOpcode();
890 if (isNSWload &&
II->hasBRegAddrShift(DstOpcode) &&
891 DstMI->getOperand(3).getImm() != 0 &&
892 DstMI->getOperand(2).getReg() == Dep.getReg())
893 setBidirLatencies(ISU, Dep, Dep.getLatency() + 1);
896 setBidirLatencies(ISU, Dep, Dep.getLatency() + 1);
899 if (
II->isMVEIntMAC(DstOpcode) &&
900 II->isMVEIntMACMatched(SrcOpcode, DstOpcode) &&
901 DstMI->getOperand(0).isReg() &&
902 DstMI->getOperand(0).getReg() == Dep.getReg())
903 setBidirLatencies(ISU, Dep, Dep.getLatency() - 1);
907 if (Dep.isAssignedRegDep() && Dep.getReg() == ARM::CPSR &&
909 setBidirLatencies(ISU, Dep, 0);
911 if (
signed ALat = modifyMixedWidthFP(SrcMI, DstMI, Dep.getReg(),
912 DAG->getSchedClass(&ISU)))
913 setBidirLatencies(ISU, Dep, std::max(0,
signed(Dep.getLatency()) + ALat));
915 if (
II->isRev(SrcOpcode)) {
916 if (
II->isInlineShiftALU(DstOpcode))
917 setBidirLatencies(ISU, Dep, 1);
918 else if (
II->isShift(DstOpcode))
919 setBidirLatencies(ISU, Dep, 1);
926class CortexM55Overrides :
public ARMOverrideBypasses {
928 CortexM55Overrides(
const ARMBaseInstrInfo *
TII, AAResults *AA)
929 : ARMOverrideBypasses(
TII, AA) {}
931 void modifyBypasses(SUnit &SU)
override {
932 MachineInstr *SrcMI = SU.getInstr();
933 if (!(SrcMI->getDesc().TSFlags & ARMII::HorizontalReduction))
936 for (SDep &Dep : SU.Succs) {
937 if (Dep.getKind() != SDep::Data)
939 SUnit &DepSU = *Dep.getSUnit();
940 if (DepSU.isBoundaryNode())
942 MachineInstr *DstMI = DepSU.getInstr();
945 setBidirLatencies(SU, Dep, 3);
960 modifyBypasses(DAGInstrs->
ExitSU);
963std::unique_ptr<ScheduleDAGMutation>
965 if (ST.isCortexM85())
966 return std::make_unique<M85Overrides>(ST.getInstrInfo(),
AA);
967 else if (ST.isCortexM7())
968 return std::make_unique<CortexM7Overrides>(ST.getInstrInfo(),
AA);
969 else if (ST.isCortexM55())
970 return std::make_unique<CortexM55Overrides>(ST.getInstrInfo(),
AA);
Function Alias Analysis false
const HexagonInstrInfo * TII
static constexpr unsigned SM(unsigned Version)
uint64_t IntrinsicInst * II
bool memoryRAWHazard(SUnit &ISU, SDep &Dep, unsigned latency)
static void setBidirLatencies(SUnit &SrcSU, SDep &SrcDep, unsigned latency)
static bool zeroOutputDependences(SUnit &ISU, SDep &Dep)
void apply(ScheduleDAGInstrs *DAGInstrs) override
unsigned makeBundleAssumptions(SUnit &ISU, SDep &Dep)
const ARMBaseInstrInfo * TII
@ MustAlias
The two locations precisely alias each other.
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
ArrayRef< MachineMemOperand * > memoperands() const
Access to memory operands of the instruction.
Kind getKind() const
Returns an enum value representing the kind of the dependence.
@ Output
A register output-dependence (aka WAW).
void setLatency(unsigned Lat)
Sets the latency for this edge.
bool isAssignedRegDep() const
Tests if this is a Data dependence that is associated with a register.
bool isNormalMemory() const
Tests if this is an Order dependence between two memory accesses where both sides of the dependence access memory in non-volatile and fully modeled ways.
Register getReg() const
Returns the register associated with this edge.
Scheduling unit. This is a node in the scheduling DAG.
LLVM_ABI void setHeightDirty()
Sets a flag in this node to indicate that its stored Height value will require recomputation the next time getHeight() is called.
bool isBoundaryNode() const
Boundary nodes are placeholders for the boundary of the scheduling region.
LLVM_ABI void setDepthDirty()
Sets a flag in this node to indicate that its stored Depth value will require recomputation the next time getDepth() is called.
SmallVector< SDep, 4 > Preds
All sunit predecessors.
MachineInstr * getInstr() const
Returns the representative MachineInstr for this SUnit.
A ScheduleDAG for scheduling lists of MachineInstr.
std::vector< SUnit > SUnits
The scheduling units.
SUnit ExitSU
Special node for the region exit.
Abstract Attribute helper functions.
This is an optimization pass for GlobalISel generic memory operations.
std::unique_ptr< ScheduleDAGMutation > createARMLatencyMutations(const ARMSubtarget &ST, AAResults *AA)
bool isMVEVectorInstruction(const MachineInstr *MI)
static bool hasImplicitCPSRUse(const MachineInstr *MI)
DWARFExpression::Operation Op
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
static bool mismatchedPred(ARMCC::CondCodes a, ARMCC::CondCodes b)