51 #define GET_INSTRINFO_CTOR_DTOR
52 #include "AArch64GenInstrInfo.inc"
59 cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"));
63 cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"));
67 cl::desc("Restrict range of Bcc instructions (DEBUG)"));
71 RI(STI.getTargetTriple()), Subtarget(STI) {}
85 unsigned NumBytes = 0;
92 case TargetOpcode::DBG_VALUE:
94 case TargetOpcode::IMPLICIT_DEF:
98 case TargetOpcode::STACKMAP:
101 assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
103 case TargetOpcode::PATCHPOINT:
106 assert(NumBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
170 int64_t BrOffset) const {
172 assert(Bits >= 3 && "max branch displacement must be enough to jump"
173 "over conditional branch expansion");
174 return isIntN(Bits, BrOffset / 4);
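// A minimal standalone sketch of the range test above: the TB[N]Z/CB[N]Z/Bcc
// immediates count 4-byte instruction words, so a byte displacement is in range
// exactly when (BrOffset / 4) fits the opcode's signed field (what isIntN checks).
// Names below are illustrative, not LLVM API.
#include <cassert>
#include <cstdint>

static bool fitsSignedBits(unsigned Bits, int64_t Value) {
  assert(Bits >= 1 && Bits < 64 && "sketch only handles sub-64-bit fields");
  int64_t Lo = -(int64_t(1) << (Bits - 1));
  int64_t Hi = (int64_t(1) << (Bits - 1)) - 1;
  return Value >= Lo && Value <= Hi;
}

static bool branchOffsetInRange(unsigned OffsetBits, int64_t BrOffsetBytes) {
  assert(BrOffsetBytes % 4 == 0 && "AArch64 branch offsets are word aligned");
  return fitsSignedBits(OffsetBits, BrOffsetBytes / 4);
}
// e.g. a TBZ with a 14-bit field reaches +/-32KiB:
// branchOffsetInRange(14, 32764) == true, branchOffsetInRange(14, 32768) == false.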
203 bool AllowModify) const {
209 if (!isUnpredicatedTerminator(*I))
216 unsigned LastOpc = LastInst->getOpcode();
217 if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
232 unsigned SecondLastOpc = SecondLastInst->getOpcode();
239 LastInst = SecondLastInst;
241 if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
246 SecondLastInst = &*I;
247 SecondLastOpc = SecondLastInst->getOpcode();
253 if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(*--I))
269 I->eraseFromParent();
278 I->eraseFromParent();
288 if (Cond[0].getImm() != -1) {
294 switch (Cond[1].getImm()) {
298 Cond[1].setImm(AArch64::CBNZW);
301 Cond[1].setImm(AArch64::CBZW);
304 Cond[1].setImm(AArch64::CBNZX);
307 Cond[1].setImm(AArch64::CBZX);
310 Cond[1].setImm(AArch64::TBNZW);
313 Cond[1].setImm(AArch64::TBZW);
316 Cond[1].setImm(AArch64::TBNZX);
319 Cond[1].setImm(AArch64::TBZX);
328 int *BytesRemoved) const {
338 I->eraseFromParent();
342 if (I == MBB.begin()) {
355 I->eraseFromParent();
362 void AArch64InstrInfo::instantiateCondBranch(
365 if (Cond[0].getImm() != -1) {
374 MIB.addImm(Cond[3].getImm());
384 int *BytesAdded) const {
386 assert(TBB && "insertBranch must not be told to insert a fallthrough");
392 instantiateCondBranch(MBB, DL, TBB, Cond);
401 instantiateCondBranch(MBB, DL, TBB, Cond);
425 unsigned *NewVReg = nullptr) {
430 bool Is64Bit = AArch64::GPR64allRegClass.hasSubClassEq(MRI.getRegClass(VReg));
433 unsigned SrcOpNum = 0;
435 case AArch64::ADDSXri:
436 case AArch64::ADDSWri:
442 case AArch64::ADDXri:
443 case AArch64::ADDWri:
449 Opc = Is64Bit ? AArch64::CSINCXr : AArch64::CSINCWr;
452 case AArch64::ORNXrr:
453 case AArch64::ORNWrr: {
456 if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
459 Opc = Is64Bit ? AArch64::CSINVXr : AArch64::CSINVWr;
463 case AArch64::SUBSXrr:
464 case AArch64::SUBSWrr:
470 case AArch64::SUBXrr:
471 case AArch64::SUBWrr: {
474 if (ZReg != AArch64::XZR && ZReg != AArch64::WZR)
477 Opc = Is64Bit ? AArch64::CSNEGXr : AArch64::CSNEGWr;
483 assert(Opc && SrcOpNum && "Missing parameters");
492 unsigned TrueReg, unsigned FalseReg, int &CondCycles, int &TrueCycles,
493 int &FalseCycles) const {
502 unsigned ExtraCondLat = Cond.size() != 1;
506 if (AArch64::GPR64allRegClass.hasSubClassEq(RC) ||
507 AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
509 CondCycles = 1 + ExtraCondLat;
510 TrueCycles = FalseCycles = 1;
520 if (AArch64::FPR64RegClass.hasSubClassEq(RC) ||
521 AArch64::FPR32RegClass.hasSubClassEq(RC)) {
522 CondCycles = 5 + ExtraCondLat;
523 TrueCycles = FalseCycles = 2;
533 const DebugLoc &DL, unsigned DstReg,
535 unsigned TrueReg, unsigned FalseReg) const {
540 switch (Cond.size()) {
549 switch (Cond[1].getImm()) {
569 unsigned SrcReg = Cond[2].getReg();
573 BuildMI(MBB, I, DL, get(AArch64::SUBSXri), AArch64::XZR)
579 BuildMI(MBB, I, DL, get(AArch64::SUBSWri), AArch64::WZR)
588 switch (Cond[1].getImm()) {
601 if (Cond[1].getImm() == AArch64::TBZW || Cond[1].getImm() == AArch64::TBNZW)
602 BuildMI(MBB, I, DL, get(AArch64::ANDSWri), AArch64::WZR)
607 BuildMI(MBB, I, DL, get(AArch64::ANDSXri), AArch64::XZR)
617 bool TryFold = false;
619 RC = &AArch64::GPR64RegClass;
620 Opc = AArch64::CSELXr;
623 RC = &AArch64::GPR32RegClass;
624 Opc = AArch64::CSELWr;
627 RC = &AArch64::FPR64RegClass;
628 Opc = AArch64::FCSELDrrr;
630 RC = &AArch64::FPR32RegClass;
631 Opc = AArch64::FCSELSrrr;
633 assert(RC && "Unsupported regclass");
637 unsigned NewVReg = 0;
668 uint64_t UImm = Imm << (64 - BitSize) >> (64 - BitSize);
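// Sketch of the shift pair above: shifting a uint64_t up by (64 - BitSize) and
// back down keeps only the low BitSize bits, zero-extended, before the value is
// tested as a logical immediate. The equivalent mask form is shown for contrast;
// BitSize is assumed to be 32 or 64 as in the caller. Illustrative only.
#include <cassert>
#include <cstdint>

static uint64_t lowBits(uint64_t Imm, unsigned BitSize) {
  assert(BitSize == 32 || BitSize == 64);
  uint64_t ViaShifts = Imm << (64 - BitSize) >> (64 - BitSize);
  uint64_t ViaMask =
      BitSize == 64 ? Imm : (Imm & ((uint64_t(1) << BitSize) - 1));
  assert(ViaShifts == ViaMask && "both forms keep the low BitSize bits");
  return ViaShifts;
}
// e.g. lowBits(0xffffffff00001234ULL, 32) == 0x1234.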
686 case AArch64::ADDWri:
687 case AArch64::ADDXri:
688 case AArch64::SUBWri:
689 case AArch64::SUBXri:
694 case AArch64::ADDWrs:
695 case AArch64::ADDXrs:
696 case AArch64::SUBWrs:
697 case AArch64::SUBXrs:
703 case AArch64::ANDWri:
704 case AArch64::ANDXri:
705 case AArch64::EORWri:
706 case AArch64::EORXri:
707 case AArch64::ORRWri:
708 case AArch64::ORRXri:
712 case AArch64::ANDWrr:
713 case AArch64::ANDXrr:
714 case AArch64::BICWrr:
715 case AArch64::BICXrr:
716 case AArch64::EONWrr:
717 case AArch64::EONXrr:
718 case AArch64::EORWrr:
719 case AArch64::EORXrr:
720 case AArch64::ORNWrr:
721 case AArch64::ORNXrr:
722 case AArch64::ORRWrr:
723 case AArch64::ORRXrr:
727 case AArch64::ANDWrs:
728 case AArch64::ANDXrs:
729 case AArch64::BICWrs:
730 case AArch64::BICXrs:
731 case AArch64::EONWrs:
732 case AArch64::EONXrs:
733 case AArch64::EORWrs:
734 case AArch64::EORXrs:
735 case AArch64::ORNWrs:
736 case AArch64::ORNXrs:
737 case AArch64::ORRWrs:
738 case AArch64::ORRXrs:
746 case AArch64::MOVi32imm:
748 case AArch64::MOVi64imm:
753 case AArch64::FMOVS0:
754 case AArch64::FMOVD0:
756 case TargetOpcode::COPY:
766 unsigned &SrcReg, unsigned &DstReg,
767 unsigned &SubIdx) const {
771 case AArch64::SBFMXri:
772 case AArch64::UBFMXri:
780 SubIdx = AArch64::sub_32;
788 unsigned BaseRegA = 0, BaseRegB = 0;
789 int64_t OffsetA = 0, OffsetB = 0;
790 unsigned WidthA = 0, WidthB = 0;
806 if (BaseRegA == BaseRegB) {
807 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
808 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
809 int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
810 if (LowOffset + LowWidth <= HighOffset)
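// A standalone sketch of the overlap test above: two accesses off the same base
// register are trivially disjoint when the lower one ends at or before the higher
// one starts. Offsets and widths are in bytes; names here are illustrative.
#include <cstdint>

static bool accessesDisjoint(int64_t OffsetA, unsigned WidthA,
                             int64_t OffsetB, unsigned WidthB) {
  int64_t LowOffset  = OffsetA < OffsetB ? OffsetA : OffsetB;
  int64_t HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
  int64_t LowWidth   = (LowOffset == OffsetA) ? WidthA : WidthB;
  return LowOffset + LowWidth <= HighOffset; // [Low, Low+Width) ends before High
}
// e.g. str x0, [x8] and ldr x1, [x8, #8]: accessesDisjoint(0, 8, 8, 8) == true.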
821 unsigned &SrcReg2, int &CmpMask,
822 int &CmpValue) const {
826 case AArch64::SUBSWrr:
827 case AArch64::SUBSWrs:
828 case AArch64::SUBSWrx:
829 case AArch64::SUBSXrr:
830 case AArch64::SUBSXrs:
831 case AArch64::SUBSXrx:
832 case AArch64::ADDSWrr:
833 case AArch64::ADDSWrs:
834 case AArch64::ADDSWrx:
835 case AArch64::ADDSXrr:
836 case AArch64::ADDSXrs:
837 case AArch64::ADDSXrx:
844 case AArch64::SUBSWri:
845 case AArch64::ADDSWri:
846 case AArch64::SUBSXri:
847 case AArch64::ADDSXri:
854 case AArch64::ANDSWri:
855 case AArch64::ANDSXri:
868 MI.getOpcode() == AArch64::ANDSWri ? 32 : 64) != 0;
877 assert(MBB && "Can't get MachineBasicBlock here");
879 assert(MF && "Can't get MachineFunction here");
884 for (unsigned OpIdx = 0, EndIdx = Instr.getNumOperands(); OpIdx < EndIdx;
899 "Operand has register constraints without being a register!");
919 bool MIDefinesZeroReg = false;
921 MIDefinesZeroReg = true;
926 case AArch64::ADDSWrr:
927 return AArch64::ADDWrr;
928 case AArch64::ADDSWri:
929 return MIDefinesZeroReg ? AArch64::ADDSWri : AArch64::ADDWri;
930 case AArch64::ADDSWrs:
931 return MIDefinesZeroReg ? AArch64::ADDSWrs : AArch64::ADDWrs;
932 case AArch64::ADDSWrx:
933 return AArch64::ADDWrx;
934 case AArch64::ADDSXrr:
935 return AArch64::ADDXrr;
936 case AArch64::ADDSXri:
937 return MIDefinesZeroReg ? AArch64::ADDSXri : AArch64::ADDXri;
938 case AArch64::ADDSXrs:
939 return MIDefinesZeroReg ? AArch64::ADDSXrs : AArch64::ADDXrs;
940 case AArch64::ADDSXrx:
941 return AArch64::ADDXrx;
942 case AArch64::SUBSWrr:
943 return AArch64::SUBWrr;
944 case AArch64::SUBSWri:
945 return MIDefinesZeroReg ? AArch64::SUBSWri : AArch64::SUBWri;
946 case AArch64::SUBSWrs:
947 return MIDefinesZeroReg ? AArch64::SUBSWrs : AArch64::SUBWrs;
948 case AArch64::SUBSWrx:
949 return AArch64::SUBWrx;
950 case AArch64::SUBSXrr:
951 return AArch64::SUBXrr;
952 case AArch64::SUBSXri:
953 return MIDefinesZeroReg ? AArch64::SUBSXri : AArch64::SUBXri;
954 case AArch64::SUBSXrs:
955 return MIDefinesZeroReg ? AArch64::SUBSXrs : AArch64::SUBXrs;
956 case AArch64::SUBSXrx:
957 return AArch64::SUBXrx;
976 if (To == To->getParent()->begin())
981 if (To->getParent() != From->getParent())
987 return MI.getIterator() == From;
988 }) != To->getParent()->rend());
991 for (--To; To != From; --To) {
1011 MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask,
1018 if (DeadNZCVIdx != -1) {
1033 assert(succeeded && "Some operands reg class are incompatible!");
1040 assert((CmpValue == 0 || CmpValue == 1) && "CmpValue must be 0 or 1!");
1041 if (CmpValue != 0 || SrcReg2 != 0)
1048 return substituteCmpToZero(CmpInstr, SrcReg, MRI);
1058 return AArch64::INSTRUCTION_LIST_END;
1060 case AArch64::ADDSWrr:
1061 case AArch64::ADDSWri:
1062 case AArch64::ADDSXrr:
1063 case AArch64::ADDSXri:
1064 case AArch64::SUBSWrr:
1065 case AArch64::SUBSWri:
1066 case AArch64::SUBSXrr:
1067 case AArch64::SUBSXri:
1070 case AArch64::ADDWrr:    return AArch64::ADDSWrr;
1071 case AArch64::ADDWri:    return AArch64::ADDSWri;
1072 case AArch64::ADDXrr:    return AArch64::ADDSXrr;
1073 case AArch64::ADDXri:    return AArch64::ADDSXri;
1074 case AArch64::ADCWr:     return AArch64::ADCSWr;
1075 case AArch64::ADCXr:     return AArch64::ADCSXr;
1076 case AArch64::SUBWrr:    return AArch64::SUBSWrr;
1077 case AArch64::SUBWri:    return AArch64::SUBSWri;
1078 case AArch64::SUBXrr:    return AArch64::SUBSXrr;
1079 case AArch64::SUBXri:    return AArch64::SUBSXri;
1080 case AArch64::SBCWr:     return AArch64::SBCSWr;
1081 case AArch64::SBCXr:     return AArch64::SBCSXr;
1082 case AArch64::ANDWri:    return AArch64::ANDSWri;
1083 case AArch64::ANDXri:    return AArch64::ANDSXri;
1090 if (BB->isLiveIn(AArch64::NZCV))
1103 UsedNZCV() = default;
1105 UsedNZCV& operator |=(const UsedNZCV& UsedFlags) {
1106 this->N |= UsedFlags.N;
1107 this->Z |= UsedFlags.Z;
1108 this->C |= UsedFlags.C;
1109 this->V |= UsedFlags.V;
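// Minimal sketch of the flag-accumulation pattern above: every NZCV reader after
// the compare contributes the subset of flags its condition code consumes, and the
// subsets are OR-ed together so the pass can later ask "are C or V ever used?".
// Standalone types only, mirroring the struct in the listing.
struct UsedFlags {
  bool N = false, Z = false, C = false, V = false;
  UsedFlags &operator|=(const UsedFlags &Other) {
    N |= Other.N; Z |= Other.Z; C |= Other.C; V |= Other.V;
    return *this;
  }
};
// e.g. a B.EQ needs only Z while a B.HI needs C and Z; OR-ing both marks C as used,
// which blocks the cmp-to-zero substitution checked further down (!C && !V).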
1124 case AArch64::Bcc: {
1130 case AArch64::CSINVWr:
1131 case AArch64::CSINVXr:
1132 case AArch64::CSINCWr:
1133 case AArch64::CSINCXr:
1134 case AArch64::CSELWr:
1135 case AArch64::CSELXr:
1136 case AArch64::CSNEGWr:
1137 case AArch64::CSNEGXr:
1138 case AArch64::FCSELSrrr:
1139 case AArch64::FCSELDrrr: {
1190 return Opcode == AArch64::ADDSWri || Opcode == AArch64::ADDSXri;
1194 return Opcode == AArch64::SUBSWri || Opcode == AArch64::SUBSXri;
1211 assert(sForm(*MI) != AArch64::INSTRUCTION_LIST_END);
1214 const unsigned CmpOpcode = CmpInstr->getOpcode();
1230 UsedNZCV NZCVUsedAfterCmp;
1245 return !NZCVUsedAfterCmp.C && !NZCVUsedAfterCmp.V;
1252 bool AArch64InstrInfo::substituteCmpToZero(
1263 unsigned NewOpc = sForm(*MI);
1264 if (NewOpc == AArch64::INSTRUCTION_LIST_END)
1275 assert(succeeded && "Some operands reg class are incompatible!");
1281 if (MI.getOpcode() != TargetOpcode::LOAD_STACK_GUARD)
1296 BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1301 BuildMI(MBB, MI, DL, get(AArch64::MOVZXi), Reg)
1303 BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1306 BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1309 BuildMI(MBB, MI, DL, get(AArch64::MOVKXi), Reg)
1312 BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1320 BuildMI(MBB, MI, DL, get(AArch64::LDRXui), Reg)
1336 case AArch64::ADDSWrs:
1337 case AArch64::ADDSXrs:
1338 case AArch64::ADDWrs:
1339 case AArch64::ADDXrs:
1340 case AArch64::ANDSWrs:
1341 case AArch64::ANDSXrs:
1342 case AArch64::ANDWrs:
1343 case AArch64::ANDXrs:
1344 case AArch64::BICSWrs:
1345 case AArch64::BICSXrs:
1346 case AArch64::BICWrs:
1347 case AArch64::BICXrs:
1348 case AArch64::CRC32Brr:
1349 case AArch64::CRC32CBrr:
1350 case AArch64::CRC32CHrr:
1351 case AArch64::CRC32CWrr:
1352 case AArch64::CRC32CXrr:
1353 case AArch64::CRC32Hrr:
1354 case AArch64::CRC32Wrr:
1355 case AArch64::CRC32Xrr:
1356 case AArch64::EONWrs:
1357 case AArch64::EONXrs:
1358 case AArch64::EORWrs:
1359 case AArch64::EORXrs:
1360 case AArch64::ORNWrs:
1361 case AArch64::ORNXrs:
1362 case AArch64::ORRWrs:
1363 case AArch64::ORRXrs:
1364 case AArch64::SUBSWrs:
1365 case AArch64::SUBSXrs:
1366 case AArch64::SUBWrs:
1367 case AArch64::SUBXrs:
1382 case AArch64::ADDSWrx:
1383 case AArch64::ADDSXrx:
1384 case AArch64::ADDSXrx64:
1385 case AArch64::ADDWrx:
1386 case AArch64::ADDXrx:
1387 case AArch64::ADDXrx64:
1388 case AArch64::SUBSWrx:
1389 case AArch64::SUBSXrx:
1390 case AArch64::SUBSXrx64:
1391 case AArch64::SUBWrx:
1392 case AArch64::SUBXrx:
1393 case AArch64::SUBXrx64:
1410 case AArch64::MOVZWi:
1411 case AArch64::MOVZXi:
1418 case AArch64::ANDWri:
1420 case AArch64::ANDXri:
1422 case TargetOpcode::COPY:
1434 case TargetOpcode::COPY: {
1437 return (AArch64::GPR32RegClass.contains(DstReg) ||
1438 AArch64::GPR64RegClass.contains(DstReg));
1440 case AArch64::ORRXrs:
1447 case AArch64::ADDXri:
1464 case TargetOpcode::COPY: {
1467 return (AArch64::FPR64RegClass.contains(DstReg) ||
1468 AArch64::FPR128RegClass.contains(DstReg));
1470 case AArch64::ORRv16i8:
1473 "invalid ORRv16i8 operands");
1486 case AArch64::LDRWui:
1487 case AArch64::LDRXui:
1488 case AArch64::LDRBui:
1489 case AArch64::LDRHui:
1490 case AArch64::LDRSui:
1491 case AArch64::LDRDui:
1492 case AArch64::LDRQui:
1509 case AArch64::STRWui:
1510 case AArch64::STRXui:
1511 case AArch64::STRBui:
1512 case AArch64::STRHui:
1513 case AArch64::STRSui:
1514 case AArch64::STRDui:
1515 case AArch64::STRQui:
1533 case AArch64::LDRBBroW:
1534 case AArch64::LDRBroW:
1535 case AArch64::LDRDroW:
1536 case AArch64::LDRHHroW:
1537 case AArch64::LDRHroW:
1538 case AArch64::LDRQroW:
1539 case AArch64::LDRSBWroW:
1540 case AArch64::LDRSBXroW:
1541 case AArch64::LDRSHWroW:
1542 case AArch64::LDRSHXroW:
1543 case AArch64::LDRSWroW:
1544 case AArch64::LDRSroW:
1545 case AArch64::LDRWroW:
1546 case AArch64::LDRXroW:
1547 case AArch64::STRBBroW:
1548 case AArch64::STRBroW:
1549 case AArch64::STRDroW:
1550 case AArch64::STRHHroW:
1551 case AArch64::STRHroW:
1552 case AArch64::STRQroW:
1553 case AArch64::STRSroW:
1554 case AArch64::STRWroW:
1555 case AArch64::STRXroW:
1556 case AArch64::LDRBBroX:
1557 case AArch64::LDRBroX:
1558 case AArch64::LDRDroX:
1559 case AArch64::LDRHHroX:
1560 case AArch64::LDRHroX:
1561 case AArch64::LDRQroX:
1562 case AArch64::LDRSBWroX:
1563 case AArch64::LDRSBXroX:
1564 case AArch64::LDRSHWroX:
1565 case AArch64::LDRSHXroX:
1566 case AArch64::LDRSWroX:
1567 case AArch64::LDRSroX:
1568 case AArch64::LDRWroX:
1569 case AArch64::LDRXroX:
1570 case AArch64::STRBBroX:
1571 case AArch64::STRBroX:
1572 case AArch64::STRDroX:
1573 case AArch64::STRHHroX:
1574 case AArch64::STRHroX:
1575 case AArch64::STRQroX:
1576 case AArch64::STRSroX:
1577 case AArch64::STRWroX:
1578 case AArch64::STRXroX:
1605 case AArch64::STURSi:
1606 case AArch64::STURDi:
1607 case AArch64::STURQi:
1608 case AArch64::STURBBi:
1609 case AArch64::STURHHi:
1610 case AArch64::STURWi:
1611 case AArch64::STURXi:
1612 case AArch64::LDURSi:
1613 case AArch64::LDURDi:
1614 case AArch64::LDURQi:
1615 case AArch64::LDURWi:
1616 case AArch64::LDURXi:
1617 case AArch64::LDURSWi:
1618 case AArch64::LDURHHi:
1619 case AArch64::LDURBBi:
1620 case AArch64::LDURSBWi:
1621 case AArch64::LDURSHWi:
1659 case AArch64::LDURQi:
1660 case AArch64::STURQi:
1661 case AArch64::LDRQui:
1662 case AArch64::STRQui:
1700 case AArch64::LDURQi:
1701 case AArch64::STURQi:
1705 case AArch64::LDURXi:
1706 case AArch64::LDURDi:
1707 case AArch64::STURXi:
1708 case AArch64::STURDi:
1712 case AArch64::LDURWi:
1713 case AArch64::LDURSi:
1714 case AArch64::LDURSWi:
1715 case AArch64::STURWi:
1716 case AArch64::STURSi:
1720 case AArch64::LDURHi:
1721 case AArch64::LDURHHi:
1722 case AArch64::LDURSHXi:
1723 case AArch64::LDURSHWi:
1724 case AArch64::STURHi:
1725 case AArch64::STURHHi:
1729 case AArch64::LDURBi:
1730 case AArch64::LDURBBi:
1731 case AArch64::LDURSBXi:
1732 case AArch64::LDURSBWi:
1733 case AArch64::STURBi:
1734 case AArch64::STURBBi:
1738 case AArch64::LDPQi:
1739 case AArch64::LDNPQi:
1740 case AArch64::STPQi:
1741 case AArch64::STNPQi:
1745 case AArch64::LDRQui:
1746 case AArch64::STRQui:
1749 case AArch64::LDPXi:
1750 case AArch64::LDPDi:
1751 case AArch64::LDNPXi:
1752 case AArch64::LDNPDi:
1753 case AArch64::STPXi:
1754 case AArch64::STPDi:
1755 case AArch64::STNPXi:
1756 case AArch64::STNPDi:
1760 case AArch64::LDRXui:
1761 case AArch64::LDRDui:
1762 case AArch64::STRXui:
1763 case AArch64::STRDui:
1766 case AArch64::LDPWi:
1767 case AArch64::LDPSi:
1768 case AArch64::LDNPWi:
1769 case AArch64::LDNPSi:
1770 case AArch64::STPWi:
1771 case AArch64::STPSi:
1772 case AArch64::STNPWi:
1773 case AArch64::STNPSi:
1777 case AArch64::LDRWui:
1778 case AArch64::LDRSui:
1779 case AArch64::LDRSWui:
1780 case AArch64::STRWui:
1781 case AArch64::STRSui:
1784 case AArch64::LDRHui:
1785 case AArch64::LDRHHui:
1786 case AArch64::STRHui:
1787 case AArch64::STRHHui:
1790 case AArch64::LDRBui:
1791 case AArch64::LDRBBui:
1792 case AArch64::STRBui:
1793 case AArch64::STRBBui:
1812 unsigned OffsetStride = 1;
1816 case AArch64::LDURQi:
1817 case AArch64::STURQi:
1820 case AArch64::LDURXi:
1821 case AArch64::LDURDi:
1822 case AArch64::STURXi:
1823 case AArch64::STURDi:
1826 case AArch64::LDURWi:
1827 case AArch64::LDURSi:
1828 case AArch64::LDURSWi:
1829 case AArch64::STURWi:
1830 case AArch64::STURSi:
1836 if (Offset % OffsetStride != 0)
1841 Offset /= OffsetStride;
1846 if (FirstOpc == SecondOpc)
1852 case AArch64::LDRWui:
1853 case AArch64::LDURWi:
1854 return SecondOpc == AArch64::LDRSWui || SecondOpc == AArch64::LDURSWi;
1855 case AArch64::LDRSWui:
1856 case AArch64::LDURSWi:
1857 return SecondOpc == AArch64::LDRWui || SecondOpc == AArch64::LDURWi;
1868 unsigned NumLoads) const {
1877 unsigned FirstOpc = FirstLdSt.getOpcode();
1878 unsigned SecondOpc = SecondLdSt.getOpcode();
1898 if (Offset1 > 63 || Offset1 < -64)
1902 assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
1903 return Offset1 + 1 == Offset2;
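// Rough sketch of the clustering decision above (not the LLVM API): after both
// offsets have been scaled to element units, the pair is an ldp/stp candidate only
// if the first offset fits the signed 7-bit pair immediate and the second element
// is immediately adjacent. The caller is assumed to have ordered the offsets.
#include <cstdint>

static bool canFormLoadStorePair(int64_t Offset1, int64_t Offset2) {
  if (Offset1 > Offset2)
    return false;                      // offsets expected in ascending order
  if (Offset1 > 63 || Offset1 < -64)
    return false;                      // signed 7-bit scaled immediate of LDP/STP
  return Offset1 + 1 == Offset2;       // elements must be back-to-back
}
// e.g. canFormLoadStorePair(4, 5) == true; canFormLoadStorePair(64, 65) == false.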
1910 unsigned SecondOpcode = Second.getOpcode();
1911 if (SecondOpcode == AArch64::Bcc) {
1915 case AArch64::ADDSWri:
1916 case AArch64::ADDSWrr:
1917 case AArch64::ADDSXri:
1918 case AArch64::ADDSXrr:
1919 case AArch64::ANDSWri:
1920 case AArch64::ANDSWrr:
1921 case AArch64::ANDSXri:
1922 case AArch64::ANDSXrr:
1923 case AArch64::SUBSWri:
1924 case AArch64::SUBSWrr:
1925 case AArch64::SUBSXri:
1926 case AArch64::SUBSXrr:
1927 case AArch64::BICSWrr:
1928 case AArch64::BICSXrr:
1930 case AArch64::ADDSWrs:
1931 case AArch64::ADDSXrs:
1932 case AArch64::ANDSWrs:
1933 case AArch64::ANDSXrs:
1934 case AArch64::SUBSWrs:
1935 case AArch64::SUBSXrs:
1936 case AArch64::BICSWrs:
1937 case AArch64::BICSXrs:
1945 unsigned SecondOpcode = Second.getOpcode();
1946 if (SecondOpcode == AArch64::CBNZW || SecondOpcode == AArch64::CBNZX ||
1947 SecondOpcode == AArch64::CBZW || SecondOpcode == AArch64::CBZX) {
1951 case AArch64::ADDWri:
1952 case AArch64::ADDWrr:
1953 case AArch64::ADDXri:
1954 case AArch64::ADDXrr:
1955 case AArch64::ANDWri:
1956 case AArch64::ANDWrr:
1957 case AArch64::ANDXri:
1958 case AArch64::ANDXrr:
1959 case AArch64::EORWri:
1960 case AArch64::EORWrr:
1961 case AArch64::EORXri:
1962 case AArch64::EORXrr:
1963 case AArch64::ORRWri:
1964 case AArch64::ORRWrr:
1965 case AArch64::ORRXri:
1966 case AArch64::ORRXrr:
1967 case AArch64::SUBWri:
1968 case AArch64::SUBWrr:
1969 case AArch64::SUBXri:
1970 case AArch64::SUBXrr:
1972 case AArch64::ADDWrs:
1973 case AArch64::ADDXrs:
1974 case AArch64::ANDWrs:
1975 case AArch64::ANDXrs:
1976 case AArch64::SUBWrs:
1977 case AArch64::SUBXrs:
1978 case AArch64::BICWrs:
1979 case AArch64::BICXrs:
2001 unsigned Reg, unsigned SubIdx,
2005 return MIB.addReg(Reg, State);
2009 return MIB.addReg(Reg, State, SubIdx);
2016 return ((DestReg - SrcReg) & 0x1f) < NumRegs;
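// Standalone sketch of the tuple-overlap test above: D/Q register tuples wrap
// modulo 32, so a forward (low-to-high) sub-register copy clobbers its own source
// exactly when the destination starts inside the NumRegs-wide source window.
// Register encodings are assumed to be the 0..31 numbers; arithmetic only.
static bool forwardCopyClobbersTuple(unsigned DestEnc, unsigned SrcEnc,
                                     unsigned NumRegs) {
  return ((DestEnc - SrcEnc) & 0x1f) < NumRegs; // distance mod 32 in [0, NumRegs)
}
// e.g. copying {d1,d2,d3} to {d2,d3,d4}: forwardCopyClobbersTuple(2, 1, 3) == true,
// which is why the tuple copy below iterates from the highest sub-register down.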
2021 unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode,
2024 "Unexpected register copy without NEON");
2026 uint16_t DestEncoding = TRI->getEncodingValue(DestReg);
2027 uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
2028 unsigned NumRegs = Indices.size();
2030 int SubReg = 0, End = NumRegs, Incr = 1;
2032 SubReg = NumRegs - 1;
2037 for (; SubReg != End; SubReg += Incr) {
2040 AddSubReg(MIB, SrcReg, Indices[SubReg], 0, TRI);
2047 const DebugLoc &DL, unsigned DestReg,
2048 unsigned SrcReg, bool KillSrc) const {
2049 if (AArch64::GPR32spRegClass.contains(DestReg) &&
2050 (AArch64::GPR32spRegClass.contains(SrcReg) || SrcReg == AArch64::WZR)) {
2053 if (DestReg == AArch64::WSP || SrcReg == AArch64::WSP) {
2058 &AArch64::GPR64spRegClass);
2060 &AArch64::GPR64spRegClass);
2065 BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestRegX)
2071 BuildMI(MBB, I, DL, get(AArch64::ADDWri), DestReg)
2083 &AArch64::GPR64spRegClass);
2085 &AArch64::GPR64spRegClass);
2090 BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestRegX)
2096 BuildMI(MBB, I, DL, get(AArch64::ORRWrr), DestReg)
2104 if (AArch64::GPR64spRegClass.contains(DestReg) &&
2105 (AArch64::GPR64spRegClass.contains(SrcReg) || SrcReg == AArch64::XZR)) {
2106 if (DestReg == AArch64::SP || SrcReg == AArch64::SP) {
2108 BuildMI(MBB, I, DL, get(AArch64::ADDXri), DestReg)
2117 BuildMI(MBB, I, DL, get(AArch64::ORRXrr), DestReg)
2125 if (AArch64::DDDDRegClass.contains(DestReg) &&
2126 AArch64::DDDDRegClass.contains(SrcReg)) {
2127 static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
2128 AArch64::dsub2, AArch64::dsub3 };
2135 if (AArch64::DDDRegClass.contains(DestReg) &&
2136 AArch64::DDDRegClass.contains(SrcReg)) {
2137 static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1,
2145 if (AArch64::DDRegClass.contains(DestReg) &&
2146 AArch64::DDRegClass.contains(SrcReg)) {
2147 static const unsigned Indices[] = { AArch64::dsub0, AArch64::dsub1 };
2154 if (AArch64::QQQQRegClass.contains(DestReg) &&
2155 AArch64::QQQQRegClass.contains(SrcReg)) {
2156 static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
2157 AArch64::qsub2, AArch64::qsub3 };
2164 if (AArch64::QQQRegClass.contains(DestReg) &&
2165 AArch64::QQQRegClass.contains(SrcReg)) {
2166 static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1,
2174 if (AArch64::QQRegClass.contains(DestReg) &&
2175 AArch64::QQRegClass.contains(SrcReg)) {
2176 static const unsigned Indices[] = { AArch64::qsub0, AArch64::qsub1 };
2182 if (AArch64::FPR128RegClass.contains(DestReg) &&
2183 AArch64::FPR128RegClass.contains(SrcReg)) {
2185 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2189 BuildMI(MBB, I, DL, get(AArch64::STRQpre))
2194 BuildMI(MBB, I, DL, get(AArch64::LDRQpre))
2203 if (AArch64::FPR64RegClass.contains(DestReg) &&
2204 AArch64::FPR64RegClass.contains(SrcReg)) {
2206 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::dsub,
2207 &AArch64::FPR128RegClass);
2208 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::dsub,
2209 &AArch64::FPR128RegClass);
2210 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2214 BuildMI(MBB, I, DL, get(AArch64::FMOVDr), DestReg)
2220 if (AArch64::FPR32RegClass.contains(DestReg) &&
2221 AArch64::FPR32RegClass.contains(SrcReg)) {
2223 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::ssub,
2224 &AArch64::FPR128RegClass);
2225 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::ssub,
2226 &AArch64::FPR128RegClass);
2227 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2231 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
2237 if (AArch64::FPR16RegClass.contains(DestReg) &&
2238 AArch64::FPR16RegClass.contains(SrcReg)) {
2240 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
2241 &AArch64::FPR128RegClass);
2242 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
2243 &AArch64::FPR128RegClass);
2244 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2248 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::hsub,
2249 &AArch64::FPR32RegClass);
2250 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::hsub,
2251 &AArch64::FPR32RegClass);
2252 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
2258 if (AArch64::FPR8RegClass.contains(DestReg) &&
2259 AArch64::FPR8RegClass.contains(SrcReg)) {
2261 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
2262 &AArch64::FPR128RegClass);
2263 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
2264 &AArch64::FPR128RegClass);
2265 BuildMI(MBB, I, DL, get(AArch64::ORRv16i8), DestReg)
2269 DestReg = RI.getMatchingSuperReg(DestReg, AArch64::bsub,
2270 &AArch64::FPR32RegClass);
2271 SrcReg = RI.getMatchingSuperReg(SrcReg, AArch64::bsub,
2272 &AArch64::FPR32RegClass);
2273 BuildMI(MBB, I, DL, get(AArch64::FMOVSr), DestReg)
2280 if (AArch64::FPR64RegClass.contains(DestReg) &&
2281 AArch64::GPR64RegClass.contains(SrcReg)) {
2282 BuildMI(MBB, I, DL, get(AArch64::FMOVXDr), DestReg)
2286 if (AArch64::GPR64RegClass.contains(DestReg) &&
2287 AArch64::FPR64RegClass.contains(SrcReg)) {
2288 BuildMI(MBB, I, DL, get(AArch64::FMOVDXr), DestReg)
2293 if (AArch64::FPR32RegClass.contains(DestReg) &&
2294 AArch64::GPR32RegClass.contains(SrcReg)) {
2295 BuildMI(MBB, I, DL, get(AArch64::FMOVWSr), DestReg)
2299 if (AArch64::GPR32RegClass.contains(DestReg) &&
2300 AArch64::FPR32RegClass.contains(SrcReg)) {
2301 BuildMI(MBB, I, DL, get(AArch64::FMOVSWr), DestReg)
2306 if (DestReg == AArch64::NZCV) {
2307 assert(AArch64::GPR64RegClass.contains(SrcReg) && "Invalid NZCV copy");
2308 BuildMI(MBB, I, DL, get(AArch64::MSR))
2309 .addImm(AArch64SysReg::NZCV)
2315 if (SrcReg == AArch64::NZCV) {
2316 assert(AArch64::GPR64RegClass.contains(DestReg) && "Invalid NZCV copy");
2317 BuildMI(MBB, I, DL, get(AArch64::MRS), DestReg)
2318 .addImm(AArch64SysReg::NZCV)
2331 if (MBBI != MBB.end())
2332 DL = MBBI->getDebugLoc();
2344 if (AArch64::FPR8RegClass.hasSubClassEq(RC))
2345 Opc = AArch64::STRBui;
2348 if (AArch64::FPR16RegClass.hasSubClassEq(RC))
2349 Opc = AArch64::STRHui;
2352 if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
2353 Opc = AArch64::STRWui;
2357 assert(SrcReg != AArch64::WSP);
2358 } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
2359 Opc = AArch64::STRSui;
2362 if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
2363 Opc = AArch64::STRXui;
2367 assert(SrcReg != AArch64::SP);
2368 } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
2369 Opc = AArch64::STRDui;
2372 if (AArch64::FPR128RegClass.hasSubClassEq(RC))
2373 Opc = AArch64::STRQui;
2374 else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
2376 "Unexpected register store without NEON");
2377 Opc = AArch64::ST1Twov1d;
2382 if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
2384 "Unexpected register store without NEON");
2385 Opc = AArch64::ST1Threev1d;
2390 if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
2392 "Unexpected register store without NEON");
2393 Opc = AArch64::ST1Fourv1d;
2395 } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
2397 "Unexpected register store without NEON");
2398 Opc = AArch64::ST1Twov2d;
2403 if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
2405 "Unexpected register store without NEON");
2406 Opc = AArch64::ST1Threev2d;
2411 if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
2413 "Unexpected register store without NEON");
2414 Opc = AArch64::ST1Fourv2d;
2419 assert(Opc && "Unknown register class");
2435 if (MBBI != MBB.end())
2436 DL = MBBI->getDebugLoc();
2448 if (AArch64::FPR8RegClass.hasSubClassEq(RC))
2449 Opc = AArch64::LDRBui;
2452 if (AArch64::FPR16RegClass.hasSubClassEq(RC))
2453 Opc = AArch64::LDRHui;
2456 if (AArch64::GPR32allRegClass.hasSubClassEq(RC)) {
2457 Opc = AArch64::LDRWui;
2461 assert(DestReg != AArch64::WSP);
2462 } else if (AArch64::FPR32RegClass.hasSubClassEq(RC))
2463 Opc = AArch64::LDRSui;
2466 if (AArch64::GPR64allRegClass.hasSubClassEq(RC)) {
2467 Opc = AArch64::LDRXui;
2471 assert(DestReg != AArch64::SP);
2472 } else if (AArch64::FPR64RegClass.hasSubClassEq(RC))
2473 Opc = AArch64::LDRDui;
2476 if (AArch64::FPR128RegClass.hasSubClassEq(RC))
2477 Opc = AArch64::LDRQui;
2478 else if (AArch64::DDRegClass.hasSubClassEq(RC)) {
2480 "Unexpected register load without NEON");
2481 Opc = AArch64::LD1Twov1d;
2486 if (AArch64::DDDRegClass.hasSubClassEq(RC)) {
2488 "Unexpected register load without NEON");
2489 Opc = AArch64::LD1Threev1d;
2494 if (AArch64::DDDDRegClass.hasSubClassEq(RC)) {
2496 "Unexpected register load without NEON");
2497 Opc = AArch64::LD1Fourv1d;
2499 } else if (AArch64::QQRegClass.hasSubClassEq(RC)) {
2501 "Unexpected register load without NEON");
2502 Opc = AArch64::LD1Twov2d;
2507 if (AArch64::QQQRegClass.hasSubClassEq(RC)) {
2509 "Unexpected register load without NEON");
2510 Opc = AArch64::LD1Threev2d;
2515 if (AArch64::QQQQRegClass.hasSubClassEq(RC)) {
2517 "Unexpected register load without NEON");
2518 Opc = AArch64::LD1Fourv2d;
2523 assert(Opc && "Unknown register class");
2535 unsigned DestReg, unsigned SrcReg, int Offset,
2538 if (DestReg == SrcReg && Offset == 0)
2541 assert((DestReg != AArch64::SP || Offset % 16 == 0) &&
2542 "SP increment/decrement not 16-byte aligned");
2544 bool isSub = Offset < 0;
2561 Opc = isSub ? AArch64::SUBSXri : AArch64::ADDSXri;
2563 Opc = isSub ? AArch64::SUBXri : AArch64::ADDXri;
2564 const unsigned MaxEncoding = 0xfff;
2565 const unsigned ShiftSize = 12;
2566 const unsigned MaxEncodableValue = MaxEncoding << ShiftSize;
2567 while (((unsigned)Offset) >= (1 << ShiftSize)) {
2569 if (((unsigned)Offset) > MaxEncodableValue) {
2570 ThisVal = MaxEncodableValue;
2572 ThisVal = Offset & MaxEncodableValue;
2574 assert((ThisVal >> ShiftSize) <= MaxEncoding &&
2575 "Encoding cannot handle value that big");
2576 BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
2578 .addImm(ThisVal >> ShiftSize)
2587 BuildMI(MBB, MBBI, DL, TII->get(Opc), DestReg)
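// Sketch of the decomposition driven by the loop above: an AArch64 ADD/SUB
// immediate carries 12 bits, optionally shifted left by 12, so a large frame
// offset is emitted as a series of "imm12, lsl #12" chunks followed by one
// unshifted remainder. Arithmetic only, assuming a non-negative offset.
#include <cstdint>
#include <utility>
#include <vector>

static std::vector<std::pair<unsigned, unsigned>> // {imm12, shift}
splitFrameOffset(uint64_t Offset) {
  const unsigned MaxEncoding = 0xfff, ShiftSize = 12;
  const uint64_t MaxEncodableValue = uint64_t(MaxEncoding) << ShiftSize;
  std::vector<std::pair<unsigned, unsigned>> Chunks;
  while (Offset >= (uint64_t(1) << ShiftSize)) {
    uint64_t ThisVal = Offset > MaxEncodableValue
                           ? MaxEncodableValue
                           : (Offset & MaxEncodableValue);
    Chunks.push_back({unsigned(ThisVal >> ShiftSize), ShiftSize}); // add #imm, lsl #12
    Offset -= ThisVal;
  }
  if (Offset)
    Chunks.push_back({unsigned(Offset), 0});                       // final add #imm
  return Chunks;
}
// e.g. splitFrameOffset(0x1234567) -> {0xfff, 12}, {0x235, 12}, {0x567, 0}.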
2614 if (SrcReg == AArch64::SP &&
2619 if (DstReg == AArch64::SP &&
2652 (Ops[0] == 0 || Ops[0] == 1)) {
2653 bool IsSpill = Ops[0] == 0;
2654 bool IsFill = !IsSpill;
2660 unsigned DstReg = DstMO.getReg();
2661 unsigned SrcReg = SrcMO.getReg();
2672 "Mismatched register size in non subreg COPY");
2679 return &*--InsertPt;
2691 if (IsSpill && DstMO.isUndef() &&
2694 "Unexpected subreg on physical register");
2696 unsigned SpillSubreg;
2701 case AArch64::sub_32:
2703 if (AArch64::GPR32RegClass.contains(SrcReg)) {
2704 SpillRC = &AArch64::GPR64RegClass;
2705 SpillSubreg = AArch64::sub_32;
2706 } else if (AArch64::FPR32RegClass.contains(SrcReg)) {
2707 SpillRC = &AArch64::FPR64RegClass;
2708 SpillSubreg = AArch64::ssub;
2713 if (AArch64::FPR64RegClass.contains(SrcReg)) {
2714 SpillRC = &AArch64::FPR128RegClass;
2715 SpillSubreg = AArch64::dsub;
2722 if (unsigned WidenedSrcReg =
2726 return &*--InsertPt;
2745 case AArch64::sub_32:
2746 FillRC = &AArch64::GPR32RegClass;
2749 FillRC = &AArch64::FPR32RegClass;
2752 FillRC = &AArch64::FPR64RegClass;
2758 "Mismatched regclass size on folded subreg COPY");
2775 bool *OutUseUnscaledOp,
2776 unsigned *OutUnscaledOp,
2777 int *EmittableOffset) {
2779 bool IsSigned = false;
2781 unsigned ImmIdx = 2;
2782 unsigned UnscaledOp = 0;
2784 if (EmittableOffset)
2785 *EmittableOffset = 0;
2786 if (OutUseUnscaledOp)
2787 *OutUseUnscaledOp = false;
2794 case AArch64::LD1Twov2d:
2795 case AArch64::LD1Threev2d:
2796 case AArch64::LD1Fourv2d:
2797 case AArch64::LD1Twov1d:
2798 case AArch64::LD1Threev1d:
2799 case AArch64::LD1Fourv1d:
2800 case AArch64::ST1Twov2d:
2801 case AArch64::ST1Threev2d:
2802 case AArch64::ST1Fourv2d:
2803 case AArch64::ST1Twov1d:
2804 case AArch64::ST1Threev1d:
2805 case AArch64::ST1Fourv1d:
2807 case AArch64::PRFMui:
2809 UnscaledOp = AArch64::PRFUMi;
2811 case AArch64::LDRXui:
2813 UnscaledOp = AArch64::LDURXi;
2815 case AArch64::LDRWui:
2817 UnscaledOp = AArch64::LDURWi;
2819 case AArch64::LDRBui:
2821 UnscaledOp = AArch64::LDURBi;
2823 case AArch64::LDRHui:
2825 UnscaledOp = AArch64::LDURHi;
2827 case AArch64::LDRSui:
2829 UnscaledOp = AArch64::LDURSi;
2831 case AArch64::LDRDui:
2833 UnscaledOp = AArch64::LDURDi;
2835 case AArch64::LDRQui:
2837 UnscaledOp = AArch64::LDURQi;
2839 case AArch64::LDRBBui:
2841 UnscaledOp = AArch64::LDURBBi;
2843 case AArch64::LDRHHui:
2845 UnscaledOp = AArch64::LDURHHi;
2847 case AArch64::LDRSBXui:
2849 UnscaledOp = AArch64::LDURSBXi;
2851 case AArch64::LDRSBWui:
2853 UnscaledOp = AArch64::LDURSBWi;
2855 case AArch64::LDRSHXui:
2857 UnscaledOp = AArch64::LDURSHXi;
2859 case AArch64::LDRSHWui:
2861 UnscaledOp = AArch64::LDURSHWi;
2863 case AArch64::LDRSWui:
2865 UnscaledOp = AArch64::LDURSWi;
2868 case AArch64::STRXui:
2870 UnscaledOp = AArch64::STURXi;
2872 case AArch64::STRWui:
2874 UnscaledOp = AArch64::STURWi;
2876 case AArch64::STRBui:
2878 UnscaledOp = AArch64::STURBi;
2880 case AArch64::STRHui:
2882 UnscaledOp = AArch64::STURHi;
2884 case AArch64::STRSui:
2886 UnscaledOp = AArch64::STURSi;
2888 case AArch64::STRDui:
2890 UnscaledOp = AArch64::STURDi;
2892 case AArch64::STRQui:
2894 UnscaledOp = AArch64::STURQi;
2896 case AArch64::STRBBui:
2898 UnscaledOp = AArch64::STURBBi;
2900 case AArch64::STRHHui:
2902 UnscaledOp = AArch64::STURHHi;
2905 case AArch64::LDPXi:
2906 case AArch64::LDPDi:
2907 case AArch64::STPXi:
2908 case AArch64::STPDi:
2909 case AArch64::LDNPXi:
2910 case AArch64::LDNPDi:
2911 case AArch64::STNPXi:
2912 case AArch64::STNPDi:
2917 case AArch64::LDPQi:
2918 case AArch64::STPQi:
2919 case AArch64::LDNPQi:
2920 case AArch64::STNPQi:
2925 case AArch64::LDPWi:
2926 case AArch64::LDPSi:
2927 case AArch64::STPWi:
2928 case AArch64::STPSi:
2929 case AArch64::LDNPWi:
2930 case AArch64::LDNPSi:
2931 case AArch64::STNPWi:
2932 case AArch64::STNPSi:
2938 case AArch64::LDURXi:
2939 case AArch64::LDURWi:
2940 case AArch64::LDURBi:
2941 case AArch64::LDURHi:
2942 case AArch64::LDURSi:
2943 case AArch64::LDURDi:
2944 case AArch64::LDURQi:
2945 case AArch64::LDURHHi:
2946 case AArch64::LDURBBi:
2947 case AArch64::LDURSBXi:
2948 case AArch64::LDURSBWi:
2949 case AArch64::LDURSHXi:
2950 case AArch64::LDURSHWi:
2951 case AArch64::LDURSWi:
2952 case AArch64::STURXi:
2953 case AArch64::STURWi:
2954 case AArch64::STURBi:
2955 case AArch64::STURHi:
2956 case AArch64::STURSi:
2957 case AArch64::STURDi:
2958 case AArch64::STURQi:
2959 case AArch64::STURBBi:
2960 case AArch64::STURHHi:
2967 bool useUnscaledOp = false;
2971 if ((Offset & (Scale - 1)) != 0 || (Offset < 0 && UnscaledOp != 0))
2972 useUnscaledOp = true;
2981 } else if (UnscaledOp == 0 || useUnscaledOp) {
2992 int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
2993 int MinOff = (IsSigned ? (-MaxOff - 1) : 0);
2994 if (Offset >= MinOff && Offset <= MaxOff) {
2995 if (EmittableOffset)
2996 *EmittableOffset = Offset;
2999 int NewOff = Offset < 0 ? MinOff : MaxOff;
3000 if (EmittableOffset)
3001 *EmittableOffset = NewOff;
3002 Offset = (Offset - NewOff) * Scale;
3004 if (OutUseUnscaledOp)
3005 *OutUseUnscaledOp = useUnscaledOp;
3007 *OutUnscaledOp = UnscaledOp;
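// Sketch of the split performed above: given the immediate field width (MaskBits),
// whether it is signed, and the access scale, compute how much of a scaled offset
// the instruction itself can carry and how many bytes remain for a separate
// add/sub. Plain arithmetic only; the parameter names are illustrative.
#include <utility>

static std::pair<int, int> // {encodable scaled offset, leftover bytes}
splitScaledOffset(int Offset, unsigned MaskBits, bool IsSigned, int Scale) {
  int MaxOff = (1 << (MaskBits - IsSigned)) - 1;
  int MinOff = IsSigned ? (-MaxOff - 1) : 0;
  if (Offset >= MinOff && Offset <= MaxOff)
    return {Offset, 0};                       // fully encodable in the instruction
  int NewOff = Offset < 0 ? MinOff : MaxOff;  // clamp to the encodable extreme
  return {NewOff, (Offset - NewOff) * Scale}; // remainder back in byte units
}
// e.g. LDRXui has a 12-bit unsigned field scaled by 8:
// splitScaledOffset(5000, 12, false, 8) == {4095, (5000 - 4095) * 8}.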
3013 unsigned FrameReg, int &Offset,
3016 unsigned ImmIdx = FrameRegIdx + 1;
3018 if (Opcode == AArch64::ADDSXri || Opcode == AArch64::ADDXri) {
3029 unsigned UnscaledOp;
3032 &UnscaledOp, &NewOffset);
3038 MI.setDesc(TII->get(UnscaledOp));
3061 case AArch64::ADDSWrr:
3062 case AArch64::ADDSWri:
3063 case AArch64::ADDSXrr:
3064 case AArch64::ADDSXri:
3065 case AArch64::SUBSWrr:
3066 case AArch64::SUBSXrr:
3068 case AArch64::SUBSWri:
3069 case AArch64::SUBSXri:
3080 case AArch64::ADDWrr:
3081 case AArch64::ADDWri:
3082 case AArch64::SUBWrr:
3083 case AArch64::ADDSWrr:
3084 case AArch64::ADDSWri:
3085 case AArch64::SUBSWrr:
3087 case AArch64::SUBWri:
3088 case AArch64::SUBSWri:
3099 case AArch64::ADDXrr:
3100 case AArch64::ADDXri:
3101 case AArch64::SUBXrr:
3102 case AArch64::ADDSXrr:
3103 case AArch64::ADDSXri:
3104 case AArch64::SUBSXrr:
3106 case AArch64::SUBXri:
3107 case AArch64::SUBSXri:
3120 case AArch64::FADDSrr:
3121 case AArch64::FADDDrr:
3122 case AArch64::FADDv2f32:
3123 case AArch64::FADDv2f64:
3124 case AArch64::FADDv4f32:
3125 case AArch64::FSUBSrr:
3126 case AArch64::FSUBDrr:
3127 case AArch64::FSUBv2f32:
3128 case AArch64::FSUBv2f64:
3129 case AArch64::FSUBv4f32:
3146 unsigned CombineOpc, unsigned ZeroReg = 0,
3147 bool CheckZeroReg = false) {
3175 unsigned MulOpc, unsigned ZeroReg) {
3176 return canCombine(MBB, MO, MulOpc, ZeroReg, true);
3192 case AArch64::FADDDrr:
3193 case AArch64::FADDSrr:
3194 case AArch64::FADDv2f32:
3195 case AArch64::FADDv2f64:
3196 case AArch64::FADDv4f32:
3197 case AArch64::FMULDrr:
3198 case AArch64::FMULSrr:
3199 case AArch64::FMULX32:
3200 case AArch64::FMULX64:
3201 case AArch64::FMULXv2f32:
3202 case AArch64::FMULXv2f64:
3203 case AArch64::FMULXv4f32:
3204 case AArch64::FMULv2f32:
3205 case AArch64::FMULv2f64:
3206 case AArch64::FMULv4f32:
3238 case AArch64::ADDWrr:
3240 "ADDWrr does not have register operands");
3252 case AArch64::ADDXrr:
3264 case AArch64::SUBWrr:
3276 case AArch64::SUBXrr:
3288 case AArch64::ADDWri:
3295 case AArch64::ADDXri:
3302 case AArch64::SUBWri:
3309 case AArch64::SUBXri:
3333 assert(false && "Unsupported FP instruction in combiner\n");
3335 case AArch64::FADDSrr:
3337 "FADDWrr does not have register operands");
3342 AArch64::FMULv1i32_indexed)) {
3350 AArch64::FMULv1i32_indexed)) {
3355 case AArch64::FADDDrr:
3360 AArch64::FMULv1i64_indexed)) {
3368 AArch64::FMULv1i64_indexed)) {
3373 case AArch64::FADDv2f32:
3375 AArch64::FMULv2i32_indexed)) {
3379 AArch64::FMULv2f32)) {
3384 AArch64::FMULv2i32_indexed)) {
3388 AArch64::FMULv2f32)) {
3393 case AArch64::FADDv2f64:
3395 AArch64::FMULv2i64_indexed)) {
3399 AArch64::FMULv2f64)) {
3404 AArch64::FMULv2i64_indexed)) {
3408 AArch64::FMULv2f64)) {
3413 case AArch64::FADDv4f32:
3415 AArch64::FMULv4i32_indexed)) {
3419 AArch64::FMULv4f32)) {
3424 AArch64::FMULv4i32_indexed)) {
3428 AArch64::FMULv4f32)) {
3434 case AArch64::FSUBSrr:
3443 AArch64::FMULv1i32_indexed)) {
3448 case AArch64::FSUBDrr:
3457 AArch64::FMULv1i64_indexed)) {
3462 case AArch64::FSUBv2f32:
3464 AArch64::FMULv2i32_indexed)) {
3468 AArch64::FMULv2f32)) {
3473 case AArch64::FSUBv2f64:
3475 AArch64::FMULv2i64_indexed)) {
3479 AArch64::FMULv2f64)) {
3484 case AArch64::FSUBv4f32:
3486 AArch64::FMULv4i32_indexed)) {
3490 AArch64::FMULv4f32)) {
3580 assert(IdxMulOpd == 1 || IdxMulOpd == 2);
3582 unsigned IdxOtherOpd = IdxMulOpd == 1 ? 2 : 1;
3619 assert(false && "Invalid FMA instruction kind \n");
3643 unsigned IdxMulOpd, unsigned MaddOpc,
3645 assert(IdxMulOpd == 1 || IdxMulOpd == 2);
3693 DelInstrs, InstrIdxForVirtReg);
3702 Opc = AArch64::MADDWrrr;
3703 RC = &AArch64::GPR32RegClass;
3705 Opc = AArch64::MADDXrrr;
3706 RC = &AArch64::GPR64RegClass;
3717 Opc = AArch64::MADDWrrr;
3718 RC = &AArch64::GPR32RegClass;
3720 Opc = AArch64::MADDXrrr;
3721 RC = &AArch64::GPR64RegClass;
3733 unsigned BitSize, OrrOpc, ZeroReg;
3735 OrrOpc = AArch64::ORRWri;
3736 OrrRC = &AArch64::GPR32spRegClass;
3738 ZeroReg = AArch64::WZR;
3739 Opc = AArch64::MADDWrrr;
3740 RC = &AArch64::GPR32RegClass;
3742 OrrOpc = AArch64::ORRXri;
3743 OrrRC = &AArch64::GPR64spRegClass;
3745 ZeroReg = AArch64::XZR;
3746 Opc = AArch64::MADDXrrr;
3747 RC = &AArch64::GPR64RegClass;
3764 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
3765 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
3777 unsigned SubOpc, ZeroReg;
3779 SubOpc = AArch64::SUBWrr;
3780 SubRC = &AArch64::GPR32spRegClass;
3781 ZeroReg = AArch64::WZR;
3782 Opc = AArch64::MADDWrrr;
3783 RC = &AArch64::GPR32RegClass;
3785 SubOpc = AArch64::SUBXrr;
3786 SubRC = &AArch64::GPR64spRegClass;
3787 ZeroReg = AArch64::XZR;
3788 Opc = AArch64::MADDXrrr;
3789 RC = &AArch64::GPR64RegClass;
3798 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
3799 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
3809 Opc = AArch64::MSUBWrrr;
3810 RC = &AArch64::GPR32RegClass;
3812 Opc = AArch64::MSUBXrrr;
3813 RC = &AArch64::GPR64RegClass;
3825 unsigned BitSize, OrrOpc, ZeroReg;
3827 OrrOpc = AArch64::ORRWri;
3828 OrrRC = &AArch64::GPR32spRegClass;
3830 ZeroReg = AArch64::WZR;
3831 Opc = AArch64::MADDWrrr;
3832 RC = &AArch64::GPR32RegClass;
3834 OrrOpc = AArch64::ORRXri;
3835 OrrRC = &AArch64::GPR64spRegClass;
3837 ZeroReg = AArch64::XZR;
3838 Opc = AArch64::MADDXrrr;
3839 RC = &AArch64::GPR64RegClass;
3855 InstrIdxForVirtReg.insert(std::make_pair(NewVR, 0));
3856 MUL = genMaddR(MF, MRI, TII, Root, InsInstrs, 1, Opc, NewVR, RC);
3868 Opc = AArch64::FMADDSrrr;
3869 RC = &AArch64::FPR32RegClass;
3871 Opc = AArch64::FMADDDrrr;
3872 RC = &AArch64::FPR64RegClass;
3883 Opc = AArch64::FMADDSrrr;
3884 RC = &AArch64::FPR32RegClass;
3886 Opc = AArch64::FMADDDrrr;
3887 RC = &AArch64::FPR64RegClass;
3893 Opc = AArch64::FMLAv1i32_indexed;
3894 RC = &AArch64::FPR32RegClass;
3899 Opc = AArch64::FMLAv1i32_indexed;
3900 RC = &AArch64::FPR32RegClass;
3906 Opc = AArch64::FMLAv1i64_indexed;
3907 RC = &AArch64::FPR64RegClass;
3912 Opc = AArch64::FMLAv1i64_indexed;
3913 RC = &AArch64::FPR64RegClass;
3920 RC = &AArch64::FPR64RegClass;
3922 Opc = AArch64::FMLAv2i32_indexed;
3926 Opc = AArch64::FMLAv2f32;
3933 RC = &AArch64::FPR64RegClass;
3935 Opc = AArch64::FMLAv2i32_indexed;
3939 Opc = AArch64::FMLAv2f32;
3947 RC = &AArch64::FPR128RegClass;
3949 Opc = AArch64::FMLAv2i64_indexed;
3953 Opc = AArch64::FMLAv2f64;
3960 RC = &AArch64::FPR128RegClass;
3962 Opc = AArch64::FMLAv2i64_indexed;
3966 Opc = AArch64::FMLAv2f64;
3974 RC = &AArch64::FPR128RegClass;
3976 Opc = AArch64::FMLAv4i32_indexed;
3980 Opc = AArch64::FMLAv4f32;
3988 RC = &AArch64::FPR128RegClass;
3990 Opc = AArch64::FMLAv4i32_indexed;
3994 Opc = AArch64::FMLAv4f32;
4007 Opc = AArch64::FNMSUBSrrr;
4008 RC = &AArch64::FPR32RegClass;
4010 Opc = AArch64::FNMSUBDrrr;
4011 RC = &AArch64::FPR64RegClass;
4023 Opc = AArch64::FMSUBSrrr;
4024 RC = &AArch64::FPR32RegClass;
4026 Opc = AArch64::FMSUBDrrr;
4027 RC = &AArch64::FPR64RegClass;
4033 Opc = AArch64::FMLSv1i32_indexed;
4034 RC = &AArch64::FPR32RegClass;
4040 Opc = AArch64::FMLSv1i64_indexed;
4041 RC = &AArch64::FPR64RegClass;
4048 RC = &AArch64::FPR64RegClass;
4050 Opc = AArch64::FMLSv2i32_indexed;
4054 Opc = AArch64::FMLSv2f32;
4062 RC = &AArch64::FPR128RegClass;
4064 Opc = AArch64::FMLSv2i64_indexed;
4068 Opc = AArch64::FMLSv2f64;
4076 RC = &AArch64::FPR128RegClass;
4078 Opc = AArch64::FMLSv4i32_indexed;
4082 Opc = AArch64::FMLSv4f32;
4122 bool IsNegativeBranch = false;
4123 bool IsTestAndBranch = false;
4124 unsigned TargetBBInMI = 0;
4134 case AArch64::CBNZW:
4135 case AArch64::CBNZX:
4137 IsNegativeBranch = true;
4142 IsTestAndBranch = true;
4144 case AArch64::TBNZW:
4145 case AArch64::TBNZX:
4147 IsNegativeBranch = true;
4148 IsTestAndBranch = true;
4169 while (DefMI->isCopy()) {
4182 case AArch64::ANDWri:
4183 case AArch64::ANDXri: {
4184 if (IsTestAndBranch)
4191 bool Is32Bit = (DefMI->getOpcode() == AArch64::ANDWri);
4198 unsigned NewReg = MO.getReg();
4208 unsigned Opc = (Imm < 32)
4209 ? (IsNegativeBranch ? AArch64::TBNZW : AArch64::TBZW)
4210 : (IsNegativeBranch ? AArch64::TBNZX : AArch64::TBZX);
4223 if (!Is32Bit && Imm < 32)
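// Sketch of the opcode/bit selection above (assumed names, not the LLVM API): when
// the AND immediate decodes to a single-bit mask, the CB(N)Z on the AND result can
// be rewritten as a TB(N)Z on that bit of the original register; bits 0-31 pick the
// W form and higher bits the X form.
#include <cassert>
#include <cstdint>
#include <string>
#include <utility>

static std::pair<std::string, unsigned>
pickTestAndBranch(uint64_t Mask, bool IsNegativeBranch) {
  assert(Mask != 0 && (Mask & (Mask - 1)) == 0 && "mask must be a single bit");
  unsigned Bit = 0;
  while (!(Mask & (uint64_t(1) << Bit))) // index of the tested bit
    ++Bit;
  std::string Opc = Bit < 32 ? (IsNegativeBranch ? "TBNZW" : "TBZW")
                             : (IsNegativeBranch ? "TBNZX" : "TBZX");
  return {Opc, Bit};
}
// e.g. "and w8, w0, #0x10; cbnz w8, L" -> pickTestAndBranch(0x10, true) ==
// {"TBNZW", 4}, i.e. "tbnz w0, #4, L".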
4229 case AArch64::CSINCWr:
4230 case AArch64::CSINCXr: {
4249 if (IsNegativeBranch)
4258 std::pair<unsigned, unsigned>
4261 return std::make_pair(TF & Mask, TF & ~Mask);
4266 using namespace AArch64II;
4268 static const std::pair<unsigned, const char *> TargetFlags[] = {
4271 {MO_G3, "aarch64-g3"},
4272 {MO_G2, "aarch64-g2"},
4273 {MO_G1, "aarch64-g1"},
4274 {MO_G0, "aarch64-g0"},
4281 using namespace AArch64II;
4283 static const std::pair<unsigned, const char *> TargetFlags[] = {
4285 {MO_NC, "aarch64-nc"},
4286 {MO_TLS, "aarch64-tls"}};
static const MachineMemOperand::Flags MOSuppressPair
ArrayRef< std::pair< unsigned, const char * > > getSerializableBitmaskMachineOperandTargetFlags() const override
const MachineInstrBuilder & addMetadata(const MDNode *MD) const
void push_back(const T &Elt)
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
MO_G3 - A symbol operand with this flag (granule 3) represents the high 16-bits of a 64-bit address...
const AArch64RegisterInfo & getRegisterInfo() const
getRegisterInfo - TargetInstrInfo is a superset of MRegister info.
bool isGPRZero(const MachineInstr &MI) const
Does this instruction set its full destination register to zero?
static unsigned getArithShiftValue(unsigned Imm)
getArithShiftValue - get the arithmetic shift value.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
instr_iterator instr_end()
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing...
bool getMemOpBaseRegImmOfs(MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, const TargetRegisterInfo *TRI) const override
void ChangeToRegister(unsigned Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isDebug=false)
ChangeToRegister - Replace this operand with a new register operand of the specified value...
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root...
MachineBasicBlock * getMBB() const
static bool getFMAPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns)
Floating-Point Support.
static unsigned sForm(MachineInstr &Instr)
Get opcode of S version of Instr.
unsigned createVirtualRegister(const TargetRegisterClass *RegClass)
createVirtualRegister - Create and return a new virtual register in the function with the specified r...
bool hasShiftedReg(const MachineInstr &MI) const
Returns true if there is a shiftable register and that the shift value is non-zero.
bool hasSubClassEq(const TargetRegisterClass *RC) const
Returns true if RC is a sub-class of or equal to this class.
static CondCode getInvertedCondCode(CondCode Code)
bool hasZeroCycleRegMove() const
Describe properties that are true of each instruction in the target description file.
void setIsUndef(bool Val=true)
MachineInstrBuilder MachineInstrBuilder &DefMI const MCInstrDesc & Desc
static bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
void suppressLdStPair(MachineInstr &MI) const
Hint that pairing the given load or store is unprofitable.
static bool isSUBSRegImm(unsigned Opcode)
static bool processLogicalImmediate(uint64_t Imm, unsigned RegSize, uint64_t &Encoding)
processLogicalImmediate - Determine if an immediate value can be encoded as the immediate operand of ...
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr) const override
bool hasZeroCycleZeroing() const
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
Offset can apply, at least partly.
bool use_nodbg_empty(unsigned RegNo) const
use_nodbg_empty - Return true if there are no non-Debug instructions using the specified register...
bool hasCustomCheapAsMoveHandling() const
bool shouldScheduleAdjacent(const MachineInstr &First, const MachineInstr &Second) const override
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
const char * getSymbolName() const
bool shouldClusterMemOps(MachineInstr &FirstLdSt, MachineInstr &SecondLdSt, unsigned NumLoads) const override
Detect opportunities for ldp/stp formation.
MO_G0 - A symbol operand with this flag (granule 0) represents the bits 0-15 of a 64-bit address...
void getNoopForMachoTarget(MCInst &NopInst) const override
iterator_range< mmo_iterator > memoperands()
bool hasExtendedReg(const MachineInstr &MI) const
Returns true if there is an extendable register and that the extending value is non-zero.
return AArch64::GPR64RegClass contains(Reg)
iterator_range< succ_iterator > successors()
static const MachineInstrBuilder & AddSubReg(const MachineInstrBuilder &MIB, unsigned Reg, unsigned SubIdx, unsigned State, const TargetRegisterInfo *TRI)
MO_G2 - A symbol operand with this flag (granule 2) represents the bits 32-47 of a 64-bit address...
static bool getMemDoShift(unsigned Imm)
getMemDoShift - Extract the "do shift" flag value for load/store instructions.
static MachineInstr * genMaddR(MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII, MachineInstr &Root, SmallVectorImpl< MachineInstr * > &InsInstrs, unsigned IdxMulOpd, unsigned MaddOpc, unsigned VR, const TargetRegisterClass *RC)
genMaddR - Generate madd instruction and combine mul and add using an extra virtual register Example ...
bool isPaired128Slow() const
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
unsigned getSize() const
Return the size of the register in bytes, which is also the size of a stack slot allocated to hold a ...
bool isUnscaledLdSt(unsigned Opc) const
Return true if this is an unscaled load/store.
A description of a memory reference used in the backend.
bool isAssociativeAndCommutative(const MachineInstr &Inst) const override
Return true when Inst is associative and commutative so that it can be reassociated.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
const HexagonInstrInfo * TII
static AArch64CC::CondCode findCondCodeUsedByInstr(const MachineInstr &Instr)
Find a condition code used by the instruction.
static bool canBeExpandedToORR(const MachineInstr &MI, unsigned BitSize)
Returns true if a MOVi32imm or MOVi64imm can be expanded to an ORRxx.
ArrayRef< T > makeArrayRef(const T &OneElt)
Construct an ArrayRef from a single element.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
bool isReg() const
isReg - Tests if this is a MO_Register operand.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
const TargetRegisterClass * getRegClass(unsigned Reg) const
Return the register class of the specified virtual register.
bool useMachineCombiner() const override
AArch64 supports MachineCombiner.
Reg
All possible values of the reg field in the ModR/M byte.
static int getRegClass(RegisterKind Is, unsigned RegWidth)
ARMProcFamilyEnum getProcFamily() const
Returns ARM processor family.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted...
static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC)
static bool canCombineWithMUL(MachineBasicBlock &MBB, MachineOperand &MO, unsigned MulOpc, unsigned ZeroReg)
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
INLINEASM - Represents an inline asm block.
static bool isCombineInstrCandidate64(unsigned Opc)
unsigned getNumOperands() const
Access to explicit operands of the instruction.
int isAArch64FrameOffsetLegal(const MachineInstr &MI, int &Offset, bool *OutUseUnscaledOp=nullptr, unsigned *OutUnscaledOp=nullptr, int *EmittableOffset=nullptr)
Check if the Offset is a valid frame offset for MI.
void RemoveOperand(unsigned i)
Erase an operand from an instruction, leaving it with one fewer operand than it started with...
AArch64InstrInfo(const AArch64Subtarget &STI)
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
MO_GOT - This flag indicates that a symbol operand represents the address of the GOT entry for the sy...
static AArch64_AM::ShiftExtendType getMemExtendType(unsigned Imm)
getExtendType - Extract the extend type for the offset operand of loads/stores.
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Return the number of bytes of code the specified instruction may be.
unsigned getMatchingSuperReg(unsigned Reg, unsigned SubIdx, const TargetRegisterClass *RC) const
Return a super-register of the specified register Reg so its sub-register of index SubIdx is Reg...
static bool canCombineWithFMUL(MachineBasicBlock &MBB, MachineOperand &MO, unsigned MulOpc)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory)...
iterator getLastNonDebugInstr()
Returns an iterator to the last non-debug instruction in the basic block, or end().
bool canInsertSelect(const MachineBasicBlock &, ArrayRef< MachineOperand > Cond, unsigned, unsigned, int &, int &, int &) const override
static GCRegistry::Add< OcamlGC > B("ocaml","ocaml 3.10-compatible GC")
static bool canCombine(MachineBasicBlock &MBB, MachineOperand &MO, unsigned CombineOpc, unsigned ZeroReg=0, bool CheckZeroReg=false)
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
size_t size() const
size - Get the array size.
static bool canPairLdStOpc(unsigned FirstOpc, unsigned SecondOpc)
const TargetRegisterClass * constrainRegClass(unsigned Reg, const TargetRegisterClass *RC, unsigned MinNumRegs=0)
constrainRegClass - Constrain the register class of the specified virtual register to be a common sub...
Instances of this class represent a single low-level machine instruction.
unsigned getKillRegState(bool B)
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
static GCRegistry::Add< CoreCLRGC > E("coreclr","CoreCLR-compatible GC")
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==...
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, int Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
void ChangeToImmediate(int64_t ImmVal)
ChangeToImmediate - Replace this operand with a new immediate operand of the specified value...
Flag
These should be considered private to the implementation of the MCInstrDesc class.
const MachineBasicBlock * getParent() const
TargetInstrInfo - Interface to description of machine instruction set.
static bool isCondBranchOpcode(int Opc)
This class is intended to be used as a base class for asm properties and features specific to the tar...
unsigned getDefRegState(bool B)
static bool isCombineInstrCandidateFP(const MachineInstr &Inst)
static unsigned getShiftValue(unsigned Imm)
getShiftValue - Extract the shift value.
bool operator|=(SparseBitVector< ElementSize > &LHS, const SparseBitVector< ElementSize > *RHS)
MachineInstrBuilder BuildMI(MachineFunction &MF, const DebugLoc &DL, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
initializer< Ty > init(const Ty &Val)
unsigned UnsafeFPMath
UnsafeFPMath - This flag is enabled when the -enable-unsafe-fp-math flag is specified on the command ...
FPOpFusion::FPOpFusionMode AllowFPOpFusion
AllowFPOpFusion - This flag is set by the -fuse-fp-ops=xxx option.
MO_G1 - A symbol operand with this flag (granule 1) represents the bits 16-31 of a 64-bit address...
unsigned const MachineRegisterInfo * MRI
bool isFPRCopy(const MachineInstr &MI) const
Does this instruction rename an FPR without modifying bits?
static cl::opt< unsigned > BCCDisplacementBits("aarch64-bcc-offset-bits", cl::Hidden, cl::init(19), cl::desc("Restrict range of Bcc instructions (DEBUG)"))
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
bool isAsCheapAsAMove(QueryType Type=AllInBundle) const
Returns true if this instruction has the same cost (or less) than a move instruction.
int findRegisterDefOperandIdx(unsigned Reg, bool isDead=false, bool Overlap=false, const TargetRegisterInfo *TRI=nullptr) const
Returns the operand index that is a def of the specified register or -1 if it is not found...
MachineInstr * emitFrameIndexDebugValue(MachineFunction &MF, int FrameIx, uint64_t Offset, const MDNode *Var, const MDNode *Expr, const DebugLoc &DL) const
const MachineOperand & getOperand(unsigned i) const
bool rewriteAArch64FrameIndex(MachineInstr &MI, unsigned FrameRegIdx, unsigned FrameReg, int &Offset, const AArch64InstrInfo *TII)
rewriteAArch64FrameIndex - Rewrite MI to access 'Offset' bytes from the FP.
unsigned getSubReg(unsigned Reg, unsigned Idx) const
Returns the physical register number of sub-register "Index" for physical register RegNo...
static uint64_t decodeLogicalImmediate(uint64_t val, unsigned regSize)
decodeLogicalImmediate - Decode a logical immediate value in the form "N:immr:imms" (where the immr a...
static cl::opt< unsigned > CBZDisplacementBits("aarch64-cbz-offset-bits", cl::Hidden, cl::init(19), cl::desc("Restrict range of CB[N]Z instructions (DEBUG)"))
static bool canInstrSubstituteCmpInstr(MachineInstr *MI, MachineInstr *CmpInstr, const TargetRegisterInfo *TRI)
Check if CmpInstr can be substituted by MI.
constexpr bool isPowerOf2_64(uint64_t Value)
isPowerOf2_64 - This function returns true if the argument is a power of two > 0 (64 bit edition).
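A small sketch of the usual pairing with Log2_64, assuming AccessSize and Offset are locals:

  // Sketch: convert a byte offset to a scaled element offset only when the
  // access size is a power of two.
  if (isPowerOf2_64(AccessSize))
    Offset >>= Log2_64(AccessSize);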
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool any_of(R &&Range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly...
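A hedged sketch of the range wrapper, assuming MI is a MachineInstr in scope:

  // Sketch: test whether any operand of MI is a frame index.
  bool HasFI = any_of(MI.operands(),
                      [](const MachineOperand &MO) { return MO.isFI(); });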
static bool isPairableLdStInst(const MachineInstr &MI)
static const unsigned End
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore...
MI-level patchpoint operands.
self_iterator getIterator()
bool areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb, AliasAnalysis *AA=nullptr) const override
bool isBranchOffsetInRange(unsigned BranchOpc, int64_t BrOffset) const override
bool isAsCheapAsAMove(const MachineInstr &MI) const override
bool expandPostRAPseudo(MachineInstr &MI) const override
bool empty() const
empty - Check if the array is empty.
bool isThroughputPattern(MachineCombinerPattern Pattern) const override
Return true when a code sequence can improve throughput.
MachineCombinerPattern
These are instruction patterns matched by the machine combiner pass.
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns) const override
Return true when there is potentially a faster code sequence for an instruction chain ending in <Root...
unsigned getSubReg() const
MO_HI12 - This flag indicates that a symbol operand represents the bits 13-24 of a 64-bit address...
bool isIntN(unsigned N, int64_t x)
isIntN - Checks if a signed integer fits into the given (dynamic) bit width.
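A sketch mirroring the branch-range check earlier in the file: a Bcc displacement is a signed 19-bit word offset, so the byte offset is divided by 4 before testing.

  // Sketch: does a byte displacement fit in a 19-bit word-scaled field?
  bool InRange = isIntN(19, BrOffset / 4);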
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
reverse_iterator getReverse() const
Get a reverse iterator to the same node.
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, bool KillSrc) const override
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, unsigned DstReg, ArrayRef< MachineOperand > Cond, unsigned TrueReg, unsigned FalseReg) const override
This class contains a discriminated union of information about pointers in memory operands...
MO_TLS - Indicates that the operand being accessed is some kind of thread-local symbol.
bool isCoalescableExtInstr(const MachineInstr &MI, unsigned &SrcReg, unsigned &DstReg, unsigned &SubIdx) const override
unsigned char ClassifyGlobalReference(const GlobalValue *GV, const TargetMachine &TM) const
ClassifyGlobalReference - Find the target operand flags that describe how a global value should be re...
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode...
bool definesRegister(unsigned Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr fully defines the specified register.
EH_LABEL - Represents a label in mid basic block used to track locations needed for debug and excepti...
void setIsKill(bool Val=true)
unsigned getOpcode() const
Return the opcode number for this descriptor.
The memory access writes data.
static unsigned canFoldIntoCSel(const MachineRegisterInfo &MRI, unsigned VReg, unsigned *NewVReg=nullptr)
static uint64_t encodeLogicalImmediate(uint64_t imm, unsigned regSize)
encodeLogicalImmediate - Return the encoded immediate value for a logical immediate instruction of th...
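A hedged round-trip sketch; 0xFF00FF00FF00FF00 is a repeating 16-bit pattern and therefore encodable, and the isLogicalImmediate guard (assumed available alongside the encode/decode helpers) keeps the sketch safe for arbitrary values:

  // Sketch: encode a bitmask as an AArch64 logical immediate, then decode it.
  uint64_t Val = 0xFF00FF00FF00FF00ULL;
  if (AArch64_AM::isLogicalImmediate(Val, 64)) {
    uint64_t Enc = AArch64_AM::encodeLogicalImmediate(Val, 64);
    assert(AArch64_AM::decodeLogicalImmediate(Enc, 64) == Val);
  }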
bool readsRegister(unsigned Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr reads the specified register.
int findRegisterUseOperandIdx(unsigned Reg, bool isKill=false, const TargetRegisterInfo *TRI=nullptr) const
Returns the operand index that is a use of the specific register or -1 if it is not found...
bool memoperands_empty() const
Return true if we don't have any memory operands which describe the memory access done by this instruction.
static bool isIndirectBranchOpcode(int Opc)
void setOpcode(unsigned Op)
static bool isUncondBranchOpcode(int Opc)
void setDesc(const MCInstrDesc &tid)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one...
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
MachineOperand class - Representation of each machine instruction operand.
unsigned getObjectAlignment(int ObjectIdx) const
Return the alignment of the specified stack object.
static void parseCondBranch(MachineInstr *LastInst, MachineBasicBlock *&Target, SmallVectorImpl< MachineOperand > &Cond)
static bool isCombineInstrCandidate(unsigned Opc)
const MachineInstrBuilder & addFrameIndex(int Idx) const
bool optimizeCondBranch(MachineInstr &MI) const override
Replace csincr-branch sequence by simple conditional branch.
void addRegisterDefined(unsigned Reg, const TargetRegisterInfo *RegInfo=nullptr)
We have determined MI defines a register.
unsigned isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, unsigned base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SynchronizationScope SynchScope=CrossThread, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
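A sketch of how the spill/reload helpers in this file typically build a stack-slot memory operand (MF and FI assumed in scope):

  // Sketch: describe a store to stack slot FI with size and alignment taken
  // from the frame info.
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MF.getFrameInfo().getObjectSize(FI),
      MF.getFrameInfo().getObjectAlignment(FI));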
Target - Wrapper for Target specific information.
const TargetRegisterClass * getRegClassConstraint(unsigned OpIdx, const TargetInstrInfo *TII, const TargetRegisterInfo *TRI) const
Compute the static register class constraint for operand OpIdx.
bool hasArithmeticBccFusion() const
static bool getMaddPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns)
Find instructions that can be turned into madd.
const TargetRegisterClass * getMinimalPhysRegClass(unsigned Reg, MVT VT=MVT::Other) const
Returns the Register Class of a physical register of the given type, picking the most sub register cl...
MachineInstr * getUniqueVRegDef(unsigned Reg) const
getUniqueVRegDef - Return the unique machine instr that defines the specified virtual register or nul...
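A hedged sketch of the usual guard before walking to a value's definition (MRI and VReg assumed in scope):

  // Sketch: only proceed when the virtual register has a single definition.
  MachineInstr *DefMI = MRI.getUniqueVRegDef(VReg);
  if (!DefMI)
    return false;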
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
static cl::opt< unsigned > TBZDisplacementBits("aarch64-tbz-offset-bits", cl::Hidden, cl::init(14), cl::desc("Restrict range of TB[N]Z instructions (DEBUG)"))
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned char TargetFlags=0) const
Flags
Flags values. These may be or'd together.
MachineRegisterInfo - Keep track of information for virtual and physical registers, including vreg register classes, use/def chains for registers, etc.
The memory access reads data.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
Representation of each machine instruction.
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify=false) const override
static bool isPhysicalRegister(unsigned Reg)
Return true if the specified register number is in the physical register namespace.
bool hasOneDef(unsigned RegNo) const
Return true if there is exactly one operand defining the specified register.
bool optimizeCompareInstr(MachineInstr &CmpInstr, unsigned SrcReg, unsigned SrcReg2, int CmpMask, int CmpValue, const MachineRegisterInfo *MRI) const override
optimizeCompareInstr - Convert the instruction supplying the argument to the comparison into one that...
bool analyzeCompare(const MachineInstr &MI, unsigned &SrcReg, unsigned &SrcReg2, int &CmpMask, int &CmpValue) const override
analyzeCompare - For a comparison instruction, return the source registers in SrcReg and SrcReg2...
bool isScaledAddr(const MachineInstr &MI) const
Return true if this load/store scales or extends its register offset.
static AArch64_AM::ShiftExtendType getShiftType(unsigned Imm)
getShiftType - Extract the shift type.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MI-level stackmap operands.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
static bool areCFlagsAliveInSuccessors(MachineBasicBlock *MBB)
Check if AArch64::NZCV should be alive in successors of MBB.
bool hasArithmeticCbzFusion() const
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
bool hasOneNonDBGUse(unsigned RegNo) const
hasOneNonDBGUse - Return true if there is exactly one non-Debug instruction using the specified regis...
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
static MachineOperand CreateImm(int64_t Val)
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page...
void setSubReg(unsigned subReg)
bool isLdStPairSuppressed(const MachineInstr &MI) const
Return true if pairing the given load or store is hinted to be unprofitable.
void clearKillFlags(unsigned Reg) const
clearKillFlags - Iterate over all the uses of the given register and clear the kill flag from the Mac...
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
static unsigned getBranchDisplacementBits(unsigned Opc)
static unsigned convertFlagSettingOpcode(const MachineInstr &MI)
Return the opcode that does not set flags when possible - otherwise return the original opcode...
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const override
bool getMemOpBaseRegImmOfsWidth(MachineInstr &LdSt, unsigned &BaseReg, int64_t &Offset, unsigned &Width, const TargetRegisterInfo *TRI) const
constexpr int64_t SignExtend64(uint64_t x)
Sign-extend the number in the bottom B bits of X to a 64-bit integer.
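The declaration above is the templated form with the bit-width parameter elided by the rendering; a sketch of both the template and the dynamic-width overload, assuming EncodedImm holds a 9-bit two's-complement field:

  // Sketch: recover the signed value of a 9-bit immediate field.
  int64_t ByteOffset = SignExtend64<9>(EncodedImm);
  int64_t Same = SignExtend64(EncodedImm, 9);   // dynamic-width overload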
bool isGPRCopy(const MachineInstr &MI) const
Does this instruction rename a GPR without modifying bits?
MachineInstr * getVRegDef(unsigned Reg) const
getVRegDef - Return the machine instr that defines the specified virtual register or null if none is ...
static unsigned removeCopies(const MachineRegisterInfo &MRI, unsigned VReg)
static bool isCombineInstrCandidate32(unsigned Opc)
unsigned getReg() const
getReg - Returns the register number.
static bool scaleOffset(unsigned Opc, int64_t &Offset)
MachineBasicBlock * getBranchDestBlock(const MachineInstr &MI) const override
void copyPhysRegTuple(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, bool KillSrc, unsigned Opcode, llvm::ArrayRef< unsigned > Indices) const
void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const override
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual const TargetInstrInfo * getInstrInfo() const
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned char TargetFlags=0) const
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
#define LLVM_FALLTHROUGH
LLVM_FALLTHROUGH - Mark fallthrough cases in switch statements.
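A minimal sketch of the annotation in a switch; the opcodes and the handleAdd helper are illustrative, not from the source:

  switch (Opc) {
  case AArch64::ADDSWri:
    SetsFlags = true;
    LLVM_FALLTHROUGH;            // intentional: shares the ADDWri handling
  case AArch64::ADDWri:
    handleAdd(MI);               // hypothetical helper
    break;
  }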
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
static bool areCFlagsAccessedBetweenInstrs(MachineBasicBlock::iterator From, MachineBasicBlock::iterator To, const TargetRegisterInfo *TRI, const AccessKind AccessToCheck=AK_All)
True when condition flags are accessed (either by writing or reading) on the instruction trace starti...
std::underlying_type< E >::type Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
virtual void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
bool def_empty(unsigned RegNo) const
def_empty - Return true if there are no instructions defining the specified register (it may be live-...
const MachineInstrBuilder & addOperand(const MachineOperand &MO) const
static MachineInstr * genFusedMultiply(MachineFunction &MF, MachineRegisterInfo &MRI, const TargetInstrInfo *TII, MachineInstr &Root, SmallVectorImpl< MachineInstr * > &InsInstrs, unsigned IdxMulOpd, unsigned MaddOpc, const TargetRegisterClass *RC, FMAInstKind kind=FMAInstKind::Default)
genFusedMultiply - Generate fused multiply instructions.
Primary interface to the complete machine description for the target machine.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
void addOperand(const MCOperand &Op)
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow...
bool isCandidateToMergeOrPair(MachineInstr &MI) const
Return true if this is a load/store that can be potentially paired/merged.
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
static bool isCombineInstrSettingFlag(unsigned Opc)
auto find_if(R &&Range, UnaryPredicate P) -> decltype(std::begin(Range))
Provide wrappers to std::find_if which take ranges instead of having to pass begin/end explicitly...
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
static MCOperand createImm(int64_t Val)
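A hedged sketch of building an MCInst by hand with this helper together with the setOpcode/addOperand entries documented above:

  // Sketch: encode HINT #0 (a NOP) as an MCInst.
  MCInst Inst;
  Inst.setOpcode(AArch64::HINT);
  Inst.addOperand(MCOperand::createImm(0));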
unsigned Log2_64(uint64_t Value)
Log2_64 - This function returns the floor log base 2 of the specified value, -1 if the value is zero...
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
static bool forwardCopyWillClobberTuple(unsigned DestReg, unsigned SrcReg, unsigned NumRegs)
bool modifiesRegister(unsigned Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register...
static bool UpdateOperandRegClass(MachineInstr &Instr)
static bool isADDSRegImm(unsigned Opcode)
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
bool contains(unsigned Reg) const
Return true if the specified register is included in this register class.