43#define AARCH64_EXPAND_PSEUDO_NAME "AArch64 pseudo instruction expansion pass"
69 unsigned ContiguousOpc,
unsigned StridedOpc);
80 unsigned LdarOp,
unsigned StlrOp,
unsigned CmpOp,
81 unsigned ExtendImm,
unsigned ZeroReg,
105char AArch64ExpandPseudo::ID = 0;
117 assert(MO.isReg() && MO.getReg());
136 if (DstReg == AArch64::XZR || DstReg == AArch64::WZR) {
139 MI.eraseFromParent();
148 for (
auto I =
Insn.begin(), E =
Insn.end();
I != E; ++
I) {
149 bool LastItem = std::next(
I) == E;
154 case AArch64::ORRWri:
155 case AArch64::ORRXri:
158 .
add(
MI.getOperand(0))
159 .
addReg(BitSize == 32 ? AArch64::WZR : AArch64::XZR)
163 bool DstIsDead =
MI.getOperand(0).isDead();
173 case AArch64::ORRWrs:
174 case AArch64::ORRXrs: {
176 bool DstIsDead =
MI.getOperand(0).isDead();
186 case AArch64::ANDXri:
187 case AArch64::EORXri:
190 .
add(
MI.getOperand(0))
191 .
addReg(BitSize == 32 ? AArch64::WZR : AArch64::XZR)
195 bool DstIsDead =
MI.getOperand(0).isDead();
205 case AArch64::MOVNWi:
206 case AArch64::MOVNXi:
207 case AArch64::MOVZWi:
208 case AArch64::MOVZXi: {
209 bool DstIsDead =
MI.getOperand(0).isDead();
217 case AArch64::MOVKWi:
218 case AArch64::MOVKXi: {
220 bool DstIsDead =
MI.getOperand(0).isDead();
233 MI.eraseFromParent();
237bool AArch64ExpandPseudo::expandCMP_SWAP(
239 unsigned StlrOp,
unsigned CmpOp,
unsigned ExtendImm,
unsigned ZeroReg,
244 Register StatusReg =
MI.getOperand(1).getReg();
245 bool StatusDead =
MI.getOperand(1).isDead();
248 assert(!
MI.getOperand(2).isUndef() &&
"cannot handle undef");
250 Register DesiredReg =
MI.getOperand(3).getReg();
259 MF->
insert(++LoadCmpBB->getIterator(), StoreBB);
260 MF->
insert(++StoreBB->getIterator(), DoneBB);
268 BuildMI(LoadCmpBB, MIMD,
TII->get(AArch64::MOVZWi), StatusReg)
272 BuildMI(LoadCmpBB, MIMD,
TII->get(CmpOp), ZeroReg)
276 BuildMI(LoadCmpBB, MIMD,
TII->get(AArch64::Bcc))
280 LoadCmpBB->addSuccessor(DoneBB);
281 LoadCmpBB->addSuccessor(StoreBB);
286 BuildMI(StoreBB, MIMD,
TII->get(StlrOp), StatusReg)
289 BuildMI(StoreBB, MIMD,
TII->get(AArch64::CBNZW))
292 StoreBB->addSuccessor(LoadCmpBB);
293 StoreBB->addSuccessor(DoneBB);
295 DoneBB->splice(DoneBB->end(), &
MBB,
MI,
MBB.
end());
296 DoneBB->transferSuccessors(&
MBB);
301 MI.eraseFromParent();
309 StoreBB->clearLiveIns();
311 LoadCmpBB->clearLiveIns();
317bool AArch64ExpandPseudo::expandCMP_SWAP_128(
324 Register StatusReg =
MI.getOperand(2).getReg();
325 bool StatusDead =
MI.getOperand(2).isDead();
328 assert(!
MI.getOperand(3).isUndef() &&
"cannot handle undef");
330 Register DesiredLoReg =
MI.getOperand(4).getReg();
331 Register DesiredHiReg =
MI.getOperand(5).getReg();
332 Register NewLoReg =
MI.getOperand(6).getReg();
333 Register NewHiReg =
MI.getOperand(7).getReg();
335 unsigned LdxpOp, StxpOp;
337 switch (
MI.getOpcode()) {
338 case AArch64::CMP_SWAP_128_MONOTONIC:
339 LdxpOp = AArch64::LDXPX;
340 StxpOp = AArch64::STXPX;
342 case AArch64::CMP_SWAP_128_RELEASE:
343 LdxpOp = AArch64::LDXPX;
344 StxpOp = AArch64::STLXPX;
346 case AArch64::CMP_SWAP_128_ACQUIRE:
347 LdxpOp = AArch64::LDAXPX;
348 StxpOp = AArch64::STXPX;
350 case AArch64::CMP_SWAP_128:
351 LdxpOp = AArch64::LDAXPX;
352 StxpOp = AArch64::STLXPX;
365 MF->
insert(++LoadCmpBB->getIterator(), StoreBB);
366 MF->
insert(++StoreBB->getIterator(), FailBB);
367 MF->
insert(++FailBB->getIterator(), DoneBB);
378 BuildMI(LoadCmpBB, MIMD,
TII->get(AArch64::SUBSXrs), AArch64::XZR)
382 BuildMI(LoadCmpBB, MIMD,
TII->get(AArch64::CSINCWr), StatusReg)
386 BuildMI(LoadCmpBB, MIMD,
TII->get(AArch64::SUBSXrs), AArch64::XZR)
390 BuildMI(LoadCmpBB, MIMD,
TII->get(AArch64::CSINCWr), StatusReg)
394 BuildMI(LoadCmpBB, MIMD,
TII->get(AArch64::CBNZW))
397 LoadCmpBB->addSuccessor(FailBB);
398 LoadCmpBB->addSuccessor(StoreBB);
403 BuildMI(StoreBB, MIMD,
TII->get(StxpOp), StatusReg)
407 BuildMI(StoreBB, MIMD,
TII->get(AArch64::CBNZW))
411 StoreBB->addSuccessor(LoadCmpBB);
412 StoreBB->addSuccessor(DoneBB);
417 BuildMI(FailBB, MIMD,
TII->get(StxpOp), StatusReg)
421 BuildMI(FailBB, MIMD,
TII->get(AArch64::CBNZW))
424 FailBB->addSuccessor(LoadCmpBB);
425 FailBB->addSuccessor(DoneBB);
427 DoneBB->splice(DoneBB->end(), &
MBB,
MI,
MBB.
end());
428 DoneBB->transferSuccessors(&
MBB);
433 MI.eraseFromParent();
443 FailBB->clearLiveIns();
445 StoreBB->clearLiveIns();
447 LoadCmpBB->clearLiveIns();
491bool AArch64ExpandPseudo::expand_DestructiveOp(
500 bool DstIsDead =
MI.getOperand(0).isDead();
502 unsigned PredIdx, DOPIdx, SrcIdx, Src2Idx;
507 if (DstReg ==
MI.getOperand(3).getReg()) {
509 std::tie(PredIdx, DOPIdx, SrcIdx) = std::make_tuple(1, 3, 2);
516 std::tie(PredIdx, DOPIdx, SrcIdx) = std::make_tuple(1, 2, 3);
519 std::tie(PredIdx, DOPIdx, SrcIdx) = std::make_tuple(2, 3, 3);
522 std::tie(PredIdx, DOPIdx, SrcIdx, Src2Idx) = std::make_tuple(1, 2, 3, 4);
523 if (DstReg ==
MI.getOperand(3).getReg()) {
525 std::tie(PredIdx, DOPIdx, SrcIdx, Src2Idx) = std::make_tuple(1, 3, 4, 2);
527 }
else if (DstReg ==
MI.getOperand(4).getReg()) {
529 std::tie(PredIdx, DOPIdx, SrcIdx, Src2Idx) = std::make_tuple(1, 4, 3, 2);
540 bool DOPRegIsUnique =
false;
543 DOPRegIsUnique = DstReg !=
MI.getOperand(SrcIdx).getReg();
548 DstReg !=
MI.getOperand(DOPIdx).getReg() ||
549 MI.getOperand(DOPIdx).getReg() !=
MI.getOperand(SrcIdx).getReg();
553 DOPRegIsUnique =
true;
557 DstReg !=
MI.getOperand(DOPIdx).getReg() ||
558 (
MI.getOperand(DOPIdx).getReg() !=
MI.getOperand(SrcIdx).getReg() &&
559 MI.getOperand(DOPIdx).getReg() !=
MI.getOperand(Src2Idx).getReg());
575 uint64_t ElementSize =
TII->getElementSizeForOpcode(Opcode);
576 unsigned MovPrfx, LSLZero, MovPrfxZero;
577 switch (ElementSize) {
580 MovPrfx = AArch64::MOVPRFX_ZZ;
581 LSLZero = AArch64::LSL_ZPmI_B;
582 MovPrfxZero = AArch64::MOVPRFX_ZPzZ_B;
585 MovPrfx = AArch64::MOVPRFX_ZZ;
586 LSLZero = AArch64::LSL_ZPmI_H;
587 MovPrfxZero = AArch64::MOVPRFX_ZPzZ_H;
590 MovPrfx = AArch64::MOVPRFX_ZZ;
591 LSLZero = AArch64::LSL_ZPmI_S;
592 MovPrfxZero = AArch64::MOVPRFX_ZPzZ_S;
595 MovPrfx = AArch64::MOVPRFX_ZZ;
596 LSLZero = AArch64::LSL_ZPmI_D;
597 MovPrfxZero = AArch64::MOVPRFX_ZPzZ_D;
613 "The destructive operand should be unique");
615 "This instruction is unpredicated");
620 .
addReg(
MI.getOperand(PredIdx).getReg())
621 .
addReg(
MI.getOperand(DOPIdx).getReg());
635 .
add(
MI.getOperand(PredIdx))
639 }
else if (DstReg !=
MI.getOperand(DOPIdx).getReg()) {
640 assert(DOPRegIsUnique &&
"The destructive operand should be unique");
643 .
addReg(
MI.getOperand(DOPIdx).getReg());
656 .
add(
MI.getOperand(PredIdx))
657 .
add(
MI.getOperand(SrcIdx));
663 DOP.
add(
MI.getOperand(PredIdx))
665 .
add(
MI.getOperand(SrcIdx));
668 DOP.
add(
MI.getOperand(PredIdx))
670 .
add(
MI.getOperand(SrcIdx))
671 .
add(
MI.getOperand(Src2Idx));
677 transferImpOps(
MI, PRFX, DOP);
679 transferImpOps(
MI, DOP, DOP);
681 MI.eraseFromParent();
685bool AArch64ExpandPseudo::expandSetTagLoop(
691 Register AddressReg =
MI.getOperand(1).getReg();
695 bool ZeroData =
MI.getOpcode() == AArch64::STZGloop_wback;
696 const unsigned OpCode1 =
697 ZeroData ? AArch64::STZGPostIndex : AArch64::STGPostIndex;
698 const unsigned OpCode2 =
699 ZeroData ? AArch64::STZ2GPostIndex : AArch64::ST2GPostIndex;
701 unsigned Size =
MI.getOperand(2).getImm();
703 if (
Size % (16 * 2) != 0) {
719 MF->
insert(++LoopBB->getIterator(), DoneBB);
738 LoopBB->addSuccessor(LoopBB);
739 LoopBB->addSuccessor(DoneBB);
741 DoneBB->splice(DoneBB->end(), &
MBB,
MI,
MBB.
end());
742 DoneBB->transferSuccessors(&
MBB);
747 MI.eraseFromParent();
754 LoopBB->clearLiveIns();
756 DoneBB->clearLiveIns();
764 unsigned Opc,
unsigned N) {
765 assert((Opc == AArch64::LDR_ZXI || Opc == AArch64::STR_ZXI ||
766 Opc == AArch64::LDR_PXI || Opc == AArch64::STR_PXI) &&
767 "Unexpected opcode");
768 unsigned RState = (Opc == AArch64::LDR_ZXI || Opc == AArch64::LDR_PXI)
771 unsigned sub0 = (Opc == AArch64::LDR_ZXI || Opc == AArch64::STR_ZXI)
778 int ImmOffset =
MI.getOperand(2).getImm() +
Offset;
779 bool Kill = (
Offset + 1 ==
N) ?
MI.getOperand(1).isKill() :
false;
780 assert(ImmOffset >= -256 && ImmOffset < 256 &&
781 "Immediate spill offset out of range");
788 MI.eraseFromParent();
799 unsigned RegMaskStartIdx) {
808 while (!
MBBI->getOperand(RegMaskStartIdx).isRegMask()) {
810 assert(MOP.
isReg() &&
"can only add register operands");
812 MOP.
getReg(),
false,
true,
false,
818 Call->addOperand(MO);
829 unsigned RegMaskStartIdx) {
830 unsigned Opc = CallTarget.
isGlobal() ? AArch64::BL : AArch64::BLR;
833 "invalid operand for regular call");
837bool AArch64ExpandPseudo::expandCALL_RVMARKER(
846 assert(RVTarget.
isGlobal() &&
"invalid operand for attached call");
850 if (
MI.getOpcode() == AArch64::BLRA_RVMARKER) {
859 "Invalid auth call key");
866 assert(
MI.getOpcode() == AArch64::BLR_RVMARKER &&
"unknown rvmarker MI");
882 if (
MI.shouldUpdateCallSiteInfo())
885 MI.eraseFromParent();
887 std::next(RVCall->getIterator()));
911 if (
MI.shouldUpdateCallSiteInfo())
914 MI.eraseFromParent();
919bool AArch64ExpandPseudo::expandStoreSwiftAsyncContext(
927 if (STI.getTargetTriple().getArchName() !=
"arm64e") {
944 unsigned Opc =
Offset >= 0 ? AArch64::ADDXri : AArch64::SUBXri;
981 MI.getParent()->successors().begin() !=
982 MI.getParent()->successors().end()) &&
983 "Unexpected unreachable in block that restores ZA");
988 .
add(
MI.getOperand(0));
1010 for (
unsigned I = 2;
I <
MI.getNumOperands(); ++
I)
1011 MIB.
add(
MI.getOperand(
I));
1014 MI.eraseFromParent();
1028 MI.getParent()->successors().begin() ==
1029 MI.getParent()->successors().end()) {
1030 MI.eraseFromParent();
1073 switch (
MI.getOperand(2).getImm()) {
1077 Opc = AArch64::TBNZW;
1080 Opc = AArch64::TBZW;
1083 auto PStateSM =
MI.getOperand(3).getReg();
1085 unsigned SMReg32 =
TRI->getSubReg(PStateSM, AArch64::sub_32);
1107 TII->get(AArch64::MSRpstatesvcrImm1));
1111 MIB.
add(
MI.getOperand(0));
1112 MIB.
add(
MI.getOperand(1));
1113 for (
unsigned i = 4; i <
MI.getNumOperands(); ++i)
1114 MIB.
add(
MI.getOperand(i));
1118 MI.eraseFromParent();
1122bool AArch64ExpandPseudo::expandMultiVecPseudo(
1125 unsigned ContiguousOp,
unsigned StridedOpc) {
1140 .
add(
MI.getOperand(0))
1141 .
add(
MI.getOperand(1))
1142 .
add(
MI.getOperand(2))
1143 .
add(
MI.getOperand(3));
1144 transferImpOps(
MI, MIB, MIB);
1145 MI.eraseFromParent();
1149bool AArch64ExpandPseudo::expandFormTuplePseudo(
1154 Register ReturnTuple =
MI.getOperand(0).getReg();
1158 for (
unsigned I = 0;
I <
Size; ++
I) {
1159 Register FormTupleOpReg =
MI.getOperand(
I + 1).getReg();
1161 TRI->getSubReg(ReturnTuple, AArch64::zsub0 +
I);
1164 if (FormTupleOpReg != ReturnTupleSubReg)
1171 MI.eraseFromParent();
1181 unsigned Opcode =
MI.getOpcode();
1185 if (OrigInstr != -1) {
1186 auto &Orig =
TII->get(OrigInstr);
1189 return expand_DestructiveOp(
MI,
MBB,
MBBI);
1197 case AArch64::BSPv8i8:
1198 case AArch64::BSPv16i8: {
1200 if (DstReg ==
MI.getOperand(3).getReg()) {
1203 TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BITv8i8
1204 : AArch64::BITv16i8))
1205 .
add(
MI.getOperand(0))
1206 .
add(
MI.getOperand(3))
1207 .
add(
MI.getOperand(2))
1208 .
add(
MI.getOperand(1));
1209 }
else if (DstReg ==
MI.getOperand(2).getReg()) {
1212 TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BIFv8i8
1213 : AArch64::BIFv16i8))
1214 .
add(
MI.getOperand(0))
1215 .
add(
MI.getOperand(2))
1216 .
add(
MI.getOperand(3))
1217 .
add(
MI.getOperand(1));
1220 if (DstReg ==
MI.getOperand(1).getReg()) {
1222 TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BSLv8i8
1223 : AArch64::BSLv16i8))
1224 .
add(
MI.getOperand(0))
1225 .
add(
MI.getOperand(1))
1226 .
add(
MI.getOperand(2))
1227 .
add(
MI.getOperand(3));
1230 TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::ORRv8i8
1231 : AArch64::ORRv16i8))
1235 .
add(
MI.getOperand(1))
1236 .
add(
MI.getOperand(1));
1238 TII->get(Opcode == AArch64::BSPv8i8 ? AArch64::BSLv8i8
1239 : AArch64::BSLv16i8))
1240 .
add(
MI.getOperand(0))
1244 .
add(
MI.getOperand(2))
1245 .
add(
MI.getOperand(3));
1248 MI.eraseFromParent();
1252 case AArch64::ADDWrr:
1253 case AArch64::SUBWrr:
1254 case AArch64::ADDXrr:
1255 case AArch64::SUBXrr:
1256 case AArch64::ADDSWrr:
1257 case AArch64::SUBSWrr:
1258 case AArch64::ADDSXrr:
1259 case AArch64::SUBSXrr:
1260 case AArch64::ANDWrr:
1261 case AArch64::ANDXrr:
1262 case AArch64::BICWrr:
1263 case AArch64::BICXrr:
1264 case AArch64::ANDSWrr:
1265 case AArch64::ANDSXrr:
1266 case AArch64::BICSWrr:
1267 case AArch64::BICSXrr:
1268 case AArch64::EONWrr:
1269 case AArch64::EONXrr:
1270 case AArch64::EORWrr:
1271 case AArch64::EORXrr:
1272 case AArch64::ORNWrr:
1273 case AArch64::ORNXrr:
1274 case AArch64::ORRWrr:
1275 case AArch64::ORRXrr: {
1277 switch (
MI.getOpcode()) {
1280 case AArch64::ADDWrr: Opcode = AArch64::ADDWrs;
break;
1281 case AArch64::SUBWrr: Opcode = AArch64::SUBWrs;
break;
1282 case AArch64::ADDXrr: Opcode = AArch64::ADDXrs;
break;
1283 case AArch64::SUBXrr: Opcode = AArch64::SUBXrs;
break;
1284 case AArch64::ADDSWrr: Opcode = AArch64::ADDSWrs;
break;
1285 case AArch64::SUBSWrr: Opcode = AArch64::SUBSWrs;
break;
1286 case AArch64::ADDSXrr: Opcode = AArch64::ADDSXrs;
break;
1287 case AArch64::SUBSXrr: Opcode = AArch64::SUBSXrs;
break;
1288 case AArch64::ANDWrr: Opcode = AArch64::ANDWrs;
break;
1289 case AArch64::ANDXrr: Opcode = AArch64::ANDXrs;
break;
1290 case AArch64::BICWrr: Opcode = AArch64::BICWrs;
break;
1291 case AArch64::BICXrr: Opcode = AArch64::BICXrs;
break;
1292 case AArch64::ANDSWrr: Opcode = AArch64::ANDSWrs;
break;
1293 case AArch64::ANDSXrr: Opcode = AArch64::ANDSXrs;
break;
1294 case AArch64::BICSWrr: Opcode = AArch64::BICSWrs;
break;
1295 case AArch64::BICSXrr: Opcode = AArch64::BICSXrs;
break;
1296 case AArch64::EONWrr: Opcode = AArch64::EONWrs;
break;
1297 case AArch64::EONXrr: Opcode = AArch64::EONXrs;
break;
1298 case AArch64::EORWrr: Opcode = AArch64::EORWrs;
break;
1299 case AArch64::EORXrr: Opcode = AArch64::EORXrs;
break;
1300 case AArch64::ORNWrr: Opcode = AArch64::ORNWrs;
break;
1301 case AArch64::ORNXrr: Opcode = AArch64::ORNXrs;
break;
1302 case AArch64::ORRWrr: Opcode = AArch64::ORRWrs;
break;
1303 case AArch64::ORRXrr: Opcode = AArch64::ORRXrs;
break;
1308 TII->get(Opcode),
MI.getDebugLoc(),
true);
1311 MIB1->setPCSections(MF,
MI.getPCSections());
1313 .add(
MI.getOperand(1))
1314 .add(
MI.getOperand(2))
1316 transferImpOps(
MI, MIB1, MIB1);
1317 if (
auto DebugNumber =
MI.peekDebugInstrNum())
1319 MI.eraseFromParent();
1323 case AArch64::LOADgot: {
1332 TII->get(AArch64::LDRXl), DstReg);
1340 "Only expect globals, externalsymbols, or constant pools");
1353 unsigned Reg32 =
TRI->getSubReg(DstReg, AArch64::sub_32);
1354 unsigned DstFlags =
MI.getOperand(0).getTargetFlags();
1362 .
add(
MI.getOperand(0))
1377 "Only expect globals, externalsymbols, or constant pools");
1385 transferImpOps(
MI, MIB1, MIB2);
1387 MI.eraseFromParent();
1390 case AArch64::MOVaddrBA: {
1397 assert(
MI.getOperand(1).getOffset() == 0 &&
"unexpected offset");
1407 TII->get(AArch64::LDRXui), DstReg)
1411 transferImpOps(
MI, MIB1, MIB2);
1412 MI.eraseFromParent();
1417 case AArch64::MOVaddr:
1418 case AArch64::MOVaddrJT:
1419 case AArch64::MOVaddrCP:
1420 case AArch64::MOVaddrTLS:
1421 case AArch64::MOVaddrEXT: {
1424 assert(DstReg != AArch64::XZR);
1427 .
add(
MI.getOperand(1));
1437 auto Tag =
MI.getOperand(1);
1439 Tag.setOffset(0x100000000);
1448 .
add(
MI.getOperand(0))
1450 .
add(
MI.getOperand(2))
1453 transferImpOps(
MI, MIB1, MIB2);
1454 MI.eraseFromParent();
1457 case AArch64::ADDlowTLS:
1460 .
add(
MI.getOperand(0))
1461 .
add(
MI.getOperand(1))
1462 .
add(
MI.getOperand(2))
1464 MI.eraseFromParent();
1467 case AArch64::MOVbaseTLS: {
1469 auto SysReg = AArch64SysReg::TPIDR_EL0;
1472 SysReg = AArch64SysReg::TPIDR_EL3;
1474 SysReg = AArch64SysReg::TPIDR_EL2;
1476 SysReg = AArch64SysReg::TPIDR_EL1;
1478 SysReg = AArch64SysReg::TPIDRRO_EL0;
1481 MI.eraseFromParent();
1485 case AArch64::MOVi32imm:
1487 case AArch64::MOVi64imm:
1489 case AArch64::RET_ReallyLR: {
1498 transferImpOps(
MI, MIB, MIB);
1499 MI.eraseFromParent();
1502 case AArch64::CMP_SWAP_8:
1503 return expandCMP_SWAP(
MBB,
MBBI, AArch64::LDAXRB, AArch64::STLXRB,
1506 AArch64::WZR, NextMBBI);
1507 case AArch64::CMP_SWAP_16:
1508 return expandCMP_SWAP(
MBB,
MBBI, AArch64::LDAXRH, AArch64::STLXRH,
1511 AArch64::WZR, NextMBBI);
1512 case AArch64::CMP_SWAP_32:
1513 return expandCMP_SWAP(
MBB,
MBBI, AArch64::LDAXRW, AArch64::STLXRW,
1516 AArch64::WZR, NextMBBI);
1517 case AArch64::CMP_SWAP_64:
1518 return expandCMP_SWAP(
MBB,
MBBI,
1519 AArch64::LDAXRX, AArch64::STLXRX, AArch64::SUBSXrs,
1521 AArch64::XZR, NextMBBI);
1522 case AArch64::CMP_SWAP_128:
1523 case AArch64::CMP_SWAP_128_RELEASE:
1524 case AArch64::CMP_SWAP_128_ACQUIRE:
1525 case AArch64::CMP_SWAP_128_MONOTONIC:
1526 return expandCMP_SWAP_128(
MBB,
MBBI, NextMBBI);
1528 case AArch64::AESMCrrTied:
1529 case AArch64::AESIMCrrTied: {
1532 TII->get(Opcode == AArch64::AESMCrrTied ? AArch64::AESMCrr :
1534 .
add(
MI.getOperand(0))
1535 .
add(
MI.getOperand(1));
1536 transferImpOps(
MI, MIB, MIB);
1537 MI.eraseFromParent();
1540 case AArch64::IRGstack: {
1551 StackOffset FrameRegOffset = TFI->resolveFrameOffsetReference(
1552 MF, BaseOffset,
false ,
false , FrameReg,
1556 if (FrameRegOffset) {
1558 SrcReg =
MI.getOperand(0).getReg();
1560 FrameRegOffset,
TII);
1563 .
add(
MI.getOperand(0))
1565 .
add(
MI.getOperand(2));
1566 MI.eraseFromParent();
1569 case AArch64::TAGPstack: {
1570 int64_t
Offset =
MI.getOperand(2).getImm();
1572 TII->get(
Offset >= 0 ? AArch64::ADDG : AArch64::SUBG))
1573 .
add(
MI.getOperand(0))
1574 .
add(
MI.getOperand(1))
1576 .
add(
MI.getOperand(4));
1577 MI.eraseFromParent();
1580 case AArch64::STGloop_wback:
1581 case AArch64::STZGloop_wback:
1582 return expandSetTagLoop(
MBB,
MBBI, NextMBBI);
1583 case AArch64::STGloop:
1584 case AArch64::STZGloop:
1586 "Non-writeback variants of STGloop / STZGloop should not "
1587 "survive past PrologEpilogInserter.");
1588 case AArch64::STR_ZZZZXI:
1589 return expandSVESpillFill(
MBB,
MBBI, AArch64::STR_ZXI, 4);
1590 case AArch64::STR_ZZZXI:
1591 return expandSVESpillFill(
MBB,
MBBI, AArch64::STR_ZXI, 3);
1592 case AArch64::STR_ZZXI:
1593 return expandSVESpillFill(
MBB,
MBBI, AArch64::STR_ZXI, 2);
1594 case AArch64::STR_PPXI:
1595 return expandSVESpillFill(
MBB,
MBBI, AArch64::STR_PXI, 2);
1596 case AArch64::LDR_ZZZZXI:
1597 return expandSVESpillFill(
MBB,
MBBI, AArch64::LDR_ZXI, 4);
1598 case AArch64::LDR_ZZZXI:
1599 return expandSVESpillFill(
MBB,
MBBI, AArch64::LDR_ZXI, 3);
1600 case AArch64::LDR_ZZXI:
1601 return expandSVESpillFill(
MBB,
MBBI, AArch64::LDR_ZXI, 2);
1602 case AArch64::LDR_PPXI:
1603 return expandSVESpillFill(
MBB,
MBBI, AArch64::LDR_PXI, 2);
1604 case AArch64::BLR_RVMARKER:
1605 case AArch64::BLRA_RVMARKER:
1606 return expandCALL_RVMARKER(
MBB,
MBBI);
1607 case AArch64::BLR_BTI:
1608 return expandCALL_BTI(
MBB,
MBBI);
1609 case AArch64::StoreSwiftAsyncContext:
1610 return expandStoreSwiftAsyncContext(
MBB,
MBBI);
1611 case AArch64::RestoreZAPseudo: {
1612 auto *NewMBB = expandRestoreZA(
MBB,
MBBI);
1617 case AArch64::MSRpstatePseudo: {
1618 auto *NewMBB = expandCondSMToggle(
MBB,
MBBI);
1623 case AArch64::COALESCER_BARRIER_FPR16:
1624 case AArch64::COALESCER_BARRIER_FPR32:
1625 case AArch64::COALESCER_BARRIER_FPR64:
1626 case AArch64::COALESCER_BARRIER_FPR128:
1627 MI.eraseFromParent();
1629 case AArch64::LD1B_2Z_IMM_PSEUDO:
1630 return expandMultiVecPseudo(
1631 MBB,
MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
1632 AArch64::LD1B_2Z_IMM, AArch64::LD1B_2Z_STRIDED_IMM);
1633 case AArch64::LD1H_2Z_IMM_PSEUDO:
1634 return expandMultiVecPseudo(
1635 MBB,
MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
1636 AArch64::LD1H_2Z_IMM, AArch64::LD1H_2Z_STRIDED_IMM);
1637 case AArch64::LD1W_2Z_IMM_PSEUDO:
1638 return expandMultiVecPseudo(
1639 MBB,
MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
1640 AArch64::LD1W_2Z_IMM, AArch64::LD1W_2Z_STRIDED_IMM);
1641 case AArch64::LD1D_2Z_IMM_PSEUDO:
1642 return expandMultiVecPseudo(
1643 MBB,
MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
1644 AArch64::LD1D_2Z_IMM, AArch64::LD1D_2Z_STRIDED_IMM);
1645 case AArch64::LDNT1B_2Z_IMM_PSEUDO:
1646 return expandMultiVecPseudo(
1647 MBB,
MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
1648 AArch64::LDNT1B_2Z_IMM, AArch64::LDNT1B_2Z_STRIDED_IMM);
1649 case AArch64::LDNT1H_2Z_IMM_PSEUDO:
1650 return expandMultiVecPseudo(
1651 MBB,
MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
1652 AArch64::LDNT1H_2Z_IMM, AArch64::LDNT1H_2Z_STRIDED_IMM);
1653 case AArch64::LDNT1W_2Z_IMM_PSEUDO:
1654 return expandMultiVecPseudo(
1655 MBB,
MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
1656 AArch64::LDNT1W_2Z_IMM, AArch64::LDNT1W_2Z_STRIDED_IMM);
1657 case AArch64::LDNT1D_2Z_IMM_PSEUDO:
1658 return expandMultiVecPseudo(
1659 MBB,
MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
1660 AArch64::LDNT1D_2Z_IMM, AArch64::LDNT1D_2Z_STRIDED_IMM);
1661 case AArch64::LD1B_2Z_PSEUDO:
1662 return expandMultiVecPseudo(
MBB,
MBBI, AArch64::ZPR2RegClass,
1663 AArch64::ZPR2StridedRegClass, AArch64::LD1B_2Z,
1664 AArch64::LD1B_2Z_STRIDED);
1665 case AArch64::LD1H_2Z_PSEUDO:
1666 return expandMultiVecPseudo(
MBB,
MBBI, AArch64::ZPR2RegClass,
1667 AArch64::ZPR2StridedRegClass, AArch64::LD1H_2Z,
1668 AArch64::LD1H_2Z_STRIDED);
1669 case AArch64::LD1W_2Z_PSEUDO:
1670 return expandMultiVecPseudo(
MBB,
MBBI, AArch64::ZPR2RegClass,
1671 AArch64::ZPR2StridedRegClass, AArch64::LD1W_2Z,
1672 AArch64::LD1W_2Z_STRIDED);
1673 case AArch64::LD1D_2Z_PSEUDO:
1674 return expandMultiVecPseudo(
MBB,
MBBI, AArch64::ZPR2RegClass,
1675 AArch64::ZPR2StridedRegClass, AArch64::LD1D_2Z,
1676 AArch64::LD1D_2Z_STRIDED);
1677 case AArch64::LDNT1B_2Z_PSEUDO:
1678 return expandMultiVecPseudo(
1679 MBB,
MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
1680 AArch64::LDNT1B_2Z, AArch64::LDNT1B_2Z_STRIDED);
1681 case AArch64::LDNT1H_2Z_PSEUDO:
1682 return expandMultiVecPseudo(
1683 MBB,
MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
1684 AArch64::LDNT1H_2Z, AArch64::LDNT1H_2Z_STRIDED);
1685 case AArch64::LDNT1W_2Z_PSEUDO:
1686 return expandMultiVecPseudo(
1687 MBB,
MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
1688 AArch64::LDNT1W_2Z, AArch64::LDNT1W_2Z_STRIDED);
1689 case AArch64::LDNT1D_2Z_PSEUDO:
1690 return expandMultiVecPseudo(
1691 MBB,
MBBI, AArch64::ZPR2RegClass, AArch64::ZPR2StridedRegClass,
1692 AArch64::LDNT1D_2Z, AArch64::LDNT1D_2Z_STRIDED);
1693 case AArch64::LD1B_4Z_IMM_PSEUDO:
1694 return expandMultiVecPseudo(
1695 MBB,
MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
1696 AArch64::LD1B_4Z_IMM, AArch64::LD1B_4Z_STRIDED_IMM);
1697 case AArch64::LD1H_4Z_IMM_PSEUDO:
1698 return expandMultiVecPseudo(
1699 MBB,
MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
1700 AArch64::LD1H_4Z_IMM, AArch64::LD1H_4Z_STRIDED_IMM);
1701 case AArch64::LD1W_4Z_IMM_PSEUDO:
1702 return expandMultiVecPseudo(
1703 MBB,
MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
1704 AArch64::LD1W_4Z_IMM, AArch64::LD1W_4Z_STRIDED_IMM);
1705 case AArch64::LD1D_4Z_IMM_PSEUDO:
1706 return expandMultiVecPseudo(
1707 MBB,
MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
1708 AArch64::LD1D_4Z_IMM, AArch64::LD1D_4Z_STRIDED_IMM);
1709 case AArch64::LDNT1B_4Z_IMM_PSEUDO:
1710 return expandMultiVecPseudo(
1711 MBB,
MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
1712 AArch64::LDNT1B_4Z_IMM, AArch64::LDNT1B_4Z_STRIDED_IMM);
1713 case AArch64::LDNT1H_4Z_IMM_PSEUDO:
1714 return expandMultiVecPseudo(
1715 MBB,
MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
1716 AArch64::LDNT1H_4Z_IMM, AArch64::LDNT1H_4Z_STRIDED_IMM);
1717 case AArch64::LDNT1W_4Z_IMM_PSEUDO:
1718 return expandMultiVecPseudo(
1719 MBB,
MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
1720 AArch64::LDNT1W_4Z_IMM, AArch64::LDNT1W_4Z_STRIDED_IMM);
1721 case AArch64::LDNT1D_4Z_IMM_PSEUDO:
1722 return expandMultiVecPseudo(
1723 MBB,
MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
1724 AArch64::LDNT1D_4Z_IMM, AArch64::LDNT1D_4Z_STRIDED_IMM);
1725 case AArch64::LD1B_4Z_PSEUDO:
1726 return expandMultiVecPseudo(
MBB,
MBBI, AArch64::ZPR4RegClass,
1727 AArch64::ZPR4StridedRegClass, AArch64::LD1B_4Z,
1728 AArch64::LD1B_4Z_STRIDED);
1729 case AArch64::LD1H_4Z_PSEUDO:
1730 return expandMultiVecPseudo(
MBB,
MBBI, AArch64::ZPR4RegClass,
1731 AArch64::ZPR4StridedRegClass, AArch64::LD1H_4Z,
1732 AArch64::LD1H_4Z_STRIDED);
1733 case AArch64::LD1W_4Z_PSEUDO:
1734 return expandMultiVecPseudo(
MBB,
MBBI, AArch64::ZPR4RegClass,
1735 AArch64::ZPR4StridedRegClass, AArch64::LD1W_4Z,
1736 AArch64::LD1W_4Z_STRIDED);
1737 case AArch64::LD1D_4Z_PSEUDO:
1738 return expandMultiVecPseudo(
MBB,
MBBI, AArch64::ZPR4RegClass,
1739 AArch64::ZPR4StridedRegClass, AArch64::LD1D_4Z,
1740 AArch64::LD1D_4Z_STRIDED);
1741 case AArch64::LDNT1B_4Z_PSEUDO:
1742 return expandMultiVecPseudo(
1743 MBB,
MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
1744 AArch64::LDNT1B_4Z, AArch64::LDNT1B_4Z_STRIDED);
1745 case AArch64::LDNT1H_4Z_PSEUDO:
1746 return expandMultiVecPseudo(
1747 MBB,
MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
1748 AArch64::LDNT1H_4Z, AArch64::LDNT1H_4Z_STRIDED);
1749 case AArch64::LDNT1W_4Z_PSEUDO:
1750 return expandMultiVecPseudo(
1751 MBB,
MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
1752 AArch64::LDNT1W_4Z, AArch64::LDNT1W_4Z_STRIDED);
1753 case AArch64::LDNT1D_4Z_PSEUDO:
1754 return expandMultiVecPseudo(
1755 MBB,
MBBI, AArch64::ZPR4RegClass, AArch64::ZPR4StridedRegClass,
1756 AArch64::LDNT1D_4Z, AArch64::LDNT1D_4Z_STRIDED);
1757 case AArch64::FORM_TRANSPOSED_REG_TUPLE_X2_PSEUDO:
1758 return expandFormTuplePseudo(
MBB,
MBBI, NextMBBI, 2);
1759 case AArch64::FORM_TRANSPOSED_REG_TUPLE_X4_PSEUDO:
1760 return expandFormTuplePseudo(
MBB,
MBBI, NextMBBI, 4);
1784 for (
auto &
MBB : MF)
1791 return new AArch64ExpandPseudo();
#define AARCH64_EXPAND_PSEUDO_NAME
MachineInstrBuilder & UseMI
static MachineInstr * createCallWithOps(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const AArch64InstrInfo *TII, unsigned Opcode, ArrayRef< MachineOperand > ExplicitOps, unsigned RegMaskStartIdx)
static MachineInstr * createCall(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const AArch64InstrInfo *TII, MachineOperand &CallTarget, unsigned RegMaskStartIdx)
MachineInstrBuilder MachineInstrBuilder & DefMI
SmallVector< AArch64_IMM::ImmInsnModel, 4 > Insn
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
const HexagonInstrInfo * TII
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
unsigned const TargetRegisterInfo * TRI
#define INITIALIZE_PASS(passName, arg, name, cfg, analysis)
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
AArch64FunctionInfo - This class is derived from MachineFunctionInfo and contains private AArch64-spe...
unsigned getTaggedBasePointerOffset() const
bool isTargetILP32() const
bool isTargetMachO() const
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
The address of a basic block.
FunctionPass class - This class is used to implement most global optimizations.
A set of physical registers with utility functions to track liveness when walking backward/forward th...
Describe properties that are true of each instruction in the target description file.
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
void addSuccessor(MachineBasicBlock *Succ, BranchProbability Prob=BranchProbability::getUnknown())
Add Succ as a successor of this MachineBasicBlock.
MachineBasicBlock * splitAt(MachineInstr &SplitInst, bool UpdateLiveIns=true, LiveIntervals *LIS=nullptr)
Split a basic block into 2 pieces at SplitPoint.
void eraseFromParent()
This method unlinks 'this' from the containing function and deletes it.
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
iterator_range< succ_iterator > successors()
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of pa...
virtual bool runOnMachineFunction(MachineFunction &MF)=0
runOnMachineFunction - This method must be overloaded to perform the desired machine code transformat...
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineInstr * CreateMachineInstr(const MCInstrDesc &MCID, DebugLoc DL, bool NoImplicit=false)
CreateMachineInstr - Allocate a new MachineInstr.
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
void moveCallSiteInfo(const MachineInstr *Old, const MachineInstr *New)
Move the call site info from Old to \New call site info.
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
const MachineInstrBuilder & addExternalSymbol(const char *FnName, unsigned TargetFlags=0) const
const MachineInstrBuilder & setMIFlag(MachineInstr::MIFlag Flag) const
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addGlobalAddress(const GlobalValue *GV, int64_t Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & cloneMemRefs(const MachineInstr &OtherMI) const
const MachineInstrBuilder & addUse(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register use operand.
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
MachineInstr * getInstr() const
If conversion operators fail, use this method to get the MachineInstr explicitly.
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
void setDebugInstrNum(unsigned Num)
Set instruction number of this MachineInstr.
MachineOperand class - Representation of each machine instruction operand.
const GlobalValue * getGlobal() const
bool isReg() const
isReg - Tests if this is a MO_Register operand.
bool isCPI() const
isCPI - Tests if this is a MO_ConstantPoolIndex operand.
bool isSymbol() const
isSymbol - Tests if this is a MO_ExternalSymbol operand.
unsigned getTargetFlags() const
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
const char * getSymbolName() const
Register getReg() const
getReg - Returns the register number.
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
int64_t getOffset() const
Return the offset from the symbol in this operand.
static PassRegistry * getPassRegistry()
getPassRegistry - Access the global registry object, which is automatically initialized at application startup.
virtual StringRef getPassName() const
getPassName - Return a nice clean name for a pass.
Wrapper class representing virtual and physical registers.
MCRegister asMCReg() const
Utility to check-convert this value to a MCRegister.
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
StackOffset holds a fixed and a scalable offset in bytes.
StringRef - Represent a constant reference to a string, i.e.
CodeModel::Model getCodeModel() const
Returns the code model.
ArrayRef< MCPhysReg > getRegisters() const
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDesc objects that represent all of the machine registers that the target has.
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
virtual const TargetInstrInfo * getInstrInfo() const
self_iterator getIterator()
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ MO_NC
MO_NC - Indicates whether the linker is expected to check the symbol reference for overflow.
@ MO_PAGEOFF
MO_PAGEOFF - A symbol operand with this flag represents the offset of that symbol within a 4K page.
@ MO_PREL
MO_PREL - Indicates that the bits of the symbol operand represented by MO_G0 etc are PC relative.
@ MO_PAGE
MO_PAGE - A symbol operand with this flag represents the pc-relative offset of the 4K page containing the symbol.
@ MO_TAGGED
MO_TAGGED - With MO_PAGE, indicates that the page includes a memory tag in bits 56-63.
@ MO_G3
MO_G3 - A symbol operand with this flag (granule 3) represents the high 16-bits of a 64-bit address, used in a MOVZ or MOVK instruction.
static unsigned getArithExtendImm(AArch64_AM::ShiftExtendType ET, unsigned Imm)
getArithExtendImm - Encode the extend type and shift amount for an arithmetic instruction: imm: 3-bit...
static unsigned getShifterImm(AArch64_AM::ShiftExtendType ST, unsigned Imm)
getShifterImm - Encode the shift type and amount: imm: 6-bit shift amount shifter: 000 ==> lsl 001 ==> lsr 010 ==> asr 011 ==> ror.
void expandMOVImm(uint64_t Imm, unsigned BitSize, SmallVectorImpl< ImmInsnModel > &Insn)
Expand a MOVi32imm or MOVi64imm pseudo instruction to one or more real move-immediate instructions to synthesize the immediate.
int getSVERevInstr(uint16_t Opcode)
@ DestructiveInstTypeMask
@ DestructiveUnaryPassthru
@ DestructiveTernaryCommWithRev
@ DestructiveBinaryCommWithRev
int getSVEPseudoMap(uint16_t Opcode)
int getSVENonRevInstr(uint16_t Opcode)
unsigned ID
LLVM IR allows arbitrary numbers to be used as calling convention identifiers.
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Renamable
Register that may be renamed.
@ Define
Register definition.
@ Kill
The last use of a register.
@ Undef
Value of the register doesn't matter.
This is an optimization pass for GlobalISel generic memory operations.
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
void finalizeBundle(MachineBasicBlock &MBB, MachineBasicBlock::instr_iterator FirstMI, MachineBasicBlock::instr_iterator LastMI)
finalizeBundle - Finalize a machine instruction bundle which includes a sequence of instructions starting at FirstMI and ending at LastMI.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
APFloat abs(APFloat X)
Returns the absolute value of the argument.
unsigned getDeadRegState(bool B)
void initializeAArch64ExpandPseudoPass(PassRegistry &)
FunctionPass * createAArch64ExpandPseudoPass()
Returns an instance of the pseudo instruction expansion pass.
void emitFrameOffset(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, unsigned DestReg, unsigned SrcReg, StackOffset Offset, const TargetInstrInfo *TII, MachineInstr::MIFlag=MachineInstr::NoFlags, bool SetNZCV=false, bool NeedsWinCFI=false, bool *HasWinCFI=nullptr, bool EmitCFAOffset=false, StackOffset InitialOffset={}, unsigned FrameReg=AArch64::SP)
emitFrameOffset - Emit instructions as needed to set DestReg to SrcReg plus Offset.
void report_fatal_error(Error Err, bool gen_crash_diag=true)
Report a serious error, calling any installed error handler.
unsigned getKillRegState(bool B)
unsigned getRenamableRegState(bool B)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
void computeAndAddLiveIns(LivePhysRegs &LiveRegs, MachineBasicBlock &MBB)
Convenience function combining computeLiveIns() and addLiveIns().
This struct is a compact representation of a valid (non-zero power of two) alignment.
Description of the encoding of one expression Op.