#define DEBUG_TYPE "ppc-instr-info"

#define GET_INSTRMAP_INFO
#define GET_INSTRINFO_CTOR_DTOR
#include "PPCGenInstrInfo.inc"
53 "Number of spillvsrrc spilled to stack as vec");
55 "Number of spillvsrrc spilled to stack as gpr");
56STATISTIC(NumGPRtoVSRSpill,
"Number of gpr spills to spillvsrrc");
58 "Number of ISELs that depend on comparison of constants converted");
60 "Number of compare-immediate instructions fed by constants");
62 "Number of record-form rotates converted to record-form andi");
    cl::desc("Disable analysis for CTR loops"));

    cl::desc(
        "Causes the backend to crash instead of generating a nop VSX copy"),

    cl::desc("Use the old (incorrect) instruction latency calculation"));

    cl::desc("register pressure factor for the transformations."));

    cl::desc("enable register pressure reduce in machine combiner pass."));
void PPCInstrInfo::anchor() {}

                        STI.isPPC64() ? PPC::BLR8 : PPC::BLR),
      Subtarget(STI), RI(STI.getTargetMachine()) {}
      static_cast<const PPCSubtarget *>(STI)->getCPUDirective();

      static_cast<const PPCSubtarget *>(STI)->getInstrItineraryData();
                                          unsigned *PredCost) const {
    return PPCGenInstrInfo::getInstrLatency(ItinData, MI, PredCost);

  unsigned DefClass = MI.getDesc().getSchedClass();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
  std::optional<unsigned> Latency = PPCGenInstrInfo::getOperandLatency(

  if (!DefMI.getParent())

  if (Reg.isVirtual()) {
    IsRegCR = MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
              MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRBITRCRegClass);
    IsRegCR = PPC::CRRCRegClass.contains(Reg) ||
              PPC::CRBITRCRegClass.contains(Reg);

  if (UseMI.isBranch() && IsRegCR) {
#define InfoArrayIdxFMAInst 0
#define InfoArrayIdxFAddInst 1
#define InfoArrayIdxFMULInst 2
#define InfoArrayIdxAddOpIdx 3
#define InfoArrayIdxMULOpIdx 4
#define InfoArrayIdxFSubInst 5
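// Each row of the FMA opcode-info table below is indexed by the InfoArrayIdx*
// values above: the FMA opcode, its matching add and multiply opcodes, the
// operand index of the addend, the operand index of the first multiplicand,
// and the matching subtract opcode.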
    {PPC::XSMADDADP, PPC::XSADDDP, PPC::XSMULDP, 1, 2, PPC::XSSUBDP},
    {PPC::XSMADDASP, PPC::XSADDSP, PPC::XSMULSP, 1, 2, PPC::XSSUBSP},
    {PPC::XVMADDADP, PPC::XVADDDP, PPC::XVMULDP, 1, 2, PPC::XVSUBDP},
    {PPC::XVMADDASP, PPC::XVADDSP, PPC::XVMULSP, 1, 2, PPC::XVSUBSP},
    {PPC::FMADD, PPC::FADD, PPC::FMUL, 3, 1, PPC::FSUB},
    {PPC::FMADDS, PPC::FADDS, PPC::FMULS, 3, 1, PPC::FSUBS}};
int16_t PPCInstrInfo::getFMAOpIdxInfo(unsigned Opcode) const {
                                           bool DoRegPressureReduce) const {

  auto IsAllOpsVirtualReg = [](const MachineInstr &Instr) {
    for (const auto &MO : Instr.explicit_operands())
      if (!(MO.isReg() && MO.getReg().isVirtual()))

  auto IsReassociableAddOrSub = [&](const MachineInstr &Instr,
    if (Instr.getOpcode() !=
    if (!IsAllOpsVirtualReg(Instr))
        !MRI->hasOneNonDBGUse(Instr.getOperand(0).getReg()))

  auto IsReassociableFMA = [&](const MachineInstr &Instr, int16_t &AddOpIdx,
                               int16_t &MulOpIdx, bool IsLeaf) {
    int16_t Idx = getFMAOpIdxInfo(Instr.getOpcode());
    if (!IsAllOpsVirtualReg(Instr))
  int16_t AddOpIdx = -1;
  int16_t MulOpIdx = -1;

  bool IsUsedOnceL = false;
  bool IsUsedOnceR = false;
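  // IsRPReductionCandidate (below) decides whether this FMA chain qualifies
  // for the register-pressure-reducing reassociation: the root must be a
  // scalar XSMADDASP/XSMADDADP whose multiplicand operands can be traced
  // through single-use copy chains back to their defining instructions.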
  auto IsRPReductionCandidate = [&]() {
    if (Opcode != PPC::XSMADDASP && Opcode != PPC::XSMADDADP)

    if (IsReassociableFMA(Root, AddOpIdx, MulOpIdx, true)) {
      assert((MulOpIdx >= 0) && "mul operand index not right!");
      Register MULRegL = TRI->lookThruSingleUseCopyChain(
      Register MULRegR = TRI->lookThruSingleUseCopyChain(
      if (!MULRegL && !MULRegR)

      if (MULRegL && !MULRegR) {
      } else if (!MULRegL && MULRegR) {

      MULInstrL = MRI->getVRegDef(MULRegL);
      MULInstrR = MRI->getVRegDef(MULRegR);
  if (DoRegPressureReduce && IsRPReductionCandidate()) {
    assert((MULInstrL && MULInstrR) &&
           "wrong register pressure reduction candidate!");

  if (!IsReassociableFMA(Root, AddOpIdx, MulOpIdx, false))

  assert((AddOpIdx >= 0) && "add operand index not right!");

  if (!IsReassociableFMA(*Prev, AddOpIdx, MulOpIdx, false))

  assert((AddOpIdx >= 0) && "add operand index not right!");

  if (IsReassociableFMA(*Leaf, AddOpIdx, MulOpIdx, true)) {

  assert(!InsInstrs.empty() &&
         "Instructions set to be inserted is empty!");
  assert(isa<llvm::ConstantFP>(C) && "not a valid constant!");

  APFloat F1((dyn_cast<ConstantFP>(C))->getValueAPF());

  Constant *NegC = ConstantFP::get(dyn_cast<ConstantFP>(C)->getContext(), F1);

  for (auto *Inst : InsInstrs) {
      assert(Operand.isReg() && "Invalid instruction in InsInstrs!");
      if (Operand.getReg() == PPC::ZERO8) {
        Placeholder = &Operand;

  assert(Placeholder && "Placeholder does not exist!");

      generateLoadForNewConst(ConstPoolIdx, &Root, C->getType(), InsInstrs);

  Placeholder->setReg(LoadNewConst);
  if (!(Subtarget.isPPC64() && Subtarget.hasP9Vector() &&

  auto GetMBBPressure =

      if (MI.isDebugValue() || MI.isDebugLabel())

      RPTracker.recede(RegOpers);

  unsigned VSSRCLimit = TRI->getRegPressureSetLimit(

  return GetMBBPressure(MBB)[PPC::RegisterPressureSets::VSSRC] >

  if (!I->hasOneMemOperand())

  return Op->isLoad() && Op->getPseudoValue() &&
Register PPCInstrInfo::generateLoadForNewConst(

         "Target not supported!\n");

  Register VReg1 = MRI->createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass);

  BuildMI(*MF, MI->getDebugLoc(), get(PPC::ADDIStocHA8), VReg1)

         "Only float and double are supported!");

    LoadOpcode = PPC::DFLOADf32;

    LoadOpcode = PPC::DFLOADf64;

  assert(I->mayLoad() && "Should be a load instruction.\n");
  for (auto MO : I->uses()) {

    if (Reg == 0 || !Reg.isVirtual())

  return (MCP->getConstants())[MO2.getIndex()].Val.ConstVal;
                                            bool DoRegPressureReduce) const {

                                                      DoRegPressureReduce);

    reassociateFMA(Root, Pattern, InsInstrs, DelInstrs, InstrIdxForVirtReg);

                                             DelInstrs, InstrIdxForVirtReg);
void PPCInstrInfo::reassociateFMA(

  MRI.constrainRegClass(RegC, RC);

  int16_t Idx = getFMAOpIdxInfo(FmaOp);
  assert(Idx >= 0 && "Root must be a FMA instruction");

  bool IsILPReassociate =

    Leaf = MRI.getVRegDef(MULReg);

    Leaf = MRI.getVRegDef(MULReg);

  if (IsILPReassociate)

    MRI.constrainRegClass(Reg, RC);
    KillFlag = Operand.isKill();

                             bool &MulOp1KillFlag, bool &MulOp2KillFlag,
                             bool &AddOpKillFlag) {
    GetOperandInfo(Instr.getOperand(FirstMulOpIdx), MulOp1, MulOp1KillFlag);
    GetOperandInfo(Instr.getOperand(FirstMulOpIdx + 1), MulOp2, MulOp2KillFlag);
    GetOperandInfo(Instr.getOperand(AddOpIdx), AddOp, AddOpKillFlag);

  Register RegM11, RegM12, RegX, RegY, RegM21, RegM22, RegM31, RegM32, RegA11,

  bool KillX = false, KillY = false, KillM11 = false, KillM12 = false,
       KillM21 = false, KillM22 = false, KillM31 = false, KillM32 = false,
       KillA11 = false, KillA21 = false, KillB = false;

  GetFMAInstrInfo(Root, RegM31, RegM32, RegB, KillM31, KillM32, KillB);

  if (IsILPReassociate)
    GetFMAInstrInfo(*Prev, RegM21, RegM22, RegA21, KillM21, KillM22, KillA21);

    GetFMAInstrInfo(*Leaf, RegM11, RegM12, RegA11, KillM11, KillM12, KillA11);
    GetOperandInfo(Leaf->getOperand(AddOpIdx), RegX, KillX);

    GetOperandInfo(Leaf->getOperand(1), RegX, KillX);
    GetOperandInfo(Leaf->getOperand(2), RegY, KillY);

    GetOperandInfo(Leaf->getOperand(1), RegX, KillX);
    GetOperandInfo(Leaf->getOperand(2), RegY, KillY);

  InstrIdxForVirtReg.insert(std::make_pair(NewVRA, 0));

  if (IsILPReassociate) {
    NewVRB = MRI.createVirtualRegister(RC);
    InstrIdxForVirtReg.insert(std::make_pair(NewVRB, 1));

    NewVRD = MRI.createVirtualRegister(RC);
    InstrIdxForVirtReg.insert(std::make_pair(NewVRD, 2));
                                Register RegMul2, bool KillRegMul2) {
    MI->getOperand(AddOpIdx).setReg(RegAdd);
    MI->getOperand(AddOpIdx).setIsKill(KillAdd);
    MI->getOperand(FirstMulOpIdx).setReg(RegMul1);
    MI->getOperand(FirstMulOpIdx).setIsKill(KillRegMul1);
    MI->getOperand(FirstMulOpIdx + 1).setReg(RegMul2);
    MI->getOperand(FirstMulOpIdx + 1).setIsKill(KillRegMul2);
    AdjustOperandOrder(MINewB, RegX, KillX, RegM21, KillM21, RegM22, KillM22);
    AdjustOperandOrder(MINewA, RegY, KillY, RegM31, KillM31, RegM32, KillM32);

    assert(NewVRD && "new FMA register not created!");

    AdjustOperandOrder(MINewB, RegX, KillX, RegM21, KillM21, RegM22, KillM22);
    AdjustOperandOrder(MINewD, NewVRA, true, RegM31, KillM31, RegM32,

  bool KillVarReg = false;

    KillVarReg = KillM31;

    KillVarReg = KillM32;

  if (!IsILPReassociate) {

         "Insertion instructions set should not be empty!");
  if (IsILPReassociate)

                                         unsigned &SubIdx) const {
  switch (MI.getOpcode()) {
  default: return false;

  case PPC::EXTSW_32_64:
    SrcReg = MI.getOperand(1).getReg();
    DstReg = MI.getOperand(0).getReg();
    SubIdx = PPC::sub_32;
1062 int &FrameIndex)
const {
1066 if (
MI.getOperand(1).isImm() && !
MI.getOperand(1).getImm() &&
1067 MI.getOperand(2).isFI()) {
1068 FrameIndex =
MI.getOperand(2).getIndex();
1069 return MI.getOperand(0).getReg();
  switch (MI.getOpcode()) {

  case PPC::ADDIStocHA:
  case PPC::ADDIStocHA8:
  case PPC::ADDItocL8:
  case PPC::LOAD_STACK_GUARD:
  case PPC::PPCLdFixedAddr:
  case PPC::XXLXORspz:
  case PPC::XXLXORdpz:
  case PPC::XXLEQVOnes:
  case PPC::XXSPLTI32DX:
  case PPC::XXSPLTIDP:
  case PPC::V_SETALLONESB:
  case PPC::V_SETALLONESH:
  case PPC::V_SETALLONES:
  case PPC::XXSETACCZ:
  case PPC::XXSETACCZW:
1118 int &FrameIndex)
const {
1120 if (
MI.getOperand(1).isImm() && !
MI.getOperand(1).getImm() &&
1121 MI.getOperand(2).isFI()) {
1122 FrameIndex =
MI.getOperand(2).getIndex();
1123 return MI.getOperand(0).getReg();
                                                   unsigned OpIdx2) const {

  if (MI.getOpcode() != PPC::RLWIMI && MI.getOpcode() != PPC::RLWIMI_rec)

  if (MI.getOperand(3).getImm() != 0)

  assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
         "Only the operands 1 and 2 can be swapped in RLWIMI/RLWIMI_rec.");

  unsigned SubReg1 = MI.getOperand(1).getSubReg();
  unsigned SubReg2 = MI.getOperand(2).getSubReg();
  bool Reg1IsKill = MI.getOperand(1).isKill();
  bool Reg2IsKill = MI.getOperand(2).isKill();
  bool ChangeReg0 = false;

         "Expecting a two-address instruction!");
  assert(MI.getOperand(0).getSubReg() == SubReg1 && "Tied subreg mismatch");

  unsigned MB = MI.getOperand(4).getImm();
  unsigned ME = MI.getOperand(5).getImm();

  if (MB == 0 && ME == 31)

    Register Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg();
    bool Reg0IsDead = MI.getOperand(0).isDead();
    return BuildMI(MF, MI.getDebugLoc(), MI.getDesc())

    MI.getOperand(0).setReg(Reg2);
    MI.getOperand(0).setSubReg(SubReg2);

  MI.getOperand(2).setReg(Reg1);
  MI.getOperand(1).setReg(Reg2);
  MI.getOperand(2).setSubReg(SubReg1);
  MI.getOperand(1).setSubReg(SubReg2);
  MI.getOperand(2).setIsKill(Reg1IsKill);
  MI.getOperand(1).setIsKill(Reg2IsKill);

  MI.getOperand(4).setImm((ME + 1) & 31);
  MI.getOperand(5).setImm((MB - 1) & 31);
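  // Swapping the two rotate-and-insert sources inverts the mask: the new mask
  // runs from ME + 1 to MB - 1 (mod 32), which is why MB and ME are rewritten
  // above rather than simply exchanged.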
1214 unsigned &SrcOpIdx1,
1215 unsigned &SrcOpIdx2)
const {
1226 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
  default: Opcode = PPC::NOP; break;
                                 bool AllowModify) const {
  bool isPPC64 = Subtarget.isPPC64();

  if (!isUnpredicatedTerminator(*I))

  if (I->getOpcode() == PPC::B &&

      I->eraseFromParent();

  if (I == MBB.end() || !isUnpredicatedTerminator(*I))

  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {

  } else if (LastInst.getOpcode() == PPC::BCC) {

  } else if (LastInst.getOpcode() == PPC::BC) {

  } else if (LastInst.getOpcode() == PPC::BCn) {

  } else if (LastInst.getOpcode() == PPC::BDNZ8 ||

  } else if (LastInst.getOpcode() == PPC::BDZ8 ||

  if (I != MBB.begin() && isUnpredicatedTerminator(*--I))

  if (SecondLastInst.getOpcode() == PPC::BCC &&

  } else if (SecondLastInst.getOpcode() == PPC::BC &&

  } else if (SecondLastInst.getOpcode() == PPC::BCn &&

  } else if ((SecondLastInst.getOpcode() == PPC::BDNZ8 ||
              SecondLastInst.getOpcode() == PPC::BDNZ) &&

  } else if ((SecondLastInst.getOpcode() == PPC::BDZ8 ||
              SecondLastInst.getOpcode() == PPC::BDZ) &&

    I->eraseFromParent();
                                    int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC &&
      I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
      I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
      I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)

  I->eraseFromParent();

  if (I->getOpcode() != PPC::BCC &&
      I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
      I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
      I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)

  I->eraseFromParent();
                                    int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
         "PPC branch conditions have two components!");
  assert(!BytesAdded && "code size not handled");

  bool isPPC64 = Subtarget.isPPC64();

                          (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                          (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);

                        (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                        (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
                                   Register FalseReg, int &CondCycles,
                                   int &TrueCycles, int &FalseCycles) const {
  if (!Subtarget.hasISEL())

  if (Cond.size() != 2)

      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));

  if (!PPC::GPRCRegClass.hasSubClassEq(RC) &&
      !PPC::GPRC_NOR0RegClass.hasSubClassEq(RC) &&
      !PPC::G8RCRegClass.hasSubClassEq(RC) &&
      !PPC::G8RC_NOX0RegClass.hasSubClassEq(RC))
1570 "PPC branch conditions have two components!");
1575 RI.getCommonSubClass(
MRI.getRegClass(TrueReg),
MRI.getRegClass(FalseReg));
1576 assert(RC &&
"TrueReg and FalseReg must have overlapping register classes");
1578 bool Is64Bit = PPC::G8RCRegClass.hasSubClassEq(RC) ||
1579 PPC::G8RC_NOX0RegClass.hasSubClassEq(RC);
1581 PPC::GPRCRegClass.hasSubClassEq(RC) ||
1582 PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) &&
1583 "isel is for regular integer GPRs only");
1585 unsigned OpCode = Is64Bit ? PPC::ISEL8 : PPC::ISEL;
1588 unsigned SubIdx = 0;
1589 bool SwapOps =
false;
1590 switch (SelectPred) {
1594 SubIdx = PPC::sub_eq; SwapOps =
false;
break;
1598 SubIdx = PPC::sub_eq; SwapOps =
true;
break;
1602 SubIdx = PPC::sub_lt; SwapOps =
false;
break;
1606 SubIdx = PPC::sub_lt; SwapOps =
true;
break;
1610 SubIdx = PPC::sub_gt; SwapOps =
false;
break;
1614 SubIdx = PPC::sub_gt; SwapOps =
true;
break;
1618 SubIdx = PPC::sub_un; SwapOps =
false;
break;
1622 SubIdx = PPC::sub_un; SwapOps =
true;
break;
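  // Each condition code maps to a single CR-field subregister (EQ, LT, GT or
  // UN); the inverted predicates reuse the same bit and instead swap the
  // true/false operands of the isel built below.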
  Register FirstReg = SwapOps ? FalseReg : TrueReg,
           SecondReg = SwapOps ? TrueReg : FalseReg;

  if (MRI.getRegClass(FirstReg)->contains(PPC::R0) ||
      MRI.getRegClass(FirstReg)->contains(PPC::X0)) {
        MRI.getRegClass(FirstReg)->contains(PPC::X0) ?
            &PPC::G8RC_NOX0RegClass : &PPC::GPRC_NOR0RegClass;
    FirstReg = MRI.createVirtualRegister(FirstRC);
  if (CRBit == PPC::CR0LT || CRBit == PPC::CR1LT ||
      CRBit == PPC::CR2LT || CRBit == PPC::CR3LT ||
      CRBit == PPC::CR4LT || CRBit == PPC::CR5LT ||
      CRBit == PPC::CR6LT || CRBit == PPC::CR7LT)

  if (CRBit == PPC::CR0GT || CRBit == PPC::CR1GT ||
      CRBit == PPC::CR2GT || CRBit == PPC::CR3GT ||
      CRBit == PPC::CR4GT || CRBit == PPC::CR5GT ||
      CRBit == PPC::CR6GT || CRBit == PPC::CR7GT)

  if (CRBit == PPC::CR0EQ || CRBit == PPC::CR1EQ ||
      CRBit == PPC::CR2EQ || CRBit == PPC::CR3EQ ||
      CRBit == PPC::CR4EQ || CRBit == PPC::CR5EQ ||
      CRBit == PPC::CR6EQ || CRBit == PPC::CR7EQ)

  if (CRBit == PPC::CR0UN || CRBit == PPC::CR1UN ||
      CRBit == PPC::CR2UN || CRBit == PPC::CR3UN ||
      CRBit == PPC::CR4UN || CRBit == PPC::CR5UN ||
      CRBit == PPC::CR6UN || CRBit == PPC::CR7UN)

  assert(Ret != 4 && "Invalid CR bit register");
                               bool RenamableDest, bool RenamableSrc) const {

  if (PPC::F8RCRegClass.contains(DestReg) &&
      PPC::VSRCRegClass.contains(SrcReg)) {
        TRI->getMatchingSuperReg(DestReg, PPC::sub_64, &PPC::VSRCRegClass);
  } else if (PPC::F8RCRegClass.contains(SrcReg) &&
             PPC::VSRCRegClass.contains(DestReg)) {
        TRI->getMatchingSuperReg(SrcReg, PPC::sub_64, &PPC::VSRCRegClass);

  if (PPC::CRBITRCRegClass.contains(SrcReg) &&
      PPC::GPRCRegClass.contains(DestReg)) {
  } else if (PPC::CRRCRegClass.contains(SrcReg) &&
             (PPC::G8RCRegClass.contains(DestReg) ||
              PPC::GPRCRegClass.contains(DestReg))) {
    bool Is64Bit = PPC::G8RCRegClass.contains(DestReg);
    unsigned MvCode = Is64Bit ? PPC::MFOCRF8 : PPC::MFOCRF;
    unsigned ShCode = Is64Bit ? PPC::RLWINM8 : PPC::RLWINM;
    unsigned CRNum = TRI->getEncodingValue(SrcReg);
  } else if (PPC::G8RCRegClass.contains(SrcReg) &&
             PPC::VSFRCRegClass.contains(DestReg)) {
    assert(Subtarget.hasDirectMove() &&
           "Subtarget doesn't support directmove, don't know how to copy.");
  } else if (PPC::VSFRCRegClass.contains(SrcReg) &&
             PPC::G8RCRegClass.contains(DestReg)) {
    assert(Subtarget.hasDirectMove() &&
           "Subtarget doesn't support directmove, don't know how to copy.");
  } else if (PPC::SPERCRegClass.contains(SrcReg) &&
             PPC::GPRCRegClass.contains(DestReg)) {
  } else if (PPC::GPRCRegClass.contains(SrcReg) &&
             PPC::SPERCRegClass.contains(DestReg)) {

  if (PPC::GPRCRegClass.contains(DestReg, SrcReg))
  else if (PPC::G8RCRegClass.contains(DestReg, SrcReg))
  else if (PPC::F4RCRegClass.contains(DestReg, SrcReg))
  else if (PPC::CRRCRegClass.contains(DestReg, SrcReg))
  else if (PPC::VRRCRegClass.contains(DestReg, SrcReg))
  else if (PPC::VSRCRegClass.contains(DestReg, SrcReg))
  else if (PPC::VSFRCRegClass.contains(DestReg, SrcReg) ||
           PPC::VSSRCRegClass.contains(DestReg, SrcReg))
    Opc = (Subtarget.hasP9Vector()) ? PPC::XSCPSGNDP : PPC::XXLORf;
  else if (Subtarget.pairedVectorMemops() &&
           PPC::VSRpRCRegClass.contains(DestReg, SrcReg)) {
    if (SrcReg > PPC::VSRp15)
      SrcReg = PPC::V0 + (SrcReg - PPC::VSRp16) * 2;
      SrcReg = PPC::VSL0 + (SrcReg - PPC::VSRp0) * 2;
    if (DestReg > PPC::VSRp15)
      DestReg = PPC::V0 + (DestReg - PPC::VSRp16) * 2;
      DestReg = PPC::VSL0 + (DestReg - PPC::VSRp0) * 2;
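    // A VSX register pair (VSRp) is copied via its two underlying 128-bit
    // registers: pairs 16 and up live in the Altivec bank (V0 + 2 * n), while
    // pairs 0-15 live in the VSL bank, so the pair number is rebased and
    // doubled accordingly above.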
  else if (PPC::CRBITRCRegClass.contains(DestReg, SrcReg))
  else if (PPC::SPERCRegClass.contains(DestReg, SrcReg))
  else if ((PPC::ACCRCRegClass.contains(DestReg) ||
            PPC::UACCRCRegClass.contains(DestReg)) &&
           (PPC::ACCRCRegClass.contains(SrcReg) ||
            PPC::UACCRCRegClass.contains(SrcReg))) {
    bool DestPrimed = PPC::ACCRCRegClass.contains(DestReg);
    bool SrcPrimed = PPC::ACCRCRegClass.contains(SrcReg);
        PPC::VSL0 + (SrcReg - (SrcPrimed ? PPC::ACC0 : PPC::UACC0)) * 4;
        PPC::VSL0 + (DestReg - (DestPrimed ? PPC::ACC0 : PPC::UACC0)) * 4;
    if (SrcPrimed && !KillSrc)
  } else if (PPC::G8pRCRegClass.contains(DestReg) &&
             PPC::G8pRCRegClass.contains(SrcReg)) {
    unsigned DestRegIdx = DestReg - PPC::G8p0;
    MCRegister DestRegSub0 = PPC::X0 + 2 * DestRegIdx;
    MCRegister DestRegSub1 = PPC::X0 + 2 * DestRegIdx + 1;
    unsigned SrcRegIdx = SrcReg - PPC::G8p0;
    MCRegister SrcRegSub0 = PPC::X0 + 2 * SrcRegIdx;
    MCRegister SrcRegSub1 = PPC::X0 + 2 * SrcRegIdx + 1;
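  // Accumulator (ACC/UACC) copies above are lowered to copies of the four
  // underlying VSL registers, with DestPrimed/SrcPrimed tracking whether each
  // accumulator is in primed form; G8p register pairs are copied via their
  // two underlying X-register halves.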
  if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
      PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {
  } else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
             PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {
  } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::SPERCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::SPILLTOVSRRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::ACCRCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.pairedVectorMemops() &&
           "Register unexpected when paired memops are disabled.");
  } else if (PPC::UACCRCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.pairedVectorMemops() &&
           "Register unexpected when paired memops are disabled.");
  } else if (PPC::WACCRCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.pairedVectorMemops() &&
           "Register unexpected when paired memops are disabled.");
  } else if (PPC::VSRpRCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.pairedVectorMemops() &&
           "Register unexpected when paired memops are disabled.");
  } else if (PPC::G8pRCRegClass.hasSubClassEq(RC)) {

  return OpcodesForSpill[getSpillIndex(RC)];

  return OpcodesForSpill[getSpillIndex(RC)];
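// Both lookups above use the same getSpillIndex(RC) key, so the store and
// load spill-opcode tables stay in one-to-one correspondence with the
// register-class cases handled by getSpillIndex.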
void PPCInstrInfo::StoreRegToStackSlot(

  if (PPC::CRRCRegClass.hasSubClassEq(RC) ||
      PPC::CRBITRCRegClass.hasSubClassEq(RC))

  StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs);

  NewMIs.back()->addMemOperand(MF, MMO);

                                        unsigned DestReg, int FrameIdx,

  LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs);

  NewMIs.back()->addMemOperand(MF, MMO);
  assert(Cond.size() == 2 && "Invalid PPC branch opcode!");
  Cond[0].setImm(Cond[0].getImm() == 0 ? 1 : 0);

  unsigned DefOpc = DefMI.getOpcode();
  if (DefOpc != PPC::LI && DefOpc != PPC::LI8)
  if (!DefMI.getOperand(1).isImm())
  if (DefMI.getOperand(1).getImm() != 0)

  for (UseIdx = 0; UseIdx < UseMI.getNumOperands(); ++UseIdx)
    if (UseMI.getOperand(UseIdx).isReg() &&

  assert(UseIdx < UseMI.getNumOperands() && "Cannot find Reg in UseMI");

  if (UseInfo->RegClass != PPC::GPRC_NOR0RegClassID &&
      UseInfo->RegClass != PPC::G8RC_NOX0RegClassID)

    bool isPPC64 = Subtarget.isPPC64();
    ZeroReg = isPPC64 ? PPC::ZERO8 : PPC::ZERO;
    ZeroReg = UseInfo->RegClass == PPC::G8RC_NOX0RegClassID ?
              PPC::ZERO8 : PPC::ZERO;

  UseMI.getOperand(UseIdx).setReg(ZeroReg);

  if (MRI->use_nodbg_empty(Reg))
    DefMI.eraseFromParent();

  if (MI.definesRegister(PPC::CTR, nullptr) ||
      MI.definesRegister(PPC::CTR8, nullptr))
                                unsigned NumT, unsigned ExtraT,
                                unsigned NumF, unsigned ExtraF,

  switch (MI.getOpcode()) {

  unsigned OpC = MI.getOpcode();
  if (OpC == PPC::BLR || OpC == PPC::BLR8) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
      bool isPPC64 = Subtarget.isPPC64();
      MI.setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR)
                                      : (isPPC64 ? PPC::BDZLR8 : PPC::BDZLR)));

      MI.setDesc(get(PPC::BCLR));

      MI.setDesc(get(PPC::BCLRn));

      MI.setDesc(get(PPC::BCCLR));
          .addImm(Pred[0].getImm())
  } else if (OpC == PPC::B) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
      bool isPPC64 = Subtarget.isPPC64();
      MI.setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ)
                                      : (isPPC64 ? PPC::BDZ8 : PPC::BDZ)));

      MI.removeOperand(0);
      MI.setDesc(get(PPC::BC));

      MI.removeOperand(0);
      MI.setDesc(get(PPC::BCn));

      MI.removeOperand(0);
      MI.setDesc(get(PPC::BCC));
          .addImm(Pred[0].getImm())
  } else if (OpC == PPC::BCTR || OpC == PPC::BCTR8 || OpC == PPC::BCTRL ||
             OpC == PPC::BCTRL8 || OpC == PPC::BCTRL_RM ||
             OpC == PPC::BCTRL8_RM) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR)

    bool setLR = OpC == PPC::BCTRL || OpC == PPC::BCTRL8 ||
                 OpC == PPC::BCTRL_RM || OpC == PPC::BCTRL8_RM;
    bool isPPC64 = Subtarget.isPPC64();

      MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8)
                             : (setLR ? PPC::BCCTRL : PPC::BCCTR)));

      MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8n : PPC::BCCTR8n)
                             : (setLR ? PPC::BCCTRLn : PPC::BCCTRn)));

      MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCCTRL8 : PPC::BCCCTR8)
                             : (setLR ? PPC::BCCCTRL : PPC::BCCCTR)));
          .addImm(Pred[0].getImm())

    if (OpC == PPC::BCTRL_RM || OpC == PPC::BCTRL8_RM)

  assert(Pred1.size() == 2 && "Invalid PPC first predicate");
  assert(Pred2.size() == 2 && "Invalid PPC second predicate");

  if (Pred1[1].getReg() == PPC::CTR8 || Pred1[1].getReg() == PPC::CTR)
  if (Pred2[1].getReg() == PPC::CTR8 || Pred2[1].getReg() == PPC::CTR)
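// A PPC predicate is a two-operand pair: an immediate condition code in
// Pred[0] and a CR (or CTR) register in Pred[1]; CTR-based predicates select
// the decrement-and-branch forms (BDNZ/BDZ and their LR variants) above.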
                                      std::vector<MachineOperand> &Pred,
                                      bool SkipDead) const {
      { &PPC::CRRCRegClass, &PPC::CRBITRCRegClass,
        &PPC::CTRRCRegClass, &PPC::CTRRC8RegClass };

  for (unsigned c = 0; c < std::size(RCs) && !Found; ++c) {
      if (MO.isDef() && RC->contains(MO.getReg())) {
      } else if (MO.isRegMask()) {
        if (MO.clobbersPhysReg(R)) {
2348 int64_t &
Value)
const {
2349 unsigned Opc =
MI.getOpcode();
2352 default:
return false;
2357 SrcReg =
MI.getOperand(1).getReg();
2359 Value =
MI.getOperand(2).getImm();
2368 SrcReg =
MI.getOperand(1).getReg();
2369 SrcReg2 =
MI.getOperand(2).getReg();
2388 if (OpC == PPC::FCMPUS || OpC == PPC::FCMPUD)
2400 bool isPPC64 = Subtarget.
isPPC64();
2401 bool is32BitSignedCompare = OpC == PPC::CMPWI || OpC == PPC::CMPW;
2402 bool is32BitUnsignedCompare = OpC == PPC::CMPLWI || OpC == PPC::CMPLW;
2403 bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD;
2412 if (!
MI)
return false;
2414 bool equalityOnly =
false;
2417 if (is32BitSignedCompare) {
2423 }
else if (is32BitUnsignedCompare) {
2428 equalityOnly =
true;
2432 equalityOnly = is64BitUnsignedCompare;
2434 equalityOnly = is32BitUnsignedCompare;
       I = MRI->use_instr_begin(CRReg), IE = MRI->use_instr_end();

    if (SubIdx != PPC::sub_eq)

    bool FoundUse = false;
         J = MRI->use_instr_begin(CRReg), JE = MRI->use_instr_end();

  else if (Value != 0) {

    if (equalityOnly || !MRI->hasOneUse(CRReg))

    int16_t Immed = (int16_t)Value;

  for (; I != E && !noSub; --I) {
    unsigned IOpC = Instr.getOpcode();

    if (&*I != &CmpInstr && (Instr.modifiesRegister(PPC::CR0, TRI) ||
                             Instr.readsRegister(PPC::CR0, TRI)))

    if ((OpC == PPC::CMPW || OpC == PPC::CMPLW ||
         OpC == PPC::CMPD || OpC == PPC::CMPLD) &&
        (IOpC == PPC::SUBF || IOpC == PPC::SUBF8) &&
        ((Instr.getOperand(1).getReg() == SrcReg &&
          Instr.getOperand(2).getReg() == SrcReg2) ||
         (Instr.getOperand(1).getReg() == SrcReg2 &&
          Instr.getOperand(2).getReg() == SrcReg))) {

  int MIOpC = MI->getOpcode();
  if (MIOpC == PPC::ANDI_rec || MIOpC == PPC::ANDI8_rec ||
      MIOpC == PPC::ANDIS_rec || MIOpC == PPC::ANDIS8_rec)

    NewOpC = PPC::getRecordFormOpcode(MIOpC);

  if (!equalityOnly && (NewOpC == PPC::SUBF_rec || NewOpC == PPC::SUBF8_rec) &&

  bool ShouldSwap = false;
  if (Sub && Value == 0) {

      ShouldSwap = !ShouldSwap;

         I = MRI->use_instr_begin(CRReg), IE = MRI->use_instr_end();

             "Invalid predicate for equality-only optimization");

      assert((!equalityOnly || NewSubReg == PPC::sub_eq) &&
             "Invalid CR bit for equality-only optimization");

      if (NewSubReg == PPC::sub_lt)
        NewSubReg = PPC::sub_gt;
      else if (NewSubReg == PPC::sub_gt)
        NewSubReg = PPC::sub_lt;

         "Non-zero immediate support and ShouldSwap "
         "may conflict in updating predicate");
  BuildMI(*MI->getParent(), std::next(MII), MI->getDebugLoc(),
          get(TargetOpcode::COPY), CRReg)

  MI->clearRegisterDeads(PPC::CR0);

  if (MIOpC != NewOpC) {

    if (MIOpC == PPC::RLWINM || MIOpC == PPC::RLWINM8) {
      Register GPRRes = MI->getOperand(0).getReg();
      int64_t SH = MI->getOperand(2).getImm();
      int64_t MB = MI->getOperand(3).getImm();
      int64_t ME = MI->getOperand(4).getImm();

      bool MBInLoHWord = MB >= 16;
      bool MEInLoHWord = ME >= 16;

      if (MB <= ME && MBInLoHWord == MEInLoHWord && SH == 0) {
        Mask = ((1LLU << (32 - MB)) - 1) & ~((1LLU << (31 - ME)) - 1);
        Mask >>= MBInLoHWord ? 0 : 16;
        NewOpC = MIOpC == PPC::RLWINM
                     ? (MBInLoHWord ? PPC::ANDI_rec : PPC::ANDIS_rec)
                     : (MBInLoHWord ? PPC::ANDI8_rec : PPC::ANDIS8_rec);
      } else if (MRI->use_empty(GPRRes) && (ME == 31) &&
                 (ME - MB + 1 == SH) && (MB >= 16)) {
        Mask = ((1LLU << 32) - 1) & ~((1LLU << (32 - SH)) - 1);
        NewOpC = MIOpC == PPC::RLWINM ? PPC::ANDIS_rec : PPC::ANDIS8_rec;

      if (Mask != ~0LLU) {
        MI->removeOperand(4);
        MI->removeOperand(3);
        MI->getOperand(2).setImm(Mask);
        NumRcRotatesConvertedToRcAnd++;
    } else if (MIOpC == PPC::RLDICL && MI->getOperand(2).getImm() == 0) {
      int64_t MB = MI->getOperand(3).getImm();
      uint64_t Mask = (1LLU << (63 - MB + 1)) - 1;
      NewOpC = PPC::ANDI8_rec;
      MI->removeOperand(3);
      MI->getOperand(2).setImm(Mask);
      NumRcRotatesConvertedToRcAnd++;

    MI->setDesc(NewDesc);

    if (!MI->definesRegister(ImpDef, nullptr)) {
      MI->addOperand(*MI->getParent()->getParent(),

    if (!MI->readsRegister(ImpUse, nullptr)) {
      MI->addOperand(*MI->getParent()->getParent(),

  assert(MI->definesRegister(PPC::CR0, nullptr) &&
         "Record-form instruction does not define cr0?");

  for (unsigned i = 0, e = PredsToUpdate.size(); i < e; i++)
    PredsToUpdate[i].first->setImm(PredsToUpdate[i].second);

  for (unsigned i = 0, e = SubRegsToUpdate.size(); i < e; i++)
    SubRegsToUpdate[i].first->setSubReg(SubRegsToUpdate[i].second);
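// The RLWINM cases above turn a record-form rotate whose mask fits in a
// single halfword into a record-form ANDI/ANDIS (or ANDI8_rec for the 64-bit
// RLDICL form), tracked by the NumRcRotatesConvertedToRcAnd statistic.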
  int64_t CmpMask, CmpValue;

  if (CmpValue || !CmpMask || SrcReg2)

  if (Opc == PPC::CMPLWI || Opc == PPC::CMPLDI)

  if (Subtarget.isPPC64() && Opc == PPC::CMPWI)

  bool SrcRegHasOtherUse = false;

  if (CRReg != PPC::CR0)

  bool SeenUseOfCRReg = false;
  bool IsCRRegKilled = false;
  if (!isRegElgibleForForwarding(RegMO, *SrcMI, CmpMI, false, IsCRRegKilled,

  int NewOpC = PPC::getRecordFormOpcode(SrcMIOpc);

         "Record-form instruction does not define cr0?");
  OffsetIsScalable = false;

  case PPC::DFSTOREf64:
    return FirstOpc == SecondOpc;

  return SecondOpc == PPC::STW || SecondOpc == PPC::STW8;

    int64_t OpOffset2, bool OffsetIsScalable2, unsigned ClusterSize,
    unsigned NumBytes) const {

         "Only base registers and frame indices are supported.");

  if (ClusterSize > 2)

  unsigned FirstOpc = FirstLdSt.getOpcode();
  unsigned SecondOpc = SecondLdSt.getOpcode();

  int64_t Offset1 = 0, Offset2 = 0;

  assert(Base1 == &BaseOp1 && Base2 == &BaseOp2 &&
         "getMemOperandWithOffsetWidth return incorrect base op");

  assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
  return Offset1 + (int64_t)Width1.getValue() == Offset2;

  unsigned Opcode = MI.getOpcode();

  if (Opcode == PPC::INLINEASM || Opcode == PPC::INLINEASM_BR) {
    const char *AsmStr = MI.getOperand(0).getSymbolName();
  } else if (Opcode == TargetOpcode::STACKMAP) {
  } else if (Opcode == TargetOpcode::PATCHPOINT) {

  return get(Opcode).getSize();
std::pair<unsigned, unsigned>

  return std::make_pair(TF, 0u);

  using namespace PPCII;
  static const std::pair<unsigned, const char *> TargetFlags[] = {
      {MO_PLT, "ppc-plt"},
      {MO_PIC_FLAG, "ppc-pic"},
      {MO_PCREL_FLAG, "ppc-pcrel"},
      {MO_GOT_FLAG, "ppc-got"},
      {MO_PCREL_OPT_FLAG, "ppc-opt-pcrel"},
      {MO_TLSGD_FLAG, "ppc-tlsgd"},
      {MO_TPREL_FLAG, "ppc-tprel"},
      {MO_TLSLDM_FLAG, "ppc-tlsldm"},
      {MO_TLSLD_FLAG, "ppc-tlsld"},
      {MO_TLSGDM_FLAG, "ppc-tlsgdm"},
      {MO_GOT_TLSGD_PCREL_FLAG, "ppc-got-tlsgd-pcrel"},
      {MO_GOT_TLSLD_PCREL_FLAG, "ppc-got-tlsld-pcrel"},
      {MO_GOT_TPREL_PCREL_FLAG, "ppc-got-tprel-pcrel"},
      {MO_TPREL_LO, "ppc-tprel-lo"},
      {MO_TPREL_HA, "ppc-tprel-ha"},
      {MO_DTPREL_LO, "ppc-dtprel-lo"},
      {MO_TLSLD_LO, "ppc-tlsld-lo"},
      {MO_TOC_LO, "ppc-toc-lo"},
      {MO_TLS, "ppc-tls"},
      {MO_PIC_HA_FLAG, "ppc-ha-pic"},
      {MO_PIC_LO_FLAG, "ppc-lo-pic"},
      {MO_TPREL_PCREL_FLAG, "ppc-tprel-pcrel"},
      {MO_TLS_PCREL_FLAG, "ppc-tls-pcrel"},
      {MO_GOT_PCREL_FLAG, "ppc-got-pcrel"},
  unsigned UpperOpcode, LowerOpcode;
  switch (MI.getOpcode()) {
  case PPC::DFLOADf32:
    UpperOpcode = PPC::LXSSP;
    LowerOpcode = PPC::LFS;
  case PPC::DFLOADf64:
    UpperOpcode = PPC::LXSD;
    LowerOpcode = PPC::LFD;
  case PPC::DFSTOREf32:
    UpperOpcode = PPC::STXSSP;
    LowerOpcode = PPC::STFS;
  case PPC::DFSTOREf64:
    UpperOpcode = PPC::STXSD;
    LowerOpcode = PPC::STFD;
  case PPC::XFLOADf32:
    UpperOpcode = PPC::LXSSPX;
    LowerOpcode = PPC::LFSX;
  case PPC::XFLOADf64:
    UpperOpcode = PPC::LXSDX;
    LowerOpcode = PPC::LFDX;
  case PPC::XFSTOREf32:
    UpperOpcode = PPC::STXSSPX;
    LowerOpcode = PPC::STFSX;
  case PPC::XFSTOREf64:
    UpperOpcode = PPC::STXSDX;
    LowerOpcode = PPC::STFDX;
    UpperOpcode = PPC::LXSIWAX;
    LowerOpcode = PPC::LFIWAX;
    UpperOpcode = PPC::LXSIWZX;
    LowerOpcode = PPC::LFIWZX;
    UpperOpcode = PPC::STXSIWX;
    LowerOpcode = PPC::STFIWX;
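  // Each pseudo above carries two possible lowerings: a VSX opcode
  // (UpperOpcode) and a classic FP opcode (LowerOpcode). Which one is used
  // depends on the bank of the register the pseudo was allocated to, checked
  // below.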
  Register TargetReg = MI.getOperand(0).getReg();

  if ((TargetReg >= PPC::F0 && TargetReg <= PPC::F31) ||
      (TargetReg >= PPC::VSL0 && TargetReg <= PPC::VSL31))
    Opcode = LowerOpcode;
    Opcode = UpperOpcode;
  MI.setDesc(get(Opcode));
3078 auto &
MBB = *
MI.getParent();
3079 auto DL =
MI.getDebugLoc();
3081 switch (
MI.getOpcode()) {
3082 case PPC::BUILD_UACC: {
3085 if (ACC - PPC::ACC0 != UACC - PPC::UACC0) {
3086 MCRegister SrcVSR = PPC::VSL0 + (UACC - PPC::UACC0) * 4;
3087 MCRegister DstVSR = PPC::VSL0 + (ACC - PPC::ACC0) * 4;
3091 for (
int VecNo = 0; VecNo < 4; VecNo++)
3093 .addReg(SrcVSR + VecNo)
3101 case PPC::KILL_PAIR: {
3102 MI.setDesc(
get(PPC::UNENCODED_NOP));
3103 MI.removeOperand(1);
3104 MI.removeOperand(0);
3107 case TargetOpcode::LOAD_STACK_GUARD: {
3110 (Subtarget.
isTargetLinux() || M->getStackProtectorGuard() ==
"tls") &&
3111 "Only Linux target or tls mode are expected to contain "
3112 "LOAD_STACK_GUARD");
3114 if (M->getStackProtectorGuard() ==
"tls")
3115 Offset = M->getStackProtectorGuardOffset();
3118 const unsigned Reg = Subtarget.
isPPC64() ? PPC::X13 : PPC::R2;
3119 MI.setDesc(
get(Subtarget.
isPPC64() ? PPC::LD : PPC::LWZ));
3125 case PPC::PPCLdFixedAddr: {
3127 "Only targets with Glibc expected to contain PPCLdFixedAddr");
3129 const unsigned Reg = Subtarget.
isPPC64() ? PPC::X13 : PPC::R2;
3130 MI.setDesc(
get(PPC::LWZ));
3132#undef PPC_LNX_FEATURE
3134#define PPC_LNX_DEFINE_OFFSETS
3135#include "llvm/TargetParser/PPCTargetParser.def"
3137 bool Is64 = Subtarget.
isPPC64();
3138 if (FAType == PPC_FAWORD_HWCAP) {
3140 Offset = Is64 ? PPC_HWCAP_OFFSET_LE64 : PPC_HWCAP_OFFSET_LE32;
3142 Offset = Is64 ? PPC_HWCAP_OFFSET_BE64 : PPC_HWCAP_OFFSET_BE32;
3143 }
else if (FAType == PPC_FAWORD_HWCAP2) {
3145 Offset = Is64 ? PPC_HWCAP2_OFFSET_LE64 : PPC_HWCAP2_OFFSET_LE32;
3147 Offset = Is64 ? PPC_HWCAP2_OFFSET_BE64 : PPC_HWCAP2_OFFSET_BE32;
3148 }
else if (FAType == PPC_FAWORD_CPUID) {
3150 Offset = Is64 ? PPC_CPUID_OFFSET_LE64 : PPC_CPUID_OFFSET_LE32;
3152 Offset = Is64 ? PPC_CPUID_OFFSET_BE64 : PPC_CPUID_OFFSET_BE32;
3154 assert(
Offset &&
"Do not know the offset for this fixed addr load");
3155 MI.removeOperand(1);
3161#define PPC_TGT_PARSER_UNDEF_MACROS
3162#include "llvm/TargetParser/PPCTargetParser.def"
3163#undef PPC_TGT_PARSER_UNDEF_MACROS
  case PPC::DFLOADf32:
  case PPC::DFLOADf64:
  case PPC::DFSTOREf32:
  case PPC::DFSTOREf64: {
    assert(Subtarget.hasP9Vector() &&
           "Invalid D-Form Pseudo-ops on Pre-P9 target.");
           "D-form op must have register and immediate operands");
  case PPC::XFLOADf32:
  case PPC::XFSTOREf32:
    assert(Subtarget.hasP8Vector() &&
           "Invalid X-Form Pseudo-ops on Pre-P8 target.");
    assert(MI.getOperand(2).isReg() && MI.getOperand(1).isReg() &&
           "X-form op must have register and register operands");
  case PPC::XFLOADf64:
  case PPC::XFSTOREf64: {
    assert(Subtarget.hasVSX() &&
           "Invalid X-Form Pseudo-ops on target that has no VSX.");
    assert(MI.getOperand(2).isReg() && MI.getOperand(1).isReg() &&
           "X-form op must have register and register operands");
  case PPC::SPILLTOVSR_LD: {
    Register TargetReg = MI.getOperand(0).getReg();
    if (PPC::VSFRCRegClass.contains(TargetReg)) {
      MI.setDesc(get(PPC::DFLOADf64));
      MI.setDesc(get(PPC::LD));
  case PPC::SPILLTOVSR_ST: {
    if (PPC::VSFRCRegClass.contains(SrcReg)) {
      NumStoreSPILLVSRRCAsVec++;
      MI.setDesc(get(PPC::DFSTOREf64));
      NumStoreSPILLVSRRCAsGpr++;
      MI.setDesc(get(PPC::STD));
  case PPC::SPILLTOVSR_LDX: {
    Register TargetReg = MI.getOperand(0).getReg();
    if (PPC::VSFRCRegClass.contains(TargetReg))
      MI.setDesc(get(PPC::LXSDX));
      MI.setDesc(get(PPC::LDX));
  case PPC::SPILLTOVSR_STX: {
    if (PPC::VSFRCRegClass.contains(SrcReg)) {
      NumStoreSPILLVSRRCAsVec++;
      MI.setDesc(get(PPC::STXSDX));
      NumStoreSPILLVSRRCAsGpr++;
      MI.setDesc(get(PPC::STDX));
  case PPC::CFENCE8: {
    auto Val = MI.getOperand(0).getReg();
    unsigned CmpOp = Subtarget.isPPC64() ? PPC::CMPD : PPC::CMPW;
    MI.setDesc(get(PPC::ISYNC));
    MI.removeOperand(0);
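// selectReg (below) folds an isel whose compared values are both known
// immediates: it evaluates the signed (CMPWI/CMPDI) or unsigned
// (CMPLWI/CMPLDI) comparison for the requested CR bit and returns whichever
// of TrueReg/FalseReg the isel would have chosen, or PPC::NoRegister when the
// result cannot be decided.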
static unsigned selectReg(int64_t Imm1, int64_t Imm2, unsigned CompareOpc,
                          unsigned TrueReg, unsigned FalseReg,
                          unsigned CRSubReg) {

  if (CompareOpc == PPC::CMPWI || CompareOpc == PPC::CMPDI) {
      return Imm1 < Imm2 ? TrueReg : FalseReg;
      return Imm1 > Imm2 ? TrueReg : FalseReg;
      return Imm1 == Imm2 ? TrueReg : FalseReg;
  } else if (CompareOpc == PPC::CMPLWI || CompareOpc == PPC::CMPLDI) {
      return Imm1 == Imm2 ? TrueReg : FalseReg;

  return PPC::NoRegister;
                                               int64_t Imm) const {
  assert(MI.getOperand(OpNo).isReg() && "Operand must be a REG");

  Register InUseReg = MI.getOperand(OpNo).getReg();
  MI.getOperand(OpNo).ChangeToImmediate(Imm);

  int UseOpIdx = MI.findRegisterUseOperandIdx(InUseReg, TRI, false);
  if (UseOpIdx >= 0) {
    MI.removeOperand(UseOpIdx);

  int OperandToKeep = LII.SetCR ? 1 : 0;
  for (int i = MI.getNumOperands() - 1; i > OperandToKeep; i--)
    MI.removeOperand(i);

    MI.setDesc(get(LII.Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec));
3344 bool &SeenIntermediateUse)
const {
3345 assert(!
MI.getParent()->getParent()->getRegInfo().isSSA() &&
3346 "Should be called after register allocation.");
3350 SeenIntermediateUse =
false;
3351 for (; It != E; ++It) {
3352 if (It->modifiesRegister(Reg,
TRI))
3354 if (It->readsRegister(Reg,
TRI))
3355 SeenIntermediateUse =
true;
3363 int64_t Imm)
const {
3365 "Register should be in non-SSA form after RA");
3366 bool isPPC64 = Subtarget.
isPPC64();
3370 if (isInt<16>(Imm)) {
3372 }
else if (isInt<32>(Imm)) {
3380 assert(isPPC64 &&
"Materializing 64-bit immediate to single register is "
3381 "only supported in PPC64");
3383 if ((Imm >> 32) & 0xFFFF)
3386 .
addImm((Imm >> 32) & 0xFFFF);
3393 .
addImm((Imm >> 16) & 0xFFFF);
3403 unsigned &OpNoForForwarding,
3404 bool &SeenIntermediateUse)
const {
3405 OpNoForForwarding = ~0U;
3413 for (
int i = 1, e =
MI.getNumOperands(); i < e; i++) {
3414 if (!
MI.getOperand(i).isReg())
3417 if (!Reg.isVirtual())
3422 if (DefMIForTrueReg->
getOpcode() == PPC::LI ||
3423 DefMIForTrueReg->
getOpcode() == PPC::LI8 ||
3424 DefMIForTrueReg->
getOpcode() == PPC::ADDI ||
3425 DefMIForTrueReg->
getOpcode() == PPC::ADDI8) {
3426 OpNoForForwarding = i;
3427 DefMI = DefMIForTrueReg;
3442 unsigned Opc =
MI.getOpcode();
3443 bool ConvertibleImmForm =
3444 Opc == PPC::CMPWI || Opc == PPC::CMPLWI || Opc == PPC::CMPDI ||
3445 Opc == PPC::CMPLDI || Opc == PPC::ADDI || Opc == PPC::ADDI8 ||
3446 Opc == PPC::ORI || Opc == PPC::ORI8 || Opc == PPC::XORI ||
3447 Opc == PPC::XORI8 || Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec ||
3448 Opc == PPC::RLDICL_32 || Opc == PPC::RLDICL_32_64 ||
3449 Opc == PPC::RLWINM || Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8 ||
3450 Opc == PPC::RLWINM8_rec;
3451 bool IsVFReg = (
MI.getNumOperands() &&
MI.getOperand(0).isReg())
3458 if ((Opc == PPC::OR || Opc == PPC::OR8) &&
3459 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
3461 for (
int i = 1, e =
MI.getNumOperands(); i <
e; i++) {
3463 SeenIntermediateUse =
false;
3477 case PPC::ADDItocL8:
3480 OpNoForForwarding = i;
3487 return OpNoForForwarding == ~0
U ? nullptr :
DefMI;
3490unsigned PPCInstrInfo::getSpillTarget()
const {
3493 bool IsP10Variant = Subtarget.isISA3_1() || Subtarget.pairedVectorMemops();
3495 return Subtarget.isISAFuture() ? 3 : IsP10Variant ?
3496 2 : Subtarget.hasP9Vector() ?
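// The value returned above indexes the spill-opcode tables: 3 for Future ISA
// subtargets, 2 for Power10-class subtargets (ISA 3.1 or paired vector
// memops), with lower values distinguishing P9-vector from older subtargets.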
  bool PostRA = !MRI->isSSA();

  unsigned ToBeDeletedReg = 0;
  int64_t OffsetImm = 0;
  unsigned XFormOpcode = 0;

  bool OtherIntermediateUse = false;

  if (OtherIntermediateUse || !ADDMI)

  unsigned ScaleRegIdx = 0;
  int64_t OffsetAddi = 0;

  assert(ADDIMI && "There should be ADDIMI for valid ToBeChangedReg.");

  for (auto It = ++Start; It != End; It++)

      (ScaleReg == PPC::R0 || ScaleReg == PPC::X0))

  if (NewDefFor(ToBeChangedReg, *ADDMI, MI) || NewDefFor(ScaleReg, *ADDMI, MI))

  MI.setDesc(get(XFormOpcode));
      .ChangeToRegister(ScaleReg, false, false,
      .ChangeToRegister(ToBeChangedReg, false, false, true);

                                                     int64_t &Imm) const {
  if (Opc != PPC::ADDI && Opc != PPC::ADDI8)

  return Opc == PPC::ADD4 || Opc == PPC::ADD8;
3654 unsigned &ToBeDeletedReg,
3655 unsigned &XFormOpcode,
3659 if (!
MI.mayLoadOrStore())
3662 unsigned Opc =
MI.getOpcode();
3667 if (XFormOpcode == PPC::INSTRUCTION_LIST_END)
3681 if (!ImmOperand.
isImm())
3684 assert(RegOperand.
isReg() &&
"Instruction format is not right");
3687 if (!RegOperand.
isKill())
3690 ToBeDeletedReg = RegOperand.
getReg();
3691 OffsetImm = ImmOperand.
getImm();
3698 int64_t &OffsetAddi,
3699 int64_t OffsetImm)
const {
3700 assert((Index == 1 || Index == 2) &&
"Invalid operand index for add.");
3706 bool OtherIntermediateUse =
false;
3727 if (OtherIntermediateUse || !ADDIMI)
3733 if (isInt<16>(OffsetAddi + OffsetImm))
  bool PostRA = !MRI->isSSA();
  bool SeenIntermediateUse = true;
  unsigned ForwardingOperand = ~0U;
                                           SeenIntermediateUse);

  assert(ForwardingOperand < MI.getNumOperands() &&
         "The forwarding operand needs to be valid at this point");
  bool IsForwardingOperandKilled = MI.getOperand(ForwardingOperand).isKill();
  bool KillFwdDefMI = !SeenIntermediateUse && IsForwardingOperandKilled;
  if (KilledDef && KillFwdDefMI)

          PPC::INSTRUCTION_LIST_END &&
      transformToNewImmFormFedByAdd(MI, *DefMI, ForwardingOperand))

  bool IsVFReg = MI.getOperand(0).isReg()

      transformToImmFormFedByAdd(MI, III, ForwardingOperand, *DefMI,

      transformToImmFormFedByLI(MI, III, ForwardingOperand, *DefMI))

  if (!HasImmForm && simplifyToLI(MI, *DefMI, ForwardingOperand, KilledDef))
  Register FoldingReg = MI.getOperand(1).getReg();

  if (SrcMI->getOpcode() != PPC::RLWINM &&
      SrcMI->getOpcode() != PPC::RLWINM_rec &&

  assert((MI.getOperand(2).isImm() && MI.getOperand(3).isImm() &&
         "Invalid PPC::RLWINM Instruction!");

  assert((MEMI < 32 && MESrc < 32 && MBMI < 32 && MBSrc < 32) &&
         "Invalid PPC::RLWINM Instruction!");

  bool SrcMaskFull = (MBSrc - MESrc == 1) || (MBSrc == 0 && MESrc == 31);

  if ((MBMI > MEMI) && !SrcMaskFull)

  APInt RotatedSrcMask = MaskSrc.rotl(SHMI);
  APInt FinalMask = RotatedSrcMask & MaskMI;

  bool Simplified = false;

  if (FinalMask.isZero()) {
        (MI.getOpcode() == PPC::RLWINM8 || MI.getOpcode() == PPC::RLWINM8_rec);

    if (MI.getOpcode() == PPC::RLWINM || MI.getOpcode() == PPC::RLWINM8) {
      MI.removeOperand(4);
      MI.removeOperand(3);
      MI.removeOperand(2);
      MI.getOperand(1).ChangeToImmediate(0);
      MI.setDesc(get(Is64Bit ? PPC::LI8 : PPC::LI));
      MI.removeOperand(4);
      MI.removeOperand(3);
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec));
      MI.getOperand(1).setIsKill(true);
      MI.getOperand(1).setIsKill(false);

    uint16_t NewSH = (SHSrc + SHMI) % 32;
    MI.getOperand(2).setImm(NewSH);
    MI.getOperand(3).setImm(NewMB);
    MI.getOperand(4).setImm(NewME);
      MI.getOperand(1).setIsKill(true);
      MI.getOperand(1).setIsKill(false);
  if (Simplified && MRI->use_nodbg_empty(FoldingReg) &&
  default: return false;

    III.ImmOpcode = Opc == PPC::ADD4 ? PPC::ADDI : PPC::ADDI8;

    III.ImmOpcode = Opc == PPC::ADDC ? PPC::ADDIC : PPC::ADDIC8;

    III.ImmOpcode = Opc == PPC::SUBFC ? PPC::SUBFIC : PPC::SUBFIC8;

    III.ImmOpcode = Opc == PPC::CMPW ? PPC::CMPWI : PPC::CMPDI;

    III.ImmOpcode = Opc == PPC::CMPLW ? PPC::CMPLWI : PPC::CMPLDI;

  case PPC::OR: III.ImmOpcode = PPC::ORI; break;
  case PPC::OR8: III.ImmOpcode = PPC::ORI8; break;
  case PPC::XOR: III.ImmOpcode = PPC::XORI; break;
  case PPC::XOR8: III.ImmOpcode = PPC::XORI8; break;

  case PPC::RLWNM_rec:
  case PPC::RLWNM8_rec:
    if (Opc == PPC::RLWNM || Opc == PPC::RLWNM8 || Opc == PPC::RLWNM_rec ||
        Opc == PPC::RLWNM8_rec)
    case PPC::RLWNM: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::RLWNM8: III.ImmOpcode = PPC::RLWINM8; break;
    case PPC::RLWNM_rec:
    case PPC::RLWNM8_rec:
    case PPC::SLW: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::SLW8: III.ImmOpcode = PPC::RLWINM8; break;
    case PPC::SRW: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::SRW8: III.ImmOpcode = PPC::RLWINM8; break;

  case PPC::RLDCL_rec:
  case PPC::RLDCR_rec:
    if (Opc == PPC::RLDCL || Opc == PPC::RLDCL_rec || Opc == PPC::RLDCR ||
        Opc == PPC::RLDCR_rec)
    case PPC::RLDCL: III.ImmOpcode = PPC::RLDICL; break;
    case PPC::RLDCL_rec:
    case PPC::RLDCR: III.ImmOpcode = PPC::RLDICR; break;
    case PPC::RLDCR_rec:
    case PPC::SLD: III.ImmOpcode = PPC::RLDICR; break;
    case PPC::SRD: III.ImmOpcode = PPC::RLDICL; break;
  case PPC::LBZX: III.ImmOpcode = PPC::LBZ; break;
  case PPC::LBZX8: III.ImmOpcode = PPC::LBZ8; break;
  case PPC::LHZX: III.ImmOpcode = PPC::LHZ; break;
  case PPC::LHZX8: III.ImmOpcode = PPC::LHZ8; break;
  case PPC::LHAX: III.ImmOpcode = PPC::LHA; break;
  case PPC::LHAX8: III.ImmOpcode = PPC::LHA8; break;
  case PPC::LWZX: III.ImmOpcode = PPC::LWZ; break;
  case PPC::LWZX8: III.ImmOpcode = PPC::LWZ8; break;
  case PPC::LFSX: III.ImmOpcode = PPC::LFS; break;
  case PPC::LFDX: III.ImmOpcode = PPC::LFD; break;
  case PPC::STBX: III.ImmOpcode = PPC::STB; break;
  case PPC::STBX8: III.ImmOpcode = PPC::STB8; break;
  case PPC::STHX: III.ImmOpcode = PPC::STH; break;
  case PPC::STHX8: III.ImmOpcode = PPC::STH8; break;
  case PPC::STWX: III.ImmOpcode = PPC::STW; break;
  case PPC::STWX8: III.ImmOpcode = PPC::STW8; break;
  case PPC::STFSX: III.ImmOpcode = PPC::STFS; break;
  case PPC::STFDX: III.ImmOpcode = PPC::STFD; break;
  case PPC::LBZUX: III.ImmOpcode = PPC::LBZU; break;
  case PPC::LBZUX8: III.ImmOpcode = PPC::LBZU8; break;
  case PPC::LHZUX: III.ImmOpcode = PPC::LHZU; break;
  case PPC::LHZUX8: III.ImmOpcode = PPC::LHZU8; break;
  case PPC::LHAUX: III.ImmOpcode = PPC::LHAU; break;
  case PPC::LHAUX8: III.ImmOpcode = PPC::LHAU8; break;
  case PPC::LWZUX: III.ImmOpcode = PPC::LWZU; break;
  case PPC::LWZUX8: III.ImmOpcode = PPC::LWZU8; break;
  case PPC::LFSUX: III.ImmOpcode = PPC::LFSU; break;
  case PPC::LFDUX: III.ImmOpcode = PPC::LFDU; break;
  case PPC::STBUX: III.ImmOpcode = PPC::STBU; break;
  case PPC::STBUX8: III.ImmOpcode = PPC::STBU8; break;
  case PPC::STHUX: III.ImmOpcode = PPC::STHU; break;
  case PPC::STHUX8: III.ImmOpcode = PPC::STHU8; break;
  case PPC::STWUX: III.ImmOpcode = PPC::STWU; break;
  case PPC::STWUX8: III.ImmOpcode = PPC::STWU8; break;
  case PPC::STFSUX: III.ImmOpcode = PPC::STFSU; break;
  case PPC::STFDUX: III.ImmOpcode = PPC::STFDU; break;
  case PPC::XFLOADf32:
  case PPC::XFLOADf64:
  case PPC::XFSTOREf32:
  case PPC::XFSTOREf64:
    if (!Subtarget.hasP9Vector())

    case PPC::XFLOADf32:
    case PPC::XFLOADf64:
    case PPC::XFSTOREf32:
    case PPC::XFSTOREf64:
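// The cases above fill III.ImmOpcode with the D-form (immediate) equivalent
// of each X-form (indexed) opcode; the VSX pseudo loads and stores are only
// convertible when the subtarget has Power9 vector support.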
  assert(Op1 != Op2 && "Cannot swap operand with itself.");

  unsigned MaxOp = std::max(Op1, Op2);
  unsigned MinOp = std::min(Op1, Op2);

  MI.removeOperand(std::max(Op1, Op2));
  MI.removeOperand(std::min(Op1, Op2));

  if (MaxOp - MinOp == 1 && MI.getNumOperands() == MinOp) {
    MI.addOperand(MOp2);
    MI.addOperand(MOp1);

    unsigned TotalOps = MI.getNumOperands() + 2;
    for (unsigned i = MI.getNumOperands() - 1; i >= MinOp; i--) {
      MI.removeOperand(i);
    MI.addOperand(MOp2);
    for (unsigned i = MI.getNumOperands(); i < TotalOps; i++) {
      MI.addOperand(MOp1);
      MI.addOperand(MOps.back());
                                               unsigned OpNoForForwarding

  unsigned Opc = DefMI.getOpcode();
  if (Opc != PPC::ADDItocL8 && Opc != PPC::ADDI && Opc != PPC::ADDI8)

  if (Opc == PPC::ADDItocL8 && Subtarget.isAIX())

         "Add inst must have at least three operands");
  RegMO = &DefMI.getOperand(1);
  ImmMO = &DefMI.getOperand(2);

  if (!RegMO->isReg())

bool PPCInstrInfo::isRegElgibleForForwarding(
    bool &IsFwdFeederRegKilled, bool &SeenIntermediateUse) const {

  for (; It != E; ++It) {
      IsFwdFeederRegKilled = true;
      SeenIntermediateUse = true;
    if ((&*It) == &DefMI)

bool PPCInstrInfo::isImmElgibleForForwarding(const MachineOperand &ImmMO,
                                             int64_t BaseImm) const {
  if (DefMI.getOpcode() == PPC::ADDItocL8) {

  if (ImmMO.isImm()) {
    APInt ActualValue(64, ImmMO.getImm() + BaseImm, true);

    Imm = SignExtend64<16>(ImmMO.getImm() + BaseImm);
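// simplifyToLI (below) folds a known LI/LI8 feeder into its user: a compare
// feeding isels is evaluated outright via selectReg, while ADDI, SUBFIC,
// rotate-and-mask, ORI and XORI users whose results become compile-time
// constants are rewritten into a plain LI of the folded value.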
                                  unsigned OpNoForForwarding,
  if ((DefMI.getOpcode() != PPC::LI && DefMI.getOpcode() != PPC::LI8) ||
      !DefMI.getOperand(1).isImm())

  int64_t Immediate = DefMI.getOperand(1).getImm();
  int64_t SExtImm = SignExtend64<16>(Immediate);

  bool ReplaceWithLI = false;
  bool Is64BitLI = false;

  unsigned Opc = MI.getOpcode();

    bool Changed = false;

    int64_t Comparand = MI.getOperand(2).getImm();
    int64_t SExtComparand = ((uint64_t)Comparand & ~0x7FFFuLL) != 0
                                ? (Comparand | 0xFFFFFFFFFFFF0000)

    for (auto &CompareUseMI : MRI->use_instructions(DefReg)) {
      unsigned UseOpc = CompareUseMI.getOpcode();
      if (UseOpc != PPC::ISEL && UseOpc != PPC::ISEL8)
      unsigned CRSubReg = CompareUseMI.getOperand(3).getSubReg();
      Register TrueReg = CompareUseMI.getOperand(1).getReg();
      Register FalseReg = CompareUseMI.getOperand(2).getReg();
      unsigned RegToCopy =
          selectReg(SExtImm, SExtComparand, Opc, TrueReg, FalseReg, CRSubReg);
      if (RegToCopy == PPC::NoRegister)
      if (RegToCopy == PPC::ZERO || RegToCopy == PPC::ZERO8) {
        CompareUseMI.setDesc(get(UseOpc == PPC::ISEL8 ? PPC::LI8 : PPC::LI));
        CompareUseMI.removeOperand(3);
        CompareUseMI.removeOperand(2);
      dbgs() << "Found LI -> CMPI -> ISEL, replacing with a copy.\n");
      CompareUseMI.setDesc(get(PPC::COPY));
      CompareUseMI.removeOperand(3);
      CompareUseMI.removeOperand(RegToCopy == TrueReg ? 2 : 1);
      CmpIselsConverted++;
    MissedConvertibleImmediateInstrs++;

    int64_t Addend = MI.getOperand(2).getImm();
    if (isInt<16>(Addend + SExtImm)) {
      ReplaceWithLI = true;
      Is64BitLI = Opc == PPC::ADDI8;
      NewImm = Addend + SExtImm;

  case PPC::SUBFIC8: {
    if (MI.getNumOperands() > 3 && !MI.getOperand(3).isDead())
    int64_t Minuend = MI.getOperand(2).getImm();
    if (isInt<16>(Minuend - SExtImm)) {
      ReplaceWithLI = true;
      Is64BitLI = Opc == PPC::SUBFIC8;
      NewImm = Minuend - SExtImm;

  case PPC::RLDICL_rec:
  case PPC::RLDICL_32:
  case PPC::RLDICL_32_64: {
    int64_t SH = MI.getOperand(2).getImm();
    int64_t MB = MI.getOperand(3).getImm();
    APInt InVal((Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec) ? 64 : 32,
    InVal = InVal.rotl(SH);
    if (isUInt<15>(InVal.getSExtValue()) ||
        (Opc == PPC::RLDICL_rec && isUInt<16>(InVal.getSExtValue()))) {
      ReplaceWithLI = true;
      Is64BitLI = Opc != PPC::RLDICL_32;
      NewImm = InVal.getSExtValue();
      SetCR = Opc == PPC::RLDICL_rec;

  case PPC::RLWINM_rec:
  case PPC::RLWINM8_rec: {
    int64_t SH = MI.getOperand(2).getImm();
    int64_t MB = MI.getOperand(3).getImm();
    int64_t ME = MI.getOperand(4).getImm();
    APInt InVal(32, SExtImm, true);
    InVal = InVal.rotl(SH);
    bool ValueFits = isUInt<15>(InVal.getSExtValue());
    ValueFits |= ((Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec) &&
                  isUInt<16>(InVal.getSExtValue()));
      ReplaceWithLI = true;
      Is64BitLI = Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8_rec;
      NewImm = InVal.getSExtValue();
      SetCR = Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec;

    int64_t LogicalImm = MI.getOperand(2).getImm();
    if (Opc == PPC::ORI || Opc == PPC::ORI8)
      Result = LogicalImm | SExtImm;
      Result = LogicalImm ^ SExtImm;
    if (isInt<16>(Result)) {
      ReplaceWithLI = true;
      Is64BitLI = Opc == PPC::ORI8 || Opc == PPC::XORI8;

  if (ReplaceWithLI) {
    bool ImmChanged = (SExtImm & NewImm) != NewImm;
    if (PostRA && ImmChanged)

        DefMI.getOperand(1).setImm(NewImm);
      } else if (MRI->use_empty(MI.getOperand(0).getReg())) {
        assert(Immediate && "Transformation converted zero to non-zero?");
      } else if (ImmChanged)

    if (KilledDef && SetCR)
      *KilledDef = nullptr;
bool PPCInstrInfo::transformToNewImmFormFedByAdd(
  if (!MI.mayLoadOrStore())

  assert((XFormOpcode != PPC::INSTRUCTION_LIST_END) &&
         "MI must have x-form opcode");

  bool IsVFReg = MI.getOperand(0).isReg()

  if (!ImmOperandMI.isImm())

  if (!isDefMIElgibleForForwarding(DefMI, III, ImmMO, RegMO))
  assert(ImmMO && RegMO && "Imm and Reg operand must have been set");

  int64_t ImmBase = ImmOperandMI.getImm();
  if (!isImmElgibleForForwarding(*ImmMO, DefMI, III, Imm, ImmBase))

  LLVM_DEBUG(dbgs() << "Replacing existing reg+imm instruction:\n");

bool PPCInstrInfo::transformToImmFormFedByAdd(
  if (!isUseMIElgibleForForwarding(MI, III, OpNoForForwarding))

  if (!isDefMIElgibleForForwarding(DefMI, III, ImmMO, RegMO))
  assert(ImmMO && RegMO && "Imm and Reg operand must have been set");

  if (!isImmElgibleForForwarding(*ImmMO, DefMI, III, Imm))

  bool IsFwdFeederRegKilled = false;
  bool SeenIntermediateUse = false;
  if (!isRegElgibleForForwarding(*RegMO, DefMI, MI, KillDefMI,
                                 IsFwdFeederRegKilled, SeenIntermediateUse))

  if (ImmMO->isImm()) {

    if (DefMI.getOpcode() == PPC::ADDItocL8)

      MI.removeOperand(i);

    MI.addOperand(*ImmMO);

    for (auto &MO : MOps)
                                              unsigned ConstantOpNo,
  if ((DefMI.getOpcode() != PPC::LI && DefMI.getOpcode() != PPC::LI8) ||
      !DefMI.getOperand(1).isImm())

  int64_t Imm = SignExtend64<16>(DefMI.getOperand(1).getImm());

    APInt ActualValue(64, Imm, true);
    if (!ActualValue.isSignedIntN(III.ImmWidth))

    Register OrigZeroReg = MI.getOperand(PosForOrigZero).getReg();

    if ((NewZeroReg == PPC::R0 || NewZeroReg == PPC::X0) &&
    if ((OrigZeroReg == PPC::R0 || OrigZeroReg == PPC::X0) &&
        ConstantOpNo != PosForOrigZero)

  unsigned Opc = MI.getOpcode();
  bool SpecialShift32 = Opc == PPC::SLW || Opc == PPC::SLW_rec ||
                        Opc == PPC::SRW || Opc == PPC::SRW_rec ||
                        Opc == PPC::SLW8 || Opc == PPC::SLW8_rec ||
                        Opc == PPC::SRW8 || Opc == PPC::SRW8_rec;
  bool SpecialShift64 = Opc == PPC::SLD || Opc == PPC::SLD_rec ||
                        Opc == PPC::SRD || Opc == PPC::SRD_rec;
  bool SetCR = Opc == PPC::SLW_rec || Opc == PPC::SRW_rec ||
               Opc == PPC::SLD_rec || Opc == PPC::SRD_rec;
  bool RightShift = Opc == PPC::SRW || Opc == PPC::SRW_rec || Opc == PPC::SRD ||
                    Opc == PPC::SRD_rec;
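  // Variable shifts need care when the shift amount is forwarded as an
  // immediate: the 32-bit (SLW/SRW) and 64-bit (SLD/SRD) forms mask the
  // amount to 5 or 6 bits, and an amount with the next higher bit set means
  // the architectural shift exceeds the register width, which gets its own
  // handling below.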
  if (SpecialShift32 || SpecialShift64) {
    uint64_t ShAmt = Imm & (SpecialShift32 ? 0x1F : 0x3F);
    if (Imm & (SpecialShift32 ? 0x20 : 0x40))

    else if (!SetCR && ShAmt == 0 && !PostRA) {
      MI.removeOperand(2);
      MI.setDesc(get(PPC::COPY));

    if (SpecialShift32) {

      MRI.getRegClass(RegToModify)->hasSuperClassEq(&PPC::GPRCRegClass) ?
          &PPC::GPRC_and_GPRC_NOR0RegClass : &PPC::G8RC_and_G8RC_NOX0RegClass;
  MRI.setRegClass(RegToModify, NewRC);
  if (Subtarget.hasVSX() && RC == &PPC::VRRCRegClass)
    return &PPC::VSRCRegClass;

  return PPC::getRecordFormOpcode(Opcode);

  return (Opcode == PPC::LBZU || Opcode == PPC::LBZUX || Opcode == PPC::LBZU8 ||
          Opcode == PPC::LBZUX8 || Opcode == PPC::LHZU ||
          Opcode == PPC::LHZUX || Opcode == PPC::LHZU8 ||
          Opcode == PPC::LHZUX8);
  int Opcode = MI->getOpcode();
  // ...
  if (TII->isSExt32To64(Opcode))
    // ...
  if (Opcode == PPC::RLDICL && MI->getOperand(3).getImm() >= 33)
    // ...
  if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec ||
       Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec) &&
      MI->getOperand(3).getImm() > 0 &&
      MI->getOperand(3).getImm() <= MI->getOperand(4).getImm())
    // ...
  if (Opcode == PPC::ANDIS_rec || Opcode == PPC::ANDIS8_rec) {
    // ...
    if ((Imm & 0x8000) == 0)
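// definedByZeroExtendingOp mirrors the query above for zero extension:
// isZExt32To64 opcodes, word load-with-update instructions (but only for the
// loaded value in operand 0, not the address update), LI/LIS constants whose
// bits above 0x7FFF are clear, and rotate-and-clear forms whose mask
// guarantees the upper 32 bits are zero.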
  int Opcode = MI->getOpcode();
  // ...
  if (TII->isZExt32To64(Opcode))
    // ...
  // ...
       Opcode == PPC::LWZUX || Opcode == PPC::LWZU8 || Opcode == PPC::LWZUX8) &&
      MI->getOperand(0).getReg() == Reg)
    // ...
  if (Opcode == PPC::LI || Opcode == PPC::LI8 ||
      Opcode == PPC::LIS || Opcode == PPC::LIS8) {
    int64_t Imm = MI->getOperand(1).getImm();
    if (((uint64_t)Imm & ~0x7FFFuLL) == 0)
      // ...
  if ((Opcode == PPC::RLDICL || Opcode == PPC::RLDICL_rec ||
       Opcode == PPC::RLDCL || Opcode == PPC::RLDCL_rec ||
       Opcode == PPC::RLDICL_32_64) &&
      MI->getOperand(3).getImm() >= 32)
    // ...
  if ((Opcode == PPC::RLDIC || Opcode == PPC::RLDIC_rec) &&
      MI->getOperand(3).getImm() >= 32 &&
      MI->getOperand(3).getImm() <= 63 - MI->getOperand(2).getImm())
    // ...
  if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec ||
       Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec ||
       Opcode == PPC::RLWINM8 || Opcode == PPC::RLWNM8) &&
      MI->getOperand(3).getImm() <= MI->getOperand(4).getImm())
  if (!MI.getOperand(1).isImm() || !MI.getOperand(2).isReg())
    // ...
  Register StackReg = MI.getOperand(2).getReg();
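// promoteInstr32To64ForElimEXTSW: rewrite the 32-bit def of Reg (and,
// recursively, the instructions feeding it, bounded by BinOpDepth) into
// 64-bit form so a later sign-extension can be dropped. PHIs recurse over
// every incoming value; copies follow their source register, with the
// physical argument/return register X3 treated specially.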
    // ...
    unsigned BinOpDepth,
    // ...
  if (!Reg.isVirtual())
    // ...
  unsigned Opcode = MI->getOpcode();
  // ...
  unsigned OperandEnd = 3, OperandStride = 1;
  if (Opcode == PPC::PHI) {
    OperandEnd = MI->getNumOperands();
    // ...
  for (unsigned I = 1; I < OperandEnd; I += OperandStride) {
    assert(MI->getOperand(I).isReg() && "Operand must be register");
    // ...
                                     BinOpDepth + 1, LV);
  // ...
  Register SrcReg = MI->getOperand(1).getReg();
  // ...
  if (SrcReg != PPC::X3)
    // ...
                                   BinOpDepth + 1, LV);
  // ...
                                   BinOpDepth + 1, LV);
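// A few bitwise ops map directly to their 64-bit twins via the table below
// (OR -> OR8, ISEL -> ISEL8, ...); every other candidate must be a
// TableGen-marked sign-extending 32-bit opcode whose 64-bit counterpart comes
// from PPC::get64BitInstrFromSignedExt32BitInstr, as the assert enforces.
// Operands still constrained to a 32-bit register class are routed through
// freshly created 64-bit virtual registers before the opcode is swapped.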
  if (RC == &PPC::G8RCRegClass || RC == &PPC::G8RC_and_G8RC_NOX0RegClass)
    // ...
  std::unordered_map<unsigned, unsigned> OpcodeMap = {
      {PPC::OR, PPC::OR8},     {PPC::ISEL, PPC::ISEL8},
      {PPC::ORI, PPC::ORI8},   {PPC::XORI, PPC::XORI8},
      {PPC::ORIS, PPC::ORIS8}, {PPC::XORIS, PPC::XORIS8},
      {PPC::AND, PPC::AND8}};
  // ...
  auto It = OpcodeMap.find(Opcode);
  if (It != OpcodeMap.end()) {
    // ...
    NewOpcode = It->second;
    // ...
    if (!TII->isSExt32To64(Opcode))
      // ...
    NewOpcode = PPC::get64BitInstrFromSignedExt32BitInstr(Opcode);
    // ...
  assert(NewOpcode != -1 &&
         "Must have a 64-bit opcode to map the 32-bit opcode!");
  // ...
  Register SrcReg = MI->getOperand(0).getReg();
  // ...
  auto MBB = MI->getParent();
  // ...
  for (unsigned i = 1; i < MI->getNumOperands(); i++) {
    // ...
    if (!Operand.isReg())
      // ...
    if (NewUsedRegRC != OrgRC && (OrgRC == &PPC::GPRCRegClass ||
                                  OrgRC == &PPC::GPRC_and_GPRC_NOR0RegClass)) {
      // ...
      Register TmpReg = MRI->createVirtualRegister(NewUsedRegRC);
      Register DstTmpReg = MRI->createVirtualRegister(NewUsedRegRC);
      // ...
      PromoteRegs[i] = DstTmpReg;
      // ...
  Register NewDefinedReg = MRI->createVirtualRegister(NewRC);
  // ...
  for (unsigned i = 1; i < MI->getNumOperands(); i++) {
    if (PromoteRegs.find(i) != PromoteRegs.end())
      // ...
  for (unsigned i = 1; i < Iter->getNumOperands(); i++) {
    // ...
    if (!Operand.isReg())
      // ...
  MI->eraseFromParent();
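// isSignOrZeroExtended returns a (sign-extended, zero-extended) pair for Reg.
// It recurses through copies, folds in the SExt/ZExt attributes from the IR
// signature for function live-ins and for call results coming back in X3,
// requires every incoming value of a PHI to be extended, and for the final
// two-source case demands both inputs be sign-extended but only one be
// zero-extended.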
std::pair<bool, bool>
    // ...
    const unsigned BinOpDepth,
    // ...
    return std::pair<bool, bool>(false, false);
  // ...
    return std::pair<bool, bool>(false, false);
  // ...
  if (IsSExt && IsZExt)
    return std::pair<bool, bool>(IsSExt, IsZExt);
  // ...
  switch (MI->getOpcode()) {
  // ...
    Register SrcReg = MI->getOperand(1).getReg();
    // ...
    return std::pair<bool, bool>(SrcExt.first || IsSExt,
                                 SrcExt.second || IsZExt);
  // ...
    if (MI->getParent()->getBasicBlock() ==
        // ...
      return std::pair<bool, bool>(IsSExt, IsZExt);
    // ...
    if (SrcReg != PPC::X3) {
      // ...
      return std::pair<bool, bool>(SrcExt.first || IsSExt,
                                   SrcExt.second || IsZExt);
    // ...
    std::pair<bool, bool> IsExtendPair = std::pair<bool, bool>(IsSExt, IsZExt);
    // ...
      return IsExtendPair;
    // ...
      return IsExtendPair;
    // ...
      return IsExtendPair;
    // ...
    IsSExt |= Attrs.hasAttribute(Attribute::SExt);
    IsZExt |= Attrs.hasAttribute(Attribute::ZExt);
    return std::pair<bool, bool>(IsSExt, IsZExt);
  // ...
    return IsExtendPair;
  // ...
    Register SrcReg = MI->getOperand(1).getReg();
    // ...
    return std::pair<bool, bool>(SrcExt.first || IsSExt,
                                 SrcExt.second || IsZExt);
  // ...
    Register SrcReg = MI->getOperand(1).getReg();
    // ...
      return std::pair<bool, bool>(false, SrcExt.second || IsZExt);
    return std::pair<bool, bool>(SrcExt.first || IsSExt,
                                 SrcExt.second || IsZExt);
  // ...
      return std::pair<bool, bool>(false, false);
    // ...
    unsigned OperandEnd = 3, OperandStride = 1;
    if (MI->getOpcode() == PPC::PHI) {
      OperandEnd = MI->getNumOperands();
      // ...
    for (unsigned I = 1; I != OperandEnd; I += OperandStride) {
      if (!MI->getOperand(I).isReg())
        return std::pair<bool, bool>(false, false);
      // ...
      IsSExt &= SrcExt.first;
      IsZExt &= SrcExt.second;
    // ...
    return std::pair<bool, bool>(IsSExt, IsZExt);
  // ...
      return std::pair<bool, bool>(false, false);
    // ...
    Register SrcReg1 = MI->getOperand(1).getReg();
    Register SrcReg2 = MI->getOperand(2).getReg();
    // ...
    return std::pair<bool, bool>(Src1Ext.first && Src2Ext.first,
                                 Src1Ext.second || Src2Ext.second);
  // ...
  return std::pair<bool, bool>(IsSExt, IsZExt);
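// Software-pipelining hooks: isBDNZ recognizes the CTR-decrementing branch,
// and PPCPipelinerLoopInfo tells the pipeliner to ignore that branch, compares
// or adjusts a trip count held in an LI/LI8, and erases the loop set-up in
// disposed(). analyzeLoopForPipelining builds this object when the loop block
// ends in a BDNZ, and the preheader scan below looks for the matching
// MTCTRloop/MTCTR8loop instruction.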
  return (Opcode == (Subtarget.isPPC64() ? PPC::BDNZ8 : PPC::BDNZ));
// ...

      : Loop(Loop), EndLoop(EndLoop), LoopCount(LoopCount),
        // ...
        TII(MF->getSubtarget().getInstrInfo()) {
  // ...
  bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
    // ...
    return MI == EndLoop;
  // ...
  std::optional<bool> createTripCountGreaterCondition(
      // ...
    if (TripCount == -1) {
      // ...
    return TripCount > TC;
  // ...
  void adjustTripCount(int TripCountAdjust) override {
    // ...
    if (LoopCount->getOpcode() == PPC::LI8 ||
        // ...
  void disposed() override {
    Loop->eraseFromParent();
  // ...

std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
// ...
  if (Preheader == LoopBB)
    Preheader = *std::next(LoopBB->pred_begin());
  // ...
  if (I != LoopBB->end() && isBDNZ(I->getOpcode())) {
    // ...
    Register LoopCountReg = LoopInst->getOperand(0).getReg();
    // ...
    return std::make_unique<PPCPipelinerLoopInfo>(LoopInst, &*I, LoopCount);
  // ...
  unsigned LOOPi = (Subtarget.isPPC64() ? PPC::MTCTR8loop : PPC::MTCTRloop);
  // ...
  for (auto &I : PreHeader.instrs())
    if (I.getOpcode() == LOOPi)
  int64_t OffsetA = 0, OffsetB = 0;
  // ...
  int LowOffset = std::min(OffsetA, OffsetB);
  int HighOffset = std::max(OffsetA, OffsetB);
  LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
  // ...
      LowOffset + (int)LowWidth.getValue() <= HighOffset)