#define DEBUG_TYPE "ppc-instr-info"
#define GET_INSTRMAP_INFO
#define GET_INSTRINFO_CTOR_DTOR
#include "PPCGenInstrInfo.inc"
55 "Number of spillvsrrc spilled to stack as vec");
57 "Number of spillvsrrc spilled to stack as gpr");
58STATISTIC(NumGPRtoVSRSpill,
"Number of gpr spills to spillvsrrc");
60 "Number of ISELs that depend on comparison of constants converted");
62 "Number of compare-immediate instructions fed by constants");
64 "Number of record-form rotates converted to record-form andi");
    cl::desc("Disable analysis for CTR loops"));
    cl::desc("Causes the backend to crash instead of generating a nop VSX copy"),
    cl::desc("Use the old (incorrect) instruction latency calculation"));
    cl::desc("register pressure factor for the transformations."));
    cl::desc("enable register pressure reduce in machine combiner pass."));
void PPCInstrInfo::anchor() {}

          STI.isPPC64() ? PPC::BLR8 : PPC::BLR),
      Subtarget(STI), RI(STI.getTargetMachine()) {}

  static_cast<const PPCSubtarget *>(STI)->getCPUDirective();
  static_cast<const PPCSubtarget *>(STI)->getInstrItineraryData();
                                       unsigned *PredCost) const {
  return PPCGenInstrInfo::getInstrLatency(ItinData, MI, PredCost);
  unsigned DefClass = MI.getDesc().getSchedClass();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
  std::optional<unsigned> Latency = PPCGenInstrInfo::getOperandLatency(
  if (!DefMI.getParent())
  if (Reg.isVirtual()) {
    IsRegCR = MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
              MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRBITRCRegClass);
    IsRegCR = PPC::CRRCRegClass.contains(Reg) ||
              PPC::CRBITRCRegClass.contains(Reg);
  if (UseMI.isBranch() && IsRegCR) {
#define InfoArrayIdxFMAInst 0
#define InfoArrayIdxFAddInst 1
#define InfoArrayIdxFMULInst 2
#define InfoArrayIdxAddOpIdx 3
#define InfoArrayIdxMULOpIdx 4
#define InfoArrayIdxFSubInst 5
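// Each row of the FMAOpIdxInfo table below lists, for one FMA opcode, the
// matching add, mul and sub opcodes plus the operand indices of the addend
// and the first multiplicand, in the order given by the InfoArrayIdx* macros
// above.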
    {PPC::XSMADDADP, PPC::XSADDDP, PPC::XSMULDP, 1, 2, PPC::XSSUBDP},
    {PPC::XSMADDASP, PPC::XSADDSP, PPC::XSMULSP, 1, 2, PPC::XSSUBSP},
    {PPC::XVMADDADP, PPC::XVADDDP, PPC::XVMULDP, 1, 2, PPC::XVSUBDP},
    {PPC::XVMADDASP, PPC::XVADDSP, PPC::XVMULSP, 1, 2, PPC::XVSUBSP},
    {PPC::FMADD, PPC::FADD, PPC::FMUL, 3, 1, PPC::FSUB},
    {PPC::FMADDS, PPC::FADDS, PPC::FMULS, 3, 1, PPC::FSUBS}};
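// getFMAOpIdxInfo() below returns the index of the row describing the given
// opcode, or -1 when the opcode is not one of the FMA instructions listed
// above (see the "Root must be a FMA instruction" assertion later on).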
int16_t PPCInstrInfo::getFMAOpIdxInfo(unsigned Opcode) const {
                                        bool DoRegPressureReduce) const {
  auto IsAllOpsVirtualReg = [](const MachineInstr &Instr) {
    for (const auto &MO : Instr.explicit_operands())
      if (!(MO.isReg() && MO.getReg().isVirtual()))
  auto IsReassociableAddOrSub = [&](const MachineInstr &Instr,
    if (Instr.getOpcode() !=
    if (!IsAllOpsVirtualReg(Instr))
        !MRI->hasOneNonDBGUse(Instr.getOperand(0).getReg()))
  auto IsReassociableFMA = [&](const MachineInstr &Instr, int16_t &AddOpIdx,
                               int16_t &MulOpIdx, bool IsLeaf) {
    int16_t Idx = getFMAOpIdxInfo(Instr.getOpcode());
    if (!IsAllOpsVirtualReg(Instr))
  int16_t AddOpIdx = -1;
  int16_t MulOpIdx = -1;
  bool IsUsedOnceL = false;
  bool IsUsedOnceR = false;
  auto IsRPReductionCandidate = [&]() {
    if (Opcode != PPC::XSMADDASP && Opcode != PPC::XSMADDADP)
    if (IsReassociableFMA(Root, AddOpIdx, MulOpIdx, true)) {
      assert((MulOpIdx >= 0) && "mul operand index not right!");
      Register MULRegL = TRI->lookThruSingleUseCopyChain(
      Register MULRegR = TRI->lookThruSingleUseCopyChain(
      if (!MULRegL && !MULRegR)
      if (MULRegL && !MULRegR) {
      } else if (!MULRegL && MULRegR) {
      MULInstrL = MRI->getVRegDef(MULRegL);
      MULInstrR = MRI->getVRegDef(MULRegR);
  if (DoRegPressureReduce && IsRPReductionCandidate()) {
    assert((MULInstrL && MULInstrR) &&
           "wrong register pressure reduction candidate!");
  if (!IsReassociableFMA(Root, AddOpIdx, MulOpIdx, false))
  assert((AddOpIdx >= 0) && "add operand index not right!");
  if (!IsReassociableFMA(*Prev, AddOpIdx, MulOpIdx, false))
  assert((AddOpIdx >= 0) && "add operand index not right!");
  if (IsReassociableFMA(*Leaf, AddOpIdx, MulOpIdx, true)) {
  assert(!InsInstrs.empty() && "Instructions set to be inserted is empty!");
562 assert(isa<llvm::ConstantFP>(
C) &&
"not a valid constant!");
565 APFloat F1((dyn_cast<ConstantFP>(
C))->getValueAPF());
567 Constant *NegC = ConstantFP::get(dyn_cast<ConstantFP>(
C)->getContext(), F1);
575 for (
auto *Inst : InsInstrs) {
577 assert(Operand.isReg() &&
"Invalid instruction in InsInstrs!");
578 if (Operand.getReg() == PPC::ZERO8) {
579 Placeholder = &Operand;
585 assert(Placeholder &&
"Placeholder does not exist!");
590 generateLoadForNewConst(ConstPoolIdx, &Root,
C->getType(), InsInstrs);
593 Placeholder->setReg(LoadNewConst);
  if (!(Subtarget.isPPC64() && Subtarget.hasP9Vector() &&
  auto GetMBBPressure =
      if (MI.isDebugValue() || MI.isDebugLabel())
      RPTracker.recede(RegOpers);
  unsigned VSSRCLimit = TRI->getRegPressureSetLimit(
  return GetMBBPressure(MBB)[PPC::RegisterPressureSets::VSSRC] >
  if (!I->hasOneMemOperand())
  return Op->isLoad() && Op->getPseudoValue() &&
Register PPCInstrInfo::generateLoadForNewConst(
         "Target not supported!\n");
  Register VReg1 = MRI->createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass);
  BuildMI(*MF, MI->getDebugLoc(), get(PPC::ADDIStocHA8), VReg1)
         "Only float and double are supported!");
    LoadOpcode = PPC::DFLOADf32;
    LoadOpcode = PPC::DFLOADf64;
  assert(I->mayLoad() && "Should be a load instruction.\n");
  for (auto MO : I->uses()) {
    if (Reg == 0 || !Reg.isVirtual())
  return (MCP->getConstants())[MO2.getIndex()].Val.ConstVal;
                                     bool DoRegPressureReduce) const {
                                                 DoRegPressureReduce);
    reassociateFMA(Root, Pattern, InsInstrs, DelInstrs, InstrIdxForVirtReg);
                                                DelInstrs, InstrIdxForVirtReg);
void PPCInstrInfo::reassociateFMA(
  MRI.constrainRegClass(RegC, RC);
  int16_t Idx = getFMAOpIdxInfo(FmaOp);
  assert(Idx >= 0 && "Root must be a FMA instruction");
  bool IsILPReassociate =
    Leaf = MRI.getVRegDef(MULReg);
    Leaf = MRI.getVRegDef(MULReg);
  if (IsILPReassociate)
    MRI.constrainRegClass(Reg, RC);
    KillFlag = Operand.isKill();
                             bool &MulOp1KillFlag, bool &MulOp2KillFlag,
                             bool &AddOpKillFlag) {
    GetOperandInfo(Instr.getOperand(FirstMulOpIdx), MulOp1, MulOp1KillFlag);
    GetOperandInfo(Instr.getOperand(FirstMulOpIdx + 1), MulOp2, MulOp2KillFlag);
    GetOperandInfo(Instr.getOperand(AddOpIdx), AddOp, AddOpKillFlag);
  Register RegM11, RegM12, RegX, RegY, RegM21, RegM22, RegM31, RegM32, RegA11,
  bool KillX = false, KillY = false, KillM11 = false, KillM12 = false,
       KillM21 = false, KillM22 = false, KillM31 = false, KillM32 = false,
       KillA11 = false, KillA21 = false, KillB = false;
  GetFMAInstrInfo(Root, RegM31, RegM32, RegB, KillM31, KillM32, KillB);
  if (IsILPReassociate)
    GetFMAInstrInfo(*Prev, RegM21, RegM22, RegA21, KillM21, KillM22, KillA21);
    GetFMAInstrInfo(*Leaf, RegM11, RegM12, RegA11, KillM11, KillM12, KillA11);
    GetOperandInfo(Leaf->getOperand(AddOpIdx), RegX, KillX);
    GetOperandInfo(Leaf->getOperand(1), RegX, KillX);
    GetOperandInfo(Leaf->getOperand(2), RegY, KillY);
    GetOperandInfo(Leaf->getOperand(1), RegX, KillX);
    GetOperandInfo(Leaf->getOperand(2), RegY, KillY);
  InstrIdxForVirtReg.insert(std::make_pair(NewVRA, 0));
  if (IsILPReassociate) {
    NewVRB = MRI.createVirtualRegister(RC);
    InstrIdxForVirtReg.insert(std::make_pair(NewVRB, 1));
    NewVRD = MRI.createVirtualRegister(RC);
    InstrIdxForVirtReg.insert(std::make_pair(NewVRD, 2));
                                Register RegMul2, bool KillRegMul2) {
    MI->getOperand(AddOpIdx).setReg(RegAdd);
    MI->getOperand(AddOpIdx).setIsKill(KillAdd);
    MI->getOperand(FirstMulOpIdx).setReg(RegMul1);
    MI->getOperand(FirstMulOpIdx).setIsKill(KillRegMul1);
    MI->getOperand(FirstMulOpIdx + 1).setReg(RegMul2);
    MI->getOperand(FirstMulOpIdx + 1).setIsKill(KillRegMul2);
    AdjustOperandOrder(MINewB, RegX, KillX, RegM21, KillM21, RegM22, KillM22);
    AdjustOperandOrder(MINewA, RegY, KillY, RegM31, KillM31, RegM32, KillM32);
    assert(NewVRD && "new FMA register not created!");
    AdjustOperandOrder(MINewB, RegX, KillX, RegM21, KillM21, RegM22, KillM22);
    AdjustOperandOrder(MINewD, NewVRA, true, RegM31, KillM31, RegM32,
    bool KillVarReg = false;
      KillVarReg = KillM31;
      KillVarReg = KillM32;
  if (!IsILPReassociate) {
         "Insertion instructions set should not be empty!");
  if (IsILPReassociate)
1050 unsigned &SubIdx)
const {
1051 switch (
MI.getOpcode()) {
1052 default:
return false;
1055 case PPC::EXTSW_32_64:
1056 SrcReg =
MI.getOperand(1).getReg();
1057 DstReg =
MI.getOperand(0).getReg();
1058 SubIdx = PPC::sub_32;
1064 int &FrameIndex)
const {
1068 if (
MI.getOperand(1).isImm() && !
MI.getOperand(1).getImm() &&
1069 MI.getOperand(2).isFI()) {
1070 FrameIndex =
MI.getOperand(2).getIndex();
1071 return MI.getOperand(0).getReg();
  switch (MI.getOpcode()) {
  case PPC::ADDIStocHA:
  case PPC::ADDIStocHA8:
  case PPC::ADDItocL8:
  case PPC::LOAD_STACK_GUARD:
  case PPC::PPCLdFixedAddr:
  case PPC::XXLXORspz:
  case PPC::XXLXORdpz:
  case PPC::XXLEQVOnes:
  case PPC::XXSPLTI32DX:
  case PPC::XXSPLTIDP:
  case PPC::V_SETALLONESB:
  case PPC::V_SETALLONESH:
  case PPC::V_SETALLONES:
  case PPC::XXSETACCZ:
  case PPC::XXSETACCZW:
                                          int &FrameIndex) const {
    if (MI.getOperand(1).isImm() && !MI.getOperand(1).getImm() &&
        MI.getOperand(2).isFI()) {
      FrameIndex = MI.getOperand(2).getIndex();
      return MI.getOperand(0).getReg();
                                                   unsigned OpIdx2) const {
  if (MI.getOpcode() != PPC::RLWIMI && MI.getOpcode() != PPC::RLWIMI_rec)
  if (MI.getOperand(3).getImm() != 0)
  assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
         "Only the operands 1 and 2 can be swapped in RLWIMI/RLWIMI_rec.");
  unsigned SubReg1 = MI.getOperand(1).getSubReg();
  unsigned SubReg2 = MI.getOperand(2).getSubReg();
  bool Reg1IsKill = MI.getOperand(1).isKill();
  bool Reg2IsKill = MI.getOperand(2).isKill();
  bool ChangeReg0 = false;
         "Expecting a two-address instruction!");
  assert(MI.getOperand(0).getSubReg() == SubReg1 && "Tied subreg mismatch");
  unsigned MB = MI.getOperand(4).getImm();
  unsigned ME = MI.getOperand(5).getImm();
  if (MB == 0 && ME == 31)
    Register Reg0 = ChangeReg0 ? Reg2 : MI.getOperand(0).getReg();
    bool Reg0IsDead = MI.getOperand(0).isDead();
    return BuildMI(MF, MI.getDebugLoc(), MI.getDesc())
  MI.getOperand(0).setReg(Reg2);
  MI.getOperand(0).setSubReg(SubReg2);
  MI.getOperand(2).setReg(Reg1);
  MI.getOperand(1).setReg(Reg2);
  MI.getOperand(2).setSubReg(SubReg1);
  MI.getOperand(1).setSubReg(SubReg2);
  MI.getOperand(2).setIsKill(Reg1IsKill);
  MI.getOperand(1).setIsKill(Reg2IsKill);
  MI.getOperand(4).setImm((ME + 1) & 31);
  MI.getOperand(5).setImm((MB - 1) & 31);
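  // Note on the mask update above: with SH == 0, swapping the two register
  // operands of rlwimi[.] is compensated by complementing the mask, i.e. the
  // new mask runs from ME + 1 to MB - 1 (both taken modulo 32).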
1216 unsigned &SrcOpIdx1,
1217 unsigned &SrcOpIdx2)
const {
1228 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
1238 default: Opcode = PPC::NOP;
break;
                                 bool AllowModify) const {
  bool isPPC64 = Subtarget.isPPC64();
  if (!isUnpredicatedTerminator(*I))
  if (I->getOpcode() == PPC::B &&
      I->eraseFromParent();
  if (I == MBB.end() || !isUnpredicatedTerminator(*I))
  if (I == MBB.begin() || !isUnpredicatedTerminator(*--I)) {
  } else if (LastInst.getOpcode() == PPC::BCC) {
  } else if (LastInst.getOpcode() == PPC::BC) {
  } else if (LastInst.getOpcode() == PPC::BCn) {
  } else if (LastInst.getOpcode() == PPC::BDNZ8 ||
  } else if (LastInst.getOpcode() == PPC::BDZ8 ||
  if (I != MBB.begin() && isUnpredicatedTerminator(*--I))
  if (SecondLastInst.getOpcode() == PPC::BCC &&
  } else if (SecondLastInst.getOpcode() == PPC::BC &&
  } else if (SecondLastInst.getOpcode() == PPC::BCn &&
  } else if ((SecondLastInst.getOpcode() == PPC::BDNZ8 ||
              SecondLastInst.getOpcode() == PPC::BDNZ) &&
  } else if ((SecondLastInst.getOpcode() == PPC::BDZ8 ||
              SecondLastInst.getOpcode() == PPC::BDZ) &&
    I->eraseFromParent();
                                    int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");
  if (I->getOpcode() != PPC::B && I->getOpcode() != PPC::BCC &&
      I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
      I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
      I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
  I->eraseFromParent();
  if (I->getOpcode() != PPC::BCC &&
      I->getOpcode() != PPC::BC && I->getOpcode() != PPC::BCn &&
      I->getOpcode() != PPC::BDNZ8 && I->getOpcode() != PPC::BDNZ &&
      I->getOpcode() != PPC::BDZ8 && I->getOpcode() != PPC::BDZ)
  I->eraseFromParent();
                                    int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
         "PPC branch conditions have two components!");
  assert(!BytesAdded && "code size not handled");
  bool isPPC64 = Subtarget.isPPC64();
                       (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                       (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
                       (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
                       (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).addMBB(TBB);
                                   Register FalseReg, int &CondCycles,
                                   int &TrueCycles, int &FalseCycles) const {
  if (!Subtarget.hasISEL())
  if (Cond.size() != 2)
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!PPC::GPRCRegClass.hasSubClassEq(RC) &&
      !PPC::GPRC_NOR0RegClass.hasSubClassEq(RC) &&
      !PPC::G8RCRegClass.hasSubClassEq(RC) &&
      !PPC::G8RC_NOX0RegClass.hasSubClassEq(RC))
1572 "PPC branch conditions have two components!");
1577 RI.getCommonSubClass(
MRI.getRegClass(TrueReg),
MRI.getRegClass(FalseReg));
1578 assert(RC &&
"TrueReg and FalseReg must have overlapping register classes");
1580 bool Is64Bit = PPC::G8RCRegClass.hasSubClassEq(RC) ||
1581 PPC::G8RC_NOX0RegClass.hasSubClassEq(RC);
1583 PPC::GPRCRegClass.hasSubClassEq(RC) ||
1584 PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) &&
1585 "isel is for regular integer GPRs only");
1587 unsigned OpCode = Is64Bit ? PPC::ISEL8 : PPC::ISEL;
1590 unsigned SubIdx = 0;
1591 bool SwapOps =
false;
1592 switch (SelectPred) {
1596 SubIdx = PPC::sub_eq; SwapOps =
false;
break;
1600 SubIdx = PPC::sub_eq; SwapOps =
true;
break;
1604 SubIdx = PPC::sub_lt; SwapOps =
false;
break;
1608 SubIdx = PPC::sub_lt; SwapOps =
true;
break;
1612 SubIdx = PPC::sub_gt; SwapOps =
false;
break;
1616 SubIdx = PPC::sub_gt; SwapOps =
true;
break;
1620 SubIdx = PPC::sub_un; SwapOps =
false;
break;
1624 SubIdx = PPC::sub_un; SwapOps =
true;
break;
1629 Register FirstReg = SwapOps ? FalseReg : TrueReg,
1630 SecondReg = SwapOps ? TrueReg : FalseReg;
1635 if (
MRI.getRegClass(FirstReg)->contains(PPC::R0) ||
1636 MRI.getRegClass(FirstReg)->contains(PPC::X0)) {
1638 MRI.getRegClass(FirstReg)->contains(PPC::X0) ?
1639 &PPC::G8RC_NOX0RegClass : &PPC::GPRC_NOR0RegClass;
1641 FirstReg =
MRI.createVirtualRegister(FirstRC);
  if (CRBit == PPC::CR0LT || CRBit == PPC::CR1LT ||
      CRBit == PPC::CR2LT || CRBit == PPC::CR3LT ||
      CRBit == PPC::CR4LT || CRBit == PPC::CR5LT ||
      CRBit == PPC::CR6LT || CRBit == PPC::CR7LT)
  if (CRBit == PPC::CR0GT || CRBit == PPC::CR1GT ||
      CRBit == PPC::CR2GT || CRBit == PPC::CR3GT ||
      CRBit == PPC::CR4GT || CRBit == PPC::CR5GT ||
      CRBit == PPC::CR6GT || CRBit == PPC::CR7GT)
  if (CRBit == PPC::CR0EQ || CRBit == PPC::CR1EQ ||
      CRBit == PPC::CR2EQ || CRBit == PPC::CR3EQ ||
      CRBit == PPC::CR4EQ || CRBit == PPC::CR5EQ ||
      CRBit == PPC::CR6EQ || CRBit == PPC::CR7EQ)
  if (CRBit == PPC::CR0UN || CRBit == PPC::CR1UN ||
      CRBit == PPC::CR2UN || CRBit == PPC::CR3UN ||
      CRBit == PPC::CR4UN || CRBit == PPC::CR5UN ||
      CRBit == PPC::CR6UN || CRBit == PPC::CR7UN)
  assert(Ret != 4 && "Invalid CR bit register");
  if (PPC::F8RCRegClass.contains(DestReg) &&
      PPC::VSRCRegClass.contains(SrcReg)) {
        TRI->getMatchingSuperReg(DestReg, PPC::sub_64, &PPC::VSRCRegClass);
  } else if (PPC::F8RCRegClass.contains(SrcReg) &&
             PPC::VSRCRegClass.contains(DestReg)) {
        TRI->getMatchingSuperReg(SrcReg, PPC::sub_64, &PPC::VSRCRegClass);
  if (PPC::CRBITRCRegClass.contains(SrcReg) &&
      PPC::GPRCRegClass.contains(DestReg)) {
  } else if (PPC::CRRCRegClass.contains(SrcReg) &&
             (PPC::G8RCRegClass.contains(DestReg) ||
              PPC::GPRCRegClass.contains(DestReg))) {
    bool Is64Bit = PPC::G8RCRegClass.contains(DestReg);
    unsigned MvCode = Is64Bit ? PPC::MFOCRF8 : PPC::MFOCRF;
    unsigned ShCode = Is64Bit ? PPC::RLWINM8 : PPC::RLWINM;
    unsigned CRNum = TRI->getEncodingValue(SrcReg);
  } else if (PPC::G8RCRegClass.contains(SrcReg) &&
             PPC::VSFRCRegClass.contains(DestReg)) {
    assert(Subtarget.hasDirectMove() &&
           "Subtarget doesn't support directmove, don't know how to copy.");
  } else if (PPC::VSFRCRegClass.contains(SrcReg) &&
             PPC::G8RCRegClass.contains(DestReg)) {
    assert(Subtarget.hasDirectMove() &&
           "Subtarget doesn't support directmove, don't know how to copy.");
  } else if (PPC::SPERCRegClass.contains(SrcReg) &&
             PPC::GPRCRegClass.contains(DestReg)) {
  } else if (PPC::GPRCRegClass.contains(SrcReg) &&
             PPC::SPERCRegClass.contains(DestReg)) {
  if (PPC::GPRCRegClass.contains(DestReg, SrcReg))
  else if (PPC::G8RCRegClass.contains(DestReg, SrcReg))
  else if (PPC::F4RCRegClass.contains(DestReg, SrcReg))
  else if (PPC::CRRCRegClass.contains(DestReg, SrcReg))
  else if (PPC::VRRCRegClass.contains(DestReg, SrcReg))
  else if (PPC::VSRCRegClass.contains(DestReg, SrcReg))
  else if (PPC::VSFRCRegClass.contains(DestReg, SrcReg) ||
           PPC::VSSRCRegClass.contains(DestReg, SrcReg))
    Opc = (Subtarget.hasP9Vector()) ? PPC::XSCPSGNDP : PPC::XXLORf;
  else if (Subtarget.pairedVectorMemops() &&
           PPC::VSRpRCRegClass.contains(DestReg, SrcReg)) {
    if (SrcReg > PPC::VSRp15)
      SrcReg = PPC::V0 + (SrcReg - PPC::VSRp16) * 2;
      SrcReg = PPC::VSL0 + (SrcReg - PPC::VSRp0) * 2;
    if (DestReg > PPC::VSRp15)
      DestReg = PPC::V0 + (DestReg - PPC::VSRp16) * 2;
      DestReg = PPC::VSL0 + (DestReg - PPC::VSRp0) * 2;
  else if (PPC::CRBITRCRegClass.contains(DestReg, SrcReg))
  else if (PPC::SPERCRegClass.contains(DestReg, SrcReg))
  else if ((PPC::ACCRCRegClass.contains(DestReg) ||
            PPC::UACCRCRegClass.contains(DestReg)) &&
           (PPC::ACCRCRegClass.contains(SrcReg) ||
            PPC::UACCRCRegClass.contains(SrcReg))) {
    bool DestPrimed = PPC::ACCRCRegClass.contains(DestReg);
    bool SrcPrimed = PPC::ACCRCRegClass.contains(SrcReg);
        PPC::VSL0 + (SrcReg - (SrcPrimed ? PPC::ACC0 : PPC::UACC0)) * 4;
        PPC::VSL0 + (DestReg - (DestPrimed ? PPC::ACC0 : PPC::UACC0)) * 4;
    if (SrcPrimed && !KillSrc)
  } else if (PPC::G8pRCRegClass.contains(DestReg) &&
             PPC::G8pRCRegClass.contains(SrcReg)) {
    unsigned DestRegIdx = DestReg - PPC::G8p0;
    MCRegister DestRegSub0 = PPC::X0 + 2 * DestRegIdx;
    MCRegister DestRegSub1 = PPC::X0 + 2 * DestRegIdx + 1;
    unsigned SrcRegIdx = SrcReg - PPC::G8p0;
    MCRegister SrcRegSub0 = PPC::X0 + 2 * SrcRegIdx;
    MCRegister SrcRegSub1 = PPC::X0 + 2 * SrcRegIdx + 1;
  if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
      PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {
  } else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
             PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {
  } else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::SPERCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::SPILLTOVSRRCRegClass.hasSubClassEq(RC)) {
  } else if (PPC::ACCRCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.pairedVectorMemops() &&
           "Register unexpected when paired memops are disabled.");
  } else if (PPC::UACCRCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.pairedVectorMemops() &&
           "Register unexpected when paired memops are disabled.");
  } else if (PPC::WACCRCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.pairedVectorMemops() &&
           "Register unexpected when paired memops are disabled.");
  } else if (PPC::VSRpRCRegClass.hasSubClassEq(RC)) {
    assert(Subtarget.pairedVectorMemops() &&
           "Register unexpected when paired memops are disabled.");
  } else if (PPC::G8pRCRegClass.hasSubClassEq(RC)) {
  return OpcodesForSpill[getSpillIndex(RC)];
  return OpcodesForSpill[getSpillIndex(RC)];
void PPCInstrInfo::StoreRegToStackSlot(
  if (PPC::CRRCRegClass.hasSubClassEq(RC) ||
      PPC::CRBITRCRegClass.hasSubClassEq(RC))
  StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs);
  NewMIs.back()->addMemOperand(MF, MMO);
                                        unsigned DestReg, int FrameIdx,
  LoadRegFromStackSlot(MF, DL, DestReg, FrameIdx, RC, NewMIs);
  NewMIs.back()->addMemOperand(MF, MMO);
  assert(Cond.size() == 2 && "Invalid PPC branch opcode!");
  Cond[0].setImm(Cond[0].getImm() == 0 ? 1 : 0);
  unsigned DefOpc = DefMI.getOpcode();
  if (DefOpc != PPC::LI && DefOpc != PPC::LI8)
  if (!DefMI.getOperand(1).isImm())
  if (DefMI.getOperand(1).getImm() != 0)
  for (UseIdx = 0; UseIdx < UseMI.getNumOperands(); ++UseIdx)
    if (UseMI.getOperand(UseIdx).isReg() &&
  assert(UseIdx < UseMI.getNumOperands() && "Cannot find Reg in UseMI");
  if (UseInfo->RegClass != PPC::GPRC_NOR0RegClassID &&
      UseInfo->RegClass != PPC::G8RC_NOX0RegClassID)
    bool isPPC64 = Subtarget.isPPC64();
    ZeroReg = isPPC64 ? PPC::ZERO8 : PPC::ZERO;
    ZeroReg = UseInfo->RegClass == PPC::G8RC_NOX0RegClassID ?
              PPC::ZERO8 : PPC::ZERO;
  UseMI.getOperand(UseIdx).setReg(ZeroReg);
  if (MRI->use_nodbg_empty(Reg))
    DefMI.eraseFromParent();
  if (MI.definesRegister(PPC::CTR, nullptr) ||
      MI.definesRegister(PPC::CTR8, nullptr))
                                 unsigned NumT, unsigned ExtraT,
                                 unsigned NumF, unsigned ExtraF,
  switch (MI.getOpcode()) {
  unsigned OpC = MI.getOpcode();
  if (OpC == PPC::BLR || OpC == PPC::BLR8) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
      bool isPPC64 = Subtarget.isPPC64();
      MI.setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR)
                                      : (isPPC64 ? PPC::BDZLR8 : PPC::BDZLR)));
      MI.setDesc(get(PPC::BCLR));
      MI.setDesc(get(PPC::BCLRn));
      MI.setDesc(get(PPC::BCCLR));
          .addImm(Pred[0].getImm())
  } else if (OpC == PPC::B) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR) {
      bool isPPC64 = Subtarget.isPPC64();
      MI.setDesc(get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ)
                                      : (isPPC64 ? PPC::BDZ8 : PPC::BDZ)));
      MI.removeOperand(0);
      MI.setDesc(get(PPC::BC));
      MI.removeOperand(0);
      MI.setDesc(get(PPC::BCn));
      MI.removeOperand(0);
      MI.setDesc(get(PPC::BCC));
          .addImm(Pred[0].getImm())
  } else if (OpC == PPC::BCTR || OpC == PPC::BCTR8 || OpC == PPC::BCTRL ||
             OpC == PPC::BCTRL8 || OpC == PPC::BCTRL_RM ||
             OpC == PPC::BCTRL8_RM) {
    if (Pred[1].getReg() == PPC::CTR8 || Pred[1].getReg() == PPC::CTR)
    bool setLR = OpC == PPC::BCTRL || OpC == PPC::BCTRL8 ||
                 OpC == PPC::BCTRL_RM || OpC == PPC::BCTRL8_RM;
    bool isPPC64 = Subtarget.isPPC64();
      MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8)
                             : (setLR ? PPC::BCCTRL : PPC::BCCTR)));
      MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCTRL8n : PPC::BCCTR8n)
                             : (setLR ? PPC::BCCTRLn : PPC::BCCTRn)));
      MI.setDesc(get(isPPC64 ? (setLR ? PPC::BCCCTRL8 : PPC::BCCCTR8)
                             : (setLR ? PPC::BCCCTRL : PPC::BCCCTR)));
          .addImm(Pred[0].getImm())
    if (OpC == PPC::BCTRL_RM || OpC == PPC::BCTRL8_RM)
  assert(Pred1.size() == 2 && "Invalid PPC first predicate");
  assert(Pred2.size() == 2 && "Invalid PPC second predicate");
  if (Pred1[1].getReg() == PPC::CTR8 || Pred1[1].getReg() == PPC::CTR)
  if (Pred2[1].getReg() == PPC::CTR8 || Pred2[1].getReg() == PPC::CTR)
                                     std::vector<MachineOperand> &Pred,
                                     bool SkipDead) const {
    { &PPC::CRRCRegClass, &PPC::CRBITRCRegClass,
      &PPC::CTRRCRegClass, &PPC::CTRRC8RegClass };
  for (unsigned c = 0; c < std::size(RCs) && !Found; ++c) {
      if (MO.isDef() && RC->contains(MO.getReg())) {
      } else if (MO.isRegMask()) {
          if (MO.clobbersPhysReg(R)) {
2349 int64_t &
Value)
const {
2350 unsigned Opc =
MI.getOpcode();
2353 default:
return false;
2358 SrcReg =
MI.getOperand(1).getReg();
2360 Value =
MI.getOperand(2).getImm();
2369 SrcReg =
MI.getOperand(1).getReg();
2370 SrcReg2 =
MI.getOperand(2).getReg();
  if (OpC == PPC::FCMPUS || OpC == PPC::FCMPUD)
  bool isPPC64 = Subtarget.isPPC64();
  bool is32BitSignedCompare = OpC == PPC::CMPWI || OpC == PPC::CMPW;
  bool is32BitUnsignedCompare = OpC == PPC::CMPLWI || OpC == PPC::CMPLW;
  bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD;
  if (!MI) return false;
  bool equalityOnly = false;
  if (is32BitSignedCompare) {
  } else if (is32BitUnsignedCompare) {
    equalityOnly = true;
    equalityOnly = is64BitUnsignedCompare;
    equalityOnly = is32BitUnsignedCompare;
       I = MRI->use_instr_begin(CRReg), IE = MRI->use_instr_end();
      if (SubIdx != PPC::sub_eq)
      bool FoundUse = false;
           J = MRI->use_instr_begin(CRReg), JE = MRI->use_instr_end();
  else if (Value != 0) {
  if (equalityOnly || !MRI->hasOneUse(CRReg))
  int16_t Immed = (int16_t)Value;
  for (; I != E && !noSub; --I) {
    unsigned IOpC = Instr.getOpcode();
    if (&*I != &CmpInstr && (Instr.modifiesRegister(PPC::CR0, TRI) ||
                             Instr.readsRegister(PPC::CR0, TRI)))
    if ((OpC == PPC::CMPW || OpC == PPC::CMPLW ||
         OpC == PPC::CMPD || OpC == PPC::CMPLD) &&
        (IOpC == PPC::SUBF || IOpC == PPC::SUBF8) &&
        ((Instr.getOperand(1).getReg() == SrcReg &&
          Instr.getOperand(2).getReg() == SrcReg2) ||
         (Instr.getOperand(1).getReg() == SrcReg2 &&
          Instr.getOperand(2).getReg() == SrcReg))) {
  int MIOpC = MI->getOpcode();
  if (MIOpC == PPC::ANDI_rec || MIOpC == PPC::ANDI8_rec ||
      MIOpC == PPC::ANDIS_rec || MIOpC == PPC::ANDIS8_rec)
    NewOpC = PPC::getRecordFormOpcode(MIOpC);
  if (!equalityOnly && (NewOpC == PPC::SUBF_rec || NewOpC == PPC::SUBF8_rec) &&
  bool ShouldSwap = false;
  if (Sub && Value == 0) {
    ShouldSwap = !ShouldSwap;
       I = MRI->use_instr_begin(CRReg), IE = MRI->use_instr_end();
             "Invalid predicate for equality-only optimization");
      assert((!equalityOnly || NewSubReg == PPC::sub_eq) &&
             "Invalid CR bit for equality-only optimization");
      if (NewSubReg == PPC::sub_lt)
        NewSubReg = PPC::sub_gt;
      else if (NewSubReg == PPC::sub_gt)
        NewSubReg = PPC::sub_lt;
         "Non-zero immediate support and ShouldSwap "
         "may conflict in updating predicate");
  BuildMI(*MI->getParent(), std::next(MII), MI->getDebugLoc(),
          get(TargetOpcode::COPY), CRReg)
  MI->clearRegisterDeads(PPC::CR0);
  if (MIOpC != NewOpC) {
    if (MIOpC == PPC::RLWINM || MIOpC == PPC::RLWINM8) {
      Register GPRRes = MI->getOperand(0).getReg();
      int64_t SH = MI->getOperand(2).getImm();
      int64_t MB = MI->getOperand(3).getImm();
      int64_t ME = MI->getOperand(4).getImm();
      bool MBInLoHWord = MB >= 16;
      bool MEInLoHWord = ME >= 16;
      if (MB <= ME && MBInLoHWord == MEInLoHWord && SH == 0) {
        Mask = ((1LLU << (32 - MB)) - 1) & ~((1LLU << (31 - ME)) - 1);
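        // For example, SH = 0, MB = 24, ME = 31 gives Mask = 0xFF, so the
        // rotate-and-mask degenerates to "andi. rA, rS, 255" below.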
2700 Mask >>= MBInLoHWord ? 0 : 16;
2701 NewOpC = MIOpC == PPC::RLWINM
2702 ? (MBInLoHWord ? PPC::ANDI_rec : PPC::ANDIS_rec)
2703 : (MBInLoHWord ? PPC::ANDI8_rec : PPC::ANDIS8_rec);
2704 }
else if (
MRI->use_empty(GPRRes) && (ME == 31) &&
2705 (ME - MB + 1 == SH) && (MB >= 16)) {
2709 Mask = ((1LLU << 32) - 1) & ~((1LLU << (32 - SH)) - 1);
2711 NewOpC = MIOpC == PPC::RLWINM ? PPC::ANDIS_rec : PPC::ANDIS8_rec;
2714 if (Mask != ~0LLU) {
2715 MI->removeOperand(4);
2716 MI->removeOperand(3);
2717 MI->getOperand(2).setImm(Mask);
2718 NumRcRotatesConvertedToRcAnd++;
    } else if (MIOpC == PPC::RLDICL && MI->getOperand(2).getImm() == 0) {
      int64_t MB = MI->getOperand(3).getImm();
      uint64_t Mask = (1LLU << (63 - MB + 1)) - 1;
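      // With a shift of zero, rldicl is a pure mask of the low (64 - MB) bits,
      // so when that mask fits in 16 bits it can be expressed as andi. instead.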
      NewOpC = PPC::ANDI8_rec;
      MI->removeOperand(3);
      MI->getOperand(2).setImm(Mask);
      NumRcRotatesConvertedToRcAnd++;
2732 MI->setDesc(NewDesc);
2735 if (!
MI->definesRegister(ImpDef,
nullptr)) {
2736 MI->addOperand(*
MI->getParent()->getParent(),
2741 if (!
MI->readsRegister(ImpUse,
nullptr)) {
2742 MI->addOperand(*
MI->getParent()->getParent(),
2747 assert(
MI->definesRegister(PPC::CR0,
nullptr) &&
2748 "Record-form instruction does not define cr0?");
2753 for (
unsigned i = 0, e = PredsToUpdate.
size(); i < e; i++)
2754 PredsToUpdate[i].first->setImm(PredsToUpdate[i].second);
2756 for (
unsigned i = 0, e = SubRegsToUpdate.
size(); i < e; i++)
2757 SubRegsToUpdate[i].first->setSubReg(SubRegsToUpdate[i].second);
  int64_t CmpMask, CmpValue;
  if (CmpValue || !CmpMask || SrcReg2)
  if (Opc == PPC::CMPLWI || Opc == PPC::CMPLDI)
  if (Subtarget.isPPC64() && Opc == PPC::CMPWI)
  bool SrcRegHasOtherUse = false;
  if (CRReg != PPC::CR0)
  bool SeenUseOfCRReg = false;
  bool IsCRRegKilled = false;
  if (!isRegElgibleForForwarding(RegMO, *SrcMI, CmpMI, false, IsCRRegKilled,
  int NewOpC = PPC::getRecordFormOpcode(SrcMIOpc);
         "Record-form instruction does not define cr0?");
  OffsetIsScalable = false;
  case PPC::DFSTOREf64:
    return FirstOpc == SecondOpc;
    return SecondOpc == PPC::STW || SecondOpc == PPC::STW8;
    int64_t OpOffset2, bool OffsetIsScalable2, unsigned ClusterSize,
    unsigned NumBytes) const {
         "Only base registers and frame indices are supported.");
  if (ClusterSize > 2)
  unsigned FirstOpc = FirstLdSt.getOpcode();
  unsigned SecondOpc = SecondLdSt.getOpcode();
  int64_t Offset1 = 0, Offset2 = 0;
  assert(Base1 == &BaseOp1 && Base2 == &BaseOp2 &&
         "getMemOperandWithOffsetWidth returned incorrect base op");
  assert(Offset1 <= Offset2 && "Caller should have ordered offsets.");
  return Offset1 + (int64_t)Width1.getValue() == Offset2;
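// The clustering check above succeeds only when the second access starts
// exactly where the first ends, i.e. Offset1 + Width1 == Offset2 for offsets
// the caller has already ordered.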
  unsigned Opcode = MI.getOpcode();
  if (Opcode == PPC::INLINEASM || Opcode == PPC::INLINEASM_BR) {
    const char *AsmStr = MI.getOperand(0).getSymbolName();
  } else if (Opcode == TargetOpcode::STACKMAP) {
  } else if (Opcode == TargetOpcode::PATCHPOINT) {
    return get(Opcode).getSize();
2966std::pair<unsigned, unsigned>
2969 return std::make_pair(TF, 0u);
2974 using namespace PPCII;
2975 static const std::pair<unsigned, const char *> TargetFlags[] = {
2976 {MO_PLT,
"ppc-plt"},
2977 {MO_PIC_FLAG,
"ppc-pic"},
2978 {MO_PCREL_FLAG,
"ppc-pcrel"},
2979 {MO_GOT_FLAG,
"ppc-got"},
2980 {MO_PCREL_OPT_FLAG,
"ppc-opt-pcrel"},
2981 {MO_TLSGD_FLAG,
"ppc-tlsgd"},
2982 {MO_TPREL_FLAG,
"ppc-tprel"},
2983 {MO_TLSLDM_FLAG,
"ppc-tlsldm"},
2984 {MO_TLSLD_FLAG,
"ppc-tlsld"},
2985 {MO_TLSGDM_FLAG,
"ppc-tlsgdm"},
2986 {MO_GOT_TLSGD_PCREL_FLAG,
"ppc-got-tlsgd-pcrel"},
2987 {MO_GOT_TLSLD_PCREL_FLAG,
"ppc-got-tlsld-pcrel"},
2988 {MO_GOT_TPREL_PCREL_FLAG,
"ppc-got-tprel-pcrel"},
2991 {MO_TPREL_LO,
"ppc-tprel-lo"},
2992 {MO_TPREL_HA,
"ppc-tprel-ha"},
2993 {MO_DTPREL_LO,
"ppc-dtprel-lo"},
2994 {MO_TLSLD_LO,
"ppc-tlsld-lo"},
2995 {MO_TOC_LO,
"ppc-toc-lo"},
2996 {MO_TLS,
"ppc-tls"},
2997 {MO_PIC_HA_FLAG,
"ppc-ha-pic"},
2998 {MO_PIC_LO_FLAG,
"ppc-lo-pic"},
2999 {MO_TPREL_PCREL_FLAG,
"ppc-tprel-pcrel"},
3000 {MO_TLS_PCREL_FLAG,
"ppc-tls-pcrel"},
3001 {MO_GOT_PCREL_FLAG,
"ppc-got-pcrel"},
  unsigned UpperOpcode, LowerOpcode;
  switch (MI.getOpcode()) {
  case PPC::DFLOADf32:
    UpperOpcode = PPC::LXSSP;
    LowerOpcode = PPC::LFS;
  case PPC::DFLOADf64:
    UpperOpcode = PPC::LXSD;
    LowerOpcode = PPC::LFD;
  case PPC::DFSTOREf32:
    UpperOpcode = PPC::STXSSP;
    LowerOpcode = PPC::STFS;
  case PPC::DFSTOREf64:
    UpperOpcode = PPC::STXSD;
    LowerOpcode = PPC::STFD;
  case PPC::XFLOADf32:
    UpperOpcode = PPC::LXSSPX;
    LowerOpcode = PPC::LFSX;
  case PPC::XFLOADf64:
    UpperOpcode = PPC::LXSDX;
    LowerOpcode = PPC::LFDX;
  case PPC::XFSTOREf32:
    UpperOpcode = PPC::STXSSPX;
    LowerOpcode = PPC::STFSX;
  case PPC::XFSTOREf64:
    UpperOpcode = PPC::STXSDX;
    LowerOpcode = PPC::STFDX;
    UpperOpcode = PPC::LXSIWAX;
    LowerOpcode = PPC::LFIWAX;
    UpperOpcode = PPC::LXSIWZX;
    LowerOpcode = PPC::LFIWZX;
    UpperOpcode = PPC::STXSIWX;
    LowerOpcode = PPC::STFIWX;
  Register TargetReg = MI.getOperand(0).getReg();
  if ((TargetReg >= PPC::F0 && TargetReg <= PPC::F31) ||
      (TargetReg >= PPC::VSL0 && TargetReg <= PPC::VSL31))
    Opcode = LowerOpcode;
    Opcode = UpperOpcode;
  MI.setDesc(get(Opcode));
3079 auto &
MBB = *
MI.getParent();
3080 auto DL =
MI.getDebugLoc();
3082 switch (
MI.getOpcode()) {
3083 case PPC::BUILD_UACC: {
3086 if (ACC - PPC::ACC0 != UACC - PPC::UACC0) {
3087 MCRegister SrcVSR = PPC::VSL0 + (UACC - PPC::UACC0) * 4;
3088 MCRegister DstVSR = PPC::VSL0 + (ACC - PPC::ACC0) * 4;
3092 for (
int VecNo = 0; VecNo < 4; VecNo++)
3094 .addReg(SrcVSR + VecNo)
3102 case PPC::KILL_PAIR: {
3103 MI.setDesc(
get(PPC::UNENCODED_NOP));
3104 MI.removeOperand(1);
3105 MI.removeOperand(0);
3108 case TargetOpcode::LOAD_STACK_GUARD: {
3110 "Only Linux target is expected to contain LOAD_STACK_GUARD");
3111 const int64_t
Offset = Subtarget.
isPPC64() ? -0x7010 : -0x7008;
3112 const unsigned Reg = Subtarget.
isPPC64() ? PPC::X13 : PPC::R2;
3113 MI.setDesc(
get(Subtarget.
isPPC64() ? PPC::LD : PPC::LWZ));
3119 case PPC::PPCLdFixedAddr: {
3121 "Only targets with Glibc expected to contain PPCLdFixedAddr");
3123 const unsigned Reg = Subtarget.
isPPC64() ? PPC::X13 : PPC::R2;
3124 MI.setDesc(
get(PPC::LWZ));
3126#undef PPC_LNX_FEATURE
3128#define PPC_LNX_DEFINE_OFFSETS
3129#include "llvm/TargetParser/PPCTargetParser.def"
3131 bool Is64 = Subtarget.
isPPC64();
3132 if (FAType == PPC_FAWORD_HWCAP) {
3134 Offset = Is64 ? PPC_HWCAP_OFFSET_LE64 : PPC_HWCAP_OFFSET_LE32;
3136 Offset = Is64 ? PPC_HWCAP_OFFSET_BE64 : PPC_HWCAP_OFFSET_BE32;
3137 }
else if (FAType == PPC_FAWORD_HWCAP2) {
3139 Offset = Is64 ? PPC_HWCAP2_OFFSET_LE64 : PPC_HWCAP2_OFFSET_LE32;
3141 Offset = Is64 ? PPC_HWCAP2_OFFSET_BE64 : PPC_HWCAP2_OFFSET_BE32;
3142 }
else if (FAType == PPC_FAWORD_CPUID) {
3144 Offset = Is64 ? PPC_CPUID_OFFSET_LE64 : PPC_CPUID_OFFSET_LE32;
3146 Offset = Is64 ? PPC_CPUID_OFFSET_BE64 : PPC_CPUID_OFFSET_BE32;
3148 assert(
Offset &&
"Do not know the offset for this fixed addr load");
3149 MI.removeOperand(1);
3155#define PPC_TGT_PARSER_UNDEF_MACROS
3156#include "llvm/TargetParser/PPCTargetParser.def"
3157#undef PPC_TGT_PARSER_UNDEF_MACROS
3159 case PPC::DFLOADf32:
3160 case PPC::DFLOADf64:
3161 case PPC::DFSTOREf32:
3162 case PPC::DFSTOREf64: {
3163 assert(Subtarget.hasP9Vector() &&
3164 "Invalid D-Form Pseudo-ops on Pre-P9 target.");
3167 "D-form op must have register and immediate operands");
3170 case PPC::XFLOADf32:
3171 case PPC::XFSTOREf32:
3175 assert(Subtarget.hasP8Vector() &&
3176 "Invalid X-Form Pseudo-ops on Pre-P8 target.");
3177 assert(
MI.getOperand(2).isReg() &&
MI.getOperand(1).isReg() &&
3178 "X-form op must have register and register operands");
3181 case PPC::XFLOADf64:
3182 case PPC::XFSTOREf64: {
3183 assert(Subtarget.hasVSX() &&
3184 "Invalid X-Form Pseudo-ops on target that has no VSX.");
3185 assert(
MI.getOperand(2).isReg() &&
MI.getOperand(1).isReg() &&
3186 "X-form op must have register and register operands");
3189 case PPC::SPILLTOVSR_LD: {
3190 Register TargetReg =
MI.getOperand(0).getReg();
3191 if (PPC::VSFRCRegClass.
contains(TargetReg)) {
3192 MI.setDesc(
get(PPC::DFLOADf64));
3196 MI.setDesc(
get(PPC::LD));
3199 case PPC::SPILLTOVSR_ST: {
3201 if (PPC::VSFRCRegClass.
contains(SrcReg)) {
3202 NumStoreSPILLVSRRCAsVec++;
3203 MI.setDesc(
get(PPC::DFSTOREf64));
3206 NumStoreSPILLVSRRCAsGpr++;
3207 MI.setDesc(
get(PPC::STD));
3211 case PPC::SPILLTOVSR_LDX: {
3212 Register TargetReg =
MI.getOperand(0).getReg();
3213 if (PPC::VSFRCRegClass.
contains(TargetReg))
3214 MI.setDesc(
get(PPC::LXSDX));
3216 MI.setDesc(
get(PPC::LDX));
3219 case PPC::SPILLTOVSR_STX: {
3221 if (PPC::VSFRCRegClass.
contains(SrcReg)) {
3222 NumStoreSPILLVSRRCAsVec++;
3223 MI.setDesc(
get(PPC::STXSDX));
3225 NumStoreSPILLVSRRCAsGpr++;
3226 MI.setDesc(
get(PPC::STDX));
3233 case PPC::CFENCE8: {
3234 auto Val =
MI.getOperand(0).getReg();
3235 unsigned CmpOp = Subtarget.
isPPC64() ? PPC::CMPD : PPC::CMPW;
3241 MI.setDesc(
get(PPC::ISYNC));
3242 MI.removeOperand(0);
static unsigned selectReg(int64_t Imm1, int64_t Imm2, unsigned CompareOpc,
                          unsigned TrueReg, unsigned FalseReg,
                          unsigned CRSubReg) {
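  // Given the two known immediates and the compare opcode that fed an isel,
  // decide statically which of TrueReg/FalseReg the isel would pick for the
  // CR field bit identified by CRSubReg; PPC::NoRegister means "cannot tell".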
  if (CompareOpc == PPC::CMPWI || CompareOpc == PPC::CMPDI) {
      return Imm1 < Imm2 ? TrueReg : FalseReg;
      return Imm1 > Imm2 ? TrueReg : FalseReg;
      return Imm1 == Imm2 ? TrueReg : FalseReg;
  else if (CompareOpc == PPC::CMPLWI || CompareOpc == PPC::CMPLDI) {
      return Imm1 == Imm2 ? TrueReg : FalseReg;
  return PPC::NoRegister;
                                                int64_t Imm) const {
  assert(MI.getOperand(OpNo).isReg() && "Operand must be a REG");
  Register InUseReg = MI.getOperand(OpNo).getReg();
  MI.getOperand(OpNo).ChangeToImmediate(Imm);
  int UseOpIdx = MI.findRegisterUseOperandIdx(InUseReg, TRI, false);
  if (UseOpIdx >= 0) {
    MI.removeOperand(UseOpIdx);
  int OperandToKeep = LII.SetCR ? 1 : 0;
  for (int i = MI.getNumOperands() - 1; i > OperandToKeep; i--)
    MI.removeOperand(i);
    MI.setDesc(get(LII.Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec));
3338 bool &SeenIntermediateUse)
const {
3339 assert(!
MI.getParent()->getParent()->getRegInfo().isSSA() &&
3340 "Should be called after register allocation.");
3344 SeenIntermediateUse =
false;
3345 for (; It != E; ++It) {
3346 if (It->modifiesRegister(Reg,
TRI))
3348 if (It->readsRegister(Reg,
TRI))
3349 SeenIntermediateUse =
true;
3357 int64_t Imm)
const {
3359 "Register should be in non-SSA form after RA");
3360 bool isPPC64 = Subtarget.
isPPC64();
3364 if (isInt<16>(Imm)) {
3366 }
else if (isInt<32>(Imm)) {
3374 assert(isPPC64 &&
"Materializing 64-bit immediate to single register is "
3375 "only supported in PPC64");
3377 if ((Imm >> 32) & 0xFFFF)
3380 .
addImm((Imm >> 32) & 0xFFFF);
3387 .
addImm((Imm >> 16) & 0xFFFF);
3397 unsigned &OpNoForForwarding,
3398 bool &SeenIntermediateUse)
const {
3399 OpNoForForwarding = ~0U;
3407 for (
int i = 1, e =
MI.getNumOperands(); i < e; i++) {
3408 if (!
MI.getOperand(i).isReg())
3411 if (!Reg.isVirtual())
3416 if (DefMIForTrueReg->
getOpcode() == PPC::LI ||
3417 DefMIForTrueReg->
getOpcode() == PPC::LI8 ||
3418 DefMIForTrueReg->
getOpcode() == PPC::ADDI ||
3419 DefMIForTrueReg->
getOpcode() == PPC::ADDI8) {
3420 OpNoForForwarding = i;
3421 DefMI = DefMIForTrueReg;
3436 unsigned Opc =
MI.getOpcode();
3437 bool ConvertibleImmForm =
3438 Opc == PPC::CMPWI || Opc == PPC::CMPLWI || Opc == PPC::CMPDI ||
3439 Opc == PPC::CMPLDI || Opc == PPC::ADDI || Opc == PPC::ADDI8 ||
3440 Opc == PPC::ORI || Opc == PPC::ORI8 || Opc == PPC::XORI ||
3441 Opc == PPC::XORI8 || Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec ||
3442 Opc == PPC::RLDICL_32 || Opc == PPC::RLDICL_32_64 ||
3443 Opc == PPC::RLWINM || Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8 ||
3444 Opc == PPC::RLWINM8_rec;
3445 bool IsVFReg = (
MI.getNumOperands() &&
MI.getOperand(0).isReg())
3452 if ((Opc == PPC::OR || Opc == PPC::OR8) &&
3453 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
3455 for (
int i = 1, e =
MI.getNumOperands(); i <
e; i++) {
3457 SeenIntermediateUse =
false;
3471 case PPC::ADDItocL8:
3474 OpNoForForwarding = i;
3481 return OpNoForForwarding == ~0
U ? nullptr :
DefMI;
unsigned PPCInstrInfo::getSpillTarget() const {
  bool IsP10Variant = Subtarget.isISA3_1() || Subtarget.pairedVectorMemops();
  return Subtarget.isISAFuture() ? 3 : IsP10Variant ?
         2 : Subtarget.hasP9Vector() ?
  bool PostRA = !MRI->isSSA();
  unsigned ToBeDeletedReg = 0;
  int64_t OffsetImm = 0;
  unsigned XFormOpcode = 0;
  bool OtherIntermediateUse = false;
  if (OtherIntermediateUse || !ADDMI)
  unsigned ScaleRegIdx = 0;
  int64_t OffsetAddi = 0;
  assert(ADDIMI && "There should be ADDIMI for valid ToBeChangedReg.");
  for (auto It = ++Start; It != End; It++)
      (ScaleReg == PPC::R0 || ScaleReg == PPC::X0))
  if (NewDefFor(ToBeChangedReg, *ADDMI, MI) || NewDefFor(ScaleReg, *ADDMI, MI))
  MI.setDesc(get(XFormOpcode));
      .ChangeToRegister(ScaleReg, false, false,
      .ChangeToRegister(ToBeChangedReg, false, false, true);
3624 int64_t &Imm)
const {
3628 if (Opc != PPC::ADDI && Opc != PPC::ADDI8)
3644 return Opc == PPC::ADD4 || Opc == PPC::ADD8;
3648 unsigned &ToBeDeletedReg,
3649 unsigned &XFormOpcode,
3653 if (!
MI.mayLoadOrStore())
3656 unsigned Opc =
MI.getOpcode();
3661 if (XFormOpcode == PPC::INSTRUCTION_LIST_END)
3675 if (!ImmOperand.
isImm())
3678 assert(RegOperand.
isReg() &&
"Instruction format is not right");
3681 if (!RegOperand.
isKill())
3684 ToBeDeletedReg = RegOperand.
getReg();
3685 OffsetImm = ImmOperand.
getImm();
3692 int64_t &OffsetAddi,
3693 int64_t OffsetImm)
const {
3700 bool OtherIntermediateUse =
false;
3721 if (OtherIntermediateUse || !ADDIMI)
3727 if (isInt<16>(OffsetAddi + OffsetImm))
  bool PostRA = !MRI->isSSA();
  bool SeenIntermediateUse = true;
  unsigned ForwardingOperand = ~0U;
                                      SeenIntermediateUse);
  assert(ForwardingOperand < MI.getNumOperands() &&
         "The forwarding operand needs to be valid at this point");
  bool IsForwardingOperandKilled = MI.getOperand(ForwardingOperand).isKill();
  bool KillFwdDefMI = !SeenIntermediateUse && IsForwardingOperandKilled;
  if (KilledDef && KillFwdDefMI)
          PPC::INSTRUCTION_LIST_END &&
      transformToNewImmFormFedByAdd(MI, *DefMI, ForwardingOperand))
  bool IsVFReg = MI.getOperand(0).isReg()
      transformToImmFormFedByAdd(MI, III, ForwardingOperand, *DefMI,
      transformToImmFormFedByLI(MI, III, ForwardingOperand, *DefMI))
  if (!HasImmForm && simplifyToLI(MI, *DefMI, ForwardingOperand, KilledDef))
  Register FoldingReg = MI.getOperand(1).getReg();
  if (SrcMI->getOpcode() != PPC::RLWINM &&
      SrcMI->getOpcode() != PPC::RLWINM_rec &&
  assert((MI.getOperand(2).isImm() && MI.getOperand(3).isImm() &&
         "Invalid PPC::RLWINM Instruction!");
  assert((MEMI < 32 && MESrc < 32 && MBMI < 32 && MBSrc < 32) &&
         "Invalid PPC::RLWINM Instruction!");
  bool SrcMaskFull = (MBSrc - MESrc == 1) || (MBSrc == 0 && MESrc == 31);
  if ((MBMI > MEMI) && !SrcMaskFull)
  APInt RotatedSrcMask = MaskSrc.rotl(SHMI);
  APInt FinalMask = RotatedSrcMask & MaskMI;
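  // Composing the two rotate-and-mask operations: the combined result rotates
  // the original source by (SHSrc + SHMI) mod 32, and FinalMask (MI's mask
  // intersected with the source mask rotated by SHMI) is the set of bits that
  // can survive. A zero FinalMask means the fold degenerates to loading 0.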
  bool Simplified = false;
  if (FinalMask.isZero()) {
        (MI.getOpcode() == PPC::RLWINM8 || MI.getOpcode() == PPC::RLWINM8_rec);
    if (MI.getOpcode() == PPC::RLWINM || MI.getOpcode() == PPC::RLWINM8) {
      MI.removeOperand(4);
      MI.removeOperand(3);
      MI.removeOperand(2);
      MI.getOperand(1).ChangeToImmediate(0);
      MI.setDesc(get(Is64Bit ? PPC::LI8 : PPC::LI));
      MI.removeOperand(4);
      MI.removeOperand(3);
      MI.getOperand(2).setImm(0);
      MI.setDesc(get(Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec));
      MI.getOperand(1).setIsKill(true);
      MI.getOperand(1).setIsKill(false);
    uint16_t NewSH = (SHSrc + SHMI) % 32;
    MI.getOperand(2).setImm(NewSH);
    MI.getOperand(3).setImm(NewMB);
    MI.getOperand(4).setImm(NewME);
      MI.getOperand(1).setIsKill(true);
      MI.getOperand(1).setIsKill(false);
  if (Simplified && MRI->use_nodbg_empty(FoldingReg) &&
  default: return false;
    III.ImmOpcode = Opc == PPC::ADD4 ? PPC::ADDI : PPC::ADDI8;
    III.ImmOpcode = Opc == PPC::ADDC ? PPC::ADDIC : PPC::ADDIC8;
    III.ImmOpcode = Opc == PPC::SUBFC ? PPC::SUBFIC : PPC::SUBFIC8;
    III.ImmOpcode = Opc == PPC::CMPW ? PPC::CMPWI : PPC::CMPDI;
    III.ImmOpcode = Opc == PPC::CMPLW ? PPC::CMPLWI : PPC::CMPLDI;
  case PPC::OR: III.ImmOpcode = PPC::ORI; break;
  case PPC::OR8: III.ImmOpcode = PPC::ORI8; break;
  case PPC::XOR: III.ImmOpcode = PPC::XORI; break;
  case PPC::XOR8: III.ImmOpcode = PPC::XORI8; break;
  case PPC::RLWNM_rec:
  case PPC::RLWNM8_rec:
    if (Opc == PPC::RLWNM || Opc == PPC::RLWNM8 || Opc == PPC::RLWNM_rec ||
        Opc == PPC::RLWNM8_rec)
    case PPC::RLWNM: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::RLWNM8: III.ImmOpcode = PPC::RLWINM8; break;
    case PPC::RLWNM_rec:
    case PPC::RLWNM8_rec:
    case PPC::SLW: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::SLW8: III.ImmOpcode = PPC::RLWINM8; break;
    case PPC::SRW: III.ImmOpcode = PPC::RLWINM; break;
    case PPC::SRW8: III.ImmOpcode = PPC::RLWINM8; break;
  case PPC::RLDCL_rec:
  case PPC::RLDCR_rec:
    if (Opc == PPC::RLDCL || Opc == PPC::RLDCL_rec || Opc == PPC::RLDCR ||
        Opc == PPC::RLDCR_rec)
    case PPC::RLDCL: III.ImmOpcode = PPC::RLDICL; break;
    case PPC::RLDCL_rec:
    case PPC::RLDCR: III.ImmOpcode = PPC::RLDICR; break;
    case PPC::RLDCR_rec:
    case PPC::SLD: III.ImmOpcode = PPC::RLDICR; break;
    case PPC::SRD: III.ImmOpcode = PPC::RLDICL; break;
  case PPC::LBZX: III.ImmOpcode = PPC::LBZ; break;
  case PPC::LBZX8: III.ImmOpcode = PPC::LBZ8; break;
  case PPC::LHZX: III.ImmOpcode = PPC::LHZ; break;
  case PPC::LHZX8: III.ImmOpcode = PPC::LHZ8; break;
  case PPC::LHAX: III.ImmOpcode = PPC::LHA; break;
  case PPC::LHAX8: III.ImmOpcode = PPC::LHA8; break;
  case PPC::LWZX: III.ImmOpcode = PPC::LWZ; break;
  case PPC::LWZX8: III.ImmOpcode = PPC::LWZ8; break;
  case PPC::LFSX: III.ImmOpcode = PPC::LFS; break;
  case PPC::LFDX: III.ImmOpcode = PPC::LFD; break;
  case PPC::STBX: III.ImmOpcode = PPC::STB; break;
  case PPC::STBX8: III.ImmOpcode = PPC::STB8; break;
  case PPC::STHX: III.ImmOpcode = PPC::STH; break;
  case PPC::STHX8: III.ImmOpcode = PPC::STH8; break;
  case PPC::STWX: III.ImmOpcode = PPC::STW; break;
  case PPC::STWX8: III.ImmOpcode = PPC::STW8; break;
  case PPC::STFSX: III.ImmOpcode = PPC::STFS; break;
  case PPC::STFDX: III.ImmOpcode = PPC::STFD; break;
  case PPC::LBZUX: III.ImmOpcode = PPC::LBZU; break;
  case PPC::LBZUX8: III.ImmOpcode = PPC::LBZU8; break;
  case PPC::LHZUX: III.ImmOpcode = PPC::LHZU; break;
  case PPC::LHZUX8: III.ImmOpcode = PPC::LHZU8; break;
  case PPC::LHAUX: III.ImmOpcode = PPC::LHAU; break;
  case PPC::LHAUX8: III.ImmOpcode = PPC::LHAU8; break;
  case PPC::LWZUX: III.ImmOpcode = PPC::LWZU; break;
  case PPC::LWZUX8: III.ImmOpcode = PPC::LWZU8; break;
  case PPC::LFSUX: III.ImmOpcode = PPC::LFSU; break;
  case PPC::LFDUX: III.ImmOpcode = PPC::LFDU; break;
  case PPC::STBUX: III.ImmOpcode = PPC::STBU; break;
  case PPC::STBUX8: III.ImmOpcode = PPC::STBU8; break;
  case PPC::STHUX: III.ImmOpcode = PPC::STHU; break;
  case PPC::STHUX8: III.ImmOpcode = PPC::STHU8; break;
  case PPC::STWUX: III.ImmOpcode = PPC::STWU; break;
  case PPC::STWUX8: III.ImmOpcode = PPC::STWU8; break;
  case PPC::STFSUX: III.ImmOpcode = PPC::STFSU; break;
  case PPC::STFDUX: III.ImmOpcode = PPC::STFDU; break;
  case PPC::XFLOADf32:
  case PPC::XFLOADf64:
  case PPC::XFSTOREf32:
  case PPC::XFSTOREf64:
    if (!Subtarget.hasP9Vector())
    case PPC::XFLOADf32:
    case PPC::XFLOADf64:
    case PPC::XFSTOREf32:
    case PPC::XFSTOREf64:
  assert(Op1 != Op2 && "Cannot swap operand with itself.");
  unsigned MaxOp = std::max(Op1, Op2);
  unsigned MinOp = std::min(Op1, Op2);
  MI.removeOperand(std::max(Op1, Op2));
  MI.removeOperand(std::min(Op1, Op2));
  if (MaxOp - MinOp == 1 && MI.getNumOperands() == MinOp) {
    MI.addOperand(MOp2);
    MI.addOperand(MOp1);
  unsigned TotalOps = MI.getNumOperands() + 2;
  for (unsigned i = MI.getNumOperands() - 1; i >= MinOp; i--) {
    MI.removeOperand(i);
  MI.addOperand(MOp2);
  for (unsigned i = MI.getNumOperands(); i < TotalOps; i++) {
    MI.addOperand(MOp1);
    MI.addOperand(MOps.back());
                                         unsigned OpNoForForwarding
  unsigned Opc = DefMI.getOpcode();
  if (Opc != PPC::ADDItocL8 && Opc != PPC::ADDI && Opc != PPC::ADDI8)
  if (Opc == PPC::ADDItocL8 && Subtarget.isAIX())
         "Add inst must have at least three operands");
  RegMO = &DefMI.getOperand(1);
  ImmMO = &DefMI.getOperand(2);
  if (!RegMO->isReg())
bool PPCInstrInfo::isRegElgibleForForwarding(
    bool &IsFwdFeederRegKilled, bool &SeenIntermediateUse) const {
  for (; It != E; ++It) {
      IsFwdFeederRegKilled = true;
      SeenIntermediateUse = true;
    if ((&*It) == &DefMI)
bool PPCInstrInfo::isImmElgibleForForwarding(const MachineOperand &ImmMO,
                                             int64_t BaseImm) const {
  if (DefMI.getOpcode() == PPC::ADDItocL8) {
  if (ImmMO.isImm()) {
    APInt ActualValue(64, ImmMO.getImm() + BaseImm, true);
    Imm = SignExtend64<16>(ImmMO.getImm() + BaseImm);
                                  unsigned OpNoForForwarding,
  if ((DefMI.getOpcode() != PPC::LI && DefMI.getOpcode() != PPC::LI8) ||
      !DefMI.getOperand(1).isImm())
  int64_t Immediate = DefMI.getOperand(1).getImm();
  int64_t SExtImm = SignExtend64<16>(Immediate);
  bool ReplaceWithLI = false;
  bool Is64BitLI = false;
  unsigned Opc = MI.getOpcode();
    bool Changed = false;
    int64_t Comparand = MI.getOperand(2).getImm();
    int64_t SExtComparand = ((uint64_t)Comparand & ~0x7FFFuLL) != 0
                                ? (Comparand | 0xFFFFFFFFFFFF0000)
    for (auto &CompareUseMI : MRI->use_instructions(DefReg)) {
      unsigned UseOpc = CompareUseMI.getOpcode();
      if (UseOpc != PPC::ISEL && UseOpc != PPC::ISEL8)
      unsigned CRSubReg = CompareUseMI.getOperand(3).getSubReg();
      Register TrueReg = CompareUseMI.getOperand(1).getReg();
      Register FalseReg = CompareUseMI.getOperand(2).getReg();
      unsigned RegToCopy =
          selectReg(SExtImm, SExtComparand, Opc, TrueReg, FalseReg, CRSubReg);
      if (RegToCopy == PPC::NoRegister)
      if (RegToCopy == PPC::ZERO || RegToCopy == PPC::ZERO8) {
        CompareUseMI.setDesc(get(UseOpc == PPC::ISEL8 ? PPC::LI8 : PPC::LI));
        CompareUseMI.removeOperand(3);
        CompareUseMI.removeOperand(2);
          dbgs() << "Found LI -> CMPI -> ISEL, replacing with a copy.\n");
      CompareUseMI.setDesc(get(PPC::COPY));
      CompareUseMI.removeOperand(3);
      CompareUseMI.removeOperand(RegToCopy == TrueReg ? 2 : 1);
      CmpIselsConverted++;
    MissedConvertibleImmediateInstrs++;
    int64_t Addend = MI.getOperand(2).getImm();
    if (isInt<16>(Addend + SExtImm)) {
      ReplaceWithLI = true;
      Is64BitLI = Opc == PPC::ADDI8;
      NewImm = Addend + SExtImm;
  case PPC::SUBFIC8: {
    if (MI.getNumOperands() > 3 && !MI.getOperand(3).isDead())
    int64_t Minuend = MI.getOperand(2).getImm();
    if (isInt<16>(Minuend - SExtImm)) {
      ReplaceWithLI = true;
      Is64BitLI = Opc == PPC::SUBFIC8;
      NewImm = Minuend - SExtImm;
  case PPC::RLDICL_rec:
  case PPC::RLDICL_32:
  case PPC::RLDICL_32_64: {
    int64_t SH = MI.getOperand(2).getImm();
    int64_t MB = MI.getOperand(3).getImm();
    APInt InVal((Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec) ? 64 : 32,
    InVal = InVal.rotl(SH);
    if (isUInt<15>(InVal.getSExtValue()) ||
        (Opc == PPC::RLDICL_rec && isUInt<16>(InVal.getSExtValue()))) {
      ReplaceWithLI = true;
      Is64BitLI = Opc != PPC::RLDICL_32;
      NewImm = InVal.getSExtValue();
      SetCR = Opc == PPC::RLDICL_rec;
  case PPC::RLWINM_rec:
  case PPC::RLWINM8_rec: {
    int64_t SH = MI.getOperand(2).getImm();
    int64_t MB = MI.getOperand(3).getImm();
    int64_t ME = MI.getOperand(4).getImm();
    APInt InVal(32, SExtImm, true);
    InVal = InVal.rotl(SH);
    bool ValueFits = isUInt<15>(InVal.getSExtValue());
    ValueFits |= ((Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec) &&
                  isUInt<16>(InVal.getSExtValue()));
      ReplaceWithLI = true;
      Is64BitLI = Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8_rec;
      NewImm = InVal.getSExtValue();
      SetCR = Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec;
    int64_t LogicalImm = MI.getOperand(2).getImm();
    if (Opc == PPC::ORI || Opc == PPC::ORI8)
      Result = LogicalImm | SExtImm;
      Result = LogicalImm ^ SExtImm;
    if (isInt<16>(Result)) {
      ReplaceWithLI = true;
      Is64BitLI = Opc == PPC::ORI8 || Opc == PPC::XORI8;
  if (ReplaceWithLI) {
    bool ImmChanged = (SExtImm & NewImm) != NewImm;
    if (PostRA && ImmChanged)
      DefMI.getOperand(1).setImm(NewImm);
    } else if (MRI->use_empty(MI.getOperand(0).getReg())) {
      assert(Immediate && "Transformation converted zero to non-zero?");
    } else if (ImmChanged)
  if (KilledDef && SetCR)
    *KilledDef = nullptr;
bool PPCInstrInfo::transformToNewImmFormFedByAdd(
  if (!MI.mayLoadOrStore())
  assert((XFormOpcode != PPC::INSTRUCTION_LIST_END) &&
         "MI must have x-form opcode");
  bool IsVFReg = MI.getOperand(0).isReg()
  if (!ImmOperandMI.isImm())
  if (!isDefMIElgibleForForwarding(DefMI, III, ImmMO, RegMO))
  assert(ImmMO && RegMO && "Imm and Reg operand must have been set");
  int64_t ImmBase = ImmOperandMI.getImm();
  if (!isImmElgibleForForwarding(*ImmMO, DefMI, III, Imm, ImmBase))
  LLVM_DEBUG(dbgs() << "Replacing existing reg+imm instruction:\n");
bool PPCInstrInfo::transformToImmFormFedByAdd(
  if (!isUseMIElgibleForForwarding(MI, III, OpNoForForwarding))
  if (!isDefMIElgibleForForwarding(DefMI, III, ImmMO, RegMO))
  assert(ImmMO && RegMO && "Imm and Reg operand must have been set");
  if (!isImmElgibleForForwarding(*ImmMO, DefMI, III, Imm))
  bool IsFwdFeederRegKilled = false;
  bool SeenIntermediateUse = false;
  if (!isRegElgibleForForwarding(*RegMO, DefMI, MI, KillDefMI,
                                 IsFwdFeederRegKilled, SeenIntermediateUse))
  if (ImmMO->isImm()) {
    if (DefMI.getOpcode() == PPC::ADDItocL8)
    MI.removeOperand(i);
  MI.addOperand(*ImmMO);
  for (auto &MO : MOps)
                                             unsigned ConstantOpNo,
  if ((DefMI.getOpcode() != PPC::LI && DefMI.getOpcode() != PPC::LI8) ||
      !DefMI.getOperand(1).isImm())
  int64_t Imm = SignExtend64<16>(DefMI.getOperand(1).getImm());
    APInt ActualValue(64, Imm, true);
    if (!ActualValue.isSignedIntN(III.ImmWidth))
    Register OrigZeroReg = MI.getOperand(PosForOrigZero).getReg();
    if ((NewZeroReg == PPC::R0 || NewZeroReg == PPC::X0) &&
    if ((OrigZeroReg == PPC::R0 || OrigZeroReg == PPC::X0) &&
        ConstantOpNo != PosForOrigZero)
  unsigned Opc = MI.getOpcode();
  bool SpecialShift32 = Opc == PPC::SLW || Opc == PPC::SLW_rec ||
                        Opc == PPC::SRW || Opc == PPC::SRW_rec ||
                        Opc == PPC::SLW8 || Opc == PPC::SLW8_rec ||
                        Opc == PPC::SRW8 || Opc == PPC::SRW8_rec;
  bool SpecialShift64 = Opc == PPC::SLD || Opc == PPC::SLD_rec ||
                        Opc == PPC::SRD || Opc == PPC::SRD_rec;
  bool SetCR = Opc == PPC::SLW_rec || Opc == PPC::SRW_rec ||
               Opc == PPC::SLD_rec || Opc == PPC::SRD_rec;
  bool RightShift = Opc == PPC::SRW || Opc == PPC::SRW_rec || Opc == PPC::SRD ||
                    Opc == PPC::SRD_rec;
  if (SpecialShift32 || SpecialShift64) {
    uint64_t ShAmt = Imm & (SpecialShift32 ? 0x1F : 0x3F);
5036 if (Imm & (SpecialShift32 ? 0x20 : 0x40))
5041 else if (!SetCR && ShAmt == 0 && !PostRA) {
5042 MI.removeOperand(2);
5043 MI.setDesc(
get(PPC::COPY));
5046 if (SpecialShift32) {
5092 MRI.getRegClass(RegToModify)->hasSuperClassEq(&PPC::GPRCRegClass) ?
5093 &PPC::GPRC_and_GPRC_NOR0RegClass : &PPC::G8RC_and_G8RC_NOX0RegClass;
5094 MRI.setRegClass(RegToModify, NewRC);
5110 if (Subtarget.hasVSX() && RC == &PPC::VRRCRegClass)
5111 return &PPC::VSRCRegClass;
5116 return PPC::getRecordFormOpcode(Opcode);
5120 return (Opcode == PPC::LBZU || Opcode == PPC::LBZUX || Opcode == PPC::LBZU8 ||
5121 Opcode == PPC::LBZUX8 || Opcode == PPC::LHZU ||
5122 Opcode == PPC::LHZUX || Opcode == PPC::LHZU8 ||
5123 Opcode == PPC::LHZUX8);
  int Opcode = MI->getOpcode();
  if (TII->isSExt32To64(Opcode))
  if (Opcode == PPC::RLDICL && MI->getOperand(3).getImm() >= 33)
  if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec ||
       Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec) &&
      MI->getOperand(3).getImm() > 0 &&
      MI->getOperand(3).getImm() <= MI->getOperand(4).getImm())
  if (Opcode == PPC::ANDIS_rec || Opcode == PPC::ANDIS8_rec) {
5164 if ((Imm & 0x8000) == 0)
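// definedByZeroExtendingOp (fragments below): the zero-extension analogue.
// It also accepts word loads when Reg is the loaded value, LI/LIS immediates
// with no bits above bit 14, and rotate-and-mask forms whose mask keeps the
// upper 32 bits clear.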
5183   int Opcode = MI->getOpcode();
5186   if (TII->isZExt32To64(Opcode))
5191        Opcode == PPC::LWZUX || Opcode == PPC::LWZU8 || Opcode == PPC::LWZUX8) &&
5192       MI->getOperand(0).getReg() == Reg)
5197   if (Opcode == PPC::LI || Opcode == PPC::LI8 ||
5198       Opcode == PPC::LIS || Opcode == PPC::LIS8) {
5199     int64_t Imm = MI->getOperand(1).getImm();
5200     if (((uint64_t)Imm & ~0x7FFFuLL) == 0)
5206   if ((Opcode == PPC::RLDICL || Opcode == PPC::RLDICL_rec ||
5207        Opcode == PPC::RLDCL || Opcode == PPC::RLDCL_rec ||
5208        Opcode == PPC::RLDICL_32_64) &&
5209       MI->getOperand(3).getImm() >= 32)
5212   if ((Opcode == PPC::RLDIC || Opcode == PPC::RLDIC_rec) &&
5213       MI->getOperand(3).getImm() >= 32 &&
5214       MI->getOperand(3).getImm() <= 63 - MI->getOperand(2).getImm())
5217   if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec ||
5218        Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec ||
5219        Opcode == PPC::RLWINM8 || Opcode == PPC::RLWNM8) &&
5220       MI->getOperand(3).getImm() <= MI->getOperand(4).getImm())
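// isTOCSaveMI: a TOC save is a store whose base register is the stack pointer
// and whose displacement equals the ABI's TOC save offset.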
5229   if (!MI.getOperand(1).isImm() || !MI.getOperand(2).isReg())
5233   Register StackReg = MI.getOperand(2).getReg();
5235   if (StackReg == SPReg && StackOffset == TOCSaveOffset)
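// isSignOrZeroExtended (fragments below): returns a (sign-extended,
// zero-extended) pair for Reg by walking its defining instructions. Copies
// forward their source's properties, calls consult the callee's SExt/ZExt
// return attributes, and PHI/OR-style nodes combine the properties of all of
// their register inputs.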
5249 std::pair<bool, bool>
5251                                     const unsigned BinOpDepth,
5254     return std::pair<bool, bool>(false, false);
5258     return std::pair<bool, bool>(false, false);
5265   if (IsSExt && IsZExt)
5266     return std::pair<bool, bool>(IsSExt, IsZExt);
5268   switch (MI->getOpcode()) {
5270     Register SrcReg = MI->getOperand(1).getReg();
5279     return std::pair<bool, bool>(SrcExt.first || IsSExt,
5280                                  SrcExt.second || IsZExt);
5286     if (MI->getParent()->getBasicBlock() ==
5292       return std::pair<bool, bool>(IsSExt, IsZExt);
5296     if (SrcReg != PPC::X3) {
5299       return std::pair<bool, bool>(SrcExt.first || IsSExt,
5300                                    SrcExt.second || IsZExt);
5310     std::pair<bool, bool> IsExtendPair = std::pair<bool, bool>(IsSExt, IsZExt);
5314       return IsExtendPair;
5318       return IsExtendPair;
5323       return IsExtendPair;
5327     IsSExt |= Attrs.hasAttribute(Attribute::SExt);
5328     IsZExt |= Attrs.hasAttribute(Attribute::ZExt);
5329     return std::pair<bool, bool>(IsSExt, IsZExt);
5332     return IsExtendPair;
5341     Register SrcReg = MI->getOperand(1).getReg();
5343     return std::pair<bool, bool>(SrcExt.first || IsSExt,
5344                                  SrcExt.second || IsZExt);
5355     Register SrcReg = MI->getOperand(1).getReg();
5359       return std::pair<bool, bool>(false, SrcExt.second || IsZExt);
5361     return std::pair<bool, bool>(SrcExt.first || IsSExt,
5362                                  SrcExt.second || IsZExt);
5372       return std::pair<bool, bool>(false, false);
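// PHI operands come in (value, predecessor) pairs, so for PHIs the walk below
// widens OperandEnd to the full operand count and steps over the block
// operands (the stride is adjusted accordingly); in the default case only
// operands 1 and 2 are visited. Both properties are intersected across the
// inputs.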
5376     unsigned OperandEnd = 3, OperandStride = 1;
5377     if (MI->getOpcode() == PPC::PHI) {
5378       OperandEnd = MI->getNumOperands();
5384     for (unsigned I = 1; I != OperandEnd; I += OperandStride) {
5385       if (!MI->getOperand(I).isReg())
5386         return std::pair<bool, bool>(false, false);
5390       IsSExt &= SrcExt.first;
5391       IsZExt &= SrcExt.second;
5393     return std::pair<bool, bool>(IsSExt, IsZExt);
5402       return std::pair<bool, bool>(false, false);
5404     Register SrcReg1 = MI->getOperand(1).getReg();
5405     Register SrcReg2 = MI->getOperand(2).getReg();
5408     return std::pair<bool, bool>(Src1Ext.first && Src2Ext.first,
5409                                  Src1Ext.second || Src2Ext.second);
5415   return std::pair<bool, bool>(IsSExt, IsZExt);
5419   return (Opcode == (Subtarget.isPPC64() ? PPC::BDNZ8 : PPC::BDNZ));
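// PPCPipelinerLoopInfo (fragments below): the PipelinerLoopInfo handed to the
// machine pipeliner for CTR-based (BDNZ) hardware loops. It ignores the
// loop-ending branch during scheduling, answers trip-count-greater queries,
// adjusts the trip count for prologue/epilogue copies, and erases the CTR
// set-up instruction when the pipelined loop no longer needs it.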
5432       : Loop(Loop), EndLoop(EndLoop), LoopCount(LoopCount),
5434         TII(MF->getSubtarget().getInstrInfo()) {
5443   bool shouldIgnoreForPipelining(const MachineInstr *MI) const override {
5445     return MI == EndLoop;
5448   std::optional<bool> createTripCountGreaterCondition(
5451     if (TripCount == -1) {
5461     return TripCount > TC;
5469   void adjustTripCount(int TripCountAdjust) override {
5472     if (LoopCount->getOpcode() == PPC::LI8 ||
5483   void disposed() override {
5484     Loop->eraseFromParent();
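// analyzeLoopForPipelining (fragments below): if the single-block loop ends in
// a BDNZ, find the MTCTRloop/MTCTR8loop in the preheader that sets up the
// count register and wrap it in a PPCPipelinerLoopInfo; findLoopInstr simply
// scans the preheader's instructions for that opcode.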
5491 std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
5496   if (Preheader == LoopBB)
5497     Preheader = *std::next(LoopBB->pred_begin());
5500   if (I != LoopBB->end() && isBDNZ(I->getOpcode())) {
5503     Register LoopCountReg = LoopInst->getOperand(0).getReg();
5506     return std::make_unique<PPCPipelinerLoopInfo>(LoopInst, &*I, LoopCount);
5516   unsigned LOOPi = (Subtarget.isPPC64() ? PPC::MTCTR8loop : PPC::MTCTRloop);
5519   for (auto &I : PreHeader.instrs())
5520     if (I.getOpcode() == LOOPi)
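// areMemAccessesTriviallyDisjoint (fragments below): once both accesses are
// expressed as offsets from the same base, they cannot overlap when the lower
// access ends at or before the higher one begins, i.e.
// LowOffset + LowWidth <= HighOffset.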
5566 int64_t OffsetA = 0, OffsetB = 0;
5571 int LowOffset = std::min(OffsetA, OffsetB);
5572 int HighOffset = std::max(OffsetA, OffsetB);
5573 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
5575       LowOffset + (int)LowWidth.getValue() <= HighOffset)