48#define DEBUG_TYPE "ppc-instr-info"
50#define GET_INSTRMAP_INFO
51#define GET_INSTRINFO_CTOR_DTOR
52#include "PPCGenInstrInfo.inc"
55 "Number of spillvsrrc spilled to stack as vec");
57 "Number of spillvsrrc spilled to stack as gpr");
58STATISTIC(NumGPRtoVSRSpill,
"Number of gpr spills to spillvsrrc");
60 "Number of ISELs that depend on comparison of constants converted");
62 "Number of compare-immediate instructions fed by constants");
64 "Number of record-form rotates converted to record-form andi");
68 cl::desc(
"Disable analysis for CTR loops"));
74cl::desc(
"Causes the backend to crash instead of generating a nop VSX copy"),
79 cl::desc(
"Use the old (incorrect) instruction latency calculation"));
83 cl::desc(
"register pressure factor for the transformations."));
87 cl::desc(
"enable register pressure reduce in machine combiner pass."));
// Deliberately empty out-of-line definition: anchors PPCInstrInfo's vtable to
// this translation unit so the compiler emits it (and the class's type info)
// exactly once, rather than in every TU that includes the header.
90void PPCInstrInfo::anchor() {}
95 STI.isPPC64() ? PPC::BLR8 : PPC::BLR),
96 Subtarget(STI), RI(STI.getTargetMachine()) {}
104 static_cast<const PPCSubtarget *
>(STI)->getCPUDirective();
108 static_cast<const PPCSubtarget *
>(STI)->getInstrItineraryData();
140 unsigned *PredCost)
const {
142 return PPCGenInstrInfo::getInstrLatency(ItinData,
MI, PredCost);
152 unsigned DefClass =
MI.getDesc().getSchedClass();
153 for (
unsigned i = 0, e =
MI.getNumOperands(); i != e; ++i) {
171 std::optional<unsigned>
Latency = PPCGenInstrInfo::getOperandLatency(
174 if (!
DefMI.getParent())
181 if (Reg.isVirtual()) {
184 IsRegCR =
MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRRCRegClass) ||
185 MRI->getRegClass(Reg)->hasSuperClassEq(&PPC::CRBITRCRegClass);
187 IsRegCR = PPC::CRRCRegClass.contains(Reg) ||
188 PPC::CRBITRCRegClass.contains(Reg);
191 if (
UseMI.isBranch() && IsRegCR) {
271#define InfoArrayIdxFMAInst 0
272#define InfoArrayIdxFAddInst 1
273#define InfoArrayIdxFMULInst 2
274#define InfoArrayIdxAddOpIdx 3
275#define InfoArrayIdxMULOpIdx 4
276#define InfoArrayIdxFSubInst 5
287 {PPC::XSMADDADP, PPC::XSADDDP, PPC::XSMULDP, 1, 2, PPC::XSSUBDP},
288 {PPC::XSMADDASP, PPC::XSADDSP, PPC::XSMULSP, 1, 2, PPC::XSSUBSP},
289 {PPC::XVMADDADP, PPC::XVADDDP, PPC::XVMULDP, 1, 2, PPC::XVSUBDP},
290 {PPC::XVMADDASP, PPC::XVADDSP, PPC::XVMULSP, 1, 2, PPC::XVSUBSP},
291 {PPC::FMADD, PPC::FADD, PPC::FMUL, 3, 1, PPC::FSUB},
292 {PPC::FMADDS, PPC::FADDS, PPC::FMULS, 3, 1, PPC::FSUBS}};
296int16_t PPCInstrInfo::getFMAOpIdxInfo(
unsigned Opcode)
const {
353 bool DoRegPressureReduce)
const {
358 auto IsAllOpsVirtualReg = [](
const MachineInstr &Instr) {
359 for (
const auto &MO : Instr.explicit_operands())
360 if (!(MO.isReg() && MO.getReg().isVirtual()))
365 auto IsReassociableAddOrSub = [&](
const MachineInstr &Instr,
367 if (Instr.getOpcode() !=
378 if (!IsAllOpsVirtualReg(Instr))
384 !
MRI->hasOneNonDBGUse(Instr.getOperand(0).getReg()))
390 auto IsReassociableFMA = [&](
const MachineInstr &Instr, int16_t &AddOpIdx,
391 int16_t &MulOpIdx,
bool IsLeaf) {
392 int16_t
Idx = getFMAOpIdxInfo(Instr.getOpcode());
403 if (!IsAllOpsVirtualReg(Instr))
423 int16_t AddOpIdx = -1;
424 int16_t MulOpIdx = -1;
426 bool IsUsedOnceL =
false;
427 bool IsUsedOnceR =
false;
431 auto IsRPReductionCandidate = [&]() {
435 if (Opcode != PPC::XSMADDASP && Opcode != PPC::XSMADDADP)
440 if (IsReassociableFMA(Root, AddOpIdx, MulOpIdx,
true)) {
441 assert((MulOpIdx >= 0) &&
"mul operand index not right!");
442 Register MULRegL =
TRI->lookThruSingleUseCopyChain(
444 Register MULRegR =
TRI->lookThruSingleUseCopyChain(
446 if (!MULRegL && !MULRegR)
449 if (MULRegL && !MULRegR) {
453 }
else if (!MULRegL && MULRegR) {
465 MULInstrL =
MRI->getVRegDef(MULRegL);
466 MULInstrR =
MRI->getVRegDef(MULRegR);
473 if (DoRegPressureReduce && IsRPReductionCandidate()) {
474 assert((MULInstrL && MULInstrR) &&
"wrong register preduction candidate!");
495 if (!IsReassociableFMA(Root, AddOpIdx, MulOpIdx,
false))
498 assert((AddOpIdx >= 0) &&
"add operand index not right!");
505 if (!IsReassociableFMA(*Prev, AddOpIdx, MulOpIdx,
false))
508 assert((AddOpIdx >= 0) &&
"add operand index not right!");
513 if (IsReassociableFMA(*Leaf, AddOpIdx, MulOpIdx,
true)) {
529 assert(!InsInstrs.
empty() &&
"Instructions set to be inserted is empty!");
562 assert(isa<llvm::ConstantFP>(
C) &&
"not a valid constant!");
565 APFloat F1((dyn_cast<ConstantFP>(
C))->getValueAPF());
567 Constant *NegC = ConstantFP::get(dyn_cast<ConstantFP>(
C)->getContext(), F1);
575 for (
auto *Inst : InsInstrs) {
577 assert(Operand.isReg() &&
"Invalid instruction in InsInstrs!");
578 if (Operand.getReg() == PPC::ZERO8) {
579 Placeholder = &Operand;
585 assert(Placeholder &&
"Placeholder does not exist!");
590 generateLoadForNewConst(ConstPoolIdx, &Root,
C->getType(), InsInstrs);
593 Placeholder->setReg(LoadNewConst);
614 if (!(Subtarget.
isPPC64() && Subtarget.hasP9Vector() &&
622 auto GetMBBPressure =
632 if (
MI.isDebugValue() ||
MI.isDebugLabel())
638 RPTracker.
recede(RegOpers);
648 unsigned VSSRCLimit =
TRI->getRegPressureSetLimit(
652 return GetMBBPressure(
MBB)[PPC::RegisterPressureSets::VSSRC] >
658 if (!
I->hasOneMemOperand())
662 return Op->isLoad() &&
Op->getPseudoValue() &&
666Register PPCInstrInfo::generateLoadForNewConst(
674 "Target not supported!\n");
680 Register VReg1 =
MRI->createVirtualRegister(&PPC::G8RC_and_G8RC_NOX0RegClass);
682 BuildMI(*MF,
MI->getDebugLoc(),
get(PPC::ADDIStocHA8), VReg1)
687 "Only float and double are supported!");
692 LoadOpcode = PPC::DFLOADf32;
694 LoadOpcode = PPC::DFLOADf64;
724 assert(
I->mayLoad() &&
"Should be a load instruction.\n");
725 for (
auto MO :
I->uses()) {
729 if (Reg == 0 || !Reg.isVirtual())
735 return (MCP->
getConstants())[MO2.getIndex()].Val.ConstVal;
742 bool DoRegPressureReduce)
const {
752 DoRegPressureReduce);
765 reassociateFMA(Root,
Pattern, InsInstrs, DelInstrs, InstrIdxForVirtReg);
770 DelInstrs, InstrIdxForVirtReg);
775void PPCInstrInfo::reassociateFMA(
786 MRI.constrainRegClass(RegC, RC);
789 int16_t
Idx = getFMAOpIdxInfo(FmaOp);
790 assert(
Idx >= 0 &&
"Root must be a FMA instruction");
792 bool IsILPReassociate =
812 Leaf =
MRI.getVRegDef(MULReg);
818 Leaf =
MRI.getVRegDef(MULReg);
824 if (IsILPReassociate)
832 MRI.constrainRegClass(Reg, RC);
833 KillFlag = Operand.
isKill();
838 bool &MulOp1KillFlag,
bool &MulOp2KillFlag,
839 bool &AddOpKillFlag) {
840 GetOperandInfo(
Instr.getOperand(FirstMulOpIdx), MulOp1, MulOp1KillFlag);
841 GetOperandInfo(
Instr.getOperand(FirstMulOpIdx + 1), MulOp2, MulOp2KillFlag);
842 GetOperandInfo(
Instr.getOperand(AddOpIdx), AddOp, AddOpKillFlag);
845 Register RegM11, RegM12, RegX, RegY, RegM21, RegM22, RegM31, RegM32, RegA11,
847 bool KillX =
false, KillY =
false, KillM11 =
false, KillM12 =
false,
848 KillM21 =
false, KillM22 =
false, KillM31 =
false, KillM32 =
false,
849 KillA11 =
false, KillA21 =
false, KillB =
false;
851 GetFMAInstrInfo(Root, RegM31, RegM32, RegB, KillM31, KillM32, KillB);
853 if (IsILPReassociate)
854 GetFMAInstrInfo(*Prev, RegM21, RegM22, RegA21, KillM21, KillM22, KillA21);
857 GetFMAInstrInfo(*Leaf, RegM11, RegM12, RegA11, KillM11, KillM12, KillA11);
858 GetOperandInfo(Leaf->
getOperand(AddOpIdx), RegX, KillX);
860 GetOperandInfo(Leaf->
getOperand(1), RegX, KillX);
861 GetOperandInfo(Leaf->
getOperand(2), RegY, KillY);
864 GetOperandInfo(Leaf->
getOperand(1), RegX, KillX);
865 GetOperandInfo(Leaf->
getOperand(2), RegY, KillY);
875 InstrIdxForVirtReg.
insert(std::make_pair(NewVRA, 0));
878 if (IsILPReassociate) {
879 NewVRB =
MRI.createVirtualRegister(RC);
880 InstrIdxForVirtReg.
insert(std::make_pair(NewVRB, 1));
885 NewVRD =
MRI.createVirtualRegister(RC);
886 InstrIdxForVirtReg.
insert(std::make_pair(NewVRD, 2));
891 Register RegMul2,
bool KillRegMul2) {
892 MI->getOperand(AddOpIdx).setReg(RegAdd);
893 MI->getOperand(AddOpIdx).setIsKill(KillAdd);
894 MI->getOperand(FirstMulOpIdx).setReg(RegMul1);
895 MI->getOperand(FirstMulOpIdx).setIsKill(KillRegMul1);
896 MI->getOperand(FirstMulOpIdx + 1).setReg(RegMul2);
897 MI->getOperand(FirstMulOpIdx + 1).setIsKill(KillRegMul2);
918 AdjustOperandOrder(MINewB, RegX, KillX, RegM21, KillM21, RegM22, KillM22);
919 AdjustOperandOrder(MINewA, RegY, KillY, RegM31, KillM31, RegM32, KillM32);
940 assert(NewVRD &&
"new FMA register not created!");
959 AdjustOperandOrder(MINewB, RegX, KillX, RegM21, KillM21, RegM22, KillM22);
960 AdjustOperandOrder(MINewD, NewVRA,
true, RegM31, KillM31, RegM32,
986 bool KillVarReg =
false;
989 KillVarReg = KillM31;
992 KillVarReg = KillM32;
1016 if (!IsILPReassociate) {
1025 "Insertion instructions set should not be empty!");
1029 if (IsILPReassociate)
1037 unsigned &SubIdx)
const {
1038 switch (
MI.getOpcode()) {
1039 default:
return false;
1042 case PPC::EXTSW_32_64:
1043 SrcReg =
MI.getOperand(1).getReg();
1044 DstReg =
MI.getOperand(0).getReg();
1045 SubIdx = PPC::sub_32;
1051 int &FrameIndex)
const {
1055 if (
MI.getOperand(1).isImm() && !
MI.getOperand(1).getImm() &&
1056 MI.getOperand(2).isFI()) {
1057 FrameIndex =
MI.getOperand(2).getIndex();
1058 return MI.getOperand(0).getReg();
1068 switch (
MI.getOpcode()) {
1078 case PPC::ADDIStocHA:
1079 case PPC::ADDIStocHA8:
1080 case PPC::ADDItocL8:
1081 case PPC::LOAD_STACK_GUARD:
1082 case PPC::PPCLdFixedAddr:
1084 case PPC::XXLXORspz:
1085 case PPC::XXLXORdpz:
1086 case PPC::XXLEQVOnes:
1087 case PPC::XXSPLTI32DX:
1089 case PPC::XXSPLTIDP:
1093 case PPC::V_SETALLONESB:
1094 case PPC::V_SETALLONESH:
1095 case PPC::V_SETALLONES:
1098 case PPC::XXSETACCZ:
1099 case PPC::XXSETACCZW:
1106 int &FrameIndex)
const {
1108 if (
MI.getOperand(1).isImm() && !
MI.getOperand(1).getImm() &&
1109 MI.getOperand(2).isFI()) {
1110 FrameIndex =
MI.getOperand(2).getIndex();
1111 return MI.getOperand(0).getReg();
1119 unsigned OpIdx2)
const {
1123 if (
MI.getOpcode() != PPC::RLWIMI &&
MI.getOpcode() != PPC::RLWIMI_rec)
1131 if (
MI.getOperand(3).getImm() != 0)
1142 assert(((OpIdx1 == 1 && OpIdx2 == 2) || (OpIdx1 == 2 && OpIdx2 == 1)) &&
1143 "Only the operands 1 and 2 can be swapped in RLSIMI/RLWIMI_rec.");
1147 unsigned SubReg1 =
MI.getOperand(1).getSubReg();
1148 unsigned SubReg2 =
MI.getOperand(2).getSubReg();
1149 bool Reg1IsKill =
MI.getOperand(1).isKill();
1150 bool Reg2IsKill =
MI.getOperand(2).isKill();
1151 bool ChangeReg0 =
false;
1157 "Expecting a two-address instruction!");
1158 assert(
MI.getOperand(0).getSubReg() == SubReg1 &&
"Tied subreg mismatch");
1164 unsigned MB =
MI.getOperand(4).getImm();
1165 unsigned ME =
MI.getOperand(5).getImm();
1169 if (MB == 0 && ME == 31)
1174 Register Reg0 = ChangeReg0 ? Reg2 :
MI.getOperand(0).getReg();
1175 bool Reg0IsDead =
MI.getOperand(0).isDead();
1176 return BuildMI(MF,
MI.getDebugLoc(),
MI.getDesc())
1185 MI.getOperand(0).setReg(Reg2);
1186 MI.getOperand(0).setSubReg(SubReg2);
1188 MI.getOperand(2).setReg(Reg1);
1189 MI.getOperand(1).setReg(Reg2);
1190 MI.getOperand(2).setSubReg(SubReg1);
1191 MI.getOperand(1).setSubReg(SubReg2);
1192 MI.getOperand(2).setIsKill(Reg1IsKill);
1193 MI.getOperand(1).setIsKill(Reg2IsKill);
1196 MI.getOperand(4).setImm((ME + 1) & 31);
1197 MI.getOperand(5).setImm((MB - 1) & 31);
1202 unsigned &SrcOpIdx1,
1203 unsigned &SrcOpIdx2)
const {
1214 return fixCommutedOpIndices(SrcOpIdx1, SrcOpIdx2, 2, 3);
1224 default: Opcode = PPC::NOP;
break;
1250 bool AllowModify)
const {
1251 bool isPPC64 = Subtarget.
isPPC64();
1258 if (!isUnpredicatedTerminator(*
I))
1264 if (
I->getOpcode() == PPC::B &&
1266 I->eraseFromParent();
1270 if (
I ==
MBB.
end() || !isUnpredicatedTerminator(*
I))
1279 if (
I ==
MBB.
begin() || !isUnpredicatedTerminator(*--
I)) {
1285 }
else if (LastInst.
getOpcode() == PPC::BCC) {
1293 }
else if (LastInst.
getOpcode() == PPC::BC) {
1301 }
else if (LastInst.
getOpcode() == PPC::BCn) {
1309 }
else if (LastInst.
getOpcode() == PPC::BDNZ8 ||
1320 }
else if (LastInst.
getOpcode() == PPC::BDZ8 ||
1341 if (
I !=
MBB.
begin() && isUnpredicatedTerminator(*--
I))
1345 if (SecondLastInst.
getOpcode() == PPC::BCC &&
1355 }
else if (SecondLastInst.
getOpcode() == PPC::BC &&
1365 }
else if (SecondLastInst.
getOpcode() == PPC::BCn &&
1375 }
else if ((SecondLastInst.
getOpcode() == PPC::BDNZ8 ||
1376 SecondLastInst.
getOpcode() == PPC::BDNZ) &&
1389 }
else if ((SecondLastInst.
getOpcode() == PPC::BDZ8 ||
1390 SecondLastInst.
getOpcode() == PPC::BDZ) &&
1413 I->eraseFromParent();
1422 int *BytesRemoved)
const {
1423 assert(!BytesRemoved &&
"code size not handled");
1429 if (
I->getOpcode() != PPC::B &&
I->getOpcode() != PPC::BCC &&
1430 I->getOpcode() != PPC::BC &&
I->getOpcode() != PPC::BCn &&
1431 I->getOpcode() != PPC::BDNZ8 &&
I->getOpcode() != PPC::BDNZ &&
1432 I->getOpcode() != PPC::BDZ8 &&
I->getOpcode() != PPC::BDZ)
1436 I->eraseFromParent();
1442 if (
I->getOpcode() != PPC::BCC &&
1443 I->getOpcode() != PPC::BC &&
I->getOpcode() != PPC::BCn &&
1444 I->getOpcode() != PPC::BDNZ8 &&
I->getOpcode() != PPC::BDNZ &&
1445 I->getOpcode() != PPC::BDZ8 &&
I->getOpcode() != PPC::BDZ)
1449 I->eraseFromParent();
1458 int *BytesAdded)
const {
1460 assert(
TBB &&
"insertBranch must not be told to insert a fallthrough");
1462 "PPC branch conditions have two components!");
1463 assert(!BytesAdded &&
"code size not handled");
1465 bool isPPC64 = Subtarget.
isPPC64();
1473 (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
1474 (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).
addMBB(
TBB);
1490 (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ) :
1491 (isPPC64 ? PPC::BDZ8 : PPC::BDZ))).
addMBB(
TBB);
1509 Register FalseReg,
int &CondCycles,
1510 int &TrueCycles,
int &FalseCycles)
const {
1511 if (!Subtarget.hasISEL())
1514 if (
Cond.size() != 2)
1530 RI.getCommonSubClass(
MRI.getRegClass(TrueReg),
MRI.getRegClass(FalseReg));
1535 if (!PPC::GPRCRegClass.hasSubClassEq(RC) &&
1536 !PPC::GPRC_NOR0RegClass.hasSubClassEq(RC) &&
1537 !PPC::G8RCRegClass.hasSubClassEq(RC) &&
1538 !PPC::G8RC_NOX0RegClass.hasSubClassEq(RC))
1558 "PPC branch conditions have two components!");
1563 RI.getCommonSubClass(
MRI.getRegClass(TrueReg),
MRI.getRegClass(FalseReg));
1564 assert(RC &&
"TrueReg and FalseReg must have overlapping register classes");
1566 bool Is64Bit = PPC::G8RCRegClass.hasSubClassEq(RC) ||
1567 PPC::G8RC_NOX0RegClass.hasSubClassEq(RC);
1569 PPC::GPRCRegClass.hasSubClassEq(RC) ||
1570 PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) &&
1571 "isel is for regular integer GPRs only");
1573 unsigned OpCode = Is64Bit ? PPC::ISEL8 : PPC::ISEL;
1576 unsigned SubIdx = 0;
1577 bool SwapOps =
false;
1578 switch (SelectPred) {
1582 SubIdx = PPC::sub_eq; SwapOps =
false;
break;
1586 SubIdx = PPC::sub_eq; SwapOps =
true;
break;
1590 SubIdx = PPC::sub_lt; SwapOps =
false;
break;
1594 SubIdx = PPC::sub_lt; SwapOps =
true;
break;
1598 SubIdx = PPC::sub_gt; SwapOps =
false;
break;
1602 SubIdx = PPC::sub_gt; SwapOps =
true;
break;
1606 SubIdx = PPC::sub_un; SwapOps =
false;
break;
1610 SubIdx = PPC::sub_un; SwapOps =
true;
break;
1615 Register FirstReg = SwapOps ? FalseReg : TrueReg,
1616 SecondReg = SwapOps ? TrueReg : FalseReg;
1621 if (
MRI.getRegClass(FirstReg)->contains(PPC::R0) ||
1622 MRI.getRegClass(FirstReg)->contains(PPC::X0)) {
1624 MRI.getRegClass(FirstReg)->contains(PPC::X0) ?
1625 &PPC::G8RC_NOX0RegClass : &PPC::GPRC_NOR0RegClass;
1627 FirstReg =
MRI.createVirtualRegister(FirstRC);
1639 if (CRBit == PPC::CR0LT || CRBit == PPC::CR1LT ||
1640 CRBit == PPC::CR2LT || CRBit == PPC::CR3LT ||
1641 CRBit == PPC::CR4LT || CRBit == PPC::CR5LT ||
1642 CRBit == PPC::CR6LT || CRBit == PPC::CR7LT)
1644 if (CRBit == PPC::CR0GT || CRBit == PPC::CR1GT ||
1645 CRBit == PPC::CR2GT || CRBit == PPC::CR3GT ||
1646 CRBit == PPC::CR4GT || CRBit == PPC::CR5GT ||
1647 CRBit == PPC::CR6GT || CRBit == PPC::CR7GT)
1649 if (CRBit == PPC::CR0EQ || CRBit == PPC::CR1EQ ||
1650 CRBit == PPC::CR2EQ || CRBit == PPC::CR3EQ ||
1651 CRBit == PPC::CR4EQ || CRBit == PPC::CR5EQ ||
1652 CRBit == PPC::CR6EQ || CRBit == PPC::CR7EQ)
1654 if (CRBit == PPC::CR0UN || CRBit == PPC::CR1UN ||
1655 CRBit == PPC::CR2UN || CRBit == PPC::CR3UN ||
1656 CRBit == PPC::CR4UN || CRBit == PPC::CR5UN ||
1657 CRBit == PPC::CR6UN || CRBit == PPC::CR7UN)
1660 assert(Ret != 4 &&
"Invalid CR bit register");
1671 if (PPC::F8RCRegClass.
contains(DestReg) &&
1672 PPC::VSRCRegClass.
contains(SrcReg)) {
1674 TRI->getMatchingSuperReg(DestReg, PPC::sub_64, &PPC::VSRCRegClass);
1680 }
else if (PPC::F8RCRegClass.
contains(SrcReg) &&
1681 PPC::VSRCRegClass.
contains(DestReg)) {
1683 TRI->getMatchingSuperReg(SrcReg, PPC::sub_64, &PPC::VSRCRegClass);
1692 if (PPC::CRBITRCRegClass.
contains(SrcReg) &&
1693 PPC::GPRCRegClass.
contains(DestReg)) {
1705 }
else if (PPC::CRRCRegClass.
contains(SrcReg) &&
1706 (PPC::G8RCRegClass.
contains(DestReg) ||
1707 PPC::GPRCRegClass.
contains(DestReg))) {
1708 bool Is64Bit = PPC::G8RCRegClass.contains(DestReg);
1709 unsigned MvCode = Is64Bit ? PPC::MFOCRF8 : PPC::MFOCRF;
1710 unsigned ShCode = Is64Bit ? PPC::RLWINM8 : PPC::RLWINM;
1711 unsigned CRNum =
TRI->getEncodingValue(SrcReg);
1723 }
else if (PPC::G8RCRegClass.
contains(SrcReg) &&
1724 PPC::VSFRCRegClass.
contains(DestReg)) {
1725 assert(Subtarget.hasDirectMove() &&
1726 "Subtarget doesn't support directmove, don't know how to copy.");
1731 }
else if (PPC::VSFRCRegClass.
contains(SrcReg) &&
1732 PPC::G8RCRegClass.
contains(DestReg)) {
1733 assert(Subtarget.hasDirectMove() &&
1734 "Subtarget doesn't support directmove, don't know how to copy.");
1738 }
else if (PPC::SPERCRegClass.
contains(SrcReg) &&
1739 PPC::GPRCRegClass.
contains(DestReg)) {
1743 }
else if (PPC::GPRCRegClass.
contains(SrcReg) &&
1744 PPC::SPERCRegClass.
contains(DestReg)) {
1751 if (PPC::GPRCRegClass.
contains(DestReg, SrcReg))
1753 else if (PPC::G8RCRegClass.
contains(DestReg, SrcReg))
1755 else if (PPC::F4RCRegClass.
contains(DestReg, SrcReg))
1757 else if (PPC::CRRCRegClass.
contains(DestReg, SrcReg))
1759 else if (PPC::VRRCRegClass.
contains(DestReg, SrcReg))
1761 else if (PPC::VSRCRegClass.
contains(DestReg, SrcReg))
1771 else if (PPC::VSFRCRegClass.
contains(DestReg, SrcReg) ||
1772 PPC::VSSRCRegClass.
contains(DestReg, SrcReg))
1773 Opc = (Subtarget.hasP9Vector()) ? PPC::XSCPSGNDP : PPC::XXLORf;
1774 else if (Subtarget.pairedVectorMemops() &&
1775 PPC::VSRpRCRegClass.contains(DestReg, SrcReg)) {
1776 if (SrcReg > PPC::VSRp15)
1777 SrcReg = PPC::V0 + (SrcReg - PPC::VSRp16) * 2;
1779 SrcReg = PPC::VSL0 + (SrcReg - PPC::VSRp0) * 2;
1780 if (DestReg > PPC::VSRp15)
1781 DestReg = PPC::V0 + (DestReg - PPC::VSRp16) * 2;
1783 DestReg = PPC::VSL0 + (DestReg - PPC::VSRp0) * 2;
1790 else if (PPC::CRBITRCRegClass.
contains(DestReg, SrcReg))
1792 else if (PPC::SPERCRegClass.
contains(DestReg, SrcReg))
1794 else if ((PPC::ACCRCRegClass.
contains(DestReg) ||
1795 PPC::UACCRCRegClass.
contains(DestReg)) &&
1796 (PPC::ACCRCRegClass.
contains(SrcReg) ||
1797 PPC::UACCRCRegClass.
contains(SrcReg))) {
1803 bool DestPrimed = PPC::ACCRCRegClass.contains(DestReg);
1804 bool SrcPrimed = PPC::ACCRCRegClass.contains(SrcReg);
1806 PPC::VSL0 + (SrcReg - (SrcPrimed ? PPC::ACC0 : PPC::UACC0)) * 4;
1808 PPC::VSL0 + (DestReg - (DestPrimed ? PPC::ACC0 : PPC::UACC0)) * 4;
1817 if (SrcPrimed && !KillSrc)
1820 }
else if (PPC::G8pRCRegClass.
contains(DestReg) &&
1821 PPC::G8pRCRegClass.
contains(SrcReg)) {
1823 unsigned DestRegIdx = DestReg - PPC::G8p0;
1824 MCRegister DestRegSub0 = PPC::X0 + 2 * DestRegIdx;
1825 MCRegister DestRegSub1 = PPC::X0 + 2 * DestRegIdx + 1;
1826 unsigned SrcRegIdx = SrcReg - PPC::G8p0;
1827 MCRegister SrcRegSub0 = PPC::X0 + 2 * SrcRegIdx;
1828 MCRegister SrcRegSub1 = PPC::X0 + 2 * SrcRegIdx + 1;
1850 if (PPC::GPRCRegClass.hasSubClassEq(RC) ||
1851 PPC::GPRC_NOR0RegClass.hasSubClassEq(RC)) {
1853 }
else if (PPC::G8RCRegClass.hasSubClassEq(RC) ||
1854 PPC::G8RC_NOX0RegClass.hasSubClassEq(RC)) {
1856 }
else if (PPC::F8RCRegClass.hasSubClassEq(RC)) {
1858 }
else if (PPC::F4RCRegClass.hasSubClassEq(RC)) {
1860 }
else if (PPC::SPERCRegClass.hasSubClassEq(RC)) {
1862 }
else if (PPC::CRRCRegClass.hasSubClassEq(RC)) {
1864 }
else if (PPC::CRBITRCRegClass.hasSubClassEq(RC)) {
1866 }
else if (PPC::VRRCRegClass.hasSubClassEq(RC)) {
1868 }
else if (PPC::VSRCRegClass.hasSubClassEq(RC)) {
1870 }
else if (PPC::VSFRCRegClass.hasSubClassEq(RC)) {
1872 }
else if (PPC::VSSRCRegClass.hasSubClassEq(RC)) {
1874 }
else if (PPC::SPILLTOVSRRCRegClass.hasSubClassEq(RC)) {
1876 }
else if (PPC::ACCRCRegClass.hasSubClassEq(RC)) {
1877 assert(Subtarget.pairedVectorMemops() &&
1878 "Register unexpected when paired memops are disabled.");
1880 }
else if (PPC::UACCRCRegClass.hasSubClassEq(RC)) {
1881 assert(Subtarget.pairedVectorMemops() &&
1882 "Register unexpected when paired memops are disabled.");
1884 }
else if (PPC::WACCRCRegClass.hasSubClassEq(RC)) {
1885 assert(Subtarget.pairedVectorMemops() &&
1886 "Register unexpected when paired memops are disabled.");
1888 }
else if (PPC::VSRpRCRegClass.hasSubClassEq(RC)) {
1889 assert(Subtarget.pairedVectorMemops() &&
1890 "Register unexpected when paired memops are disabled.");
1892 }
else if (PPC::G8pRCRegClass.hasSubClassEq(RC)) {
1903 return OpcodesForSpill[getSpillIndex(RC)];
1909 return OpcodesForSpill[getSpillIndex(RC)];
1912void PPCInstrInfo::StoreRegToStackSlot(
1926 if (PPC::CRRCRegClass.hasSubClassEq(RC) ||
1927 PPC::CRBITRCRegClass.hasSubClassEq(RC))
1941 StoreRegToStackSlot(MF, SrcReg, isKill, FrameIdx, RC, NewMIs);
1943 for (
unsigned i = 0, e = NewMIs.
size(); i != e; ++i)
1951 NewMIs.
back()->addMemOperand(MF, MMO);
1970 unsigned DestReg,
int FrameIdx,
1988 LoadRegFromStackSlot(MF,
DL, DestReg, FrameIdx, RC, NewMIs);
1990 for (
unsigned i = 0, e = NewMIs.
size(); i != e; ++i)
1998 NewMIs.
back()->addMemOperand(MF, MMO);
2021 assert(
Cond.size() == 2 &&
"Invalid PPC branch opcode!");
2023 Cond[0].setImm(
Cond[0].getImm() == 0 ? 1 : 0);
2036 unsigned DefOpc =
DefMI.getOpcode();
2037 if (DefOpc != PPC::LI && DefOpc != PPC::LI8)
2039 if (!
DefMI.getOperand(1).isImm())
2041 if (
DefMI.getOperand(1).getImm() != 0)
2057 for (UseIdx = 0; UseIdx <
UseMI.getNumOperands(); ++UseIdx)
2058 if (
UseMI.getOperand(UseIdx).isReg() &&
2062 assert(UseIdx <
UseMI.getNumOperands() &&
"Cannot find Reg in UseMI");
2073 if (UseInfo->
RegClass != PPC::GPRC_NOR0RegClassID &&
2074 UseInfo->
RegClass != PPC::G8RC_NOX0RegClassID)
2086 bool isPPC64 = Subtarget.
isPPC64();
2087 ZeroReg = isPPC64 ? PPC::ZERO8 : PPC::ZERO;
2089 ZeroReg = UseInfo->
RegClass == PPC::G8RC_NOX0RegClassID ?
2090 PPC::ZERO8 : PPC::ZERO;
2095 UseMI.getOperand(UseIdx).setReg(ZeroReg);
2107 if (
MRI->use_nodbg_empty(Reg))
2108 DefMI.eraseFromParent();
2114 if (
MI.definesRegister(PPC::CTR) ||
MI.definesRegister(PPC::CTR8))
2126 unsigned NumT,
unsigned ExtraT,
2128 unsigned NumF,
unsigned ExtraF,
2148 switch (
MI.getOpcode()) {
2164 unsigned OpC =
MI.getOpcode();
2165 if (OpC == PPC::BLR || OpC == PPC::BLR8) {
2166 if (Pred[1].
getReg() == PPC::CTR8 || Pred[1].
getReg() == PPC::CTR) {
2167 bool isPPC64 = Subtarget.
isPPC64();
2168 MI.setDesc(
get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZLR8 : PPC::BDNZLR)
2169 : (isPPC64 ? PPC::BDZLR8 : PPC::BDZLR)));
2175 MI.setDesc(
get(PPC::BCLR));
2178 MI.setDesc(
get(PPC::BCLRn));
2181 MI.setDesc(
get(PPC::BCCLR));
2183 .
addImm(Pred[0].getImm())
2188 }
else if (OpC == PPC::B) {
2189 if (Pred[1].
getReg() == PPC::CTR8 || Pred[1].
getReg() == PPC::CTR) {
2190 bool isPPC64 = Subtarget.
isPPC64();
2191 MI.setDesc(
get(Pred[0].getImm() ? (isPPC64 ? PPC::BDNZ8 : PPC::BDNZ)
2192 : (isPPC64 ? PPC::BDZ8 : PPC::BDZ)));
2199 MI.removeOperand(0);
2201 MI.setDesc(
get(PPC::BC));
2207 MI.removeOperand(0);
2209 MI.setDesc(
get(PPC::BCn));
2215 MI.removeOperand(0);
2217 MI.setDesc(
get(PPC::BCC));
2219 .
addImm(Pred[0].getImm())
2225 }
else if (OpC == PPC::BCTR || OpC == PPC::BCTR8 || OpC == PPC::BCTRL ||
2226 OpC == PPC::BCTRL8 || OpC == PPC::BCTRL_RM ||
2227 OpC == PPC::BCTRL8_RM) {
2228 if (Pred[1].
getReg() == PPC::CTR8 || Pred[1].
getReg() == PPC::CTR)
2231 bool setLR = OpC == PPC::BCTRL || OpC == PPC::BCTRL8 ||
2232 OpC == PPC::BCTRL_RM || OpC == PPC::BCTRL8_RM;
2233 bool isPPC64 = Subtarget.
isPPC64();
2236 MI.setDesc(
get(isPPC64 ? (setLR ? PPC::BCCTRL8 : PPC::BCCTR8)
2237 : (setLR ? PPC::BCCTRL : PPC::BCCTR)));
2240 MI.setDesc(
get(isPPC64 ? (setLR ? PPC::BCCTRL8n : PPC::BCCTR8n)
2241 : (setLR ? PPC::BCCTRLn : PPC::BCCTRn)));
2244 MI.setDesc(
get(isPPC64 ? (setLR ? PPC::BCCCTRL8 : PPC::BCCCTR8)
2245 : (setLR ? PPC::BCCCTRL : PPC::BCCCTR)));
2247 .
addImm(Pred[0].getImm())
2256 if (OpC == PPC::BCTRL_RM || OpC == PPC::BCTRL8_RM)
2268 assert(Pred1.
size() == 2 &&
"Invalid PPC first predicate");
2269 assert(Pred2.
size() == 2 &&
"Invalid PPC second predicate");
2271 if (Pred1[1].
getReg() == PPC::CTR8 || Pred1[1].
getReg() == PPC::CTR)
2273 if (Pred2[1].
getReg() == PPC::CTR8 || Pred2[1].
getReg() == PPC::CTR)
2298 std::vector<MachineOperand> &Pred,
2299 bool SkipDead)
const {
2307 { &PPC::CRRCRegClass, &PPC::CRBITRCRegClass,
2308 &PPC::CTRRCRegClass, &PPC::CTRRC8RegClass };
2312 for (
unsigned c = 0; c < std::size(RCs) && !Found; ++c) {
2315 if (MO.isDef() && RC->
contains(MO.getReg())) {
2319 }
else if (MO.isRegMask()) {
2321 if (MO.clobbersPhysReg(R)) {
2334 int64_t &
Value)
const {
2335 unsigned Opc =
MI.getOpcode();
2338 default:
return false;
2343 SrcReg =
MI.getOperand(1).getReg();
2345 Value =
MI.getOperand(2).getImm();
2354 SrcReg =
MI.getOperand(1).getReg();
2355 SrcReg2 =
MI.getOperand(2).getReg();
2374 if (OpC == PPC::FCMPUS || OpC == PPC::FCMPUD)
2386 bool isPPC64 = Subtarget.
isPPC64();
2387 bool is32BitSignedCompare = OpC == PPC::CMPWI || OpC == PPC::CMPW;
2388 bool is32BitUnsignedCompare = OpC == PPC::CMPLWI || OpC == PPC::CMPLW;
2389 bool is64BitUnsignedCompare = OpC == PPC::CMPLDI || OpC == PPC::CMPLD;
2398 if (!
MI)
return false;
2400 bool equalityOnly =
false;
2403 if (is32BitSignedCompare) {
2409 }
else if (is32BitUnsignedCompare) {
2414 equalityOnly =
true;
2418 equalityOnly = is64BitUnsignedCompare;
2420 equalityOnly = is32BitUnsignedCompare;
2426 I =
MRI->use_instr_begin(CRReg), IE =
MRI->use_instr_end();
2438 if (SubIdx != PPC::sub_eq)
2450 bool FoundUse =
false;
2452 J =
MRI->use_instr_begin(CRReg), JE =
MRI->use_instr_end();
2479 else if (
Value != 0) {
2488 if (equalityOnly || !
MRI->hasOneUse(CRReg))
2498 int16_t Immed = (int16_t)
Value;
2532 for (;
I !=
E && !noSub; --
I) {
2534 unsigned IOpC = Instr.getOpcode();
2536 if (&*
I != &CmpInstr && (Instr.modifiesRegister(PPC::CR0,
TRI) ||
2537 Instr.readsRegister(PPC::CR0,
TRI)))
2546 if ((OpC == PPC::CMPW || OpC == PPC::CMPLW ||
2547 OpC == PPC::CMPD || OpC == PPC::CMPLD) &&
2548 (IOpC == PPC::SUBF || IOpC == PPC::SUBF8) &&
2549 ((Instr.getOperand(1).getReg() == SrcReg &&
2550 Instr.getOperand(2).getReg() == SrcReg2) ||
2551 (Instr.getOperand(1).getReg() == SrcReg2 &&
2552 Instr.getOperand(2).getReg() == SrcReg))) {
2570 int MIOpC =
MI->getOpcode();
2571 if (MIOpC == PPC::ANDI_rec || MIOpC == PPC::ANDI8_rec ||
2572 MIOpC == PPC::ANDIS_rec || MIOpC == PPC::ANDIS8_rec)
2575 NewOpC = PPC::getRecordFormOpcode(MIOpC);
2593 if (!equalityOnly && (NewOpC == PPC::SUBF_rec || NewOpC == PPC::SUBF8_rec) &&
2603 bool ShouldSwap =
false;
2604 if (Sub &&
Value == 0) {
2610 ShouldSwap = !ShouldSwap;
2615 I =
MRI->use_instr_begin(CRReg), IE =
MRI->use_instr_end();
2623 "Invalid predicate for equality-only optimization");
2630 assert((!equalityOnly || NewSubReg == PPC::sub_eq) &&
2631 "Invalid CR bit for equality-only optimization");
2633 if (NewSubReg == PPC::sub_lt)
2634 NewSubReg = PPC::sub_gt;
2635 else if (NewSubReg == PPC::sub_gt)
2636 NewSubReg = PPC::sub_lt;
2644 "Non-zero immediate support and ShouldSwap"
2645 "may conflict in updating predicate");
2653 BuildMI(*
MI->getParent(), std::next(MII),
MI->getDebugLoc(),
2654 get(TargetOpcode::COPY), CRReg)
2659 MI->clearRegisterDeads(PPC::CR0);
2661 if (MIOpC != NewOpC) {
2671 if (MIOpC == PPC::RLWINM || MIOpC == PPC::RLWINM8) {
2672 Register GPRRes =
MI->getOperand(0).getReg();
2673 int64_t SH =
MI->getOperand(2).getImm();
2674 int64_t MB =
MI->getOperand(3).getImm();
2675 int64_t ME =
MI->getOperand(4).getImm();
2678 bool MBInLoHWord = MB >= 16;
2679 bool MEInLoHWord = ME >= 16;
2682 if (MB <= ME && MBInLoHWord == MEInLoHWord && SH == 0) {
2683 Mask = ((1LLU << (32 - MB)) - 1) & ~((1LLU << (31 - ME)) - 1);
2685 Mask >>= MBInLoHWord ? 0 : 16;
2686 NewOpC = MIOpC == PPC::RLWINM
2687 ? (MBInLoHWord ? PPC::ANDI_rec : PPC::ANDIS_rec)
2688 : (MBInLoHWord ? PPC::ANDI8_rec : PPC::ANDIS8_rec);
2689 }
else if (
MRI->use_empty(GPRRes) && (ME == 31) &&
2690 (ME - MB + 1 == SH) && (MB >= 16)) {
2694 Mask = ((1LLU << 32) - 1) & ~((1LLU << (32 - SH)) - 1);
2696 NewOpC = MIOpC == PPC::RLWINM ? PPC::ANDIS_rec : PPC::ANDIS8_rec;
2699 if (Mask != ~0LLU) {
2700 MI->removeOperand(4);
2701 MI->removeOperand(3);
2702 MI->getOperand(2).setImm(Mask);
2703 NumRcRotatesConvertedToRcAnd++;
2705 }
else if (MIOpC == PPC::RLDICL &&
MI->getOperand(2).getImm() == 0) {
2706 int64_t MB =
MI->getOperand(3).getImm();
2708 uint64_t Mask = (1LLU << (63 - MB + 1)) - 1;
2709 NewOpC = PPC::ANDI8_rec;
2710 MI->removeOperand(3);
2711 MI->getOperand(2).setImm(Mask);
2712 NumRcRotatesConvertedToRcAnd++;
2717 MI->setDesc(NewDesc);
2720 if (!
MI->definesRegister(ImpDef)) {
2721 MI->addOperand(*
MI->getParent()->getParent(),
2726 if (!
MI->readsRegister(ImpUse)) {
2727 MI->addOperand(*
MI->getParent()->getParent(),
2732 assert(
MI->definesRegister(PPC::CR0) &&
2733 "Record-form instruction does not define cr0?");
2738 for (
unsigned i = 0, e = PredsToUpdate.
size(); i < e; i++)
2739 PredsToUpdate[i].first->setImm(PredsToUpdate[i].second);
2741 for (
unsigned i = 0, e = SubRegsToUpdate.
size(); i < e; i++)
2742 SubRegsToUpdate[i].first->setSubReg(SubRegsToUpdate[i].second);
2753 int64_t CmpMask, CmpValue;
2758 if (CmpValue || !CmpMask || SrcReg2)
2766 if (Opc == PPC::CMPLWI || Opc == PPC::CMPLDI)
2773 if (Subtarget.
isPPC64() && Opc == PPC::CMPWI)
2780 bool SrcRegHasOtherUse =
false;
2787 if (CRReg != PPC::CR0)
2791 bool SeenUseOfCRReg =
false;
2792 bool IsCRRegKilled =
false;
2793 if (!isRegElgibleForForwarding(RegMO, *SrcMI, CmpMI,
false, IsCRRegKilled,
2799 int NewOpC = PPC::getRecordFormOpcode(SrcMIOpc);
2813 "Record-form instruction does not define cr0?");
2827 OffsetIsScalable =
false;
2862 case PPC::DFSTOREf64:
2863 return FirstOpc == SecondOpc;
2869 return SecondOpc == PPC::STW || SecondOpc == PPC::STW8;
2876 int64_t OpOffset2,
bool OffsetIsScalable2,
unsigned ClusterSize,
2877 unsigned NumBytes)
const {
2883 "Only base registers and frame indices are supported.");
2888 if (ClusterSize > 2)
2902 unsigned FirstOpc = FirstLdSt.
getOpcode();
2903 unsigned SecondOpc = SecondLdSt.
getOpcode();
2915 int64_t Offset1 = 0, Offset2 = 0;
2923 assert(Base1 == &BaseOp1 && Base2 == &BaseOp2 &&
2924 "getMemOperandWithOffsetWidth return incorrect base op");
2926 assert(Offset1 <= Offset2 &&
"Caller should have ordered offsets.");
2927 return Offset1 + (int64_t)Width1.
getValue() == Offset2;
2934 unsigned Opcode =
MI.getOpcode();
2936 if (Opcode == PPC::INLINEASM || Opcode == PPC::INLINEASM_BR) {
2938 const char *AsmStr =
MI.getOperand(0).getSymbolName();
2940 }
else if (Opcode == TargetOpcode::STACKMAP) {
2943 }
else if (Opcode == TargetOpcode::PATCHPOINT) {
2947 return get(Opcode).getSize();
2951std::pair<unsigned, unsigned>
2954 return std::make_pair(TF, 0u);
2959 using namespace PPCII;
2960 static const std::pair<unsigned, const char *> TargetFlags[] = {
2961 {MO_PLT,
"ppc-plt"},
2962 {MO_PIC_FLAG,
"ppc-pic"},
2963 {MO_PCREL_FLAG,
"ppc-pcrel"},
2964 {MO_GOT_FLAG,
"ppc-got"},
2965 {MO_PCREL_OPT_FLAG,
"ppc-opt-pcrel"},
2966 {MO_TLSGD_FLAG,
"ppc-tlsgd"},
2967 {MO_TPREL_FLAG,
"ppc-tprel"},
2968 {MO_TLSLDM_FLAG,
"ppc-tlsldm"},
2969 {MO_TLSLD_FLAG,
"ppc-tlsld"},
2970 {MO_TLSGDM_FLAG,
"ppc-tlsgdm"},
2971 {MO_GOT_TLSGD_PCREL_FLAG,
"ppc-got-tlsgd-pcrel"},
2972 {MO_GOT_TLSLD_PCREL_FLAG,
"ppc-got-tlsld-pcrel"},
2973 {MO_GOT_TPREL_PCREL_FLAG,
"ppc-got-tprel-pcrel"},
2976 {MO_TPREL_LO,
"ppc-tprel-lo"},
2977 {MO_TPREL_HA,
"ppc-tprel-ha"},
2978 {MO_DTPREL_LO,
"ppc-dtprel-lo"},
2979 {MO_TLSLD_LO,
"ppc-tlsld-lo"},
2980 {MO_TOC_LO,
"ppc-toc-lo"},
2981 {MO_TLS,
"ppc-tls"},
2982 {MO_PIC_HA_FLAG,
"ppc-ha-pic"},
2983 {MO_PIC_LO_FLAG,
"ppc-lo-pic"},
2984 {MO_TPREL_PCREL_FLAG,
"ppc-tprel-pcrel"},
2985 {MO_TLS_PCREL_FLAG,
"ppc-tls-pcrel"},
2986 {MO_GOT_PCREL_FLAG,
"ppc-got-pcrel"},
2998 unsigned UpperOpcode, LowerOpcode;
2999 switch (
MI.getOpcode()) {
3000 case PPC::DFLOADf32:
3001 UpperOpcode = PPC::LXSSP;
3002 LowerOpcode = PPC::LFS;
3004 case PPC::DFLOADf64:
3005 UpperOpcode = PPC::LXSD;
3006 LowerOpcode = PPC::LFD;
3008 case PPC::DFSTOREf32:
3009 UpperOpcode = PPC::STXSSP;
3010 LowerOpcode = PPC::STFS;
3012 case PPC::DFSTOREf64:
3013 UpperOpcode = PPC::STXSD;
3014 LowerOpcode = PPC::STFD;
3016 case PPC::XFLOADf32:
3017 UpperOpcode = PPC::LXSSPX;
3018 LowerOpcode = PPC::LFSX;
3020 case PPC::XFLOADf64:
3021 UpperOpcode = PPC::LXSDX;
3022 LowerOpcode = PPC::LFDX;
3024 case PPC::XFSTOREf32:
3025 UpperOpcode = PPC::STXSSPX;
3026 LowerOpcode = PPC::STFSX;
3028 case PPC::XFSTOREf64:
3029 UpperOpcode = PPC::STXSDX;
3030 LowerOpcode = PPC::STFDX;
3033 UpperOpcode = PPC::LXSIWAX;
3034 LowerOpcode = PPC::LFIWAX;
3037 UpperOpcode = PPC::LXSIWZX;
3038 LowerOpcode = PPC::LFIWZX;
3041 UpperOpcode = PPC::STXSIWX;
3042 LowerOpcode = PPC::STFIWX;
3048 Register TargetReg =
MI.getOperand(0).getReg();
3050 if ((TargetReg >= PPC::F0 && TargetReg <= PPC::F31) ||
3051 (TargetReg >= PPC::VSL0 && TargetReg <= PPC::VSL31))
3052 Opcode = LowerOpcode;
3054 Opcode = UpperOpcode;
3055 MI.setDesc(
get(Opcode));
3064 auto &
MBB = *
MI.getParent();
3065 auto DL =
MI.getDebugLoc();
3067 switch (
MI.getOpcode()) {
3068 case PPC::BUILD_UACC: {
3071 if (ACC - PPC::ACC0 != UACC - PPC::UACC0) {
3072 MCRegister SrcVSR = PPC::VSL0 + (UACC - PPC::UACC0) * 4;
3073 MCRegister DstVSR = PPC::VSL0 + (ACC - PPC::ACC0) * 4;
3077 for (
int VecNo = 0; VecNo < 4; VecNo++)
3079 .addReg(SrcVSR + VecNo)
3087 case PPC::KILL_PAIR: {
3088 MI.setDesc(
get(PPC::UNENCODED_NOP));
3089 MI.removeOperand(1);
3090 MI.removeOperand(0);
3093 case TargetOpcode::LOAD_STACK_GUARD: {
3095 "Only Linux target is expected to contain LOAD_STACK_GUARD");
3096 const int64_t
Offset = Subtarget.
isPPC64() ? -0x7010 : -0x7008;
3097 const unsigned Reg = Subtarget.
isPPC64() ? PPC::X13 : PPC::R2;
3098 MI.setDesc(
get(Subtarget.
isPPC64() ? PPC::LD : PPC::LWZ));
3104 case PPC::PPCLdFixedAddr: {
3106 "Only targets with Glibc expected to contain PPCLdFixedAddr");
3108 const unsigned Reg = Subtarget.
isPPC64() ? PPC::X13 : PPC::R2;
3109 MI.setDesc(
get(PPC::LWZ));
3111#undef PPC_LNX_FEATURE
3113#define PPC_LNX_DEFINE_OFFSETS
3114#include "llvm/TargetParser/PPCTargetParser.def"
3116 bool Is64 = Subtarget.
isPPC64();
3117 if (FAType == PPC_FAWORD_HWCAP) {
3119 Offset = Is64 ? PPC_HWCAP_OFFSET_LE64 : PPC_HWCAP_OFFSET_LE32;
3121 Offset = Is64 ? PPC_HWCAP_OFFSET_BE64 : PPC_HWCAP_OFFSET_BE32;
3122 }
else if (FAType == PPC_FAWORD_HWCAP2) {
3124 Offset = Is64 ? PPC_HWCAP2_OFFSET_LE64 : PPC_HWCAP2_OFFSET_LE32;
3126 Offset = Is64 ? PPC_HWCAP2_OFFSET_BE64 : PPC_HWCAP2_OFFSET_BE32;
3127 }
else if (FAType == PPC_FAWORD_CPUID) {
3129 Offset = Is64 ? PPC_CPUID_OFFSET_LE64 : PPC_CPUID_OFFSET_LE32;
3131 Offset = Is64 ? PPC_CPUID_OFFSET_BE64 : PPC_CPUID_OFFSET_BE32;
3133 assert(
Offset &&
"Do not know the offset for this fixed addr load");
3134 MI.removeOperand(1);
3140#define PPC_TGT_PARSER_UNDEF_MACROS
3141#include "llvm/TargetParser/PPCTargetParser.def"
3142#undef PPC_TGT_PARSER_UNDEF_MACROS
3144 case PPC::DFLOADf32:
3145 case PPC::DFLOADf64:
3146 case PPC::DFSTOREf32:
3147 case PPC::DFSTOREf64: {
3148 assert(Subtarget.hasP9Vector() &&
3149 "Invalid D-Form Pseudo-ops on Pre-P9 target.");
3152 "D-form op must have register and immediate operands");
3155 case PPC::XFLOADf32:
3156 case PPC::XFSTOREf32:
3160 assert(Subtarget.hasP8Vector() &&
3161 "Invalid X-Form Pseudo-ops on Pre-P8 target.");
3162 assert(
MI.getOperand(2).isReg() &&
MI.getOperand(1).isReg() &&
3163 "X-form op must have register and register operands");
3166 case PPC::XFLOADf64:
3167 case PPC::XFSTOREf64: {
3168 assert(Subtarget.hasVSX() &&
3169 "Invalid X-Form Pseudo-ops on target that has no VSX.");
3170 assert(
MI.getOperand(2).isReg() &&
MI.getOperand(1).isReg() &&
3171 "X-form op must have register and register operands");
3174 case PPC::SPILLTOVSR_LD: {
3175 Register TargetReg =
MI.getOperand(0).getReg();
3176 if (PPC::VSFRCRegClass.
contains(TargetReg)) {
3177 MI.setDesc(
get(PPC::DFLOADf64));
3181 MI.setDesc(
get(PPC::LD));
3184 case PPC::SPILLTOVSR_ST: {
3186 if (PPC::VSFRCRegClass.
contains(SrcReg)) {
3187 NumStoreSPILLVSRRCAsVec++;
3188 MI.setDesc(
get(PPC::DFSTOREf64));
3191 NumStoreSPILLVSRRCAsGpr++;
3192 MI.setDesc(
get(PPC::STD));
3196 case PPC::SPILLTOVSR_LDX: {
3197 Register TargetReg =
MI.getOperand(0).getReg();
3198 if (PPC::VSFRCRegClass.
contains(TargetReg))
3199 MI.setDesc(
get(PPC::LXSDX));
3201 MI.setDesc(
get(PPC::LDX));
3204 case PPC::SPILLTOVSR_STX: {
3206 if (PPC::VSFRCRegClass.
contains(SrcReg)) {
3207 NumStoreSPILLVSRRCAsVec++;
3208 MI.setDesc(
get(PPC::STXSDX));
3210 NumStoreSPILLVSRRCAsGpr++;
3211 MI.setDesc(
get(PPC::STDX));
3218 case PPC::CFENCE8: {
3219 auto Val =
MI.getOperand(0).getReg();
3220 unsigned CmpOp = Subtarget.
isPPC64() ? PPC::CMPD : PPC::CMPW;
3226 MI.setDesc(
get(PPC::ISYNC));
3227 MI.removeOperand(0);
3238static unsigned selectReg(int64_t Imm1, int64_t Imm2,
unsigned CompareOpc,
3239 unsigned TrueReg,
unsigned FalseReg,
3240 unsigned CRSubReg) {
3242 if (CompareOpc == PPC::CMPWI || CompareOpc == PPC::CMPDI) {
3246 return Imm1 < Imm2 ? TrueReg : FalseReg;
3248 return Imm1 > Imm2 ? TrueReg : FalseReg;
3250 return Imm1 == Imm2 ? TrueReg : FalseReg;
3254 else if (CompareOpc == PPC::CMPLWI || CompareOpc == PPC::CMPLDI) {
3262 return Imm1 == Imm2 ? TrueReg : FalseReg;
3265 return PPC::NoRegister;
3270 int64_t Imm)
const {
3271 assert(
MI.getOperand(OpNo).isReg() &&
"Operand must be a REG");
3273 Register InUseReg =
MI.getOperand(OpNo).getReg();
3274 MI.getOperand(OpNo).ChangeToImmediate(Imm);
3282 int UseOpIdx =
MI.findRegisterUseOperandIdx(InUseReg,
false,
TRI);
3283 if (UseOpIdx >= 0) {
3293 MI.removeOperand(UseOpIdx);
3302 int OperandToKeep = LII.
SetCR ? 1 : 0;
3303 for (
int i =
MI.getNumOperands() - 1; i > OperandToKeep; i--)
3304 MI.removeOperand(i);
3308 MI.setDesc(
get(LII.
Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec));
3323 bool &SeenIntermediateUse)
const {
3324 assert(!
MI.getParent()->getParent()->getRegInfo().isSSA() &&
3325 "Should be called after register allocation.");
3329 SeenIntermediateUse =
false;
3330 for (; It !=
E; ++It) {
3331 if (It->modifiesRegister(Reg,
TRI))
3333 if (It->readsRegister(Reg,
TRI))
3334 SeenIntermediateUse =
true;
3342 int64_t Imm)
const {
3344 "Register should be in non-SSA form after RA");
3345 bool isPPC64 = Subtarget.
isPPC64();
3349 if (isInt<16>(Imm)) {
3351 }
else if (isInt<32>(Imm)) {
3359 assert(isPPC64 &&
"Materializing 64-bit immediate to single register is "
3360 "only supported in PPC64");
3362 if ((Imm >> 32) & 0xFFFF)
3365 .
addImm((Imm >> 32) & 0xFFFF);
3372 .
addImm((Imm >> 16) & 0xFFFF);
3382 unsigned &OpNoForForwarding,
3383 bool &SeenIntermediateUse)
const {
3384 OpNoForForwarding = ~0U;
3392 for (
int i = 1, e =
MI.getNumOperands(); i < e; i++) {
3393 if (!
MI.getOperand(i).isReg())
3396 if (!Reg.isVirtual())
3401 if (DefMIForTrueReg->
getOpcode() == PPC::LI ||
3402 DefMIForTrueReg->
getOpcode() == PPC::LI8 ||
3403 DefMIForTrueReg->
getOpcode() == PPC::ADDI ||
3404 DefMIForTrueReg->
getOpcode() == PPC::ADDI8) {
3405 OpNoForForwarding = i;
3406 DefMI = DefMIForTrueReg;
3421 unsigned Opc =
MI.getOpcode();
3422 bool ConvertibleImmForm =
3423 Opc == PPC::CMPWI || Opc == PPC::CMPLWI || Opc == PPC::CMPDI ||
3424 Opc == PPC::CMPLDI || Opc == PPC::ADDI || Opc == PPC::ADDI8 ||
3425 Opc == PPC::ORI || Opc == PPC::ORI8 || Opc == PPC::XORI ||
3426 Opc == PPC::XORI8 || Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec ||
3427 Opc == PPC::RLDICL_32 || Opc == PPC::RLDICL_32_64 ||
3428 Opc == PPC::RLWINM || Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8 ||
3429 Opc == PPC::RLWINM8_rec;
3430 bool IsVFReg = (
MI.getNumOperands() &&
MI.getOperand(0).isReg())
3437 if ((Opc == PPC::OR || Opc == PPC::OR8) &&
3438 MI.getOperand(1).getReg() ==
MI.getOperand(2).getReg())
3440 for (
int i = 1, e =
MI.getNumOperands(); i <
e; i++) {
3442 SeenIntermediateUse =
false;
3456 case PPC::ADDItocL8:
3459 OpNoForForwarding = i;
3466 return OpNoForForwarding == ~0
U ? nullptr :
DefMI;
3469unsigned PPCInstrInfo::getSpillTarget()
const {
3472 bool IsP10Variant = Subtarget.isISA3_1() || Subtarget.pairedVectorMemops();
3473 return Subtarget.isISAFuture() ? 3 : IsP10Variant ?
3474 2 : Subtarget.hasP9Vector() ?
3513 bool PostRA = !
MRI->isSSA();
3519 unsigned ToBeDeletedReg = 0;
3520 int64_t OffsetImm = 0;
3521 unsigned XFormOpcode = 0;
3529 bool OtherIntermediateUse =
false;
3533 if (OtherIntermediateUse || !ADDMI)
3540 unsigned ScaleRegIdx = 0;
3541 int64_t OffsetAddi = 0;
3555 assert(ADDIMI &&
"There should be ADDIMI for valid ToBeChangedReg.");
3560 for (
auto It = ++Start; It !=
End; It++)
3569 (ScaleReg == PPC::R0 || ScaleReg == PPC::X0))
3574 if (NewDefFor(ToBeChangedReg, *ADDMI,
MI) || NewDefFor(ScaleReg, *ADDMI,
MI))
3590 MI.setDesc(
get(XFormOpcode));
3592 .ChangeToRegister(ScaleReg,
false,
false,
3596 .ChangeToRegister(ToBeChangedReg,
false,
false,
true);
3608 int64_t &Imm)
const {
3612 if (Opc != PPC::ADDI && Opc != PPC::ADDI8)
3628 return Opc == PPC::ADD4 || Opc == PPC::ADD8;
3632 unsigned &ToBeDeletedReg,
3633 unsigned &XFormOpcode,
3637 if (!
MI.mayLoadOrStore())
3640 unsigned Opc =
MI.getOpcode();
3645 if (XFormOpcode == PPC::INSTRUCTION_LIST_END)
3659 if (!ImmOperand.
isImm())
3662 assert(RegOperand.
isReg() &&
"Instruction format is not right");
3665 if (!RegOperand.
isKill())
3668 ToBeDeletedReg = RegOperand.
getReg();
3669 OffsetImm = ImmOperand.
getImm();
3676 int64_t &OffsetAddi,
3677 int64_t OffsetImm)
const {
3684 bool OtherIntermediateUse =
false;
3705 if (OtherIntermediateUse || !ADDIMI)
3711 if (isInt<16>(OffsetAddi + OffsetImm))
3724 bool PostRA = !
MRI->isSSA();
3725 bool SeenIntermediateUse =
true;
3726 unsigned ForwardingOperand = ~0U;
3728 SeenIntermediateUse);
3731 assert(ForwardingOperand <
MI.getNumOperands() &&
3732 "The forwarding operand needs to be valid at this point");
3733 bool IsForwardingOperandKilled =
MI.getOperand(ForwardingOperand).isKill();
3734 bool KillFwdDefMI = !SeenIntermediateUse && IsForwardingOperandKilled;
3735 if (KilledDef && KillFwdDefMI)
3750 PPC::INSTRUCTION_LIST_END &&
3751 transformToNewImmFormFedByAdd(
MI, *
DefMI, ForwardingOperand))
3755 bool IsVFReg =
MI.getOperand(0).isReg()
3763 transformToImmFormFedByAdd(
MI, III, ForwardingOperand, *
DefMI,
3770 transformToImmFormFedByLI(
MI, III, ForwardingOperand, *
DefMI))
3775 if (!HasImmForm && simplifyToLI(
MI, *
DefMI, ForwardingOperand, KilledDef))
3784 Register FoldingReg =
MI.getOperand(1).getReg();
3788 if (SrcMI->
getOpcode() != PPC::RLWINM &&
3789 SrcMI->
getOpcode() != PPC::RLWINM_rec &&
3793 assert((
MI.getOperand(2).isImm() &&
MI.getOperand(3).isImm() &&
3796 "Invalid PPC::RLWINM Instruction!");
3804 assert((MEMI < 32 && MESrc < 32 && MBMI < 32 && MBSrc < 32) &&
3805 "Invalid PPC::RLWINM Instruction!");
3827 bool SrcMaskFull = (MBSrc - MESrc == 1) || (MBSrc == 0 && MESrc == 31);
3830 if ((MBMI > MEMI) && !SrcMaskFull)
3840 APInt RotatedSrcMask = MaskSrc.
rotl(SHMI);
3841 APInt FinalMask = RotatedSrcMask & MaskMI;
3843 bool Simplified =
false;
3846 if (FinalMask.
isZero()) {
3848 (
MI.getOpcode() == PPC::RLWINM8 ||
MI.getOpcode() == PPC::RLWINM8_rec);
3853 if (
MI.getOpcode() == PPC::RLWINM ||
MI.getOpcode() == PPC::RLWINM8) {
3855 MI.removeOperand(4);
3856 MI.removeOperand(3);
3857 MI.removeOperand(2);
3858 MI.getOperand(1).ChangeToImmediate(0);
3859 MI.setDesc(
get(Is64Bit ? PPC::LI8 : PPC::LI));
3862 MI.removeOperand(4);
3863 MI.removeOperand(3);
3864 MI.getOperand(2).setImm(0);
3865 MI.setDesc(
get(Is64Bit ? PPC::ANDI8_rec : PPC::ANDI_rec));
3868 MI.getOperand(1).setIsKill(
true);
3872 MI.getOperand(1).setIsKill(
false);
3888 uint16_t NewSH = (SHSrc + SHMI) % 32;
3889 MI.getOperand(2).setImm(NewSH);
3892 MI.getOperand(3).setImm(NewMB);
3893 MI.getOperand(4).setImm(NewME);
3897 MI.getOperand(1).setIsKill(
true);
3901 MI.getOperand(1).setIsKill(
false);
3906 if (Simplified &
MRI->use_nodbg_empty(FoldingReg) &&
3931 default:
return false;
3939 III.
ImmOpcode = Opc == PPC::ADD4 ? PPC::ADDI : PPC::ADDI8;
3948 III.
ImmOpcode = Opc == PPC::ADDC ? PPC::ADDIC : PPC::ADDIC8;
3964 III.
ImmOpcode = Opc == PPC::SUBFC ? PPC::SUBFIC : PPC::SUBFIC8;
3972 III.
ImmOpcode = Opc == PPC::CMPW ? PPC::CMPWI : PPC::CMPDI;
3980 III.
ImmOpcode = Opc == PPC::CMPLW ? PPC::CMPLWI : PPC::CMPLDI;
4000 case PPC::OR: III.
ImmOpcode = PPC::ORI;
break;
4001 case PPC::OR8: III.
ImmOpcode = PPC::ORI8;
break;
4002 case PPC::XOR: III.
ImmOpcode = PPC::XORI;
break;
4003 case PPC::XOR8: III.
ImmOpcode = PPC::XORI8;
break;
4008 case PPC::RLWNM_rec:
4009 case PPC::RLWNM8_rec:
4029 if (Opc == PPC::RLWNM || Opc == PPC::RLWNM8 || Opc == PPC::RLWNM_rec ||
4030 Opc == PPC::RLWNM8_rec)
4036 case PPC::RLWNM: III.
ImmOpcode = PPC::RLWINM;
break;
4037 case PPC::RLWNM8: III.
ImmOpcode = PPC::RLWINM8;
break;
4038 case PPC::RLWNM_rec:
4041 case PPC::RLWNM8_rec:
4044 case PPC::SLW: III.
ImmOpcode = PPC::RLWINM;
break;
4045 case PPC::SLW8: III.
ImmOpcode = PPC::RLWINM8;
break;
4052 case PPC::SRW: III.
ImmOpcode = PPC::RLWINM;
break;
4053 case PPC::SRW8: III.
ImmOpcode = PPC::RLWINM8;
break;
4073 case PPC::RLDCL_rec:
4075 case PPC::RLDCR_rec:
4091 if (Opc == PPC::RLDCL || Opc == PPC::RLDCL_rec || Opc == PPC::RLDCR ||
4092 Opc == PPC::RLDCR_rec)
4098 case PPC::RLDCL: III.
ImmOpcode = PPC::RLDICL;
break;
4099 case PPC::RLDCL_rec:
4102 case PPC::RLDCR: III.
ImmOpcode = PPC::RLDICR;
break;
4103 case PPC::RLDCR_rec:
4106 case PPC::SLD: III.
ImmOpcode = PPC::RLDICR;
break;
4110 case PPC::SRD: III.
ImmOpcode = PPC::RLDICL;
break;
4157 case PPC::LBZX: III.
ImmOpcode = PPC::LBZ;
break;
4158 case PPC::LBZX8: III.
ImmOpcode = PPC::LBZ8;
break;
4159 case PPC::LHZX: III.
ImmOpcode = PPC::LHZ;
break;
4160 case PPC::LHZX8: III.
ImmOpcode = PPC::LHZ8;
break;
4161 case PPC::LHAX: III.
ImmOpcode = PPC::LHA;
break;
4162 case PPC::LHAX8: III.
ImmOpcode = PPC::LHA8;
break;
4163 case PPC::LWZX: III.
ImmOpcode = PPC::LWZ;
break;
4164 case PPC::LWZX8: III.
ImmOpcode = PPC::LWZ8;
break;
4170 case PPC::LFSX: III.
ImmOpcode = PPC::LFS;
break;
4171 case PPC::LFDX: III.
ImmOpcode = PPC::LFD;
break;
4172 case PPC::STBX: III.
ImmOpcode = PPC::STB;
break;
4173 case PPC::STBX8: III.
ImmOpcode = PPC::STB8;
break;
4174 case PPC::STHX: III.
ImmOpcode = PPC::STH;
break;
4175 case PPC::STHX8: III.
ImmOpcode = PPC::STH8;
break;
4176 case PPC::STWX: III.
ImmOpcode = PPC::STW;
break;
4177 case PPC::STWX8: III.
ImmOpcode = PPC::STW8;
break;
4182 case PPC::STFSX: III.
ImmOpcode = PPC::STFS;
break;
4183 case PPC::STFDX: III.
ImmOpcode = PPC::STFD;
break;
4215 case PPC::LBZUX: III.
ImmOpcode = PPC::LBZU;
break;
4216 case PPC::LBZUX8: III.
ImmOpcode = PPC::LBZU8;
break;
4217 case PPC::LHZUX: III.
ImmOpcode = PPC::LHZU;
break;
4218 case PPC::LHZUX8: III.
ImmOpcode = PPC::LHZU8;
break;
4219 case PPC::LHAUX: III.
ImmOpcode = PPC::LHAU;
break;
4220 case PPC::LHAUX8: III.
ImmOpcode = PPC::LHAU8;
break;
4221 case PPC::LWZUX: III.
ImmOpcode = PPC::LWZU;
break;
4222 case PPC::LWZUX8: III.
ImmOpcode = PPC::LWZU8;
break;
4227 case PPC::LFSUX: III.
ImmOpcode = PPC::LFSU;
break;
4228 case PPC::LFDUX: III.
ImmOpcode = PPC::LFDU;
break;
4229 case PPC::STBUX: III.
ImmOpcode = PPC::STBU;
break;
4230 case PPC::STBUX8: III.
ImmOpcode = PPC::STBU8;
break;
4231 case PPC::STHUX: III.
ImmOpcode = PPC::STHU;
break;
4232 case PPC::STHUX8: III.
ImmOpcode = PPC::STHU8;
break;
4233 case PPC::STWUX: III.
ImmOpcode = PPC::STWU;
break;
4234 case PPC::STWUX8: III.
ImmOpcode = PPC::STWU8;
break;
4239 case PPC::STFSUX: III.
ImmOpcode = PPC::STFSU;
break;
4240 case PPC::STFDUX: III.
ImmOpcode = PPC::STFDU;
break;
4253 case PPC::XFLOADf32:
4254 case PPC::XFLOADf64:
4255 case PPC::XFSTOREf32:
4256 case PPC::XFSTOREf64:
4257 if (!Subtarget.hasP9Vector())
4284 case PPC::XFLOADf32:
4298 case PPC::XFLOADf64:
4316 case PPC::XFSTOREf32:
4330 case PPC::XFSTOREf64:
4341 assert(Op1 != Op2 &&
"Cannot swap operand with itself.");
4343 unsigned MaxOp = std::max(Op1, Op2);
4344 unsigned MinOp = std::min(Op1, Op2);
4347 MI.removeOperand(std::max(Op1, Op2));
4348 MI.removeOperand(std::min(Op1, Op2));
4352 if (MaxOp - MinOp == 1 &&
MI.getNumOperands() == MinOp) {
4353 MI.addOperand(MOp2);
4354 MI.addOperand(MOp1);
4359 unsigned TotalOps =
MI.getNumOperands() + 2;
4360 for (
unsigned i =
MI.getNumOperands() - 1; i >= MinOp; i--) {
4362 MI.removeOperand(i);
4365 MI.addOperand(MOp2);
4367 for (
unsigned i =
MI.getNumOperands(); i < TotalOps; i++) {
4369 MI.addOperand(MOp1);
4371 MI.addOperand(MOps.
back());
4382 unsigned OpNoForForwarding
4422 unsigned Opc =
DefMI.getOpcode();
4423 if (Opc != PPC::ADDItocL8 && Opc != PPC::ADDI && Opc != PPC::ADDI8)
4427 "Add inst must have at least three operands");
4428 RegMO = &
DefMI.getOperand(1);
4429 ImmMO = &
DefMI.getOperand(2);
4432 if (!RegMO->
isReg())
4441bool PPCInstrInfo::isRegElgibleForForwarding(
4444 bool &IsFwdFeederRegKilled,
bool &SeenIntermediateUse)
const {
4461 for (; It !=
E; ++It) {
4465 IsFwdFeederRegKilled =
true;
4467 SeenIntermediateUse =
true;
4469 if ((&*It) == &
DefMI)
4482bool PPCInstrInfo::isImmElgibleForForwarding(
const MachineOperand &ImmMO,
4486 int64_t BaseImm)
const {
4488 if (
DefMI.getOpcode() == PPC::ADDItocL8) {
4509 if (ImmMO.
isImm()) {
4514 APInt ActualValue(64, ImmMO.
getImm() + BaseImm,
true);
4519 Imm = SignExtend64<16>(ImmMO.
getImm() + BaseImm);
4535 unsigned OpNoForForwarding,
4537 if ((
DefMI.getOpcode() != PPC::LI &&
DefMI.getOpcode() != PPC::LI8) ||
4538 !
DefMI.getOperand(1).isImm())
4545 int64_t Immediate =
DefMI.getOperand(1).getImm();
4547 int64_t SExtImm = SignExtend64<16>(Immediate);
4549 bool ReplaceWithLI =
false;
4550 bool Is64BitLI =
false;
4553 unsigned Opc =
MI.getOpcode();
4574 bool Changed =
false;
4576 int64_t Comparand =
MI.getOperand(2).getImm();
4577 int64_t SExtComparand = ((
uint64_t)Comparand & ~0x7FFFuLL) != 0
4578 ? (Comparand | 0xFFFFFFFFFFFF0000)
4581 for (
auto &CompareUseMI :
MRI->use_instructions(DefReg)) {
4582 unsigned UseOpc = CompareUseMI.getOpcode();
4583 if (UseOpc != PPC::ISEL && UseOpc != PPC::ISEL8)
4585 unsigned CRSubReg = CompareUseMI.getOperand(3).getSubReg();
4586 Register TrueReg = CompareUseMI.getOperand(1).getReg();
4587 Register FalseReg = CompareUseMI.getOperand(2).getReg();
4588 unsigned RegToCopy =
4589 selectReg(SExtImm, SExtComparand, Opc, TrueReg, FalseReg, CRSubReg);
4590 if (RegToCopy == PPC::NoRegister)
4593 if (RegToCopy == PPC::ZERO || RegToCopy == PPC::ZERO8) {
4594 CompareUseMI.setDesc(
get(UseOpc == PPC::ISEL8 ? PPC::LI8 : PPC::LI));
4596 CompareUseMI.removeOperand(3);
4597 CompareUseMI.removeOperand(2);
4601 dbgs() <<
"Found LI -> CMPI -> ISEL, replacing with a copy.\n");
4605 CompareUseMI.setDesc(
get(PPC::COPY));
4606 CompareUseMI.removeOperand(3);
4607 CompareUseMI.removeOperand(RegToCopy == TrueReg ? 2 : 1);
4608 CmpIselsConverted++;
4617 MissedConvertibleImmediateInstrs++;
4625 int64_t Addend =
MI.getOperand(2).getImm();
4626 if (isInt<16>(Addend + SExtImm)) {
4627 ReplaceWithLI =
true;
4628 Is64BitLI = Opc == PPC::ADDI8;
4629 NewImm = Addend + SExtImm;
4635 case PPC::SUBFIC8: {
4637 if (
MI.getNumOperands() > 3 && !
MI.getOperand(3).isDead())
4639 int64_t Minuend =
MI.getOperand(2).getImm();
4640 if (isInt<16>(Minuend - SExtImm)) {
4641 ReplaceWithLI =
true;
4642 Is64BitLI = Opc == PPC::SUBFIC8;
4643 NewImm = Minuend - SExtImm;
4649 case PPC::RLDICL_rec:
4650 case PPC::RLDICL_32:
4651 case PPC::RLDICL_32_64: {
4653 int64_t SH =
MI.getOperand(2).getImm();
4654 int64_t MB =
MI.getOperand(3).getImm();
4655 APInt InVal((Opc == PPC::RLDICL || Opc == PPC::RLDICL_rec) ? 64 : 32,
4657 InVal = InVal.rotl(SH);
4663 if (isUInt<15>(InVal.getSExtValue()) ||
4664 (Opc == PPC::RLDICL_rec && isUInt<16>(InVal.getSExtValue()))) {
4665 ReplaceWithLI =
true;
4666 Is64BitLI = Opc != PPC::RLDICL_32;
4667 NewImm = InVal.getSExtValue();
4668 SetCR = Opc == PPC::RLDICL_rec;
4675 case PPC::RLWINM_rec:
4676 case PPC::RLWINM8_rec: {
4677 int64_t SH =
MI.getOperand(2).getImm();
4678 int64_t MB =
MI.getOperand(3).getImm();
4679 int64_t ME =
MI.getOperand(4).getImm();
4680 APInt InVal(32, SExtImm,
true);
4681 InVal = InVal.rotl(SH);
4687 bool ValueFits = isUInt<15>(InVal.getSExtValue());
4688 ValueFits |= ((Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec) &&
4689 isUInt<16>(InVal.getSExtValue()));
4691 ReplaceWithLI =
true;
4692 Is64BitLI = Opc == PPC::RLWINM8 || Opc == PPC::RLWINM8_rec;
4693 NewImm = InVal.getSExtValue();
4694 SetCR = Opc == PPC::RLWINM_rec || Opc == PPC::RLWINM8_rec;
4703 int64_t LogicalImm =
MI.getOperand(2).getImm();
4705 if (Opc == PPC::ORI || Opc == PPC::ORI8)
4706 Result = LogicalImm | SExtImm;
4708 Result = LogicalImm ^ SExtImm;
4709 if (isInt<16>(Result)) {
4710 ReplaceWithLI =
true;
4711 Is64BitLI = Opc == PPC::ORI8 || Opc == PPC::XORI8;
4719 if (ReplaceWithLI) {
4724 bool ImmChanged = (SExtImm & NewImm) != NewImm;
4725 if (PostRA && ImmChanged)
4732 DefMI.getOperand(1).setImm(NewImm);
4736 else if (
MRI->use_empty(
MI.getOperand(0).getReg())) {
4738 assert(Immediate &&
"Transformation converted zero to non-zero?");
4741 }
else if (ImmChanged)
4756 if (KilledDef && SetCR)
4757 *KilledDef =
nullptr;
4770bool PPCInstrInfo::transformToNewImmFormFedByAdd(
4780 if (!
MI.mayLoadOrStore())
4785 assert((XFormOpcode != PPC::INSTRUCTION_LIST_END) &&
4786 "MI must have x-form opcode");
4790 bool IsVFReg =
MI.getOperand(0).isReg()
4804 if (!ImmOperandMI.
isImm())
4810 if (!isDefMIElgibleForForwarding(
DefMI, III, ImmMO, RegMO))
4812 assert(ImmMO && RegMO &&
"Imm and Reg operand must have been set");
4817 int64_t ImmBase = ImmOperandMI.
getImm();
4819 if (!isImmElgibleForForwarding(*ImmMO,
DefMI, III, Imm, ImmBase))
4823 LLVM_DEBUG(
dbgs() <<
"Replacing existing reg+imm instruction:\n");
4840bool PPCInstrInfo::transformToImmFormFedByAdd(
4850 if (!isUseMIElgibleForForwarding(
MI, III, OpNoForForwarding))
4857 if (!isDefMIElgibleForForwarding(
DefMI, III, ImmMO, RegMO))
4859 assert(ImmMO && RegMO &&
"Imm and Reg operand must have been set");
4864 if (!isImmElgibleForForwarding(*ImmMO,
DefMI, III, Imm))
4867 bool IsFwdFeederRegKilled =
false;
4868 bool SeenIntermediateUse =
false;
4870 if (!isRegElgibleForForwarding(*RegMO,
DefMI,
MI, KillDefMI,
4871 IsFwdFeederRegKilled, SeenIntermediateUse))
4891 if (ImmMO->
isImm()) {
4902 if (
DefMI.getOpcode() == PPC::ADDItocL8)
4912 MI.removeOperand(i);
4918 MI.addOperand(*ImmMO);
4920 for (
auto &MO : MOps)
4937 unsigned ConstantOpNo,
4940 if ((
DefMI.getOpcode() != PPC::LI &&
DefMI.getOpcode() != PPC::LI8) ||
4941 !
DefMI.getOperand(1).isImm())
4945 int64_t
Imm = SignExtend64<16>(
DefMI.getOperand(1).getImm());
4957 APInt ActualValue(64, Imm,
true);
4958 if (!ActualValue.isSignedIntN(III.
ImmWidth))
4972 Register OrigZeroReg =
MI.getOperand(PosForOrigZero).getReg();
4976 if ((NewZeroReg == PPC::R0 || NewZeroReg == PPC::X0) &&
4979 if ((OrigZeroReg == PPC::R0 || OrigZeroReg == PPC::X0) &&
4980 ConstantOpNo != PosForOrigZero)
4984 unsigned Opc =
MI.getOpcode();
4985 bool SpecialShift32 = Opc == PPC::SLW || Opc == PPC::SLW_rec ||
4986 Opc == PPC::SRW || Opc == PPC::SRW_rec ||
4987 Opc == PPC::SLW8 || Opc == PPC::SLW8_rec ||
4988 Opc == PPC::SRW8 || Opc == PPC::SRW8_rec;
4989 bool SpecialShift64 = Opc == PPC::SLD || Opc == PPC::SLD_rec ||
4990 Opc == PPC::SRD || Opc == PPC::SRD_rec;
4991 bool SetCR = Opc == PPC::SLW_rec || Opc == PPC::SRW_rec ||
4992 Opc == PPC::SLD_rec || Opc == PPC::SRD_rec;
4993 bool RightShift = Opc == PPC::SRW || Opc == PPC::SRW_rec || Opc == PPC::SRD ||
4994 Opc == PPC::SRD_rec;
5008 if (SpecialShift32 || SpecialShift64) {
5013 uint64_t ShAmt =
Imm & (SpecialShift32 ? 0x1F : 0x3F);
5014 if (Imm & (SpecialShift32 ? 0x20 : 0x40))
5019 else if (!SetCR && ShAmt == 0 && !PostRA) {
5020 MI.removeOperand(2);
5021 MI.setDesc(
get(PPC::COPY));
5024 if (SpecialShift32) {
5070 MRI.getRegClass(RegToModify)->hasSuperClassEq(&PPC::GPRCRegClass) ?
5071 &PPC::GPRC_and_GPRC_NOR0RegClass : &PPC::G8RC_and_G8RC_NOX0RegClass;
5072 MRI.setRegClass(RegToModify, NewRC);
5088 if (Subtarget.hasVSX() && RC == &PPC::VRRCRegClass)
5089 return &PPC::VSRCRegClass;
5094 return PPC::getRecordFormOpcode(Opcode);
5098 return (Opcode == PPC::LBZU || Opcode == PPC::LBZUX || Opcode == PPC::LBZU8 ||
5099 Opcode == PPC::LBZUX8 || Opcode == PPC::LHZU ||
5100 Opcode == PPC::LHZUX || Opcode == PPC::LHZU8 ||
5101 Opcode == PPC::LHZUX8);
5114 int Opcode =
MI->getOpcode();
5117 if (
TII->isSExt32To64(Opcode))
5126 if (Opcode == PPC::RLDICL &&
MI->getOperand(3).getImm() >= 33)
5132 if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec ||
5133 Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec) &&
5134 MI->getOperand(3).getImm() > 0 &&
5135 MI->getOperand(3).getImm() <=
MI->getOperand(4).getImm())
5140 if (Opcode == PPC::ANDIS_rec || Opcode == PPC::ANDIS8_rec) {
5142 if ((Imm & 0x8000) == 0)
5161 int Opcode =
MI->getOpcode();
5164 if (
TII->isZExt32To64(Opcode))
5169 Opcode == PPC::LWZUX || Opcode == PPC::LWZU8 || Opcode == PPC::LWZUX8) &&
5170 MI->getOperand(0).getReg() == Reg)
5175 if (Opcode == PPC::LI || Opcode == PPC::LI8 ||
5176 Opcode == PPC::LIS || Opcode == PPC::LIS8) {
5177 int64_t Imm =
MI->getOperand(1).getImm();
5178 if (((
uint64_t)Imm & ~0x7FFFuLL) == 0)
5184 if ((Opcode == PPC::RLDICL || Opcode == PPC::RLDICL_rec ||
5185 Opcode == PPC::RLDCL || Opcode == PPC::RLDCL_rec ||
5186 Opcode == PPC::RLDICL_32_64) &&
5187 MI->getOperand(3).getImm() >= 32)
5190 if ((Opcode == PPC::RLDIC || Opcode == PPC::RLDIC_rec) &&
5191 MI->getOperand(3).getImm() >= 32 &&
5192 MI->getOperand(3).getImm() <= 63 -
MI->getOperand(2).getImm())
5195 if ((Opcode == PPC::RLWINM || Opcode == PPC::RLWINM_rec ||
5196 Opcode == PPC::RLWNM || Opcode == PPC::RLWNM_rec ||
5197 Opcode == PPC::RLWINM8 || Opcode == PPC::RLWNM8) &&
5198 MI->getOperand(3).getImm() <=
MI->getOperand(4).getImm())
5207 if (!
MI.getOperand(1).isImm() || !
MI.getOperand(2).isReg())
5211 Register StackReg =
MI.getOperand(2).getReg();
5213 if (StackReg == SPReg &&
StackOffset == TOCSaveOffset)
5227std::pair<bool, bool>
5229 const unsigned BinOpDepth,
5232 return std::pair<bool, bool>(
false,
false);
5236 return std::pair<bool, bool>(
false,
false);
5243 if (IsSExt && IsZExt)
5244 return std::pair<bool, bool>(IsSExt, IsZExt);
5246 switch (
MI->getOpcode()) {
5248 Register SrcReg =
MI->getOperand(1).getReg();
5257 return std::pair<bool, bool>(SrcExt.first || IsSExt,
5258 SrcExt.second || IsZExt);
5264 if (
MI->getParent()->getBasicBlock() ==
5270 return std::pair<bool, bool>(IsSExt, IsZExt);
5274 if (SrcReg != PPC::X3) {
5277 return std::pair<bool, bool>(SrcExt.first || IsSExt,
5278 SrcExt.second || IsZExt);
5288 std::pair<bool, bool> IsExtendPair = std::pair<bool, bool>(IsSExt, IsZExt);
5291 if (II ==
MBB->
instr_begin() || (--II)->getOpcode() != PPC::ADJCALLSTACKUP)
5292 return IsExtendPair;
5296 return IsExtendPair;
5301 return IsExtendPair;
5305 IsSExt |= Attrs.hasAttribute(Attribute::SExt);
5306 IsZExt |= Attrs.hasAttribute(Attribute::ZExt);
5307 return std::pair<bool, bool>(IsSExt, IsZExt);
5310 return IsExtendPair;
5319 Register SrcReg =
MI->getOperand(1).getReg();
5321 return std::pair<bool, bool>(SrcExt.first || IsSExt,
5322 SrcExt.second || IsZExt);
5333 Register SrcReg =
MI->getOperand(1).getReg();
5337 return std::pair<bool, bool>(
false, SrcExt.second || IsZExt);
5339 return std::pair<bool, bool>(SrcExt.first || IsSExt,
5340 SrcExt.second || IsZExt);
5350 return std::pair<bool, bool>(
false,
false);
5354 unsigned OperandEnd = 3, OperandStride = 1;
5355 if (
MI->getOpcode() == PPC::PHI) {
5356 OperandEnd =
MI->getNumOperands();
5362 for (
unsigned I = 1;
I != OperandEnd;
I += OperandStride) {
5363 if (!
MI->getOperand(
I).isReg())
5364 return std::pair<bool, bool>(
false,
false);
5368 IsSExt &= SrcExt.first;
5369 IsZExt &= SrcExt.second;
5371 return std::pair<bool, bool>(IsSExt, IsZExt);
5380 return std::pair<bool, bool>(
false,
false);
5382 Register SrcReg1 =
MI->getOperand(1).getReg();
5383 Register SrcReg2 =
MI->getOperand(2).getReg();
5386 return std::pair<bool, bool>(Src1Ext.first && Src2Ext.first,
5387 Src1Ext.second || Src2Ext.second);
5393 return std::pair<bool, bool>(IsSExt, IsZExt);
5397 return (Opcode == (Subtarget.
isPPC64() ? PPC::BDNZ8 : PPC::BDNZ));
5410 :
Loop(
Loop), EndLoop(EndLoop), LoopCount(LoopCount),
5412 TII(MF->getSubtarget().getInstrInfo()) {
5421 bool shouldIgnoreForPipelining(
const MachineInstr *
MI)
const override {
5423 return MI == EndLoop;
5426 std::optional<bool> createTripCountGreaterCondition(
5429 if (TripCount == -1) {
5439 return TripCount > TC;
5447 void adjustTripCount(
int TripCountAdjust)
override {
5450 if (LoopCount->
getOpcode() == PPC::LI8 ||
5461 void disposed()
override {
5462 Loop->eraseFromParent();
5469std::unique_ptr<TargetInstrInfo::PipelinerLoopInfo>
5474 if (Preheader == LoopBB)
5475 Preheader = *std::next(LoopBB->
pred_begin());
5478 if (
I != LoopBB->
end() &&
isBDNZ(
I->getOpcode())) {
5481 Register LoopCountReg = LoopInst->getOperand(0).getReg();
5484 return std::make_unique<PPCPipelinerLoopInfo>(LoopInst, &*
I, LoopCount);
5494 unsigned LOOPi = (Subtarget.
isPPC64() ? PPC::MTCTR8loop : PPC::MTCTRloop);
5497 for (
auto &
I : PreHeader.
instrs())
5498 if (
I.getOpcode() == LOOPi)
5544 int64_t OffsetA = 0, OffsetB = 0;
5549 int LowOffset = std::min(OffsetA, OffsetB);
5550 int HighOffset = std::max(OffsetA, OffsetB);
5551 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
5553 LowOffset + (int)LowWidth.
getValue() <= HighOffset)
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
static const Function * getParent(const Value *V)
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
const HexagonInstrInfo * TII
This file implements the LivePhysRegs utility for tracking liveness of physical registers.
This file declares the MachineConstantPool class which is an abstract constant pool to keep track of ...
unsigned const TargetRegisterInfo * TRI
static unsigned getReg(const MCDisassembler *D, unsigned RC, unsigned RegNo)
static bool isOpZeroOfSubwordPreincLoad(int Opcode)
static bool MBBDefinesCTR(MachineBasicBlock &MBB)
static bool definedByZeroExtendingOp(const unsigned Reg, const MachineRegisterInfo *MRI)
static cl::opt< float > FMARPFactor("ppc-fma-rp-factor", cl::Hidden, cl::init(1.5), cl::desc("register pressure factor for the transformations."))
#define InfoArrayIdxMULOpIdx
static unsigned selectReg(int64_t Imm1, int64_t Imm2, unsigned CompareOpc, unsigned TrueReg, unsigned FalseReg, unsigned CRSubReg)
static unsigned getCRBitValue(unsigned CRBit)
static bool isAnImmediateOperand(const MachineOperand &MO)
static const uint16_t FMAOpIdxInfo[][6]
static cl::opt< bool > DisableCTRLoopAnal("disable-ppc-ctrloop-analysis", cl::Hidden, cl::desc("Disable analysis for CTR loops"))
#define InfoArrayIdxAddOpIdx
static cl::opt< bool > UseOldLatencyCalc("ppc-old-latency-calc", cl::Hidden, cl::desc("Use the old (incorrect) instruction latency calculation"))
#define InfoArrayIdxFMAInst
static bool isClusterableLdStOpcPair(unsigned FirstOpc, unsigned SecondOpc, const PPCSubtarget &Subtarget)
static cl::opt< bool > EnableFMARegPressureReduction("ppc-fma-rp-reduction", cl::Hidden, cl::init(true), cl::desc("enable register pressure reduce in machine combiner pass."))
static bool isLdStSafeToCluster(const MachineInstr &LdSt, const TargetRegisterInfo *TRI)
const unsigned MAX_BINOP_DEPTH
static cl::opt< bool > DisableCmpOpt("disable-ppc-cmp-opt", cl::desc("Disable compare instruction optimization"), cl::Hidden)
#define InfoArrayIdxFSubInst
#define InfoArrayIdxFAddInst
#define InfoArrayIdxFMULInst
static bool definedBySignExtendingOp(const unsigned Reg, const MachineRegisterInfo *MRI)
static cl::opt< bool > VSXSelfCopyCrash("crash-on-ppc-vsx-self-copy", cl::desc("Causes the backend to crash instead of generating a nop VSX copy"), cl::Hidden)
static void swapMIOperands(MachineInstr &MI, unsigned Op1, unsigned Op2)
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
static unsigned getSize(unsigned Kind)
Class for arbitrary precision integers.
uint64_t getZExtValue() const
Get zero extended value.
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
APInt rotl(unsigned rotateAmt) const
Rotate left by rotateAmt.
static APInt getBitsSetWithWrap(unsigned numBits, unsigned loBit, unsigned hiBit)
Wrap version of getBitsSet.
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
const T & front() const
front - Get the first element.
size_t size() const
size - Get the array size.
AttributeSet getRetAttrs() const
The attributes for the ret value are returned.
This is an important base class in LLVM.
This class represents an Operation in the Expression.
A parsed version of the target data layout string in and methods for querying it.
Align getPrefTypeAlign(Type *Ty) const
Returns the preferred stack/global alignment for the specified type.
std::pair< iterator, bool > insert(const std::pair< KeyT, ValueT > &KV)
const BasicBlock & getEntryBlock() const
AttributeList getAttributes() const
Return the attribute list for this Function.
Type * getReturnType() const
Returns the type of the ret val.
A possibly irreducible generalization of a Loop.
Module * getParent()
Get the module that this global value is contained inside of...
Itinerary data supplied by a subtarget to be used by a target.
std::optional< unsigned > getOperandCycle(unsigned ItinClassIndx, unsigned OperandIdx) const
Return the cycle for the given class and operand.
Class to represent integer types.
unsigned getBitWidth() const
Get the number of bits in this IntegerType.
TypeSize getValue() const
Represents a single loop in the control flow graph.
Instances of this class represent a single low-level machine instruction.
void setOpcode(unsigned Op)
Describe properties that are true of each instruction in the target description file.
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
ArrayRef< MCOperandInfo > operands() const
ArrayRef< MCPhysReg > implicit_defs() const
Return a list of registers that are potentially written by any instance of this machine instruction.
ArrayRef< MCPhysReg > implicit_uses() const
Return a list of registers that are potentially read by any instance of this machine instruction.
bool isPseudo() const
Return true if this is a pseudo instruction that doesn't correspond to a real machine instruction.
This holds information about one operand of a machine instruction, indicating the register class for ...
uint16_t Constraints
Operand constraints (see OperandConstraint enum).
bool isLookupPtrRegClass() const
Set if this operand is a pointer value and it requires a callback to look up its register class.
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Wrapper class representing physical registers. Should be passed by value.
instr_iterator instr_begin()
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
iterator getFirstTerminator()
Returns an iterator to the first terminator instruction of this basic block.
pred_iterator pred_begin()
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
Instructions::const_iterator const_instr_iterator
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
The MachineConstantPool class keeps track of constants referenced by a function which must be spilled...
const std::vector< MachineConstantPoolEntry > & getConstants() const
unsigned getConstantPoolIndex(const Constant *C, Align Alignment)
getConstantPoolIndex - Create a new entry in the constant pool or return an existing one.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
Align getObjectAlign(int ObjectIdx) const
Return the alignment of the specified stack object.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
MachineMemOperand * getMachineMemOperand(MachinePointerInfo PtrInfo, MachineMemOperand::Flags f, uint64_t s, Align base_alignment, const AAMDNodes &AAInfo=AAMDNodes(), const MDNode *Ranges=nullptr, SyncScope::ID SSID=SyncScope::System, AtomicOrdering Ordering=AtomicOrdering::NotAtomic, AtomicOrdering FailureOrdering=AtomicOrdering::NotAtomic)
getMachineMemOperand - Allocate a new MachineMemOperand.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
const DataLayout & getDataLayout() const
Return the DataLayout attached to the Module associated to this MF.
Function & getFunction()
Return the LLVM function that this machine code represents.
const LLVMTargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Ty * getInfo()
getInfo - Keep track of various per-function pieces of information for backends that would like to do...
MachineConstantPool * getConstantPool()
getConstantPool - Return the constant pool object for the current function.
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addConstantPoolIndex(unsigned Idx, int Offset=0, unsigned TargetFlags=0) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
Representation of each machine instruction.
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
const MachineBasicBlock * getParent() const
bool isCall(QueryType Type=AnyInBundle) const
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
iterator_range< mop_iterator > uses()
Returns a range that includes all operands that are register uses.
bool modifiesRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr modifies (fully define or partially define) the specified register.
unsigned getNumExplicitOperands() const
Returns the number of non-implicit operands.
bool hasImplicitDef() const
Returns true if the instruction has implicit definition.
bool hasUnmodeledSideEffects() const
Return true if this instruction has side effects that are not modeled by mayLoad / mayStore,...
void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
iterator_range< mop_iterator > operands()
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
bool hasOrderedMemoryRef() const
Return true if this instruction may have an ordered or volatile memory reference, or if the informati...
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr fully defines the specified register.
const MachineFunction * getMF() const
Return the function that contains the basic block that this instruction belongs to.
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
void eraseFromParent()
Unlink 'this' from the containing basic block and delete it.
void clearRegisterDeads(Register Reg)
Clear all dead flags on operands defining register Reg.
const MachineOperand & getOperand(unsigned i) const
uint32_t getFlags() const
Return the MI flags bitvector.
A description of a memory reference used in the backend.
@ MOLoad
The memory access reads data.
@ MOStore
The memory access writes data.
MachineOperand class - Representation of each machine instruction operand.
unsigned getSubReg() const
const GlobalValue * getGlobal() const
void setImm(int64_t immVal)
bool isReg() const
isReg - Tests if this is a MO_Register operand.
MachineBasicBlock * getMBB() const
bool isCPI() const
isCPI - Tests if this is a MO_ConstantPoolIndex operand.
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
void setIsKill(bool Val=true)
MachineInstr * getParent()
getParent - Return the instruction that this operand belongs to.
static MachineOperand CreateImm(int64_t Val)
bool isGlobal() const
isGlobal - Tests if this is a MO_GlobalAddress operand.
Register getReg() const
getReg - Returns the register number.
void setTargetFlags(unsigned F)
bool isFI() const
isFI - Tests if this is a MO_FrameIndex operand.
bool isIdenticalTo(const MachineOperand &Other) const
Returns true if this operand is identical to the specified operand except for liveness related flags ...
static MachineOperand CreateReg(Register Reg, bool isDef, bool isImp=false, bool isKill=false, bool isDead=false, bool isUndef=false, bool isEarlyClobber=false, unsigned SubReg=0, bool isDebug=false, bool isInternalRead=false, bool isRenamable=false)
bool isMBB() const
isMBB - Tests if this is a MO_MachineBasicBlock operand.
defusechain_iterator - This class provides iterator support for machine operands in the function that...
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
bool isLiveIn(Register Reg) const
const DataLayout & getDataLayout() const
Get the data layout for the module's target platform.
PPCDispatchGroupSBHazardRecognizer - This class implements a scoreboard-based hazard recognizer for P...
uint64_t getTOCSaveOffset() const
getTOCSaveOffset - Return the previous frame offset to save the TOC register – 64-bit SVR4 ABI only.
PPCFunctionInfo - This class is derived from MachineFunction private PowerPC target-specific informat...
bool isLiveInSExt(Register VReg) const
This function returns true if the specified vreg is a live-in register and sign-extended.
bool isLiveInZExt(Register VReg) const
This function returns true if the specified vreg is a live-in register and zero-extended.
PPCHazardRecognizer970 - This class defines a finite state automata that models the dispatch logic on...
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const override
PPCInstrInfo(PPCSubtarget &STI)
bool combineRLWINM(MachineInstr &MI, MachineInstr **ToErase=nullptr) const
bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const override
const TargetRegisterClass * updatedRC(const TargetRegisterClass *RC) const
bool isPredicated(const MachineInstr &MI) const override
bool expandVSXMemPseudo(MachineInstr &MI) const
bool onlyFoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg) const
MCInst getNop() const override
Return the noop instruction to use for a noop.
static int getRecordFormOpcode(unsigned Opcode)
bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &P, bool DoRegPressureReduce) const override
Return true when there is potentially a faster code sequence for an instruction chain ending in <Root...
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const override
Commutes the operands in the given instruction.
bool isXFormMemOp(unsigned Opcode) const
const PPCRegisterInfo & getRegisterInfo() const
getRegisterInfo - TargetInstrInfo is a superset of MRegister info.
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
void loadRegFromStackSlotNoUpd(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
bool getFMAPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &P, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for a fma chain ending in Root.
unsigned getStoreOpcodeForSpill(const TargetRegisterClass *RC) const
unsigned getLoadOpcodeForSpill(const TargetRegisterClass *RC) const
bool isTOCSaveMI(const MachineInstr &MI) const
ScheduleHazardRecognizer * CreateTargetPostRAHazardRecognizer(const InstrItineraryData *II, const ScheduleDAG *DAG) const override
CreateTargetPostRAHazardRecognizer - Return the postRA hazard recognizer to use for this target when ...
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
bool isBDNZ(unsigned Opcode) const
Check Opcode is BDNZ (Decrement CTR and branch if it is still nonzero).
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
bool isZeroExtended(const unsigned Reg, const MachineRegisterInfo *MRI) const
std::pair< unsigned, unsigned > decomposeMachineOperandsTargetFlags(unsigned TF) const override
std::pair< bool, bool > isSignOrZeroExtended(const unsigned Reg, const unsigned BinOpDepth, const MachineRegisterInfo *MRI) const
bool expandPostRAPseudo(MachineInstr &MI) const override
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
void insertNoop(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI) const override
bool isValidToBeChangedReg(MachineInstr *ADDMI, unsigned Index, MachineInstr *&ADDIMI, int64_t &OffsetAddi, int64_t OffsetImm) const
bool optimizeCompareInstr(MachineInstr &CmpInstr, Register SrcReg, Register SrcReg2, int64_t Mask, int64_t Value, const MachineRegisterInfo *MRI) const override
ArrayRef< std::pair< unsigned, const char * > > getSerializableDirectMachineOperandTargetFlags() const override
std::optional< unsigned > getOperandLatency(const InstrItineraryData *ItinData, const MachineInstr &DefMI, unsigned DefIdx, const MachineInstr &UseMI, unsigned UseIdx) const override
void materializeImmPostRA(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, Register Reg, int64_t Imm) const
bool isADDInstrEligibleForFolding(MachineInstr &ADDMI) const
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
Return true if two MIs access different memory addresses and false otherwise.
bool SubsumesPredicate(ArrayRef< MachineOperand > Pred1, ArrayRef< MachineOperand > Pred2) const override
ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const override
CreateTargetHazardRecognizer - Return the hazard recognizer to use for this target when scheduling th...
void finalizeInsInstrs(MachineInstr &Root, MachineCombinerPattern &P, SmallVectorImpl< MachineInstr * > &InsInstrs) const override
Fixup the placeholders we put in genAlternativeCodeSequence() for MachineCombiner.
bool canInsertSelect(const MachineBasicBlock &, ArrayRef< MachineOperand > Cond, Register, Register, Register, int &, int &, int &) const override
bool getMemOperandsWithOffsetWidth(const MachineInstr &LdSt, SmallVectorImpl< const MachineOperand * > &BaseOps, int64_t &Offset, bool &OffsetIsScalable, LocationSize &Width, const TargetRegisterInfo *TRI) const override
Get the base operand and byte offset of an instruction that reads/writes memory.
void setSpecialOperandAttr(MachineInstr &MI, uint32_t Flags) const
bool isADDIInstrEligibleForFolding(MachineInstr &ADDIMI, int64_t &Imm) const
void storeRegToStackSlotNoUpd(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI) const
bool foldFrameOffset(MachineInstr &MI) const
void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstrIdxForVirtReg) const override
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
bool isLoadFromConstantPool(MachineInstr *I) const
MachineInstr * findLoopInstr(MachineBasicBlock &PreHeader, SmallPtrSet< MachineBasicBlock *, 8 > &Visited) const
Find the hardware loop instruction used to set up the specified loop.
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
unsigned getInstrLatency(const InstrItineraryData *ItinData, const MachineInstr &MI, unsigned *PredCost=nullptr) const override
bool isCoalescableExtInstr(const MachineInstr &MI, Register &SrcReg, Register &DstReg, unsigned &SubIdx) const override
bool convertToImmediateForm(MachineInstr &MI, SmallSet< Register, 4 > &RegsToUpdate, MachineInstr **KilledDef=nullptr) const
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &Mask, int64_t &Value) const override
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator I, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc) const override
bool getMemOperandWithOffsetWidth(const MachineInstr &LdSt, const MachineOperand *&BaseOp, int64_t &Offset, LocationSize &Width, const TargetRegisterInfo *TRI) const
Return true if get the base operand, byte offset of an instruction and the memory width.
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
bool shouldReduceRegisterPressure(const MachineBasicBlock *MBB, const RegisterClassInfo *RegClassInfo) const override
On PowerPC, we leverage machine combiner pass to reduce register pressure when the register pressure ...
bool isSignExtended(const unsigned Reg, const MachineRegisterInfo *MRI) const
void replaceInstrOperandWithImm(MachineInstr &MI, unsigned OpNo, int64_t Imm) const
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
GetInstSize - Return the number of bytes of code the specified instruction may be.
std::unique_ptr< TargetInstrInfo::PipelinerLoopInfo > analyzeLoopForPipelining(MachineBasicBlock *LoopBB) const override
Analyze loop L, which must be a single-basic-block loop, and if the conditions can be understood enou...
bool shouldClusterMemOps(ArrayRef< const MachineOperand * > BaseOps1, int64_t Offset1, bool OffsetIsScalable1, ArrayRef< const MachineOperand * > BaseOps2, int64_t Offset2, bool OffsetIsScalable2, unsigned ClusterSize, unsigned NumBytes) const override
Returns true if the two given memory operations should be scheduled adjacent.
void replaceInstrWithLI(MachineInstr &MI, const LoadImmediateInfo &LII) const
bool isImmInstrEligibleForFolding(MachineInstr &MI, unsigned &BaseReg, unsigned &XFormOpcode, int64_t &OffsetOfImmInstr, ImmInstrInfo &III) const
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const override
bool optimizeCmpPostRA(MachineInstr &MI) const
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
const Constant * getConstantFromConstantPool(MachineInstr *I) const
bool ClobbersPredicate(MachineInstr &MI, std::vector< MachineOperand > &Pred, bool SkipDead) const override
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const override
bool instrHasImmForm(unsigned Opc, bool IsVFReg, ImmInstrInfo &III, bool PostRA) const
MachineInstr * getDefMIPostRA(unsigned Reg, MachineInstr &MI, bool &SeenIntermediateUse) const
unsigned getMappedIdxOpcForImmOpc(unsigned ImmOpcode) const
getMappedIdxOpcForImmOpc - Return the mapped index form load/store opcode for a given imm form load/s...
static void emitAccCopyInfo(MachineBasicBlock &MBB, MCRegister DestReg, MCRegister SrcReg)
const PPCFrameLowering * getFrameLowering() const override
bool isPPC64() const
isPPC64 - Return true if we are generating code for 64-bit pointer mode.
unsigned getCPUDirective() const
getCPUDirective - Returns the -m directive specified for the cpu.
bool isLittleEndian() const
bool isTargetLinux() const
const PPCTargetMachine & getTargetMachine() const
const Triple & getTargetTriple() const
void setGlibcHWCAPAccess(bool Val=true) const
MI-level patchpoint operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
Track the current register pressure at some position in the instruction stream, and remember the high...
void closeRegion()
Finalize the region boundaries and record live ins and live outs.
void recede(SmallVectorImpl< RegisterMaskPair > *LiveUses=nullptr)
Recede across the previous instruction.
RegisterPressure & getPressure()
Get the resulting register pressure over the traversed region.
void recedeSkipDebugValues()
Recede until we find an instruction which is not a DebugValue.
void init(const MachineFunction *mf, const RegisterClassInfo *rci, const LiveIntervals *lis, const MachineBasicBlock *mbb, MachineBasicBlock::const_iterator pos, bool TrackLaneMasks, bool TrackUntiedDefs)
Setup the RegPressureTracker.
MachineBasicBlock::const_iterator getPos() const
Get the MI position corresponding to this register pressure.
List of registers defined and used by a machine instruction.
void collect(const MachineInstr &MI, const TargetRegisterInfo &TRI, const MachineRegisterInfo &MRI, bool TrackLaneMasks, bool IgnoreDead)
Analyze the given instruction MI and fill in the Uses, Defs and DeadDefs list based on the MachineOpe...
Wrapper class representing virtual and physical registers.
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
static constexpr bool isVirtualRegister(unsigned Reg)
Return true if the specified register number is in the virtual register namespace.
const TargetInstrInfo * TII
Target instruction information.
MachineFunction & MF
Machine function.
HazardRecognizer - This determines whether or not an instruction can be issued this cycle,...
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
std::pair< const_iterator, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
iterator insert(iterator I, T &&Elt)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
MI-level stackmap operands.
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given stackmap should emit.
StackOffset holds a fixed and a scalable offset in bytes.
Object returned by analyzeLoopForPipelining.
TargetInstrInfo - Interface to description of machine instruction set.
virtual bool findCommutedOpIndices(const MachineInstr &MI, unsigned &SrcOpIdx1, unsigned &SrcOpIdx2) const
Returns true iff the routine could find two commutable operands in the given machine instruction.
virtual bool getMachineCombinerPatterns(MachineInstr &Root, SmallVectorImpl< MachineCombinerPattern > &Patterns, bool DoRegPressureReduce) const
Return true when there is potentially a faster code sequence for an instruction chain ending in Root.
virtual ScheduleHazardRecognizer * CreateTargetHazardRecognizer(const TargetSubtargetInfo *STI, const ScheduleDAG *DAG) const
Allocate and return a hazard recognizer to use for this target when scheduling the machine instructio...
virtual void genAlternativeCodeSequence(MachineInstr &Root, MachineCombinerPattern Pattern, SmallVectorImpl< MachineInstr * > &InsInstrs, SmallVectorImpl< MachineInstr * > &DelInstrs, DenseMap< unsigned, unsigned > &InstIdxForVirtReg) const
When getMachineCombinerPatterns() finds patterns, this function generates the instructions that could...
virtual bool isReallyTriviallyReMaterializable(const MachineInstr &MI) const
For instructions with opcodes for which the M_REMATERIALIZABLE flag is set, this hook lets the target...
virtual bool isSchedulingBoundary(const MachineInstr &MI, const MachineBasicBlock *MBB, const MachineFunction &MF) const
Test if the given instruction should be considered a scheduling boundary.
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
CodeModel::Model getCodeModel() const
Returns the code model.
CodeGenOptLevel getOptLevel() const
Returns the optimization level: None, Less, Default, or Aggressive.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
bool contains(Register Reg) const
Return true if the specified register is included in this register class.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
TargetSubtargetInfo - Generic base class for all target subtargets.
bool isOSGlibc() const
Tests whether the OS uses glibc.
The instances of the Type class are immutable: once they are created, they are never changed.
bool isFloatTy() const
Return true if this is 'float', a 32-bit IEEE fp type.
unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
bool isDoubleTy() const
Return true if this is 'double', a 64-bit IEEE fp type.
LLVM Value Representation.
Align getPointerAlignment(const DataLayout &DL) const
Returns an alignment of the pointer value.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
constexpr std::underlying_type_t< E > Mask()
Get a bitmask with 1s in all places up to the high-order bit of E's largest value.
@ C
The default llvm calling convention, compatible with C.
Predicate getSwappedPredicate(Predicate Opcode)
Assume the condition register is set by MI(a,b), return the predicate if we modify the instructions s...
Predicate
Predicate - These are "(BI << 5) | BO" for various predicates.
int getAltVSXFMAOpcode(uint16_t Opcode)
int getNonRecordFormOpcode(uint16_t)
unsigned getPredicateCondition(Predicate Opcode)
Return the condition without hint bits.
Predicate getPredicate(unsigned Condition, unsigned Hint)
Return predicate consisting of specified condition and hint bits.
Predicate InvertPredicate(Predicate Opcode)
Invert the specified predicate. != -> ==, < -> >=.
unsigned getPredicateHint(Predicate Opcode)
Return the hint bits of the predicate.
static bool isVFRegister(unsigned Reg)
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Define
Register definition.
@ Kill
The last use of a register.
Reg
All possible values of the reg field in the ModR/M byte.
initializer< Ty > init(const Ty &Val)
NodeAddr< InstrNode * > Instr
This is an optimization pass for GlobalISel generic memory operations.
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
unsigned getDeadRegState(bool B)
static const MachineInstrBuilder & addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset=0, bool mem=true)
addFrameReference - This function is used to add a reference to the base of an abstract object on the...
static unsigned getCRFromCRBit(unsigned SrcReg)
auto reverse(ContainerTy &&C)
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
raw_ostream & dbgs()
dbgs() - This returns a reference to a raw_ostream for debugging messages.
MachineCombinerPattern
These are instruction patterns matched by the machine combiner pass.
void recomputeLivenessFlags(MachineBasicBlock &MBB)
Recomputes dead and kill flags in MBB.
unsigned getKillRegState(bool B)
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
static bool isRunOfOnes(unsigned Val, unsigned &MB, unsigned &ME)
Returns true iff Val consists of one contiguous run of 1s with any number of 0s on either side.
This struct is a compact representation of a valid (non-zero power of two) alignment.
uint64_t IsSummingOperands
uint64_t OpNoForForwarding
uint64_t ImmMustBeMultipleOf
uint64_t ZeroIsSpecialNew
uint64_t ZeroIsSpecialOrig
static MachinePointerInfo getConstantPool(MachineFunction &MF)
Return a MachinePointerInfo record that refers to the constant pool.
static MachinePointerInfo getFixedStack(MachineFunction &MF, int FI, int64_t Offset=0)
Return a MachinePointerInfo record that refers to the specified FrameIndex.
RegisterPressure computed within a region of instructions delimited by TopPos and BottomPos.
std::vector< unsigned > MaxSetPressure
Map of max reg pressure indexed by pressure set ID, not class ID.