#define DEBUG_TYPE "thumb2-reduce-size"
#define THUMB2_SIZE_REDUCE_NAME "Thumb2 instruction size reduce pass"
STATISTIC(NumNarrows,  "Number of 32-bit instrs reduced to 16-bit ones");
STATISTIC(Num2Addrs,   "Number of 32-bit instrs reduced to 2addr 16-bit ones");
STATISTIC(NumLdSts,    "Number of 32-bit load / store reduced to 16-bit ones");
    unsigned LowRegs1 : 1;  // Only possible if low registers are used.
    unsigned LowRegs2 : 1;  // Low-register constraint for the 2-address form.
    unsigned PartFlag : 1;  // 16-bit instruction does partial flag update.
    unsigned AvoidMovs: 1;  // Avoid movs with shifter operand (for Swift).
  static const ReduceEntry ReduceTable[] = {
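  // Column legend (assumed from the ReduceEntry fields above and their uses
  // below): wide opcode, narrow opcode, narrow 2-address opcode, immediate
  // limit, 2-address immediate limit, low-register-only flags, predicate/CC
  // behaviour, partial-flag update, special handling, avoid-movs.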
  { ARM::t2ADCrr, 0, ARM::tADC, 0, 0, 0, 1, 0,0, 0,0,0 },
  { ARM::t2ADDri, ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 0,0, 0,1,0 },
  { ARM::t2ADDrr, ARM::tADDrr, ARM::tADDhirr, 0, 0, 1, 0, 0,1, 0,0,0 },
  { ARM::t2ADDSri,ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 2,2, 0,1,0 },
  { ARM::t2ADDSrr,ARM::tADDrr, 0, 0, 0, 1, 0, 2,0, 0,1,0 },
  { ARM::t2ANDrr, 0, ARM::tAND, 0, 0, 0, 1, 0,0, 1,0,0 },
  { ARM::t2ASRri, ARM::tASRri, 0, 5, 0, 1, 0, 0,0, 1,0,1 },
  { ARM::t2ASRrr, 0, ARM::tASRrr, 0, 0, 0, 1, 0,0, 1,0,1 },
  { ARM::t2BICrr, 0, ARM::tBIC, 0, 0, 0, 1, 0,0, 1,0,0 },
  { ARM::t2CMNzrr, ARM::tCMNz, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
  { ARM::t2CMPri, ARM::tCMPi8, 0, 8, 0, 1, 0, 2,0, 0,0,0 },
  { ARM::t2CMPrr, ARM::tCMPhir, 0, 0, 0, 0, 0, 2,0, 0,1,0 },
  { ARM::t2EORrr, 0, ARM::tEOR, 0, 0, 0, 1, 0,0, 1,0,0 },
  { ARM::t2LSLri, ARM::tLSLri, 0, 5, 0, 1, 0, 0,0, 1,0,1 },
  { ARM::t2LSLrr, 0, ARM::tLSLrr, 0, 0, 0, 1, 0,0, 1,0,1 },
  { ARM::t2LSRri, ARM::tLSRri, 0, 5, 0, 1, 0, 0,0, 1,0,1 },
  { ARM::t2LSRrr, 0, ARM::tLSRrr, 0, 0, 0, 1, 0,0, 1,0,1 },
  { ARM::t2MOVi, ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 1,0,0 },
  { ARM::t2MOVi16,ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 1,1,0 },
  { ARM::t2MOVr, ARM::tMOVr, 0, 0, 0, 0, 0, 1,0, 0,0,0 },
  { ARM::t2MUL, 0, ARM::tMUL, 0, 0, 0, 1, 0,0, 1,0,0 },
  { ARM::t2MVNr, ARM::tMVN, 0, 0, 0, 1, 0, 0,0, 0,0,0 },
  { ARM::t2ORRrr, 0, ARM::tORR, 0, 0, 0, 1, 0,0, 1,0,0 },
  { ARM::t2REV, ARM::tREV, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
  { ARM::t2REV16, ARM::tREV16, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
  { ARM::t2REVSH, ARM::tREVSH, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
  { ARM::t2RORrr, 0, ARM::tROR, 0, 0, 0, 1, 0,0, 1,0,0 },
  { ARM::t2RSBri, ARM::tRSB, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2RSBSri,ARM::tRSB, 0, 0, 0, 1, 0, 2,0, 0,1,0 },
  { ARM::t2SBCrr, 0, ARM::tSBC, 0, 0, 0, 1, 0,0, 0,0,0 },
  { ARM::t2SUBri, ARM::tSUBi3, ARM::tSUBi8, 3, 8, 1, 1, 0,0, 0,0,0 },
  { ARM::t2SUBrr, ARM::tSUBrr, 0, 0, 0, 1, 0, 0,0, 0,0,0 },
  { ARM::t2SUBSri,ARM::tSUBi3, ARM::tSUBi8, 3, 8, 1, 1, 2,2, 0,0,0 },
  { ARM::t2SUBSrr,ARM::tSUBrr, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
  { ARM::t2SXTB, ARM::tSXTB, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
  { ARM::t2SXTH, ARM::tSXTH, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
  { ARM::t2TEQrr, ARM::tEOR, 0, 0, 0, 1, 0, 2,0, 0,1,0 },
  { ARM::t2TSTrr, ARM::tTST, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
  { ARM::t2UXTB, ARM::tUXTB, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
  { ARM::t2UXTH, ARM::tUXTH, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
  { ARM::t2LDRi12,ARM::tLDRi, ARM::tLDRspi, 5, 8, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRs, ARM::tLDRr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRBi12,ARM::tLDRBi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRBs, ARM::tLDRBr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRHi12,ARM::tLDRHi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRHs, ARM::tLDRHr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRSBs,ARM::tLDRSB, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDRSHs,ARM::tLDRSH, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDR_POST,ARM::tLDMIA_UPD, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRi12,ARM::tSTRi, ARM::tSTRspi, 5, 8, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRs, ARM::tSTRr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRBi12,ARM::tSTRBi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRBs, ARM::tSTRBr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRHi12,ARM::tSTRHi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STRHs, ARM::tSTRHr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2STR_POST,ARM::tSTMIA_UPD, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
  { ARM::t2LDMIA, ARM::tLDMIA, 0, 0, 0, 1, 1, 1,1, 0,1,0 },
  { ARM::t2LDMIA_RET, 0, ARM::tPOP_RET, 0, 0, 1, 1, 1,1, 0,1,0 },
  { ARM::t2LDMIA_UPD, ARM::tLDMIA_UPD, ARM::tPOP, 0, 0, 1, 1, 1,1, 0,1,0 },
  { ARM::t2STMIA, ARM::tSTMIA_UPD, 0, 0, 0, 1, 1, 1,1, 0,1,0 },
  { ARM::t2STMIA_UPD, ARM::tSTMIA_UPD, 0, 0, 0, 1, 1, 1,1, 0,1,0 },
  { ARM::t2STMDB_UPD, 0, ARM::tPUSH, 0, 0, 1, 1, 1,1, 0,1,0 }
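  // The load/store and load/store-multiple rows above are additionally
  // constrained by ReduceLoadStore further down: addressing mode, access
  // alignment, and SP/PC/LR usage all decide whether the narrow form is legal.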
    Thumb2SizeReduce(std::function<bool(const Function &)> Ftor = nullptr);
    MachineFunctionProperties getRequiredProperties() const override {
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::NoVRegs);
    }

    bool VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                         bool is2Addr, ARMCC::CondCodes Pred,
                         bool LiveCPSR, bool &HasCC, bool &CCDead);
    bool ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                         const ReduceEntry &Entry);
    bool ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR,
                       bool IsSelfLoop);
    bool ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR,
                       bool IsSelfLoop);
    bool ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                        const ReduceEntry &Entry, bool LiveCPSR,
                        bool IsSelfLoop);
    bool ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                  bool LiveCPSR, bool IsSelfLoop,
                  bool SkipPrologueEpilogue);
    bool HighLatencyCPSR;

    struct MBBInfo {
      // The flags leaving this block have high latency.
      bool HighLatencyCPSR = false;
      // Has this block been visited yet?
      bool Visited = false;
    };
char Thumb2SizeReduce::ID = 0;
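// Constructor body: OptimizeSize/MinimizeSize start out false, and
// ReduceOpcodeMap is seeded with the index of each ReduceTable entry, keyed by
// its wide opcode, so ReduceMI can look entries up by opcode later.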
  OptimizeSize = MinimizeSize = false;
  for (unsigned i = 0, e = std::size(ReduceTable); i != e; ++i) {
    unsigned FromOpc = ReduceTable[i].WideOpc;
    if (!ReduceOpcodeMap.insert(std::make_pair(FromOpc, i)).second)
  switch (Def->getOpcode()) {
Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Use, bool FirstInSelfLoop) {
  if (MinimizeSize || !STI->avoidCPSRPartialUpdate())

    return HighLatencyCPSR || FirstInSelfLoop;

    if (!MO.isReg() || MO.isUndef() || MO.isUse())
    if (Reg == 0 || Reg == ARM::CPSR)
    if (!MO.isReg() || MO.isUndef() || MO.isDef())

  if (Use->getOpcode() == ARM::t2MOVi ||
      Use->getOpcode() == ARM::t2MOVi16)
Thumb2SizeReduce::VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                                  bool LiveCPSR, bool &HasCC, bool &CCDead) {
  if ((is2Addr  && Entry.PredCC2 == 0) ||
      (!is2Addr && Entry.PredCC1 == 0)) {

  } else if ((is2Addr  && Entry.PredCC2 == 2) ||
             (!is2Addr && Entry.PredCC1 == 2)) {
  unsigned Opc = MI->getOpcode();
  bool isPCOk = (Opc == ARM::t2LDMIA_RET || Opc == ARM::t2LDMIA_UPD);
  bool isLROk = (Opc == ARM::t2STMDB_UPD);
  bool isSPOk = isPCOk || isLROk;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    if (Reg == 0 || Reg == ARM::CPSR)
    if (isPCOk && Reg == ARM::PC)
    if (isLROk && Reg == ARM::LR)
    if (Reg == ARM::SP) {
      if (i == 1 && (Opc == ARM::t2LDRi12 || Opc == ARM::t2STRi12))
                                      const ReduceEntry &Entry) {
  bool HasImmOffset = false;
  bool HasShift = false;
  bool HasOffReg = true;
  bool isLdStMul = false;
  unsigned Opc = Entry.NarrowOpc1;
  switch (Entry.WideOpc) {
    if (MI->getOperand(1).getReg() == ARM::SP) {
      Opc = Entry.NarrowOpc2;
      ImmLimit = Entry.Imm2Limit;
  case ARM::t2LDR_POST:
  case ARM::t2STR_POST: {
    if (!MI->hasOneMemOperand() ||
        (*MI->memoperands_begin())->getAlign() < Align(4))

    bool IsStore = Entry.WideOpc == ARM::t2STR_POST;
    Register Rt = MI->getOperand(IsStore ? 1 : 0).getReg();
    Register Rn = MI->getOperand(IsStore ? 0 : 1).getReg();
    unsigned Offset = MI->getOperand(3).getImm();
    unsigned PredImm = MI->getOperand(4).getImm();
    Register PredReg = MI->getOperand(5).getReg();

    MI->eraseFromBundle();
    Register BaseReg = MI->getOperand(0).getReg();
      if (MO.getReg() == BaseReg) {

    if (!MI->getOperand(0).isKill())

    Register BaseReg = MI->getOperand(0).getReg();
      if (MO.getReg() == BaseReg)

  case ARM::t2LDMIA_RET: {
    Register BaseReg = MI->getOperand(1).getReg();
    if (BaseReg != ARM::SP)
    Opc = Entry.NarrowOpc2;

  case ARM::t2LDMIA_UPD:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    Register BaseReg = MI->getOperand(1).getReg();
    if (BaseReg == ARM::SP &&
        (Entry.WideOpc == ARM::t2LDMIA_UPD ||
         Entry.WideOpc == ARM::t2STMDB_UPD)) {
      Opc = Entry.NarrowOpc2;
        (Entry.WideOpc != ARM::t2LDMIA_UPD &&
         Entry.WideOpc != ARM::t2STMIA_UPD)) {
  unsigned OffsetReg = 0;
  bool OffsetKill = false;
  bool OffsetInternal = false;
    OffsetReg = MI->getOperand(2).getReg();
    OffsetKill = MI->getOperand(2).isKill();
    OffsetInternal = MI->getOperand(2).isInternalRead();
    if (MI->getOperand(3).getImm())

  unsigned OffsetImm = 0;
    OffsetImm = MI->getOperand(2).getImm();
    unsigned MaxOffset = ((1 << ImmLimit) - 1) * Scale;
    if ((OffsetImm & (Scale - 1)) || OffsetImm > MaxOffset)
  if (Entry.WideOpc == ARM::t2STMIA)

    MIB.add(MI->getOperand(0));
    MIB.add(MI->getOperand(1));
      MIB.addImm(OffsetImm / Scale);

  assert((!HasShift || OffsetReg) && "Invalid so_reg load / store address!");

                    << " to 16-bit: " << *MIB);
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::t2ADDri) {
    if (MI->getOperand(1).getReg() != ARM::SP) {
      if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);

    unsigned Imm = MI->getOperand(2).getImm();
    if (Imm & 3 || Imm > 1020)
                                      TII->get(ARM::tADDrSPi))
                  .add(MI->getOperand(0))
                  .add(MI->getOperand(1))

                    << " to 16-bit: " << *MIB);

  if (MI->mayLoadOrStore())
    return ReduceLoadStore(MBB, MI, Entry);
  case ARM::t2ADDSrr: {
    if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);

    if (MI->getOperand(2).getImm() == 0)
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);

    if (MI->getOperand(1).isImm())
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);

    static const ReduceEntry NarrowEntry =
      { ARM::t2CMPrr,ARM::tCMPr, 0, 0, 0, 1, 1,2, 0, 0,1,0 };
    if (ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR, IsSelfLoop))
    return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);

    if (MI->getOperand(0).isKill())
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
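// ReduceTo2Addr: rewrite into the two-address 16-bit form (NarrowOpc2). The
// destination must match the first source, so commutable instructions may be
// commuted via TII->findCommutedOpIndices / commuteInstruction; immediates are
// bounded by Imm2Limit, and CPSR/predicate constraints are checked through
// VerifyPredAndCC and canAddPseudoFlagDep before the narrow MI is built.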
                                  const ReduceEntry &Entry,
                                  bool LiveCPSR, bool IsSelfLoop) {
  if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())

  if (MI->getOpcode() == ARM::t2MUL) {
    if (!MinimizeSize && STI->avoidMULS())

  } else if (Reg0 != Reg1) {
    unsigned CommOpIdx1 = 1;
    if (!TII->findCommutedOpIndices(*MI, CommOpIdx1, CommOpIdx2) ||
        MI->getOperand(CommOpIdx2).getReg() != Reg0)
    TII->commuteInstruction(*MI, false, CommOpIdx1, CommOpIdx2);

  if (Entry.Imm2Limit) {
    unsigned Imm = MI->getOperand(2).getImm();
    unsigned Limit = (1 << Entry.Imm2Limit) - 1;
  bool SkipPred = false;

    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
  if (!VerifyPredAndCC(MI, Entry, true, Pred, LiveCPSR, HasCC, CCDead))

      canAddPseudoFlagDep(MI, IsSelfLoop))

  MIB.add(MI->getOperand(0));

  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.operands()[i].isOptionalDef())
    if (SkipPred && MCID.operands()[i].isPredicate())
    MIB.add(MI->getOperand(i));

                    << " to 16-bit: " << *MIB);
                                   const ReduceEntry &Entry,
                                   bool LiveCPSR, bool IsSelfLoop) {
  if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())

  unsigned Limit = ~0U;
    Limit = (1 << Entry.Imm1Limit) - 1;

    if (MCID.operands()[i].isPredicate())
      if (!Reg || Reg == ARM::CPSR)
    } else if (MO.isImm() && !MCID.operands()[i].isPredicate()) {
      if (((unsigned)MO.getImm()) > Limit)
  bool SkipPred = false;

    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
  if (!VerifyPredAndCC(MI, Entry, false, Pred, LiveCPSR, HasCC, CCDead))

      canAddPseudoFlagDep(MI, IsSelfLoop))

    MIB.add(MI->getOperand(0));
    MIB.add(MI->getOperand(0));
    MIB.add(MI->getOperand(0));

  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.operands()[i].isOptionalDef())
    if ((MCID.getOpcode() == ARM::t2RSBSri ||
         MCID.getOpcode() == ARM::t2UXTH) && i == 2)
    bool isPred = (i < NumOps && MCID.operands()[i].isPredicate());
    if (SkipPred && isPred)

                    << " to 16-bit: " << *MIB);
    if (MO.getReg() != ARM::CPSR)

  return HasDef || LiveCPSR;

    if (MO.getReg() != ARM::CPSR)
    assert(LiveCPSR && "CPSR liveness tracking is wrong!");
                              bool LiveCPSR, bool IsSelfLoop,
                              bool SkipPrologueEpilogue) {
  unsigned Opcode = MI->getOpcode();
  if (OPI == ReduceOpcodeMap.end())

  const ReduceEntry &Entry = ReduceTable[OPI->second];
    return ReduceSpecial(MBB, MI, Entry, LiveCPSR, IsSelfLoop);

  if (Entry.NarrowOpc2 &&
      ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))

  if (Entry.NarrowOpc1 &&
      ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
                                  bool SkipPrologueEpilogue) {
  HighLatencyCPSR = false;

    const MBBInfo &PInfo = BlockInfo[Pred->getNumber()];
    if (!PInfo.Visited) {
    if (PInfo.HighLatencyCPSR) {
      HighLatencyCPSR = true;

  for (; MII != E; MII = NextMII) {
    NextMII = std::next(MII);
    if (MI->isBundle()) {
    if (MI->isDebugInstr())

    bool NextInSameBundle = NextMII != E && NextMII->isBundledWithPred();
    if (ReduceMI(MBB, MI, LiveCPSR, IsSelfLoop, SkipPrologueEpilogue)) {
      if (NextInSameBundle && !NextMII->isBundledWithPred())
        NextMII->bundleWithPred();
    if (BundleMI && !NextInSameBundle && MI->isInsideBundle()) {

    bool DefCPSR = false;
      HighLatencyCPSR = false;
    } else if (DefCPSR) {

  Info.HighLatencyCPSR = HighLatencyCPSR;
  Info.Visited = true;
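// runOnMachineFunction honours the optional predicate functor passed to
// createThumb2SizeReductionPass: when PredicateFtor is set and rejects this
// function, the pass presumably skips it entirely.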
  if (PredicateFtor && !PredicateFtor(MF.getFunction()))
FunctionPass *llvm::createThumb2SizeReductionPass(
    std::function<bool(const Function &)> Ftor) {
  return new Thumb2SizeReduce(std::move(Ftor));
}