LLVM  3.7.0
Thumb2SizeReduction.cpp
Go to the documentation of this file.
1 //===-- Thumb2SizeReduction.cpp - Thumb2 code size reduction pass -*- C++ -*-=//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 
#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMSubtarget.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Function.h" // To access Function attributes
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
26 using namespace llvm;
27 
28 #define DEBUG_TYPE "t2-reduce-size"
29 
30 STATISTIC(NumNarrows, "Number of 32-bit instrs reduced to 16-bit ones");
31 STATISTIC(Num2Addrs, "Number of 32-bit instrs reduced to 2addr 16-bit ones");
32 STATISTIC(NumLdSts, "Number of 32-bit load / store reduced to 16-bit ones");
33 
34 static cl::opt<int> ReduceLimit("t2-reduce-limit",
35  cl::init(-1), cl::Hidden);
36 static cl::opt<int> ReduceLimit2Addr("t2-reduce-limit2",
37  cl::init(-1), cl::Hidden);
38 static cl::opt<int> ReduceLimitLdSt("t2-reduce-limit3",
39  cl::init(-1), cl::Hidden);
40 
41 namespace {
42  /// ReduceTable - A static table with information on mapping from wide
43  /// opcodes to narrow
44  struct ReduceEntry {
45  uint16_t WideOpc; // Wide opcode
46  uint16_t NarrowOpc1; // Narrow opcode to transform to
47  uint16_t NarrowOpc2; // Narrow opcode when it's two-address
48  uint8_t Imm1Limit; // Limit of immediate field (bits)
49  uint8_t Imm2Limit; // Limit of immediate field when it's two-address
50  unsigned LowRegs1 : 1; // Only possible if low-registers are used
51  unsigned LowRegs2 : 1; // Only possible if low-registers are used (2addr)
52  unsigned PredCC1 : 2; // 0 - If predicated, cc is on and vice versa.
53  // 1 - No cc field.
54  // 2 - Always set CPSR.
55  unsigned PredCC2 : 2;
56  unsigned PartFlag : 1; // 16-bit instruction does partial flag update
57  unsigned Special : 1; // Needs to be dealt with specially
58  unsigned AvoidMovs: 1; // Avoid movs with shifter operand (for Swift)
59  };
60 
61  static const ReduceEntry ReduceTable[] = {
62  // Wide, Narrow1, Narrow2, imm1,imm2, lo1, lo2, P/C,PF,S,AM
63  { ARM::t2ADCrr, 0, ARM::tADC, 0, 0, 0, 1, 0,0, 0,0,0 },
64  { ARM::t2ADDri, ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 0,0, 0,1,0 },
65  { ARM::t2ADDrr, ARM::tADDrr, ARM::tADDhirr, 0, 0, 1, 0, 0,1, 0,0,0 },
66  { ARM::t2ADDSri,ARM::tADDi3, ARM::tADDi8, 3, 8, 1, 1, 2,2, 0,1,0 },
67  { ARM::t2ADDSrr,ARM::tADDrr, 0, 0, 0, 1, 0, 2,0, 0,1,0 },
68  { ARM::t2ANDrr, 0, ARM::tAND, 0, 0, 0, 1, 0,0, 1,0,0 },
69  { ARM::t2ASRri, ARM::tASRri, 0, 5, 0, 1, 0, 0,0, 1,0,1 },
70  { ARM::t2ASRrr, 0, ARM::tASRrr, 0, 0, 0, 1, 0,0, 1,0,1 },
71  { ARM::t2BICrr, 0, ARM::tBIC, 0, 0, 0, 1, 0,0, 1,0,0 },
72  //FIXME: Disable CMN, as CCodes are backwards from compare expectations
73  //{ ARM::t2CMNrr, ARM::tCMN, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
74  { ARM::t2CMNzrr, ARM::tCMNz, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
75  { ARM::t2CMPri, ARM::tCMPi8, 0, 8, 0, 1, 0, 2,0, 0,0,0 },
76  { ARM::t2CMPrr, ARM::tCMPhir, 0, 0, 0, 0, 0, 2,0, 0,1,0 },
77  { ARM::t2EORrr, 0, ARM::tEOR, 0, 0, 0, 1, 0,0, 1,0,0 },
78  // FIXME: adr.n immediate offset must be multiple of 4.
79  //{ ARM::t2LEApcrelJT,ARM::tLEApcrelJT, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
80  { ARM::t2LSLri, ARM::tLSLri, 0, 5, 0, 1, 0, 0,0, 1,0,1 },
81  { ARM::t2LSLrr, 0, ARM::tLSLrr, 0, 0, 0, 1, 0,0, 1,0,1 },
82  { ARM::t2LSRri, ARM::tLSRri, 0, 5, 0, 1, 0, 0,0, 1,0,1 },
83  { ARM::t2LSRrr, 0, ARM::tLSRrr, 0, 0, 0, 1, 0,0, 1,0,1 },
84  { ARM::t2MOVi, ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 1,0,0 },
85  { ARM::t2MOVi16,ARM::tMOVi8, 0, 8, 0, 1, 0, 0,0, 1,1,0 },
86  // FIXME: Do we need the 16-bit 'S' variant?
87  { ARM::t2MOVr,ARM::tMOVr, 0, 0, 0, 0, 0, 1,0, 0,0,0 },
88  { ARM::t2MUL, 0, ARM::tMUL, 0, 0, 0, 1, 0,0, 1,0,0 },
89  { ARM::t2MVNr, ARM::tMVN, 0, 0, 0, 1, 0, 0,0, 0,0,0 },
90  { ARM::t2ORRrr, 0, ARM::tORR, 0, 0, 0, 1, 0,0, 1,0,0 },
91  { ARM::t2REV, ARM::tREV, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
92  { ARM::t2REV16, ARM::tREV16, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
93  { ARM::t2REVSH, ARM::tREVSH, 0, 0, 0, 1, 0, 1,0, 0,0,0 },
94  { ARM::t2RORrr, 0, ARM::tROR, 0, 0, 0, 1, 0,0, 1,0,0 },
95  { ARM::t2RSBri, ARM::tRSB, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
96  { ARM::t2RSBSri,ARM::tRSB, 0, 0, 0, 1, 0, 2,0, 0,1,0 },
97  { ARM::t2SBCrr, 0, ARM::tSBC, 0, 0, 0, 1, 0,0, 0,0,0 },
98  { ARM::t2SUBri, ARM::tSUBi3, ARM::tSUBi8, 3, 8, 1, 1, 0,0, 0,0,0 },
99  { ARM::t2SUBrr, ARM::tSUBrr, 0, 0, 0, 1, 0, 0,0, 0,0,0 },
100  { ARM::t2SUBSri,ARM::tSUBi3, ARM::tSUBi8, 3, 8, 1, 1, 2,2, 0,0,0 },
101  { ARM::t2SUBSrr,ARM::tSUBrr, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
102  { ARM::t2SXTB, ARM::tSXTB, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
103  { ARM::t2SXTH, ARM::tSXTH, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
104  { ARM::t2TSTrr, ARM::tTST, 0, 0, 0, 1, 0, 2,0, 0,0,0 },
105  { ARM::t2UXTB, ARM::tUXTB, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
106  { ARM::t2UXTH, ARM::tUXTH, 0, 0, 0, 1, 0, 1,0, 0,1,0 },
107 
108  // FIXME: Clean this up after splitting each Thumb load / store opcode
109  // into multiple ones.
110  { ARM::t2LDRi12,ARM::tLDRi, ARM::tLDRspi, 5, 8, 1, 0, 0,0, 0,1,0 },
111  { ARM::t2LDRs, ARM::tLDRr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
112  { ARM::t2LDRBi12,ARM::tLDRBi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
113  { ARM::t2LDRBs, ARM::tLDRBr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
114  { ARM::t2LDRHi12,ARM::tLDRHi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
115  { ARM::t2LDRHs, ARM::tLDRHr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
116  { ARM::t2LDRSBs,ARM::tLDRSB, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
117  { ARM::t2LDRSHs,ARM::tLDRSH, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
118  { ARM::t2STRi12,ARM::tSTRi, ARM::tSTRspi, 5, 8, 1, 0, 0,0, 0,1,0 },
119  { ARM::t2STRs, ARM::tSTRr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
120  { ARM::t2STRBi12,ARM::tSTRBi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
121  { ARM::t2STRBs, ARM::tSTRBr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
122  { ARM::t2STRHi12,ARM::tSTRHi, 0, 5, 0, 1, 0, 0,0, 0,1,0 },
123  { ARM::t2STRHs, ARM::tSTRHr, 0, 0, 0, 1, 0, 0,0, 0,1,0 },
124 
125  { ARM::t2LDMIA, ARM::tLDMIA, 0, 0, 0, 1, 1, 1,1, 0,1,0 },
126  { ARM::t2LDMIA_RET,0, ARM::tPOP_RET, 0, 0, 1, 1, 1,1, 0,1,0 },
127  { ARM::t2LDMIA_UPD,ARM::tLDMIA_UPD,ARM::tPOP,0, 0, 1, 1, 1,1, 0,1,0 },
128  // ARM::t2STM (with no basereg writeback) has no Thumb1 equivalent
129  { ARM::t2STMIA_UPD,ARM::tSTMIA_UPD, 0, 0, 0, 1, 1, 1,1, 0,1,0 },
130  { ARM::t2STMDB_UPD, 0, ARM::tPUSH, 0, 0, 1, 1, 1,1, 0,1,0 }
131  };
132 
133  class Thumb2SizeReduce : public MachineFunctionPass {
134  public:
135  static char ID;
136  Thumb2SizeReduce(std::function<bool(const Function &)> Ftor);
137 
138  const Thumb2InstrInfo *TII;
139  const ARMSubtarget *STI;
140 
141  bool runOnMachineFunction(MachineFunction &MF) override;
142 
143  const char *getPassName() const override {
144  return "Thumb2 instruction size reduction pass";
145  }
146 
147  private:
148  /// ReduceOpcodeMap - Maps wide opcode to index of entry in ReduceTable.
149  DenseMap<unsigned, unsigned> ReduceOpcodeMap;
150 
151  bool canAddPseudoFlagDep(MachineInstr *Use, bool IsSelfLoop);
152 
153  bool VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
154  bool is2Addr, ARMCC::CondCodes Pred,
155  bool LiveCPSR, bool &HasCC, bool &CCDead);
156 
157  bool ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
158  const ReduceEntry &Entry);
159 
160  bool ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
161  const ReduceEntry &Entry, bool LiveCPSR, bool IsSelfLoop);
162 
163  /// ReduceTo2Addr - Reduce a 32-bit instruction to a 16-bit two-address
164  /// instruction.
165  bool ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
166  const ReduceEntry &Entry, bool LiveCPSR,
167  bool IsSelfLoop);
168 
169  /// ReduceToNarrow - Reduce a 32-bit instruction to a 16-bit
170  /// non-two-address instruction.
171  bool ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
172  const ReduceEntry &Entry, bool LiveCPSR,
173  bool IsSelfLoop);
174 
175  /// ReduceMI - Attempt to reduce MI, return true on success.
176  bool ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
177  bool LiveCPSR, bool IsSelfLoop);
178 
179  /// ReduceMBB - Reduce width of instructions in the specified basic block.
180  bool ReduceMBB(MachineBasicBlock &MBB);
181 
182  bool OptimizeSize;
183  bool MinimizeSize;
184 
185  // Last instruction to define CPSR in the current block.
186  MachineInstr *CPSRDef;
187  // Was CPSR last defined by a high latency instruction?
188  // When CPSRDef is null, this refers to CPSR defs in predecessors.
189  bool HighLatencyCPSR;
190 
191  struct MBBInfo {
192  // The flags leaving this block have high latency.
193  bool HighLatencyCPSR;
194  // Has this block been visited yet?
195  bool Visited;
196 
197  MBBInfo() : HighLatencyCPSR(false), Visited(false) {}
198  };
199 
200  SmallVector<MBBInfo, 8> BlockInfo;
201 
202  std::function<bool(const Function &)> PredicateFtor;
203  };
204  char Thumb2SizeReduce::ID = 0;
205 }
206 
207 Thumb2SizeReduce::Thumb2SizeReduce(std::function<bool(const Function &)> Ftor)
208  : MachineFunctionPass(ID), PredicateFtor(Ftor) {
209  OptimizeSize = MinimizeSize = false;
210  for (unsigned i = 0, e = array_lengthof(ReduceTable); i != e; ++i) {
211  unsigned FromOpc = ReduceTable[i].WideOpc;
212  if (!ReduceOpcodeMap.insert(std::make_pair(FromOpc, i)).second)
213  assert(false && "Duplicated entries?");
214  }
215 }
216 
217 static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) {
218  for (const uint16_t *Regs = MCID.getImplicitDefs(); *Regs; ++Regs)
219  if (*Regs == ARM::CPSR)
220  return true;
221  return false;
222 }
223 
224 // Check for a likely high-latency flag def.
226  switch(Def->getOpcode()) {
227  case ARM::FMSTAT:
228  case ARM::tMUL:
229  return true;
230  }
231  return false;
232 }
233 
234 /// canAddPseudoFlagDep - For A9 (and other out-of-order) implementations,
235 /// the 's' 16-bit instruction partially update CPSR. Abort the
236 /// transformation to avoid adding false dependency on last CPSR setting
237 /// instruction which hurts the ability for out-of-order execution engine
238 /// to do register renaming magic.
239 /// This function checks if there is a read-of-write dependency between the
240 /// last instruction that defines the CPSR and the current instruction. If there
241 /// is, then there is no harm done since the instruction cannot be retired
242 /// before the CPSR setting instruction anyway.
243 /// Note, we are not doing full dependency analysis here for the sake of compile
244 /// time. We're not looking for cases like:
245 /// r0 = muls ...
246 /// r1 = add.w r0, ...
247 /// ...
248 /// = mul.w r1
249 /// In this case it would have been ok to narrow the mul.w to muls since there
250 /// are indirect RAW dependency between the muls and the mul.w
251 bool
252 Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Use, bool FirstInSelfLoop) {
253  // Disable the check for -Oz (aka OptimizeForSizeHarder).
254  if (MinimizeSize || !STI->avoidCPSRPartialUpdate())
255  return false;
256 
257  if (!CPSRDef)
258  // If this BB loops back to itself, conservatively avoid narrowing the
259  // first instruction that does partial flag update.
260  return HighLatencyCPSR || FirstInSelfLoop;
261 
263  for (const MachineOperand &MO : CPSRDef->operands()) {
264  if (!MO.isReg() || MO.isUndef() || MO.isUse())
265  continue;
266  unsigned Reg = MO.getReg();
267  if (Reg == 0 || Reg == ARM::CPSR)
268  continue;
269  Defs.insert(Reg);
270  }
271 
272  for (const MachineOperand &MO : Use->operands()) {
273  if (!MO.isReg() || MO.isUndef() || MO.isDef())
274  continue;
275  unsigned Reg = MO.getReg();
276  if (Defs.count(Reg))
277  return false;
278  }
279 
280  // If the current CPSR has high latency, try to avoid the false dependency.
281  if (HighLatencyCPSR)
282  return true;
283 
284  // tMOVi8 usually doesn't start long dependency chains, and there are a lot
285  // of them, so always shrink them when CPSR doesn't have high latency.
286  if (Use->getOpcode() == ARM::t2MOVi ||
287  Use->getOpcode() == ARM::t2MOVi16)
288  return false;
289 
290  // No read-after-write dependency. The narrowing will add false dependency.
291  return true;
292 }
293 
294 bool
295 Thumb2SizeReduce::VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
296  bool is2Addr, ARMCC::CondCodes Pred,
297  bool LiveCPSR, bool &HasCC, bool &CCDead) {
298  if ((is2Addr && Entry.PredCC2 == 0) ||
299  (!is2Addr && Entry.PredCC1 == 0)) {
300  if (Pred == ARMCC::AL) {
301  // Not predicated, must set CPSR.
302  if (!HasCC) {
303  // Original instruction was not setting CPSR, but CPSR is not
304  // currently live anyway. It's ok to set it. The CPSR def is
305  // dead though.
306  if (!LiveCPSR) {
307  HasCC = true;
308  CCDead = true;
309  return true;
310  }
311  return false;
312  }
313  } else {
314  // Predicated, must not set CPSR.
315  if (HasCC)
316  return false;
317  }
318  } else if ((is2Addr && Entry.PredCC2 == 2) ||
319  (!is2Addr && Entry.PredCC1 == 2)) {
320  /// Old opcode has an optional def of CPSR.
321  if (HasCC)
322  return true;
323  // If old opcode does not implicitly define CPSR, then it's not ok since
324  // these new opcodes' CPSR def is not meant to be thrown away. e.g. CMP.
325  if (!HasImplicitCPSRDef(MI->getDesc()))
326  return false;
327  HasCC = true;
328  } else {
329  // 16-bit instruction does not set CPSR.
330  if (HasCC)
331  return false;
332  }
333 
334  return true;
335 }
336 
337 static bool VerifyLowRegs(MachineInstr *MI) {
338  unsigned Opc = MI->getOpcode();
339  bool isPCOk = (Opc == ARM::t2LDMIA_RET || Opc == ARM::t2LDMIA_UPD);
340  bool isLROk = (Opc == ARM::t2STMDB_UPD);
341  bool isSPOk = isPCOk || isLROk;
342  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
343  const MachineOperand &MO = MI->getOperand(i);
344  if (!MO.isReg() || MO.isImplicit())
345  continue;
346  unsigned Reg = MO.getReg();
347  if (Reg == 0 || Reg == ARM::CPSR)
348  continue;
349  if (isPCOk && Reg == ARM::PC)
350  continue;
351  if (isLROk && Reg == ARM::LR)
352  continue;
353  if (Reg == ARM::SP) {
354  if (isSPOk)
355  continue;
356  if (i == 1 && (Opc == ARM::t2LDRi12 || Opc == ARM::t2STRi12))
357  // Special case for these ldr / str with sp as base register.
358  continue;
359  }
360  if (!isARMLowRegister(Reg))
361  return false;
362  }
363  return true;
364 }
365 
366 bool
367 Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
368  const ReduceEntry &Entry) {
369  if (ReduceLimitLdSt != -1 && ((int)NumLdSts >= ReduceLimitLdSt))
370  return false;
371 
372  unsigned Scale = 1;
373  bool HasImmOffset = false;
374  bool HasShift = false;
375  bool HasOffReg = true;
376  bool isLdStMul = false;
377  unsigned Opc = Entry.NarrowOpc1;
378  unsigned OpNum = 3; // First 'rest' of operands.
379  uint8_t ImmLimit = Entry.Imm1Limit;
380 
381  switch (Entry.WideOpc) {
382  default:
383  llvm_unreachable("Unexpected Thumb2 load / store opcode!");
384  case ARM::t2LDRi12:
385  case ARM::t2STRi12:
386  if (MI->getOperand(1).getReg() == ARM::SP) {
387  Opc = Entry.NarrowOpc2;
388  ImmLimit = Entry.Imm2Limit;
389  }
390 
391  Scale = 4;
392  HasImmOffset = true;
393  HasOffReg = false;
394  break;
395  case ARM::t2LDRBi12:
396  case ARM::t2STRBi12:
397  HasImmOffset = true;
398  HasOffReg = false;
399  break;
400  case ARM::t2LDRHi12:
401  case ARM::t2STRHi12:
402  Scale = 2;
403  HasImmOffset = true;
404  HasOffReg = false;
405  break;
406  case ARM::t2LDRs:
407  case ARM::t2LDRBs:
408  case ARM::t2LDRHs:
409  case ARM::t2LDRSBs:
410  case ARM::t2LDRSHs:
411  case ARM::t2STRs:
412  case ARM::t2STRBs:
413  case ARM::t2STRHs:
414  HasShift = true;
415  OpNum = 4;
416  break;
417  case ARM::t2LDMIA: {
418  unsigned BaseReg = MI->getOperand(0).getReg();
419  assert(isARMLowRegister(BaseReg));
420 
421  // For the non-writeback version (this one), the base register must be
422  // one of the registers being loaded.
423  bool isOK = false;
424  for (unsigned i = 3; i < MI->getNumOperands(); ++i) {
425  if (MI->getOperand(i).getReg() == BaseReg) {
426  isOK = true;
427  break;
428  }
429  }
430 
431  if (!isOK)
432  return false;
433 
434  OpNum = 0;
435  isLdStMul = true;
436  break;
437  }
438  case ARM::t2LDMIA_RET: {
439  unsigned BaseReg = MI->getOperand(1).getReg();
440  if (BaseReg != ARM::SP)
441  return false;
442  Opc = Entry.NarrowOpc2; // tPOP_RET
443  OpNum = 2;
444  isLdStMul = true;
445  break;
446  }
447  case ARM::t2LDMIA_UPD:
448  case ARM::t2STMIA_UPD:
449  case ARM::t2STMDB_UPD: {
450  OpNum = 0;
451 
452  unsigned BaseReg = MI->getOperand(1).getReg();
453  if (BaseReg == ARM::SP &&
454  (Entry.WideOpc == ARM::t2LDMIA_UPD ||
455  Entry.WideOpc == ARM::t2STMDB_UPD)) {
456  Opc = Entry.NarrowOpc2; // tPOP or tPUSH
457  OpNum = 2;
458  } else if (!isARMLowRegister(BaseReg) ||
459  (Entry.WideOpc != ARM::t2LDMIA_UPD &&
460  Entry.WideOpc != ARM::t2STMIA_UPD)) {
461  return false;
462  }
463 
464  isLdStMul = true;
465  break;
466  }
467  }
468 
469  unsigned OffsetReg = 0;
470  bool OffsetKill = false;
471  bool OffsetInternal = false;
472  if (HasShift) {
473  OffsetReg = MI->getOperand(2).getReg();
474  OffsetKill = MI->getOperand(2).isKill();
475  OffsetInternal = MI->getOperand(2).isInternalRead();
476 
477  if (MI->getOperand(3).getImm())
478  // Thumb1 addressing mode doesn't support shift.
479  return false;
480  }
481 
482  unsigned OffsetImm = 0;
483  if (HasImmOffset) {
484  OffsetImm = MI->getOperand(2).getImm();
485  unsigned MaxOffset = ((1 << ImmLimit) - 1) * Scale;
486 
487  if ((OffsetImm & (Scale - 1)) || OffsetImm > MaxOffset)
488  // Make sure the immediate field fits.
489  return false;
490  }
491 
492  // Add the 16-bit load / store instruction.
493  DebugLoc dl = MI->getDebugLoc();
494  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, TII->get(Opc));
495  if (!isLdStMul) {
496  MIB.addOperand(MI->getOperand(0));
497  MIB.addOperand(MI->getOperand(1));
498 
499  if (HasImmOffset)
500  MIB.addImm(OffsetImm / Scale);
501 
502  assert((!HasShift || OffsetReg) && "Invalid so_reg load / store address!");
503 
504  if (HasOffReg)
505  MIB.addReg(OffsetReg, getKillRegState(OffsetKill) |
506  getInternalReadRegState(OffsetInternal));
507  }
508 
509  // Transfer the rest of operands.
510  for (unsigned e = MI->getNumOperands(); OpNum != e; ++OpNum)
511  MIB.addOperand(MI->getOperand(OpNum));
512 
513  // Transfer memoperands.
514  MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());
515 
516  // Transfer MI flags.
517  MIB.setMIFlags(MI->getFlags());
518 
519  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);
520 
521  MBB.erase_instr(MI);
522  ++NumLdSts;
523  return true;
524 }
525 
526 bool
527 Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
528  const ReduceEntry &Entry,
529  bool LiveCPSR, bool IsSelfLoop) {
530  unsigned Opc = MI->getOpcode();
531  if (Opc == ARM::t2ADDri) {
532  // If the source register is SP, try to reduce to tADDrSPi, otherwise
533  // it's a normal reduce.
534  if (MI->getOperand(1).getReg() != ARM::SP) {
535  if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
536  return true;
537  return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
538  }
539  // Try to reduce to tADDrSPi.
540  unsigned Imm = MI->getOperand(2).getImm();
541  // The immediate must be in range, the destination register must be a low
542  // reg, the predicate must be "always" and the condition flags must not
543  // be being set.
544  if (Imm & 3 || Imm > 1020)
545  return false;
546  if (!isARMLowRegister(MI->getOperand(0).getReg()))
547  return false;
548  if (MI->getOperand(3).getImm() != ARMCC::AL)
549  return false;
550  const MCInstrDesc &MCID = MI->getDesc();
551  if (MCID.hasOptionalDef() &&
552  MI->getOperand(MCID.getNumOperands()-1).getReg() == ARM::CPSR)
553  return false;
554 
555  MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(),
556  TII->get(ARM::tADDrSPi))
557  .addOperand(MI->getOperand(0))
558  .addOperand(MI->getOperand(1))
559  .addImm(Imm / 4); // The tADDrSPi has an implied scale by four.
560  AddDefaultPred(MIB);
561 
562  // Transfer MI flags.
563  MIB.setMIFlags(MI->getFlags());
564 
565  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " <<*MIB);
566 
567  MBB.erase_instr(MI);
568  ++NumNarrows;
569  return true;
570  }
571 
572  if (Entry.LowRegs1 && !VerifyLowRegs(MI))
573  return false;
574 
575  if (MI->mayLoadOrStore())
576  return ReduceLoadStore(MBB, MI, Entry);
577 
578  switch (Opc) {
579  default: break;
580  case ARM::t2ADDSri:
581  case ARM::t2ADDSrr: {
582  unsigned PredReg = 0;
583  if (getInstrPredicate(MI, PredReg) == ARMCC::AL) {
584  switch (Opc) {
585  default: break;
586  case ARM::t2ADDSri: {
587  if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
588  return true;
589  // fallthrough
590  }
591  case ARM::t2ADDSrr:
592  return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
593  }
594  }
595  break;
596  }
597  case ARM::t2RSBri:
598  case ARM::t2RSBSri:
599  case ARM::t2SXTB:
600  case ARM::t2SXTH:
601  case ARM::t2UXTB:
602  case ARM::t2UXTH:
603  if (MI->getOperand(2).getImm() == 0)
604  return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
605  break;
606  case ARM::t2MOVi16:
607  // Can convert only 'pure' immediate operands, not immediates obtained as
608  // globals' addresses.
609  if (MI->getOperand(1).isImm())
610  return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
611  break;
612  case ARM::t2CMPrr: {
613  // Try to reduce to the lo-reg only version first. Why there are two
614  // versions of the instruction is a mystery.
615  // It would be nice to just have two entries in the master table that
616  // are prioritized, but the table assumes a unique entry for each
617  // source insn opcode. So for now, we hack a local entry record to use.
618  static const ReduceEntry NarrowEntry =
619  { ARM::t2CMPrr,ARM::tCMPr, 0, 0, 0, 1, 1,2, 0, 0,1,0 };
620  if (ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR, IsSelfLoop))
621  return true;
622  return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
623  }
624  }
625  return false;
626 }
627 
628 bool
629 Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
630  const ReduceEntry &Entry,
631  bool LiveCPSR, bool IsSelfLoop) {
632 
633  if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
634  return false;
635 
636  if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs &&
637  STI->avoidMOVsShifterOperand())
638  // Don't issue movs with shifter operand for some CPUs unless we
639  // are optimizing / minimizing for size.
640  return false;
641 
642  unsigned Reg0 = MI->getOperand(0).getReg();
643  unsigned Reg1 = MI->getOperand(1).getReg();
644  // t2MUL is "special". The tied source operand is second, not first.
645  if (MI->getOpcode() == ARM::t2MUL) {
646  unsigned Reg2 = MI->getOperand(2).getReg();
647  // Early exit if the regs aren't all low regs.
648  if (!isARMLowRegister(Reg0) || !isARMLowRegister(Reg1)
649  || !isARMLowRegister(Reg2))
650  return false;
651  if (Reg0 != Reg2) {
652  // If the other operand also isn't the same as the destination, we
653  // can't reduce.
654  if (Reg1 != Reg0)
655  return false;
656  // Try to commute the operands to make it a 2-address instruction.
657  MachineInstr *CommutedMI = TII->commuteInstruction(MI);
658  if (!CommutedMI)
659  return false;
660  }
661  } else if (Reg0 != Reg1) {
662  // Try to commute the operands to make it a 2-address instruction.
663  unsigned CommOpIdx1, CommOpIdx2;
664  if (!TII->findCommutedOpIndices(MI, CommOpIdx1, CommOpIdx2) ||
665  CommOpIdx1 != 1 || MI->getOperand(CommOpIdx2).getReg() != Reg0)
666  return false;
667  MachineInstr *CommutedMI = TII->commuteInstruction(MI);
668  if (!CommutedMI)
669  return false;
670  }
671  if (Entry.LowRegs2 && !isARMLowRegister(Reg0))
672  return false;
673  if (Entry.Imm2Limit) {
674  unsigned Imm = MI->getOperand(2).getImm();
675  unsigned Limit = (1 << Entry.Imm2Limit) - 1;
676  if (Imm > Limit)
677  return false;
678  } else {
679  unsigned Reg2 = MI->getOperand(2).getReg();
680  if (Entry.LowRegs2 && !isARMLowRegister(Reg2))
681  return false;
682  }
683 
684  // Check if it's possible / necessary to transfer the predicate.
685  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc2);
686  unsigned PredReg = 0;
687  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
688  bool SkipPred = false;
689  if (Pred != ARMCC::AL) {
690  if (!NewMCID.isPredicable())
691  // Can't transfer predicate, fail.
692  return false;
693  } else {
694  SkipPred = !NewMCID.isPredicable();
695  }
696 
697  bool HasCC = false;
698  bool CCDead = false;
699  const MCInstrDesc &MCID = MI->getDesc();
700  if (MCID.hasOptionalDef()) {
701  unsigned NumOps = MCID.getNumOperands();
702  HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
703  if (HasCC && MI->getOperand(NumOps-1).isDead())
704  CCDead = true;
705  }
706  if (!VerifyPredAndCC(MI, Entry, true, Pred, LiveCPSR, HasCC, CCDead))
707  return false;
708 
709  // Avoid adding a false dependency on partial flag update by some 16-bit
710  // instructions which has the 's' bit set.
711  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
712  canAddPseudoFlagDep(MI, IsSelfLoop))
713  return false;
714 
715  // Add the 16-bit instruction.
716  DebugLoc dl = MI->getDebugLoc();
717  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
718  MIB.addOperand(MI->getOperand(0));
719  if (NewMCID.hasOptionalDef()) {
720  if (HasCC)
721  AddDefaultT1CC(MIB, CCDead);
722  else
723  AddNoT1CC(MIB);
724  }
725 
726  // Transfer the rest of operands.
727  unsigned NumOps = MCID.getNumOperands();
728  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
729  if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
730  continue;
731  if (SkipPred && MCID.OpInfo[i].isPredicate())
732  continue;
733  MIB.addOperand(MI->getOperand(i));
734  }
735 
736  // Transfer MI flags.
737  MIB.setMIFlags(MI->getFlags());
738 
739  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);
740 
741  MBB.erase_instr(MI);
742  ++Num2Addrs;
743  return true;
744 }
745 
746 bool
747 Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
748  const ReduceEntry &Entry,
749  bool LiveCPSR, bool IsSelfLoop) {
750  if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
751  return false;
752 
753  if (!MinimizeSize && !OptimizeSize && Entry.AvoidMovs &&
754  STI->avoidMOVsShifterOperand())
755  // Don't issue movs with shifter operand for some CPUs unless we
756  // are optimizing / minimizing for size.
757  return false;
758 
759  unsigned Limit = ~0U;
760  if (Entry.Imm1Limit)
761  Limit = (1 << Entry.Imm1Limit) - 1;
762 
763  const MCInstrDesc &MCID = MI->getDesc();
764  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
765  if (MCID.OpInfo[i].isPredicate())
766  continue;
767  const MachineOperand &MO = MI->getOperand(i);
768  if (MO.isReg()) {
769  unsigned Reg = MO.getReg();
770  if (!Reg || Reg == ARM::CPSR)
771  continue;
772  if (Entry.LowRegs1 && !isARMLowRegister(Reg))
773  return false;
774  } else if (MO.isImm() &&
775  !MCID.OpInfo[i].isPredicate()) {
776  if (((unsigned)MO.getImm()) > Limit)
777  return false;
778  }
779  }
780 
781  // Check if it's possible / necessary to transfer the predicate.
782  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc1);
783  unsigned PredReg = 0;
784  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
785  bool SkipPred = false;
786  if (Pred != ARMCC::AL) {
787  if (!NewMCID.isPredicable())
788  // Can't transfer predicate, fail.
789  return false;
790  } else {
791  SkipPred = !NewMCID.isPredicable();
792  }
793 
794  bool HasCC = false;
795  bool CCDead = false;
796  if (MCID.hasOptionalDef()) {
797  unsigned NumOps = MCID.getNumOperands();
798  HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
799  if (HasCC && MI->getOperand(NumOps-1).isDead())
800  CCDead = true;
801  }
802  if (!VerifyPredAndCC(MI, Entry, false, Pred, LiveCPSR, HasCC, CCDead))
803  return false;
804 
805  // Avoid adding a false dependency on partial flag update by some 16-bit
806  // instructions which has the 's' bit set.
807  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
808  canAddPseudoFlagDep(MI, IsSelfLoop))
809  return false;
810 
811  // Add the 16-bit instruction.
812  DebugLoc dl = MI->getDebugLoc();
813  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
814  MIB.addOperand(MI->getOperand(0));
815  if (NewMCID.hasOptionalDef()) {
816  if (HasCC)
817  AddDefaultT1CC(MIB, CCDead);
818  else
819  AddNoT1CC(MIB);
820  }
821 
822  // Transfer the rest of operands.
823  unsigned NumOps = MCID.getNumOperands();
824  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
825  if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
826  continue;
827  if ((MCID.getOpcode() == ARM::t2RSBSri ||
828  MCID.getOpcode() == ARM::t2RSBri ||
829  MCID.getOpcode() == ARM::t2SXTB ||
830  MCID.getOpcode() == ARM::t2SXTH ||
831  MCID.getOpcode() == ARM::t2UXTB ||
832  MCID.getOpcode() == ARM::t2UXTH) && i == 2)
833  // Skip the zero immediate operand, it's now implicit.
834  continue;
835  bool isPred = (i < NumOps && MCID.OpInfo[i].isPredicate());
836  if (SkipPred && isPred)
837  continue;
838  const MachineOperand &MO = MI->getOperand(i);
839  if (MO.isReg() && MO.isImplicit() && MO.getReg() == ARM::CPSR)
840  // Skip implicit def of CPSR. Either it's modeled as an optional
841  // def now or it's already an implicit def on the new instruction.
842  continue;
843  MIB.addOperand(MO);
844  }
845  if (!MCID.isPredicable() && NewMCID.isPredicable())
846  AddDefaultPred(MIB);
847 
848  // Transfer MI flags.
849  MIB.setMIFlags(MI->getFlags());
850 
851  DEBUG(errs() << "Converted 32-bit: " << *MI << " to 16-bit: " << *MIB);
852 
853  MBB.erase_instr(MI);
854  ++NumNarrows;
855  return true;
856 }
857 
858 static bool UpdateCPSRDef(MachineInstr &MI, bool LiveCPSR, bool &DefCPSR) {
859  bool HasDef = false;
860  for (const MachineOperand &MO : MI.operands()) {
861  if (!MO.isReg() || MO.isUndef() || MO.isUse())
862  continue;
863  if (MO.getReg() != ARM::CPSR)
864  continue;
865 
866  DefCPSR = true;
867  if (!MO.isDead())
868  HasDef = true;
869  }
870 
871  return HasDef || LiveCPSR;
872 }
873 
874 static bool UpdateCPSRUse(MachineInstr &MI, bool LiveCPSR) {
875  for (const MachineOperand &MO : MI.operands()) {
876  if (!MO.isReg() || MO.isUndef() || MO.isDef())
877  continue;
878  if (MO.getReg() != ARM::CPSR)
879  continue;
880  assert(LiveCPSR && "CPSR liveness tracking is wrong!");
881  if (MO.isKill()) {
882  LiveCPSR = false;
883  break;
884  }
885  }
886 
887  return LiveCPSR;
888 }
889 
890 bool Thumb2SizeReduce::ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
891  bool LiveCPSR, bool IsSelfLoop) {
892  unsigned Opcode = MI->getOpcode();
893  DenseMap<unsigned, unsigned>::iterator OPI = ReduceOpcodeMap.find(Opcode);
894  if (OPI == ReduceOpcodeMap.end())
895  return false;
896  const ReduceEntry &Entry = ReduceTable[OPI->second];
897 
898  // Don't attempt normal reductions on "special" cases for now.
899  if (Entry.Special)
900  return ReduceSpecial(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
901 
902  // Try to transform to a 16-bit two-address instruction.
903  if (Entry.NarrowOpc2 &&
904  ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
905  return true;
906 
907  // Try to transform to a 16-bit non-two-address instruction.
908  if (Entry.NarrowOpc1 &&
909  ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
910  return true;
911 
912  return false;
913 }
914 
915 bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
916  bool Modified = false;
917 
918  // Yes, CPSR could be livein.
919  bool LiveCPSR = MBB.isLiveIn(ARM::CPSR);
920  MachineInstr *BundleMI = nullptr;
921 
922  CPSRDef = nullptr;
923  HighLatencyCPSR = false;
924 
925  // Check predecessors for the latest CPSRDef.
926  for (auto *Pred : MBB.predecessors()) {
927  const MBBInfo &PInfo = BlockInfo[Pred->getNumber()];
928  if (!PInfo.Visited) {
929  // Since blocks are visited in RPO, this must be a back-edge.
930  continue;
931  }
932  if (PInfo.HighLatencyCPSR) {
933  HighLatencyCPSR = true;
934  break;
935  }
936  }
937 
938  // If this BB loops back to itself, conservatively avoid narrowing the
939  // first instruction that does partial flag update.
940  bool IsSelfLoop = MBB.isSuccessor(&MBB);
943  for (; MII != E; MII = NextMII) {
944  NextMII = std::next(MII);
945 
946  MachineInstr *MI = &*MII;
947  if (MI->isBundle()) {
948  BundleMI = MI;
949  continue;
950  }
951  if (MI->isDebugValue())
952  continue;
953 
954  LiveCPSR = UpdateCPSRUse(*MI, LiveCPSR);
955 
956  // Does NextMII belong to the same bundle as MI?
957  bool NextInSameBundle = NextMII != E && NextMII->isBundledWithPred();
958 
959  if (ReduceMI(MBB, MI, LiveCPSR, IsSelfLoop)) {
960  Modified = true;
961  MachineBasicBlock::instr_iterator I = std::prev(NextMII);
962  MI = &*I;
963  // Removing and reinserting the first instruction in a bundle will break
964  // up the bundle. Fix the bundling if it was broken.
965  if (NextInSameBundle && !NextMII->isBundledWithPred())
966  NextMII->bundleWithPred();
967  }
968 
969  if (!NextInSameBundle && MI->isInsideBundle()) {
970  // FIXME: Since post-ra scheduler operates on bundles, the CPSR kill
971  // marker is only on the BUNDLE instruction. Process the BUNDLE
972  // instruction as we finish with the bundled instruction to work around
973  // the inconsistency.
974  if (BundleMI->killsRegister(ARM::CPSR))
975  LiveCPSR = false;
976  MachineOperand *MO = BundleMI->findRegisterDefOperand(ARM::CPSR);
977  if (MO && !MO->isDead())
978  LiveCPSR = true;
979  MO = BundleMI->findRegisterUseOperand(ARM::CPSR);
980  if (MO && !MO->isKill())
981  LiveCPSR = true;
982  }
983 
984  bool DefCPSR = false;
985  LiveCPSR = UpdateCPSRDef(*MI, LiveCPSR, DefCPSR);
986  if (MI->isCall()) {
987  // Calls don't really set CPSR.
988  CPSRDef = nullptr;
989  HighLatencyCPSR = false;
990  IsSelfLoop = false;
991  } else if (DefCPSR) {
992  // This is the last CPSR defining instruction.
993  CPSRDef = MI;
994  HighLatencyCPSR = isHighLatencyCPSR(CPSRDef);
995  IsSelfLoop = false;
996  }
997  }
998 
999  MBBInfo &Info = BlockInfo[MBB.getNumber()];
1000  Info.HighLatencyCPSR = HighLatencyCPSR;
1001  Info.Visited = true;
1002  return Modified;
1003 }
1004 
1005 bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
1006  if (PredicateFtor && !PredicateFtor(*MF.getFunction()))
1007  return false;
1008 
1009  STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
1010  if (STI->isThumb1Only() || STI->prefers32BitThumb())
1011  return false;
1012 
1013  TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo());
1014 
1015  // Optimizing / minimizing size?
1016  OptimizeSize = MF.getFunction()->hasFnAttribute(Attribute::OptimizeForSize);
1017  MinimizeSize = MF.getFunction()->hasFnAttribute(Attribute::MinSize);
1018 
1019  BlockInfo.clear();
1020  BlockInfo.resize(MF.getNumBlockIDs());
1021 
1022  // Visit blocks in reverse post-order so LastCPSRDef is known for all
1023  // predecessors.
1025  bool Modified = false;
1027  I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
1028  Modified |= ReduceMBB(**I);
1029  return Modified;
1030 }
1031 
1032 /// createThumb2SizeReductionPass - Returns an instance of the Thumb2 size
1033 /// reduction pass.
1035  std::function<bool(const Function &)> Ftor) {
1036  return new Thumb2SizeReduce(Ftor);
1037 }
static bool VerifyLowRegs(MachineInstr *MI)
bool isInsideBundle() const
Return true if MI is in a bundle (but not the first MI in a bundle).
Definition: MachineInstr.h:205
bool isImplicit() const
const uint16_t * getImplicitDefs() const
Return a list of registers that are potentially written by any instance of this machine instruction...
Definition: MCInstrDesc.h:497
raw_ostream & errs()
This returns a reference to a raw_ostream for standard error.
instr_iterator instr_begin()
bool isPredicate() const
Set if this is one of the operands that made up of the predicate operand that controls an isPredicabl...
Definition: MCInstrDesc.h:82
instr_iterator instr_end()
STATISTIC(NumFunctions,"Total number of functions")
MachineOperand * findRegisterDefOperand(unsigned Reg, bool isDead=false, const TargetRegisterInfo *TRI=nullptr)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an index.
Definition: MachineInstr.h:912
int getNumber() const
getNumber - MachineBasicBlocks are uniquely numbered at the function level, unless they're not in a MachineFunction yet, in which case this returns -1.
bool hasOptionalDef() const
Set if this instruction has an optional definition, e.g.
Definition: MCInstrDesc.h:204
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:138
bool isDead() const
MachineOperand * findRegisterUseOperand(unsigned Reg, bool isKill=false, const TargetRegisterInfo *TRI=nullptr)
Wrapper for findRegisterUseOperandIdx, it returns a pointer to the MachineOperand rather than an index.
Definition: MachineInstr.h:894
const MCInstrDesc & getDesc() const
Returns the target instruction descriptor of this MachineInstr.
Definition: MachineInstr.h:264
unsigned getInternalReadRegState(bool B)
A debug info location.
Definition: DebugLoc.h:34
const Function * getFunction() const
getFunction - Return the LLVM function that this machine code represents
static cl::opt< int > ReduceLimit2Addr("t2-reduce-limit2", cl::init(-1), cl::Hidden)
Instructions::iterator instr_iterator
iterator_range< mop_iterator > operands()
Definition: MachineInstr.h:295
static const MachineInstrBuilder & AddNoT1CC(const MachineInstrBuilder &MIB)
static const MachineInstrBuilder & AddDefaultPred(const MachineInstrBuilder &MIB)
unsigned getNumBlockIDs() const
getNumBlockIDs - Return the number of MBB ID's allocated.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFunctionPass - This class adapts the FunctionPass interface to allow convenient creation of passes that operate on the MachineFunction representation.
const HexagonInstrInfo * TII
bool isImm() const
isImm - Tests if this is a MO_Immediate operand.
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
Definition: ErrorHandling.h:98
A Use represents the edge between a Value definition and its users.
Definition: Use.h:69
bool isReg() const
isReg - Tests if this is a MO_Register operand.
Reg
All possible values of the reg field in the ModR/M byte.
bool isUndef() const
#define false
Definition: ConvertUTF.c:65
const MachineInstrBuilder & addImm(int64_t Val) const
addImm - Add a new immediate operand.
unsigned getNumOperands() const
Access to explicit operands of the instruction.
Definition: MachineInstr.h:271
bool isPredicable() const
Return true if this instruction has a predicate operand that controls execution.
Definition: MCInstrDesc.h:264
bool isKill() const
int64_t getImm() const
unsigned getKillRegState(bool B)
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:267
LLVM_CONSTEXPR size_t array_lengthof(T(&)[N])
Find the length of an array.
Definition: STLExtras.h:247
bool isDebugValue() const
Definition: MachineInstr.h:748
mmo_iterator memoperands_end() const
Definition: MachineInstr.h:341
bool isBundle() const
Definition: MachineInstr.h:775
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:325
instr_iterator erase_instr(MachineInstr *I)
Remove an instruction from the instruction list and delete it.
bool isOptionalDef() const
Set if this operand is a optional def.
Definition: MCInstrDesc.h:85
ARMCC::CondCodes getInstrPredicate(const MachineInstr *MI, unsigned &PredReg)
getInstrPredicate - If instruction is predicated, returns its predicate condition, otherwise returns AL.
SmallSet - This maintains a set of unique values, optimizing for the case when the set is small (less...
Definition: SmallSet.h:32
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:273
const MachineInstrBuilder & setMIFlags(unsigned Flags) const
FunctionPass class - This class is used to implement most global optimizations.
Definition: Pass.h:294
std::pair< NoneType, bool > insert(const T &V)
insert - Insert an element into the set if it isn't already there.
Definition: SmallSet.h:69
iterator_range< pred_iterator > predecessors()
MachineInstrBuilder BuildMI(MachineFunction &MF, DebugLoc DL, const MCInstrDesc &MCID)
BuildMI - Builder interface.
static bool UpdateCPSRUse(MachineInstr &MI, bool LiveCPSR)
std::vector< NodeType * >::reverse_iterator rpo_iterator
unsigned getOpcode() const
Return the opcode number for this descriptor.
Definition: MCInstrDesc.h:178
size_type count(const T &V) const
count - Return 1 if the element is in the set, 0 otherwise.
Definition: SmallSet.h:53
static cl::opt< int > ReduceLimit("t2-reduce-limit", cl::init(-1), cl::Hidden)
MachineOperand class - Representation of each machine instruction operand.
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small...
Definition: SmallVector.h:861
bool isSuccessor(const MachineBasicBlock *MBB) const
isSuccessor - Return true if the specified MBB is a successor of this block.
static cl::opt< int > ReduceLimitLdSt("t2-reduce-limit3", cl::init(-1), cl::Hidden)
static bool HasImplicitCPSRDef(const MCInstrDesc &MCID)
static bool isHighLatencyCPSR(MachineInstr *Def)
static unsigned getReg(const void *D, unsigned RC, unsigned RegNo)
const DebugLoc & getDebugLoc() const
Returns the debug location id of this MachineInstr.
Definition: MachineInstr.h:238
static bool UpdateCPSRDef(MachineInstr &MI, bool LiveCPSR, bool &DefCPSR)
static bool isARMLowRegister(unsigned Reg)
isARMLowRegister - Returns true if the register is a low register (r0-r7).
Definition: ARMBaseInfo.h:210
Representation of each machine instruction.
Definition: MachineInstr.h:51
bool hasFnAttribute(Attribute::AttrKind Kind) const
Return true if the function has the attribute.
Definition: Function.h:217
bool isLiveIn(unsigned Reg) const
isLiveIn - Return true if the specified register is in the live in set.
bool mayLoadOrStore(QueryType Type=AnyInBundle) const
Return true if this instruction could possibly read or modify memory.
Definition: MachineInstr.h:589
#define I(x, y, z)
Definition: MD5.cpp:54
bool isCall(QueryType Type=AnyInBundle) const
Definition: MachineInstr.h:403
iterator find(const KeyT &Val)
Definition: DenseMap.h:124
uint8_t getFlags() const
Return the MI flags bitvector.
Definition: MachineInstr.h:145
unsigned getReg() const
getReg - Returns the register number.
bool killsRegister(unsigned Reg, const TargetRegisterInfo *TRI=nullptr) const
Return true if the MachineInstr kills the specified register.
Definition: MachineInstr.h:857
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:185
const MCOperandInfo * OpInfo
Definition: MCInstrDesc.h:149
const MachineInstrBuilder & addOperand(const MachineOperand &MO) const
#define DEBUG(X)
Definition: Debug.h:92
print Print MemDeps of function
FunctionPass * createThumb2SizeReductionPass(std::function< bool(const Function &)> Ftor=nullptr)
createThumb2SizeReductionPass - Returns an instance of the Thumb2 size reduction pass.
const MachineInstrBuilder & addReg(unsigned RegNo, unsigned flags=0, unsigned SubReg=0) const
addReg - Add a new virtual register operand...
void setMemRefs(mmo_iterator NewMemRefs, mmo_iterator NewMemRefsEnd)
Assign this MachineInstr's memory reference descriptor list.
static const MachineInstrBuilder & AddDefaultT1CC(const MachineInstrBuilder &MIB, bool isDead=false)
bool isInternalRead() const
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:340