X86FixupLEAs.cpp
//===-- X86FixupLEAs.cpp - use or replace LEA instructions -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the pass that finds instructions that can be
// re-written as LEA instructions in order to reduce pipeline delays.
// When optimizing for size it replaces suitable LEAs with INC or DEC.
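//
// For example, on targets where LEA executes in the address-generation unit
// (the LEAusesAG() case handled below), a register-to-register copy whose
// result feeds an address computation may be rewritten into an LEA, roughly:
//   movl %ecx, %eax   ==>   leal (%ecx), %eax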
//
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrInfo.h"
#include "X86Subtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetSchedule.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define FIXUPLEA_DESC "X86 LEA Fixup"
#define FIXUPLEA_NAME "x86-fixup-LEAs"

#define DEBUG_TYPE FIXUPLEA_NAME

STATISTIC(NumLEAs, "Number of LEA instructions created");

namespace {
class FixupLEAPass : public MachineFunctionPass {
  enum RegUsageState { RU_NotUsed, RU_Write, RU_Read };

  /// Loop over all of the instructions in the basic block
  /// replacing applicable instructions with LEA instructions,
  /// where appropriate.
  bool processBasicBlock(MachineFunction &MF, MachineFunction::iterator MFI,
                         bool IsSlowLEA, bool IsSlow3OpsLEA);

  /// Given a machine register, look for the instruction
  /// which writes it in the current basic block. If found,
  /// try to replace it with an equivalent LEA instruction.
  /// If replacement succeeds, then also process the newly created
  /// instruction.
  void seekLEAFixup(MachineOperand &p, MachineBasicBlock::iterator &I,
                    MachineFunction::iterator MFI);

  /// Given a memory access or LEA instruction
  /// whose address mode uses a base and/or index register, look for
  /// an opportunity to replace the instruction which sets the base or index
  /// register with an equivalent LEA instruction.
  void processInstruction(MachineBasicBlock::iterator &I,
                          MachineFunction::iterator MFI);

  /// Given a LEA instruction which is unprofitable
  /// on SlowLEA targets, try to replace it with an equivalent ADD instruction.
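  /// For example (a sketch assuming the LEA's destination is also its base
  /// register, which is what this function requires):
  ///   lea 0x8(%rax), %rax   ==>   add $0x8, %rax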
  void processInstructionForSlowLEA(MachineBasicBlock::iterator &I,
                                    MachineFunction::iterator MFI);

  /// Given a LEA instruction which is unprofitable
  /// on SNB+, try to replace it with other instructions.
  /// According to Intel's Optimization Reference Manual:
  /// " For LEA instructions with three source operands and some specific
  ///   situations, instruction latency has increased to 3 cycles, and must
  ///   dispatch via port 1:
  ///   - LEA that has all three source operands: base, index, and offset
  ///   - LEA that uses base and index registers where the base is EBP, RBP,
  ///     or R13
  ///   - LEA that uses RIP relative addressing mode
  ///   - LEA that uses 16-bit addressing mode "
  /// This function currently handles the first 2 cases only.
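  /// As an illustration of the first case, a three-operand LEA may be split
  /// into a two-operand LEA plus an ADD for the displacement (a sketch of the
  /// transforms implemented below):
  ///   lea 0x10(%rdi,%rsi,1), %rax   ==>   lea (%rdi,%rsi,1), %rax
  ///                                       add $0x10, %rax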
  MachineInstr *processInstrForSlow3OpLEA(MachineInstr &MI,
                                          MachineFunction::iterator MFI);

  /// Look for LEAs that add 1 to reg or subtract 1 from reg
  /// and convert them to INC or DEC respectively.
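  /// For example (a sketch of the intended rewrite):
  ///   lea 0x1(%rdi), %rdi    ==>   inc %rdi
  ///   lea -0x1(%rdi), %rdi   ==>   dec %rdi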
  bool fixupIncDec(MachineBasicBlock::iterator &I,
                   MachineFunction::iterator MFI) const;

  /// Determine if an instruction references a machine register
  /// and, if so, whether it reads or writes the register.
  RegUsageState usesRegister(MachineOperand &p, MachineBasicBlock::iterator I);

  /// Step backwards through a basic block, looking
  /// for an instruction which writes a register within
  /// a maximum of INSTR_DISTANCE_THRESHOLD instruction latency cycles.
  MachineBasicBlock::iterator searchBackwards(MachineOperand &p,
                                              MachineBasicBlock::iterator &I,
                                              MachineFunction::iterator MFI);

  /// If an instruction can be converted to an
  /// equivalent LEA, insert the new instruction into the basic block
  /// and return a pointer to it. Otherwise, return nullptr.
  MachineInstr *postRAConvertToLEA(MachineFunction::iterator &MFI,
                                   MachineBasicBlock::iterator &MBBI) const;

public:
  static char ID;

  StringRef getPassName() const override { return FIXUPLEA_DESC; }

  FixupLEAPass() : MachineFunctionPass(ID) {
    initializeFixupLEAPassPass(*PassRegistry::getPassRegistry());
  }

  /// Loop over all of the basic blocks,
  /// replacing instructions by equivalent LEA instructions
  /// if needed and when possible.
  bool runOnMachineFunction(MachineFunction &MF) override;

  // This pass runs after regalloc and doesn't support VReg operands.
  MachineFunctionProperties getRequiredProperties() const override {
    return MachineFunctionProperties().set(
        MachineFunctionProperties::Property::NoVRegs);
  }

private:
  TargetSchedModel TSM;
  MachineFunction *MF;
  const X86InstrInfo *TII; // Machine instruction info.
  bool OptIncDec;
  bool OptLEA;
};
}

char FixupLEAPass::ID = 0;

INITIALIZE_PASS(FixupLEAPass, FIXUPLEA_NAME, FIXUPLEA_DESC, false, false)

MachineInstr *
FixupLEAPass::postRAConvertToLEA(MachineFunction::iterator &MFI,
                                 MachineBasicBlock::iterator &MBBI) const {
  MachineInstr &MI = *MBBI;
  switch (MI.getOpcode()) {
  case X86::MOV32rr:
  case X86::MOV64rr: {
    const MachineOperand &Src = MI.getOperand(1);
    const MachineOperand &Dest = MI.getOperand(0);
    MachineInstr *NewMI =
        BuildMI(*MF, MI.getDebugLoc(),
                TII->get(MI.getOpcode() == X86::MOV32rr ? X86::LEA32r
                                                        : X86::LEA64r))
            .add(Dest)
            .add(Src)
            .addImm(1)
            .addReg(0)
            .addImm(0)
            .addReg(0);
    MFI->insert(MBBI, NewMI); // Insert the new inst
    return NewMI;
  }
  case X86::ADD64ri32:
  case X86::ADD64ri8:
  case X86::ADD64ri32_DB:
  case X86::ADD64ri8_DB:
  case X86::ADD32ri:
  case X86::ADD32ri8:
  case X86::ADD32ri_DB:
  case X86::ADD32ri8_DB:
  case X86::ADD16ri:
  case X86::ADD16ri8:
  case X86::ADD16ri_DB:
  case X86::ADD16ri8_DB:
    if (!MI.getOperand(2).isImm()) {
      // convertToThreeAddress will call getImm()
      // which requires isImm() to be true
      return nullptr;
    }
    break;
  case X86::ADD16rr:
  case X86::ADD16rr_DB:
    if (MI.getOperand(1).getReg() != MI.getOperand(2).getReg()) {
      // if src1 != src2, then convertToThreeAddress will
      // need to create a Virtual register, which we cannot do
      // after register allocation.
      return nullptr;
    }
  }
  return TII->convertToThreeAddress(MFI, MI, nullptr);
}

FunctionPass *llvm::createX86FixupLEAs() { return new FixupLEAPass(); }

bool FixupLEAPass::runOnMachineFunction(MachineFunction &Func) {
  if (skipFunction(Func.getFunction()))
    return false;

  MF = &Func;
  const X86Subtarget &ST = Func.getSubtarget<X86Subtarget>();
  bool IsSlowLEA = ST.slowLEA();
  bool IsSlow3OpsLEA = ST.slow3OpsLEA();

  OptIncDec = !ST.slowIncDec() || Func.getFunction().optForMinSize();
  OptLEA = ST.LEAusesAG() || IsSlowLEA || IsSlow3OpsLEA;

  if (!OptLEA && !OptIncDec)
    return false;

  TSM.init(&Func.getSubtarget());
  TII = ST.getInstrInfo();

  LLVM_DEBUG(dbgs() << "Start X86FixupLEAs\n";);
  // Process all basic blocks.
  for (MachineFunction::iterator I = Func.begin(), E = Func.end(); I != E; ++I)
    processBasicBlock(Func, I, IsSlowLEA, IsSlow3OpsLEA);
  LLVM_DEBUG(dbgs() << "End X86FixupLEAs\n";);

  return true;
}

FixupLEAPass::RegUsageState
FixupLEAPass::usesRegister(MachineOperand &p, MachineBasicBlock::iterator I) {
  RegUsageState RegUsage = RU_NotUsed;
  MachineInstr &MI = *I;

  for (unsigned int i = 0; i < MI.getNumOperands(); ++i) {
    MachineOperand &opnd = MI.getOperand(i);
    if (opnd.isReg() && opnd.getReg() == p.getReg()) {
      if (opnd.isDef())
        return RU_Write;
      RegUsage = RU_Read;
    }
  }
  return RegUsage;
}

/// getPreviousInstr - Given a reference to an instruction in a basic
/// block, return a reference to the previous instruction in the block,
/// wrapping around to the last instruction of the block if the block
/// branches to itself.
static bool getPreviousInstr(MachineBasicBlock::iterator &I,
                             MachineFunction::iterator MFI) {
  if (I == MFI->begin()) {
    if (MFI->isPredecessor(&*MFI)) {
      I = --MFI->end();
      return true;
    } else
      return false;
  }
  --I;
  return true;
}

MachineBasicBlock::iterator
FixupLEAPass::searchBackwards(MachineOperand &p, MachineBasicBlock::iterator &I,
                              MachineFunction::iterator MFI) {
  int InstrDistance = 1;
  MachineBasicBlock::iterator CurInst;
  static const int INSTR_DISTANCE_THRESHOLD = 5;

  CurInst = I;
  bool Found;
  Found = getPreviousInstr(CurInst, MFI);
  while (Found && I != CurInst) {
    if (CurInst->isCall() || CurInst->isInlineAsm())
      break;
    if (InstrDistance > INSTR_DISTANCE_THRESHOLD)
      break; // too far back to make a difference
    if (usesRegister(p, CurInst) == RU_Write) {
      return CurInst;
    }
    InstrDistance += TSM.computeInstrLatency(&*CurInst);
    Found = getPreviousInstr(CurInst, MFI);
  }
  return MachineBasicBlock::iterator();
}

static inline bool isLEA(const int Opcode) {
  return Opcode == X86::LEA16r || Opcode == X86::LEA32r ||
         Opcode == X86::LEA64r || Opcode == X86::LEA64_32r;
}

static inline bool isInefficientLEAReg(unsigned int Reg) {
  return Reg == X86::EBP || Reg == X86::RBP ||
         Reg == X86::R13D || Reg == X86::R13;
}

static inline bool isRegOperand(const MachineOperand &Op) {
  return Op.isReg() && Op.getReg() != X86::NoRegister;
}

/// Returns true if this LEA uses base and index registers, and the base
/// register is known to be inefficient for the subtarget.
// TODO: use a variant scheduling class to model the latency profile
// of LEA instructions, and implement this logic as a scheduling predicate.
static inline bool hasInefficientLEABaseReg(const MachineOperand &Base,
                                            const MachineOperand &Index) {
  return Base.isReg() && isInefficientLEAReg(Base.getReg()) &&
         isRegOperand(Index);
}

static inline bool hasLEAOffset(const MachineOperand &Offset) {
  return (Offset.isImm() && Offset.getImm() != 0) || Offset.isGlobal();
}

static inline int getADDrrFromLEA(int LEAOpcode) {
  switch (LEAOpcode) {
  default:
    llvm_unreachable("Unexpected LEA instruction");
  case X86::LEA16r:
    return X86::ADD16rr;
  case X86::LEA32r:
    return X86::ADD32rr;
  case X86::LEA64_32r:
  case X86::LEA64r:
    return X86::ADD64rr;
  }
}

static inline int getADDriFromLEA(int LEAOpcode, const MachineOperand &Offset) {
  bool IsInt8 = Offset.isImm() && isInt<8>(Offset.getImm());
  switch (LEAOpcode) {
  default:
    llvm_unreachable("Unexpected LEA instruction");
  case X86::LEA16r:
    return IsInt8 ? X86::ADD16ri8 : X86::ADD16ri;
  case X86::LEA32r:
  case X86::LEA64_32r:
    return IsInt8 ? X86::ADD32ri8 : X86::ADD32ri;
  case X86::LEA64r:
    return IsInt8 ? X86::ADD64ri8 : X86::ADD64ri32;
  }
}

/// isLEASimpleIncOrDec - Does this LEA have one of these forms:
///   lea %reg, 1(%reg)
///   lea %reg, -1(%reg)
static inline bool isLEASimpleIncOrDec(MachineInstr &LEA) {
  unsigned SrcReg = LEA.getOperand(1 + X86::AddrBaseReg).getReg();
  unsigned DstReg = LEA.getOperand(0).getReg();
  unsigned AddrDispOp = 1 + X86::AddrDisp;
  return SrcReg == DstReg &&
         LEA.getOperand(1 + X86::AddrIndexReg).getReg() == 0 &&
         LEA.getOperand(1 + X86::AddrSegmentReg).getReg() == 0 &&
         LEA.getOperand(AddrDispOp).isImm() &&
         (LEA.getOperand(AddrDispOp).getImm() == 1 ||
          LEA.getOperand(AddrDispOp).getImm() == -1);
}

bool FixupLEAPass::fixupIncDec(MachineBasicBlock::iterator &I,
                               MachineFunction::iterator MFI) const {
  MachineInstr &MI = *I;
  int Opcode = MI.getOpcode();
  if (!isLEA(Opcode))
    return false;

  if (isLEASimpleIncOrDec(MI) && TII->isSafeToClobberEFLAGS(*MFI, I)) {
    int NewOpcode;
    bool isINC = MI.getOperand(4).getImm() == 1;
    switch (Opcode) {
    case X86::LEA16r:
      NewOpcode = isINC ? X86::INC16r : X86::DEC16r;
      break;
    case X86::LEA32r:
    case X86::LEA64_32r:
      NewOpcode = isINC ? X86::INC32r : X86::DEC32r;
      break;
    case X86::LEA64r:
      NewOpcode = isINC ? X86::INC64r : X86::DEC64r;
      break;
    }

    MachineInstr *NewMI =
        BuildMI(*MFI, I, MI.getDebugLoc(), TII->get(NewOpcode))
            .add(MI.getOperand(0))
            .add(MI.getOperand(1));
    MFI->erase(I);
    I = static_cast<MachineBasicBlock::iterator>(NewMI);
    return true;
  }
  return false;
}

void FixupLEAPass::processInstruction(MachineBasicBlock::iterator &I,
                                      MachineFunction::iterator MFI) {
  // Process a load, store, or LEA instruction.
  MachineInstr &MI = *I;
  const MCInstrDesc &Desc = MI.getDesc();
  int AddrOffset = X86II::getMemoryOperandNo(Desc.TSFlags);
  if (AddrOffset >= 0) {
    AddrOffset += X86II::getOperandBias(Desc);
    MachineOperand &p = MI.getOperand(AddrOffset + X86::AddrBaseReg);
    if (p.isReg() && p.getReg() != X86::ESP) {
      seekLEAFixup(p, I, MFI);
    }
    MachineOperand &q = MI.getOperand(AddrOffset + X86::AddrIndexReg);
    if (q.isReg() && q.getReg() != X86::ESP) {
      seekLEAFixup(q, I, MFI);
    }
  }
}

void FixupLEAPass::seekLEAFixup(MachineOperand &p,
                                MachineBasicBlock::iterator &I,
                                MachineFunction::iterator MFI) {
  MachineBasicBlock::iterator MBI = searchBackwards(p, I, MFI);
  if (MBI != MachineBasicBlock::iterator()) {
    MachineInstr *NewMI = postRAConvertToLEA(MFI, MBI);
    if (NewMI) {
      ++NumLEAs;
      LLVM_DEBUG(dbgs() << "FixLEA: Candidate to replace:"; MBI->dump(););
      // now to replace with an equivalent LEA...
      LLVM_DEBUG(dbgs() << "FixLEA: Replaced by: "; NewMI->dump(););
      MFI->erase(MBI);
      MachineBasicBlock::iterator J =
          static_cast<MachineBasicBlock::iterator>(NewMI);
      processInstruction(J, MFI);
    }
  }
}

void FixupLEAPass::processInstructionForSlowLEA(MachineBasicBlock::iterator &I,
                                                MachineFunction::iterator MFI) {
  MachineInstr &MI = *I;
  const int Opcode = MI.getOpcode();
  if (!isLEA(Opcode))
    return;
  if (MI.getOperand(5).getReg() != 0 || !MI.getOperand(4).isImm() ||
      !TII->isSafeToClobberEFLAGS(*MFI, I))
    return;
  const unsigned DstR = MI.getOperand(0).getReg();
  const unsigned SrcR1 = MI.getOperand(1).getReg();
  const unsigned SrcR2 = MI.getOperand(3).getReg();
  if ((SrcR1 == 0 || SrcR1 != DstR) && (SrcR2 == 0 || SrcR2 != DstR))
    return;
  if (MI.getOperand(2).getImm() > 1)
    return;
  LLVM_DEBUG(dbgs() << "FixLEA: Candidate to replace:"; I->dump(););
  LLVM_DEBUG(dbgs() << "FixLEA: Replaced by: ";);
  MachineInstr *NewMI = nullptr;
  // Make ADD instruction for two registers writing to LEA's destination
  if (SrcR1 != 0 && SrcR2 != 0) {
    const MCInstrDesc &ADDrr = TII->get(getADDrrFromLEA(Opcode));
    const MachineOperand &Src = MI.getOperand(SrcR1 == DstR ? 3 : 1);
    NewMI =
        BuildMI(*MFI, I, MI.getDebugLoc(), ADDrr, DstR).addReg(DstR).add(Src);
    LLVM_DEBUG(NewMI->dump(););
  }
  // Make ADD instruction for immediate
  if (MI.getOperand(4).getImm() != 0) {
    const MCInstrDesc &ADDri =
        TII->get(getADDriFromLEA(Opcode, MI.getOperand(4)));
    const MachineOperand &SrcR = MI.getOperand(SrcR1 == DstR ? 1 : 3);
    NewMI = BuildMI(*MFI, I, MI.getDebugLoc(), ADDri, DstR)
                .add(SrcR)
                .addImm(MI.getOperand(4).getImm());
    LLVM_DEBUG(NewMI->dump(););
  }
  if (NewMI) {
    MFI->erase(I);
    I = NewMI;
  }
}

MachineInstr *
FixupLEAPass::processInstrForSlow3OpLEA(MachineInstr &MI,
                                        MachineFunction::iterator MFI) {

  const int LEAOpcode = MI.getOpcode();
  if (!isLEA(LEAOpcode))
    return nullptr;

  const MachineOperand &Dst = MI.getOperand(0);
  const MachineOperand &Base = MI.getOperand(1);
  const MachineOperand &Scale = MI.getOperand(2);
  const MachineOperand &Index = MI.getOperand(3);
  const MachineOperand &Offset = MI.getOperand(4);
  const MachineOperand &Segment = MI.getOperand(5);

  if (!(TII->isThreeOperandsLEA(MI) ||
        hasInefficientLEABaseReg(Base, Index)) ||
      !TII->isSafeToClobberEFLAGS(*MFI, MI) ||
      Segment.getReg() != X86::NoRegister)
    return nullptr;

  unsigned int DstR = Dst.getReg();
  unsigned int BaseR = Base.getReg();
  unsigned int IndexR = Index.getReg();
  unsigned SSDstR =
      (LEAOpcode == X86::LEA64_32r) ? getX86SubSuperRegister(DstR, 64) : DstR;
  bool IsScale1 = Scale.getImm() == 1;
  bool IsInefficientBase = isInefficientLEAReg(BaseR);
  bool IsInefficientIndex = isInefficientLEAReg(IndexR);

  // Skip these cases since it takes more than 2 instructions
  // to replace the LEA instruction.
  if (IsInefficientBase && SSDstR == BaseR && !IsScale1)
    return nullptr;
  if (LEAOpcode == X86::LEA64_32r && IsInefficientBase &&
      (IsInefficientIndex || !IsScale1))
    return nullptr;

  const DebugLoc DL = MI.getDebugLoc();
  const MCInstrDesc &ADDrr = TII->get(getADDrrFromLEA(LEAOpcode));
  const MCInstrDesc &ADDri = TII->get(getADDriFromLEA(LEAOpcode, Offset));

  LLVM_DEBUG(dbgs() << "FixLEA: Candidate to replace:"; MI.dump(););
  LLVM_DEBUG(dbgs() << "FixLEA: Replaced by: ";);

  // First try to replace LEA with one or two (for the 3-op LEA case)
  // add instructions:
  // 1.lea (%base,%index,1), %base => add %index,%base
  // 2.lea (%base,%index,1), %index => add %base,%index
  if (IsScale1 && (DstR == BaseR || DstR == IndexR)) {
    const MachineOperand &Src = DstR == BaseR ? Index : Base;
    MachineInstr *NewMI =
        BuildMI(*MFI, MI, DL, ADDrr, DstR).addReg(DstR).add(Src);
    LLVM_DEBUG(NewMI->dump(););
    // Create ADD instruction for the Offset in case of 3-Ops LEA.
    if (hasLEAOffset(Offset)) {
      NewMI = BuildMI(*MFI, MI, DL, ADDri, DstR).addReg(DstR).add(Offset);
      LLVM_DEBUG(NewMI->dump(););
    }
    return NewMI;
  }
  // If the base is inefficient try switching the index and base operands,
  // otherwise just break the 3-Ops LEA inst into 2-Ops LEA + ADD instruction:
  // lea offset(%base,%index,scale),%dst =>
  // lea (%base,%index,scale); add offset,%dst
  if (!IsInefficientBase || (!IsInefficientIndex && IsScale1)) {
    MachineInstr *NewMI = BuildMI(*MFI, MI, DL, TII->get(LEAOpcode))
                              .add(Dst)
                              .add(IsInefficientBase ? Index : Base)
                              .add(Scale)
                              .add(IsInefficientBase ? Base : Index)
                              .addImm(0)
                              .add(Segment);
    LLVM_DEBUG(NewMI->dump(););
    // Create ADD instruction for the Offset in case of 3-Ops LEA.
    if (hasLEAOffset(Offset)) {
      NewMI = BuildMI(*MFI, MI, DL, ADDri, DstR).addReg(DstR).add(Offset);
      LLVM_DEBUG(NewMI->dump(););
    }
    return NewMI;
  }
  // Handle the rest of the cases with inefficient base register:
  assert(SSDstR != BaseR && "SSDstR == BaseR should be handled already!");
  assert(IsInefficientBase && "efficient base should be handled already!");

  // lea (%base,%index,1), %dst => mov %base,%dst; add %index,%dst
  if (IsScale1 && !hasLEAOffset(Offset)) {
    bool BIK = Base.isKill() && BaseR != IndexR;
    TII->copyPhysReg(*MFI, MI, DL, DstR, BaseR, BIK);
    LLVM_DEBUG(MI.getPrevNode()->dump(););

    MachineInstr *NewMI =
        BuildMI(*MFI, MI, DL, ADDrr, DstR).addReg(DstR).add(Index);
    LLVM_DEBUG(NewMI->dump(););
    return NewMI;
  }
  // lea offset(%base,%index,scale), %dst =>
  // lea offset( ,%index,scale), %dst; add %base,%dst
  MachineInstr *NewMI = BuildMI(*MFI, MI, DL, TII->get(LEAOpcode))
                            .add(Dst)
                            .addReg(0)
                            .add(Scale)
                            .add(Index)
                            .add(Offset)
                            .add(Segment);
  LLVM_DEBUG(NewMI->dump(););

  NewMI = BuildMI(*MFI, MI, DL, ADDrr, DstR).addReg(DstR).add(Base);
  LLVM_DEBUG(NewMI->dump(););
  return NewMI;
}

bool FixupLEAPass::processBasicBlock(MachineFunction &MF,
                                     MachineFunction::iterator MFI,
                                     bool IsSlowLEA, bool IsSlow3OpsLEA) {
  for (MachineBasicBlock::iterator I = MFI->begin(); I != MFI->end(); ++I) {
    if (OptIncDec)
      if (fixupIncDec(I, MFI))
        continue;

    if (OptLEA) {
      if (IsSlowLEA) {
        processInstructionForSlowLEA(I, MFI);
        continue;
      }

      if (IsSlow3OpsLEA) {
        if (auto *NewMI = processInstrForSlow3OpLEA(*I, MFI)) {
          MFI->erase(I);
          I = NewMI;
        }
        continue;
      }

      processInstruction(I, MFI);
    }
  }
  return false;
}