X86RegisterInfo.cpp (LLVM 4.0.0)
//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86FrameLowering.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(const Triple &TT)
    : X86GenRegisterInfo((TT.isArch64Bit() ? X86::RIP : X86::EIP),
                         X86_MC::getDwarfRegFlavour(TT, false),
                         X86_MC::getDwarfRegFlavour(TT, true),
                         (TT.isArch64Bit() ? X86::RIP : X86::EIP)) {
  X86_MC::initLLVMToSEHAndCVRegMapping(this);

  // Cache some information.
  Is64Bit = TT.isArch64Bit();
  IsWin64 = Is64Bit && TT.isOSWindows();

  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit mode PIC
  // requires the GOT address to be in the EBX register before function calls
  // via the PLT.
  if (Is64Bit) {
    SlotSize = 8;
    // This matches the simplified 32-bit pointer code in the data layout
    // computation.
    // FIXME: Should use the data layout?
    bool Use64BitReg = TT.getEnvironment() != Triple::GNUX32;
    StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
    FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
    BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
    BasePtr = X86::ESI;
  }
}

bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // ExeDepsFixer and PostRAScheduler require liveness.
  return true;
}

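// getSEHRegNum - Map an LLVM register number to its hardware encoding, which
// is the value used for it in Win64 SEH unwind descriptors.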
int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return nullptr;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass *
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                           const MachineFunction &MF) const {
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
      // If AVX-512 isn't supported we should only inflate to these classes.
      if (!Subtarget.hasAVX512() && Super->getSize() == RC->getSize())
        return Super;
      break;
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // If VLX isn't supported we should only inflate to these classes.
      if (!Subtarget.hasVLX() && Super->getSize() == RC->getSize())
        return Super;
      break;
    case X86::VR128XRegClassID:
    case X86::VR256XRegClassID:
      // If VLX isn't supported we shouldn't inflate to these classes.
      if (Subtarget.hasVLX() && Super->getSize() == RC->getSize())
        return Super;
      break;
    case X86::FR32XRegClassID:
    case X86::FR64XRegClassID:
      // If AVX-512 isn't supported we shouldn't inflate to these classes.
      if (Subtarget.hasAVX512() && Super->getSize() == RC->getSize())
        return Super;
      break;
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR512RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                    unsigned Kind) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    // If the target is 64-bit but we have been told to use 32-bit addresses,
    // we can still use 64-bit registers as long as we know the high bits
    // are zeros.
    // Reflect that in the returned register class.
    if (Is64Bit) {
      // When the target also allows a 64-bit frame pointer and we do have a
      // frame, it is fine to use it for the address accesses as well.
      const X86FrameLowering *TFI = getFrameLowering(MF);
      return TFI->hasFP(MF) && TFI->Uses64BitFramePtr
                 ? &X86::LOW32_ADDR_ACCESS_RBPRegClass
                 : &X86::LOW32_ADDR_ACCESSRegClass;
    }
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    // NOSP does not contain RIP, so no special case here.
    return &X86::GR32_NOSPRegClass;
  case 2: // NOREX GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOREXRegClass;
    return &X86::GR32_NOREXRegClass;
  case 3: // NOREX GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOREX_NOSPRegClass;
    // NOSP does not contain RIP, so no special case here.
    return &X86::GR32_NOREX_NOSPRegClass;
  case 4: // Available for tailcall (not callee-saved GPRs).
    return getGPRsForTailCall(MF);
  }
}

const TargetRegisterClass *
X86RegisterInfo::getGPRsForTailCall(const MachineFunction &MF) const {
  const Function *F = MF.getFunction();
  if (IsWin64 || (F && F->getCallingConv() == CallingConv::X86_64_Win64))
    return &X86::GR64_TCW64RegClass;
  else if (Is64Bit)
    return &X86::GR64_TCRegClass;

  bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
  if (hasHipeCC)
    return &X86::GR32RegClass;
  return &X86::GR32_TCRegClass;
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

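// getRegPressureLimit - Rough per-class register pressure limits used as an
// allocation heuristic; one register is subtracted from the GPR limits when a
// frame pointer is reserved.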
unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const X86FrameLowering *TFI = getFrameLowering(MF);

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return Is64Bit ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

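// getCalleeSavedRegs - The CSR_*_SaveList tables referenced below are
// generated by TableGen from the calling-convention definitions in
// X86CallingConv.td.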
const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "MachineFunction required");

  const X86Subtarget &Subtarget = MF->getSubtarget<X86Subtarget>();
  bool HasSSE = Subtarget.hasSSE1();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();
  bool CallsEHReturn = MF->callsEHReturn();

  switch (MF->getFunction()->getCallingConv()) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_SaveList;
    return CSR_64_AllRegs_SaveList;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_SaveList;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_SaveList;
    return CSR_64_RT_AllRegs_SaveList;
  case CallingConv::CXX_FAST_TLS:
    if (Is64Bit)
      return MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR() ?
             CSR_64_CXX_TLS_Darwin_PE_SaveList : CSR_64_TLS_Darwin_SaveList;
    break;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }
  case CallingConv::HHVM:
    return CSR_64_HHVM_SaveList;
  case CallingConv::X86_RegCall:
    if (Is64Bit) {
      if (IsWin64) {
        return (HasSSE ? CSR_Win64_RegCall_SaveList :
                         CSR_Win64_RegCall_NoSSE_SaveList);
      } else {
        return (HasSSE ? CSR_SysV64_RegCall_SaveList :
                         CSR_SysV64_RegCall_NoSSE_SaveList);
      }
    } else {
      return (HasSSE ? CSR_32_RegCall_SaveList :
                       CSR_32_RegCall_NoSSE_SaveList);
    }
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_SaveList;
    break;
  case CallingConv::X86_64_Win64:
    if (!HasSSE)
      return CSR_Win64_NoSSE_SaveList;
    return CSR_Win64_SaveList;
  case CallingConv::X86_64_SysV:
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  case CallingConv::X86_INTR:
    if (Is64Bit) {
      if (HasAVX512)
        return CSR_64_AllRegs_AVX512_SaveList;
      if (HasAVX)
        return CSR_64_AllRegs_AVX_SaveList;
      return CSR_64_AllRegs_SaveList;
    } else {
      if (HasAVX512)
        return CSR_32_AllRegs_AVX512_SaveList;
      if (HasAVX)
        return CSR_32_AllRegs_AVX_SaveList;
      if (HasSSE)
        return CSR_32_AllRegs_SSE_SaveList;
      return CSR_32_AllRegs_SaveList;
    }
  default:
    break;
  }

  if (Is64Bit) {
    if (IsWin64) {
      if (!HasSSE)
        return CSR_Win64_NoSSE_SaveList;
      return CSR_Win64_SaveList;
    }
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    if (Subtarget.getTargetLowering()->supportSwiftError() &&
        MF->getFunction()->getAttributes().hasAttrSomewhere(
            Attribute::SwiftError))
      return CSR_64_SwiftError_SaveList;
    return CSR_64_SaveList;
  }
  if (CallsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}

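// getCalleeSavedRegsViaCopy - For CXX_FAST_TLS functions that use split CSR
// saving, these registers are preserved via register copies in the entry and
// return blocks instead of ordinary spill slots.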
const MCPhysReg *X86RegisterInfo::getCalleeSavedRegsViaCopy(
    const MachineFunction *MF) const {
  assert(MF && "Invalid MachineFunction pointer.");
  if (MF->getFunction()->getCallingConv() == CallingConv::CXX_FAST_TLS &&
      MF->getInfo<X86MachineFunctionInfo>()->isSplitCSR())
    return CSR_64_CXX_TLS_Darwin_ViaCopy_SaveList;
  return nullptr;
}

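// getCallPreservedMask - Regmask counterpart of getCalleeSavedRegs above; the
// CSR_*_RegMask tables are generated from the same calling-convention
// definitions.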
const uint32_t *
X86RegisterInfo::getCallPreservedMask(const MachineFunction &MF,
                                      CallingConv::ID CC) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  bool HasSSE = Subtarget.hasSSE1();
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_RegMask;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_RegMask;
    return CSR_64_AllRegs_RegMask;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_RegMask;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_RegMask;
    return CSR_64_RT_AllRegs_RegMask;
  case CallingConv::CXX_FAST_TLS:
    if (Is64Bit)
      return CSR_64_TLS_Darwin_RegMask;
    break;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
    break;
  }
  case CallingConv::HHVM:
    return CSR_64_HHVM_RegMask;
  case CallingConv::X86_RegCall:
    if (Is64Bit) {
      if (IsWin64) {
        return (HasSSE ? CSR_Win64_RegCall_RegMask :
                         CSR_Win64_RegCall_NoSSE_RegMask);
      } else {
        return (HasSSE ? CSR_SysV64_RegCall_RegMask :
                         CSR_SysV64_RegCall_NoSSE_RegMask);
      }
    } else {
      return (HasSSE ? CSR_32_RegCall_RegMask :
                       CSR_32_RegCall_NoSSE_RegMask);
    }
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_RegMask;
    break;
  case CallingConv::X86_64_Win64:
    return CSR_Win64_RegMask;
  case CallingConv::X86_64_SysV:
    return CSR_64_RegMask;
  case CallingConv::X86_INTR:
    if (Is64Bit) {
      if (HasAVX512)
        return CSR_64_AllRegs_AVX512_RegMask;
      if (HasAVX)
        return CSR_64_AllRegs_AVX_RegMask;
      return CSR_64_AllRegs_RegMask;
    } else {
      if (HasAVX512)
        return CSR_32_AllRegs_AVX512_RegMask;
      if (HasAVX)
        return CSR_32_AllRegs_AVX_RegMask;
      if (HasSSE)
        return CSR_32_AllRegs_SSE_RegMask;
      return CSR_32_AllRegs_RegMask;
    }
  default:
    break;
  }

  // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
  // callsEHReturn().
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_RegMask;
    if (Subtarget.getTargetLowering()->supportSwiftError() &&
        MF.getFunction()->getAttributes().hasAttrSomewhere(
            Attribute::SwiftError))
      return CSR_64_SwiftError_RegMask;
    return CSR_64_RegMask;
  }
  return CSR_32_RegMask;
}

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

const uint32_t *X86RegisterInfo::getDarwinTLSCallPreservedMask() const {
  return CSR_64_TLS_Darwin_RegMask;
}

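// getReservedRegs - Registers marked here are never allocatable: the stack,
// instruction, frame, and base pointers (when in use), the segment registers,
// the x87 stack, and any registers the current mode does not provide.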
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const X86FrameLowering *TFI = getFrameLowering(MF);

  // Set the stack-pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
         ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t *RegMask = getCallPreservedMask(MF, CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
        "Stack realignment in presence of dynamic allocas is not supported with "
        "this calling convention.");

    unsigned BasePtr = getX86SubSuperRegister(getBaseRegister(), 64);
    for (MCSubRegIterator I(BasePtr, this, /*IncludeSelf=*/true);
         I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  for (unsigned n = 0; n != 8; ++n)
    Reserved.set(X86::ST0 + n);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though their
    // super-registers are the old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }
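  // XMM16-XMM31 (and their YMM/ZMM aliases) require AVX-512 and 64-bit mode;
  // reserve them whenever they are unavailable.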
  if (!Is64Bit || !MF.getSubtarget<X86Subtarget>().hasAVX512()) {
    for (unsigned n = 16; n != 32; ++n) {
      for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  assert(checkAllSuperRegsMarked(Reserved,
                                 {X86::SIL, X86::DIL, X86::BPL, X86::SPL}));
  return Reserved;
}

void X86RegisterInfo::adjustStackMapLiveOutMask(uint32_t *Mask) const {
  // Check if the EFLAGS register is marked as live-out. This shouldn't happen,
  // because the calling convention defines the EFLAGS register as NOT
  // preserved.
  //
  // Unfortunately, EFLAGS can show up as live-out after branch folding. Add an
  // assert to track this, and clear the register afterwards to avoid
  // unnecessary crashes during release builds.
  assert(!(Mask[X86::EFLAGS / 32] & (1U << (X86::EFLAGS % 32))) &&
         "EFLAGS are not live-out from a patchpoint.");

  // Also clean other registers that don't need preserving (IP).
  for (auto Reg : {X86::EFLAGS, X86::RIP, X86::EIP, X86::IP})
    Mask[Reg / 32] &= ~(1U << (Reg % 32));
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

static bool CantUseSP(const MachineFrameInfo &MFI) {
  return MFI.hasVarSizedObjects() || MFI.hasOpaqueSPAdjustment();
}

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment, we can't address the stack from the frame
  // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
  // can't address variables from the stack pointer. MS inline asm can
  // reference locals while also adjusting the stack pointer. When we can't
  // use both the SP and the FP, we need a separate base pointer register.
  bool CantUseFP = needsStackRealignment(MF);
  return CantUseFP && CantUseSP(MFI);
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  if (!TargetRegisterInfo::canRealignStack(MF))
    return false;

  const MachineFrameInfo &MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (CantUseSP(MFI))
    return MRI->canReserveReg(BasePtr);
  return true;
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  // Since X86 defines assignCalleeSavedSpillSlots, which always returns true,
  // this function is neither used nor tested.
  llvm_unreachable("Unused function on X86. Otherwise need a test case.");
}

// tryOptimizeLEAtoMOV - helper function that tries to replace a LEA instruction
// of the form 'lea (%esp), %ebx' --> 'mov %esp, %ebx'.
// TODO: In this case we should be really trying first to entirely eliminate
// this instruction which is a plain copy.
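// The checks below assume the standard X86 memory-operand layout: operand 0 is
// the destination, followed by base register, scale, index register,
// displacement, and segment register (operands 1 through 5).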
static bool tryOptimizeLEAtoMOV(MachineBasicBlock::iterator II) {
  MachineInstr &MI = *II;
  unsigned Opc = II->getOpcode();
  // Check if this is a LEA of the form 'lea (%esp), %ebx'.
  if ((Opc != X86::LEA32r && Opc != X86::LEA64r && Opc != X86::LEA64_32r) ||
      MI.getOperand(2).getImm() != 1 ||
      MI.getOperand(3).getReg() != X86::NoRegister ||
      MI.getOperand(4).getImm() != 0 ||
      MI.getOperand(5).getReg() != X86::NoRegister)
    return false;
  unsigned BasePtr = MI.getOperand(1).getReg();
  // In X32 mode, ensure the base-pointer is a 32-bit operand, so the LEA will
  // be replaced with a 32-bit operand MOV which will zero extend the upper
  // 32-bits of the super register.
  if (Opc == X86::LEA64_32r)
    BasePtr = getX86SubSuperRegister(BasePtr, 32);
  unsigned NewDestReg = MI.getOperand(0).getReg();
  const X86InstrInfo *TII =
      MI.getParent()->getParent()->getSubtarget<X86Subtarget>().getInstrInfo();
  TII->copyPhysReg(*MI.getParent(), II, MI.getDebugLoc(), NewDestReg, BasePtr,
                   MI.getOperand(1).isKill());
  MI.eraseFromParent();
  return true;
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const X86FrameLowering *TFI = getFrameLowering(MF);
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm ||
                    Opc == X86::TCRETURNmi || Opc == X86::TCRETURNmi64;

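  // Select the register the frame index will be rewritten against. When a base
  // pointer or a realigned stack pointer is in use, fixed objects (negative
  // frame indexes, e.g. incoming arguments) are still addressed relative to
  // the frame pointer.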
  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // LOCAL_ESCAPE uses a single offset, with no register. It only works in the
  // simple FP case, and doesn't work with stack realignment. On 32-bit, the
  // offset is from the traditional base pointer location. On 64-bit, the
  // offset is from the SP at the end of the prologue, not the FP location. This
  // matches the behavior of llvm.frameaddress.
  unsigned IgnoredFrameReg;
  if (Opc == TargetOpcode::LOCAL_ESCAPE) {
    MachineOperand &FI = MI.getOperand(FIOperandNum);
    int Offset;
    Offset = TFI->getFrameIndexReference(MF, FrameIndex, IgnoredFrameReg);
    FI.ChangeToImmediate(Offset);
    return;
  }

  // For LEA64_32r, when BasePtr is 32 bits (X32) we can use the full-size
  // 64-bit register as the source operand; the semantics are the same and the
  // destination is 32 bits. This saves one byte per lea since the 0x67 prefix
  // is avoided.
  // Don't change BasePtr since it is used later for stack adjustment.
  unsigned MachineBasePtr = BasePtr;
  if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
    MachineBasePtr = getX86SubSuperRegister(BasePtr, 64);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register. Add an offset to the offset.
  MI.getOperand(FIOperandNum).ChangeToRegister(MachineBasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo &MFI = MF.getFrameInfo();
    FIOffset = MFI.getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexReference(MF, FrameIndex, IgnoredFrameReg);

  if (BasePtr == StackPtr)
    FIOffset += SPAdj;

  // The frame index format for stackmaps and patchpoints is different from the
  // X86 format. It only has a FI and an offset.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    assert(BasePtr == FramePtr && "Expected the FP as base register");
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  if (MI.getOperand(FIOperandNum+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    if (Offset != 0 || !tryOptimizeLEAtoMOV(II))
      MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
                      (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const X86FrameLowering *TFI = getFrameLowering(MF);
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

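// getPtrSizedFrameRegister - Like getFrameRegister, but under the x32 (ILP32)
// ABI it returns the 32-bit sub-register, since pointers are only 32 bits wide
// there.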
unsigned
X86RegisterInfo::getPtrSizedFrameRegister(const MachineFunction &MF) const {
  const X86Subtarget &Subtarget = MF.getSubtarget<X86Subtarget>();
  unsigned FrameReg = getFrameRegister(MF);
  if (Subtarget.isTarget64BitILP32())
    FrameReg = getX86SubSuperRegister(FrameReg, 32);
  return FrameReg;
}